/*-
 * Copyright (c) 2008,  Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_cpuset.c 180098 2008-06-29 17:58:16Z bz $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/cpuset.h>
#include <sys/sx.h>
#include <sys/queue.h>
#include <sys/limits.h>
#include <sys/bus.h>
#include <sys/interrupt.h>

#include <vm/uma.h>

/*
 * cpusets provide a mechanism for creating and manipulating sets of
 * processors for the purpose of constraining the scheduling of threads to
 * specific processors.
 *
 * Each process belongs to an identified set; by default this is set 1.
 * Each thread may further restrict the cpus it may run on to a subset of
 * this named set.  This creates an anonymous set which other threads and
 * processes may not join by number.
 *
 * The named set is referred to herein as the 'base' set to avoid ambiguity.
 * This set is usually a child of a 'root' set while the anonymous set may
 * simply be referred to as a mask.  In the syscall api these are referred to
 * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here.
 *
 * Threads inherit their set from their creator whether it be anonymous or
 * not.  This means that anonymous sets are immutable because they may be
 * shared.  To modify an anonymous set a new set is created with the desired
 * mask and the same parent as the existing anonymous set.  This gives the
 * illusion of each thread having a private mask.
 *
 * Via the syscall apis a user may ask to retrieve or modify the root, base,
 * or mask that is discovered via a pid, tid, or setid.  Modifying a set
 * modifies all numbered and anonymous child sets to comply with the new mask.
 * Modifying a pid or tid's mask applies only to that tid, but the new mask
 * must still fall within the assigned parent set.
 *
 * A thread may not be assigned to a group separate from other threads in
 * the process.  This is to remove ambiguity when the setid is queried with
 * a pid argument.  There is no other technical limitation.
 *
 * This somewhat complex arrangement is intended to make it easy for
 * applications to query available processors and bind their threads to
 * specific processors while also allowing administrators to dynamically
 * reprovision by changing sets which apply to groups of processes.
 *
 * A simple application should not concern itself with sets at all; rather,
 * it may apply masks to its own threads via CPU_WHICH_TID and a -1 id
 * meaning 'curthread'.  It may query available cpus for that tid with a
 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
 */
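
/*
 * Illustrative userland sketch (not part of this file; interfaces as
 * documented in cpuset_getaffinity(2)/cpuset_setaffinity(2), err(3)
 * error handling assumed): following the model above, a thread queries
 * the cpus available to it and then binds itself to one of them:
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	if (cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask) != 0)
 *		err(1, "cpuset_getaffinity");
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(mask), &mask) != 0)
 *		err(1, "cpuset_setaffinity");
 */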
static uma_zone_t cpuset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
static struct unrhdr *cpuset_unr;
static struct cpuset *cpuset_zero;

cpuset_t *cpuset_root;

/*
 * Acquire a reference to a cpuset; all pointers must be tracked with refs.
 */
struct cpuset *
cpuset_ref(struct cpuset *set)
{

	refcount_acquire(&set->cs_ref);
	return (set);
}

/*
 * Release a reference in a context where it is safe to allocate.
 */
void
cpuset_rel(struct cpuset *set)
{
	cpusetid_t id;

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	id = set->cs_id;
	if (id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}

/*
 * Deferred release must be used when in a context that is not safe to
 * allocate/free.  This places any unreferenced sets on the list 'head'.
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	LIST_INSERT_HEAD(head, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
}

/*
 * Complete a deferred release.  Removes the set from the list provided to
 * cpuset_rel_defer.
 */
static void
cpuset_rel_complete(struct cpuset *set)
{
	LIST_REMOVE(set, cs_link);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
}
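
/*
 * Usage sketch for the deferred-release pair above (this mirrors
 * cpuset_setproc() below): queue releases on a local list while thread
 * locks are held, then complete the frees once all locks are dropped:
 *
 *	struct setlist droplist;
 *	struct cpuset *set;
 *
 *	LIST_INIT(&droplist);
 *	... (thread lock held) ...
 *	cpuset_rel_defer(&droplist, td->td_cpuset);
 *	... (all locks dropped) ...
 *	while ((set = LIST_FIRST(&droplist)) != NULL)
 *		cpuset_rel_complete(set);
 */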

/*
 * Find a set based on an id.  Returns it with a ref.
 */
static struct cpuset *
cpuset_lookup(cpusetid_t setid)
{
	struct cpuset *set;

	if (setid == CPUSET_INVALID)
		return (NULL);
	mtx_lock_spin(&cpuset_lock);
	LIST_FOREACH(set, &cpuset_ids, cs_link)
		if (set->cs_id == setid)
			break;
	if (set)
		cpuset_ref(set);
	mtx_unlock_spin(&cpuset_lock);
	return (set);
}

/*
 * Create a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref.  May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
_cpuset_create(struct cpuset *set, struct cpuset *parent, cpuset_t *mask,
    cpusetid_t id)
{

	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	CPU_AND(mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}

/*
 * Create a new non-anonymous set with the requested parent and mask.  May
 * return failures if the mask is invalid or a new number can not be
 * allocated.
 */
static int
cpuset_create(struct cpuset **setp, struct cpuset *parent, cpuset_t *mask)
{
	struct cpuset *set;
	cpusetid_t id;
	int error;

	id = alloc_unr(cpuset_unr);
	if (id == -1)
		return (ENFILE);
	*setp = set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, parent, mask, id);
	if (error == 0)
		return (0);
	free_unr(cpuset_unr, id);
	uma_zfree(cpuset_zone, set);

	return (error);
}

/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;
	cpuset_t newmask;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	if (!CPU_OVERLAP(&set->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(&set->cs_mask, &newmask);
	CPU_AND(&newmask, mask);
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		if ((error = cpuset_testupdate(nset, &newmask)) != 0)
			break;
	return (error);
}

/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;

	mtx_assert(&cpuset_lock, MA_OWNED);
	CPU_AND(&set->cs_mask, mask);
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		cpuset_update(nset, &set->cs_mask);

	return;
}

/*
 * Modify the set 'set' to use a copy of the mask provided.  Apply this new
 * mask to restrict all children in the tree.  Checks for validity before
 * applying the changes.
 */
static int
cpuset_modify(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *root;
	int error;

	error = priv_check(curthread, PRIV_SCHED_CPUSET);
	if (error)
		return (error);
	/*
	 * Verify that we have access to this set of
	 * cpus.
	 */
	root = set->cs_parent;
	if (root && !CPU_SUBSET(&root->cs_mask, mask))
		return (EINVAL);
	mtx_lock_spin(&cpuset_lock);
	error = cpuset_testupdate(set, mask);
	if (error)
		goto out;
	cpuset_update(set, mask);
	CPU_COPY(mask, &set->cs_mask);
out:
	mtx_unlock_spin(&cpuset_lock);

	return (error);
}

/*
 * Walks up the tree from 'set' to find the root.  Returns the root
 * referenced.
 */
static struct cpuset *
cpuset_refroot(struct cpuset *set)
{

	for (; set->cs_parent != NULL; set = set->cs_parent)
		if (set->cs_flags & CPU_SET_ROOT)
			break;
	cpuset_ref(set);

	return (set);
}

/*
 * Find the first non-anonymous set starting from 'set'.  Returns this set
 * referenced.  May return the passed in set with an extra ref if it is
 * not anonymous.
 */
static struct cpuset *
cpuset_refbase(struct cpuset *set)
{

	if (set->cs_id == CPUSET_INVALID)
		set = set->cs_parent;
	cpuset_ref(set);

	return (set);
}

/*
 * Resolve the 'which' parameter of several cpuset apis.
 *
 * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid.  Also
 * checks for permission via p_cansched().
 *
 * For WHICH_SET returns a valid set with a new reference.
 *
 * -1 may be supplied for any argument to mean the current proc/thread or
 * the base set of the current thread.  May fail with ESRCH/EPERM.
 */
static int
cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
    struct cpuset **setp)
{
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	*pp = p = NULL;
	*tdp = td = NULL;
	*setp = set = NULL;
	switch (which) {
	case CPU_WHICH_PID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			break;
		}
		if ((p = pfind(id)) == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_TID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			td = curthread;
			break;
		}
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			FOREACH_THREAD_IN_PROC(p, td)
				if (td->td_tid == id)
					break;
			if (td != NULL)
				break;
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (td == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_CPUSET:
		if (id == -1) {
			thread_lock(curthread);
			set = cpuset_refbase(curthread->td_cpuset);
			thread_unlock(curthread);
		} else
			set = cpuset_lookup(id);
		if (set) {
			*setp = set;
			return (0);
		}
		return (ESRCH);
	case CPU_WHICH_IRQ:
		return (0);
	default:
		return (EINVAL);
	}
	error = p_cansched(curthread, p);
	if (error) {
		PROC_UNLOCK(p);
		return (error);
	}
	if (td == NULL)
		td = FIRST_THREAD_IN_PROC(p);
	*pp = p;
	*tdp = td;
	return (0);
}
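
/*
 * Illustrative call pattern (taken from the callers below): resolve the
 * current process; cpuset_which() returns p locked and already checked
 * via p_cansched():
 *
 *	error = cpuset_which(CPU_WHICH_PID, -1, &p, &td, &set);
 *	if (error)
 *		return (error);
 *	... (use p and td) ...
 *	PROC_UNLOCK(p);
 */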

/*
 * Create an anonymous set with the provided mask in the space provided by
 * 'fset'.  If the passed in set is anonymous we use its parent; otherwise
 * the new set is a child of 'set'.
 */
static int
cpuset_shadow(struct cpuset *set, struct cpuset *fset, cpuset_t *mask)
{
	struct cpuset *parent;

	if (set->cs_id == CPUSET_INVALID)
		parent = set->cs_parent;
	else
		parent = set;
	if (!CPU_SUBSET(&parent->cs_mask, mask))
		return (EDEADLK);
	return (_cpuset_create(fset, parent, mask, CPUSET_INVALID));
}

/*
 * Handle two cases for replacing the base set or mask of an entire process.
 *
 * 1) Set is non-null and mask is null.  This reparents all anonymous sets
 *    to the provided set and replaces all non-anonymous td_cpusets with the
 *    provided set.
 * 2) Mask is non-null and set is null.  This replaces or creates anonymous
 *    sets for every thread with the existing base as a parent.
 *
 * This is overly complicated because we can't allocate while holding a
 * spinlock and spinlocks must be held while changing and examining thread
 * state.
 */
static int
cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
{
	struct setlist freelist;
	struct setlist droplist;
	struct cpuset *tdset;
	struct cpuset *nset;
	struct thread *td;
	struct proc *p;
	int threads;
	int nfree;
	int error;
	/*
	 * The algorithm requires two passes due to locking considerations.
	 *
	 * 1) Lookup the process and acquire the locks in the required order.
	 * 2) If enough cpusets have not been allocated release the locks and
	 *    allocate them.  Loop.
	 */
	LIST_INIT(&freelist);
	LIST_INIT(&droplist);
	nfree = 0;
	for (;;) {
		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
		if (error)
			goto out;
		if (nfree >= p->p_numthreads)
			break;
		threads = p->p_numthreads;
		PROC_UNLOCK(p);
		for (; nfree < threads; nfree++) {
			nset = uma_zalloc(cpuset_zone, M_WAITOK);
			LIST_INSERT_HEAD(&freelist, nset, cs_link);
		}
	}
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Now that the appropriate locks are held and we have enough cpusets,
	 * make sure the operation will succeed before applying changes.  The
	 * proc lock prevents td_cpuset from changing between calls.
	 */
	error = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		tdset = td->td_cpuset;
		/*
		 * Verify that a new mask doesn't specify cpus outside of
		 * the set the thread is a member of.
		 */
		if (mask) {
			if (tdset->cs_id == CPUSET_INVALID)
				tdset = tdset->cs_parent;
			if (!CPU_SUBSET(&tdset->cs_mask, mask))
				error = EDEADLK;
		/*
		 * Verify that a new set won't leave an existing thread
		 * mask without a cpu to run on.  It can, however, restrict
		 * the set.
		 */
		} else if (tdset->cs_id == CPUSET_INVALID) {
			if (!CPU_OVERLAP(&set->cs_mask, &tdset->cs_mask))
				error = EDEADLK;
		}
		thread_unlock(td);
		if (error)
			goto unlock_out;
	}
	/*
	 * Replace each thread's cpuset while using deferred release.  We
	 * must do this because the thread lock must be held while operating
	 * on the thread and this limits the type of operations allowed.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		/*
		 * If we presently have an anonymous set or are applying a
		 * mask we must create an anonymous shadow set.  That is
		 * either parented to our existing base or the supplied set.
		 *
		 * If we have a base set with no anonymous shadow we simply
		 * replace it outright.
		 */
		tdset = td->td_cpuset;
		if (tdset->cs_id == CPUSET_INVALID || mask) {
			nset = LIST_FIRST(&freelist);
			LIST_REMOVE(nset, cs_link);
			if (mask)
				error = cpuset_shadow(tdset, nset, mask);
			else
				error = _cpuset_create(nset, set,
				    &tdset->cs_mask, CPUSET_INVALID);
			if (error) {
				LIST_INSERT_HEAD(&freelist, nset, cs_link);
				thread_unlock(td);
				break;
			}
		} else
			nset = cpuset_ref(set);
		cpuset_rel_defer(&droplist, tdset);
		td->td_cpuset = nset;
		sched_affinity(td);
		thread_unlock(td);
	}
unlock_out:
	PROC_UNLOCK(p);
out:
	while ((nset = LIST_FIRST(&droplist)) != NULL)
		cpuset_rel_complete(nset);
	while ((nset = LIST_FIRST(&freelist)) != NULL) {
		LIST_REMOVE(nset, cs_link);
		uma_zfree(cpuset_zone, nset);
	}
	return (error);
}

/*
 * Apply an anonymous mask to a single thread.
 */
int
cpuset_setthread(lwpid_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
	if (error)
		goto out;
	set = NULL;
	thread_lock(td);
	error = cpuset_shadow(td->td_cpuset, nset, mask);
	if (error == 0) {
		set = td->td_cpuset;
		td->td_cpuset = nset;
		sched_affinity(td);
		nset = NULL;
	}
	thread_unlock(td);
	PROC_UNLOCK(p);
	if (set)
		cpuset_rel(set);
out:
	if (nset)
		uma_zfree(cpuset_zone, nset);
	return (error);
}

/*
 * Creates the cpuset for thread0.  We make two sets:
 *
 * 0 - The root set which should represent all valid processors in the
 *     system.  It is initially created with a mask of all processors
 *     because we don't know what processors are valid until cpuset_init()
 *     runs.  This set is immutable.
 * 1 - The default set which all processes are a member of until changed.
 *     This allows an administrator to move all threads off of given cpus
 *     to dedicate them to high priority tasks or to save power, etc.
 */
struct cpuset *
cpuset_thread0(void)
{
	struct cpuset *set;
	int error;

	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);
	/*
	 * Create the root system set for the whole machine.  Doesn't use
	 * cpuset_create() due to NULL parent.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	set->cs_mask.__bits[0] = -1;
	LIST_INIT(&set->cs_children);
	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	set->cs_ref = 1;
	set->cs_flags = CPU_SET_ROOT;
	cpuset_zero = set;
	cpuset_root = &set->cs_mask;
	/*
	 * Now derive a default, modifiable set from that to give out.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
	KASSERT(error == 0, ("Error creating default set: %d\n", error));
	/*
	 * Initialize the unit allocator. 0 and 1 are allocated above.
	 */
	cpuset_unr = new_unrhdr(2, INT_MAX, NULL);

	return (set);
}

/*
 * This is called once the final set of system cpus is known.  Modifies
 * the root set and all children and marks the root read-only.
 */
static void
cpuset_init(void *arg)
{
	cpuset_t mask;

	CPU_ZERO(&mask);
#ifdef SMP
	mask.__bits[0] = all_cpus;
#else
	mask.__bits[0] = 1;
#endif
	if (cpuset_modify(cpuset_zero, &mask))
		panic("Can't set initial cpuset mask.\n");
	cpuset_zero->cs_flags |= CPU_SET_RDONLY;
}
SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_ANY, cpuset_init, NULL);

#ifndef _SYS_SYSPROTO_H_
struct cpuset_args {
	cpusetid_t	*setid;
};
#endif
int
cpuset(struct thread *td, struct cpuset_args *uap)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	thread_lock(td);
	root = cpuset_refroot(td->td_cpuset);
	thread_unlock(td);
	error = cpuset_create(&set, root, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);
	error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
	if (error == 0)
		error = cpuset_setproc(-1, set, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setid_args {
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	setid;
};
#endif
int
cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
{
	struct cpuset *set;
	int error;

	/*
	 * Presently we only support per-process sets.
	 */
	if (uap->which != CPU_WHICH_PID)
		return (EINVAL);
	set = cpuset_lookup(uap->setid);
	if (set == NULL)
		return (ESRCH);
	error = cpuset_setproc(uap->id, set, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getid_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	*setid;
};
#endif
int
cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpusetid_t id;
	int error;

	if (uap->level == CPU_LEVEL_WHICH && uap->which != CPU_WHICH_CPUSET)
		return (EINVAL);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		return (error);
	switch (uap->which) {
	case CPU_WHICH_TID:
	case CPU_WHICH_PID:
		thread_lock(ttd);
		set = cpuset_refbase(ttd->td_cpuset);
		thread_unlock(ttd);
		PROC_UNLOCK(p);
		break;
	case CPU_WHICH_CPUSET:
		break;
	case CPU_WHICH_IRQ:
		return (EINVAL);
	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
		nset = cpuset_refroot(set);
		cpuset_rel(set);
		set = nset;
		break;
	case CPU_LEVEL_CPUSET:
		break;
	case CPU_LEVEL_WHICH:
		break;
	}
	id = set->cs_id;
	cpuset_rel(set);
	if (error == 0)
		error = copyout(&id, uap->setid, sizeof(id));

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	cpuset_t	*mask;
};
#endif
int
cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
{
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct proc *p;
	cpuset_t *mask;
	int error;
	size_t size;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	size = uap->cpusetsize;
	mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		CPU_COPY(&nset->cs_mask, mask);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
			CPU_COPY(&set->cs_mask, mask);
			break;
		case CPU_WHICH_IRQ:
			error = intr_getaffinity(uap->id, mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0)
		error = copyout(mask, uap->mask, size);
out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	const cpuset_t	*mask;
};
#endif
int
cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpuset_t *mask;
	int error;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	mask = malloc(uap->cpusetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = copyin(uap->mask, mask, uap->cpusetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (uap->cpusetsize > sizeof(cpuset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += uap->cpusetsize;
		cp += sizeof(cpuset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}

	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
		if (error)
			break;
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(uap->id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(uap->id, NULL, mask);
			break;
		case CPU_WHICH_CPUSET:
			error = cpuset_which(CPU_WHICH_CPUSET, uap->id, &p,
			    &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
			error = intr_setaffinity(uap->id, mask);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}
978