1/*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2008,  Jeffrey Roberson <jeff@freebsd.org>
5 * All rights reserved.
6 *
7 * Copyright (c) 2008 Nokia Corporation
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice unmodified, this list of conditions, and the following
15 *    disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33#include <sys/cdefs.h>
34#include "opt_ddb.h"
35#include "opt_ktrace.h"
36
37#include <sys/param.h>
38#include <sys/systm.h>
39#include <sys/sysctl.h>
40#include <sys/ctype.h>
41#include <sys/sysproto.h>
42#include <sys/jail.h>
43#include <sys/kernel.h>
44#include <sys/lock.h>
45#include <sys/malloc.h>
46#include <sys/mutex.h>
47#include <sys/priv.h>
48#include <sys/proc.h>
49#include <sys/refcount.h>
50#include <sys/sched.h>
51#include <sys/smp.h>
52#include <sys/syscallsubr.h>
53#include <sys/sysent.h>
54#include <sys/capsicum.h>
55#include <sys/cpuset.h>
56#include <sys/domainset.h>
57#include <sys/sx.h>
58#include <sys/queue.h>
59#include <sys/libkern.h>
60#include <sys/limits.h>
61#include <sys/bus.h>
62#include <sys/interrupt.h>
63#include <sys/vmmeter.h>
64#include <sys/ktrace.h>
65
66#include <vm/uma.h>
67#include <vm/vm.h>
68#include <vm/vm_object.h>
69#include <vm/vm_page.h>
70#include <vm/vm_pageout.h>
71#include <vm/vm_extern.h>
72#include <vm/vm_param.h>
73#include <vm/vm_phys.h>
74#include <vm/vm_pagequeue.h>
75
76#ifdef DDB
77#include <ddb/ddb.h>
78#endif /* DDB */
79
80/*
81 * cpusets provide a mechanism for creating and manipulating sets of
82 * processors for the purpose of constraining the scheduling of threads to
83 * specific processors.
84 *
85 * Each process belongs to an identified set; by default this is set 1.  Each
86 * thread may further restrict the cpus it may run on to a subset of this
87 * named set.  This creates an anonymous set which other threads and processes
88 * may not join by number.
89 *
90 * The named set is referred to herein as the 'base' set to avoid ambiguity.
91 * This set is usually a child of a 'root' set while the anonymous set may
92 * simply be referred to as a mask.  In the syscall API these are referred to
93 * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here.
94 *
95 * Threads inherit their set from their creator whether it be anonymous or
96 * not.  This means that anonymous sets are immutable because they may be
97 * shared.  To modify an anonymous set a new set is created with the desired
98 * mask and the same parent as the existing anonymous set.  This gives the
99 * illusion of each thread having a private mask.
100 *
101 * Via the syscall APIs a user may ask to retrieve or modify the root, base,
102 * or mask that is discovered via a pid, tid, or setid.  Modifying a set
103 * modifies all numbered and anonymous child sets to comply with the new mask.
104 * Modifying a pid or tid's mask applies only to that tid, but the new mask
105 * must still lie within the assigned parent set.
106 *
107 * A thread may not be assigned to a group separate from other threads in
108 * the process.  This is to remove ambiguity when the setid is queried with
109 * a pid argument.  There is no other technical limitation.
110 *
111 * This somewhat complex arrangement is intended to make it easy for
112 * applications to query available processors and bind their threads to
113 * specific processors while also allowing administrators to dynamically
114 * reprovision by changing sets which apply to groups of processes.
115 *
116 * A simple application should not concern itself with sets at all and
117 * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
118 * meaning 'curthread'.  It may query available cpus for that tid with a
119 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
120 */
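/*
 * Purely illustrative userland sketch (not part of this file): a minimal
 * program that checks which cpus its base set allows and then pins the
 * current thread to cpu 0 using the cpuset_getaffinity(2) and
 * cpuset_setaffinity(2) syscalls described above.  Error handling is
 * abbreviated and the chosen cpu number is arbitrary.
 *
 *	#include <sys/param.h>
 *	#include <sys/cpuset.h>
 *	#include <err.h>
 *
 *	int
 *	main(void)
 *	{
 *		cpuset_t avail, mask;
 *
 *		// cpus available to this process' base (CPUSET level) set
 *		if (cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
 *		    sizeof(avail), &avail) != 0)
 *			err(1, "cpuset_getaffinity");
 *		if (!CPU_ISSET(0, &avail))
 *			errx(1, "cpu 0 is not in the base set");
 *		CPU_ZERO(&mask);
 *		CPU_SET(0, &mask);
 *		// -1 with CPU_WHICH_TID means the calling thread
 *		if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *		    sizeof(mask), &mask) != 0)
 *			err(1, "cpuset_setaffinity");
 *		return (0);
 *	}
 */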
121
122LIST_HEAD(domainlist, domainset);
123struct domainset __read_mostly domainset_firsttouch;
124struct domainset __read_mostly domainset_fixed[MAXMEMDOM];
125struct domainset __read_mostly domainset_interleave;
126struct domainset __read_mostly domainset_prefer[MAXMEMDOM];
127struct domainset __read_mostly domainset_roundrobin;
128
129static uma_zone_t cpuset_zone;
130static uma_zone_t domainset_zone;
131static struct mtx cpuset_lock;
132static struct setlist cpuset_ids;
133static struct domainlist cpuset_domains;
134static struct unrhdr *cpuset_unr;
135static struct cpuset *cpuset_zero, *cpuset_default, *cpuset_kernel;
136static struct domainset *domainset0, *domainset2;
137u_int cpusetsizemin = 1;
138
139/* Return the size of cpuset_t at the kernel level */
140SYSCTL_INT(_kern_sched, OID_AUTO, cpusetsize, CTLFLAG_RD | CTLFLAG_CAPRD,
141    SYSCTL_NULL_INT_PTR, sizeof(cpuset_t), "sizeof(cpuset_t)");
142
143/* Return the minimum size of cpuset_t allowed by the kernel */
144SYSCTL_UINT(_kern_sched, OID_AUTO, cpusetsizemin,
145    CTLFLAG_RD | CTLFLAG_CAPRD, &cpusetsizemin, 0,
146    "The minimum size of cpuset_t allowed by the kernel");
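/*
 * Illustrative sketch (not part of this file): userland can consult these
 * sysctls to size mask buffers for the *affinity syscalls.  'buf' is a
 * hypothetical pointer; declarations and error handling are abbreviated.
 *
 *	int sz;
 *	size_t len = sizeof(sz);
 *
 *	if (sysctlbyname("kern.sched.cpusetsize", &sz, &len, NULL, 0) == 0)
 *		buf = calloc(1, sz);	// sz bytes hold the kernel's cpuset_t
 */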
147
148cpuset_t *cpuset_root;
149cpuset_t cpuset_domain[MAXMEMDOM];
150
151static int cpuset_which2(cpuwhich_t *, id_t, struct proc **, struct thread **,
152    struct cpuset **);
153static int domainset_valid(const struct domainset *, const struct domainset *);
154
155/*
156 * Find the first non-anonymous set starting from 'set'.
157 */
158static struct cpuset *
159cpuset_getbase(struct cpuset *set)
160{
161
162	if (set->cs_id == CPUSET_INVALID)
163		set = set->cs_parent;
164	return (set);
165}
166
167/*
168 * Walks up the tree from 'set' to find the root.
169 */
170static struct cpuset *
171cpuset_getroot(struct cpuset *set)
172{
173
174	while ((set->cs_flags & CPU_SET_ROOT) == 0 && set->cs_parent != NULL)
175		set = set->cs_parent;
176	return (set);
177}
178
179/*
180 * Acquire a reference to a cpuset; all pointers must be tracked with refs.
181 */
182struct cpuset *
183cpuset_ref(struct cpuset *set)
184{
185
186	refcount_acquire(&set->cs_ref);
187	return (set);
188}
189
190/*
191 * Walks up the tree from 'set' to find the root.  Returns the root
192 * referenced.
193 */
194static struct cpuset *
195cpuset_refroot(struct cpuset *set)
196{
197
198	return (cpuset_ref(cpuset_getroot(set)));
199}
200
201/*
202 * Find the first non-anonymous set starting from 'set'.  Returns this set
203 * referenced.  May return the passed in set with an extra ref if it is
204 * not anonymous.
205 */
206static struct cpuset *
207cpuset_refbase(struct cpuset *set)
208{
209
210	return (cpuset_ref(cpuset_getbase(set)));
211}
212
213/*
214 * Release a reference in a context where it is safe to allocate.
215 */
216void
217cpuset_rel(struct cpuset *set)
218{
219	cpusetid_t id;
220
221	if (refcount_release_if_not_last(&set->cs_ref))
222		return;
223	mtx_lock_spin(&cpuset_lock);
224	if (!refcount_release(&set->cs_ref)) {
225		mtx_unlock_spin(&cpuset_lock);
226		return;
227	}
228	LIST_REMOVE(set, cs_siblings);
229	id = set->cs_id;
230	if (id != CPUSET_INVALID)
231		LIST_REMOVE(set, cs_link);
232	mtx_unlock_spin(&cpuset_lock);
233	cpuset_rel(set->cs_parent);
234	uma_zfree(cpuset_zone, set);
235	if (id != CPUSET_INVALID)
236		free_unr(cpuset_unr, id);
237}
238
239/*
240 * Deferred release must be used in a context where it is not safe to
241 * allocate/free.  This places any unreferenced sets on the list 'head'.
242 */
243static void
244cpuset_rel_defer(struct setlist *head, struct cpuset *set)
245{
246
247	if (refcount_release_if_not_last(&set->cs_ref))
248		return;
249	mtx_lock_spin(&cpuset_lock);
250	if (!refcount_release(&set->cs_ref)) {
251		mtx_unlock_spin(&cpuset_lock);
252		return;
253	}
254	LIST_REMOVE(set, cs_siblings);
255	if (set->cs_id != CPUSET_INVALID)
256		LIST_REMOVE(set, cs_link);
257	LIST_INSERT_HEAD(head, set, cs_link);
258	mtx_unlock_spin(&cpuset_lock);
259}
260
261/*
262 * Complete a deferred release.  Removes the set from the list provided to
263 * cpuset_rel_defer.
264 */
265static void
266cpuset_rel_complete(struct cpuset *set)
267{
268	cpusetid_t id;
269
270	id = set->cs_id;
271	LIST_REMOVE(set, cs_link);
272	cpuset_rel(set->cs_parent);
273	uma_zfree(cpuset_zone, set);
274	if (id != CPUSET_INVALID)
275		free_unr(cpuset_unr, id);
276}
277
278/*
279 * Find a set based on an id.  Returns it with a ref.
280 */
281static struct cpuset *
282cpuset_lookup(cpusetid_t setid, struct thread *td)
283{
284	struct cpuset *set;
285
286	if (setid == CPUSET_INVALID)
287		return (NULL);
288	mtx_lock_spin(&cpuset_lock);
289	LIST_FOREACH(set, &cpuset_ids, cs_link)
290		if (set->cs_id == setid)
291			break;
292	if (set)
293		cpuset_ref(set);
294	mtx_unlock_spin(&cpuset_lock);
295
296	KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__));
297	if (set != NULL && jailed(td->td_ucred)) {
298		struct cpuset *jset, *tset;
299
300		jset = td->td_ucred->cr_prison->pr_cpuset;
301		for (tset = set; tset != NULL; tset = tset->cs_parent)
302			if (tset == jset)
303				break;
304		if (tset == NULL) {
305			cpuset_rel(set);
306			set = NULL;
307		}
308	}
309
310	return (set);
311}
312
313/*
314 * Initialize a set in the space provided in 'set' with the provided parameters.
315 * The set is returned with a single ref.  May return EDEADLK if the set
316 * will have no valid cpu based on restrictions from the parent.
317 */
318static int
319cpuset_init(struct cpuset *set, struct cpuset *parent,
320    const cpuset_t *mask, struct domainset *domain, cpusetid_t id)
321{
322
323	if (domain == NULL)
324		domain = parent->cs_domain;
325	if (mask == NULL)
326		mask = &parent->cs_mask;
327	if (!CPU_OVERLAP(&parent->cs_mask, mask))
328		return (EDEADLK);
329	/* The domain must be prepared ahead of time. */
330	if (!domainset_valid(parent->cs_domain, domain))
331		return (EDEADLK);
332	CPU_COPY(mask, &set->cs_mask);
333	LIST_INIT(&set->cs_children);
334	refcount_init(&set->cs_ref, 1);
335	set->cs_flags = 0;
336	mtx_lock_spin(&cpuset_lock);
337	set->cs_domain = domain;
338	CPU_AND(&set->cs_mask, &set->cs_mask, &parent->cs_mask);
339	set->cs_id = id;
340	set->cs_parent = cpuset_ref(parent);
341	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
342	if (set->cs_id != CPUSET_INVALID)
343		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
344	mtx_unlock_spin(&cpuset_lock);
345
346	return (0);
347}
348
349/*
350 * Create a new non-anonymous set with the requested parent and mask.  May
351 * return failure if the mask is invalid or a new ID number cannot be
352 * allocated.
353 *
354 * If *setp is not NULL, then it will be used as-is.  The caller must take
355 * into account that *setp will be inserted at the head of cpuset_ids and
356 * plan any potentially conflicting cs_link usage accordingly.
357 */
358static int
359cpuset_create(struct cpuset **setp, struct cpuset *parent, const cpuset_t *mask)
360{
361	struct cpuset *set;
362	cpusetid_t id;
363	int error;
364	bool dofree;
365
366	id = alloc_unr(cpuset_unr);
367	if (id == -1)
368		return (ENFILE);
369	dofree = (*setp == NULL);
370	if (*setp != NULL)
371		set = *setp;
372	else
373		*setp = set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
374	error = cpuset_init(set, parent, mask, NULL, id);
375	if (error == 0)
376		return (0);
377	free_unr(cpuset_unr, id);
378	if (dofree)
379		uma_zfree(cpuset_zone, set);
380
381	return (error);
382}
383
384static void
385cpuset_freelist_add(struct setlist *list, int count)
386{
387	struct cpuset *set;
388	int i;
389
390	for (i = 0; i < count; i++) {
391		set = uma_zalloc(cpuset_zone, M_ZERO | M_WAITOK);
392		LIST_INSERT_HEAD(list, set, cs_link);
393	}
394}
395
396static void
397cpuset_freelist_init(struct setlist *list, int count)
398{
399
400	LIST_INIT(list);
401	cpuset_freelist_add(list, count);
402}
403
404static void
405cpuset_freelist_free(struct setlist *list)
406{
407	struct cpuset *set;
408
409	while ((set = LIST_FIRST(list)) != NULL) {
410		LIST_REMOVE(set, cs_link);
411		uma_zfree(cpuset_zone, set);
412	}
413}
414
415static void
416domainset_freelist_add(struct domainlist *list, int count)
417{
418	struct domainset *set;
419	int i;
420
421	for (i = 0; i < count; i++) {
422		set = uma_zalloc(domainset_zone, M_ZERO | M_WAITOK);
423		LIST_INSERT_HEAD(list, set, ds_link);
424	}
425}
426
427static void
428domainset_freelist_init(struct domainlist *list, int count)
429{
430
431	LIST_INIT(list);
432	domainset_freelist_add(list, count);
433}
434
435static void
436domainset_freelist_free(struct domainlist *list)
437{
438	struct domainset *set;
439
440	while ((set = LIST_FIRST(list)) != NULL) {
441		LIST_REMOVE(set, ds_link);
442		uma_zfree(domainset_zone, set);
443	}
444}
445
446/* Copy a domainset preserving mask and policy. */
447static void
448domainset_copy(const struct domainset *from, struct domainset *to)
449{
450
451	DOMAINSET_COPY(&from->ds_mask, &to->ds_mask);
452	to->ds_policy = from->ds_policy;
453	to->ds_prefer = from->ds_prefer;
454}
455
456/* Return 1 if mask and policy are equal, otherwise 0. */
457static int
458domainset_equal(const struct domainset *one, const struct domainset *two)
459{
460
461	return (DOMAINSET_CMP(&one->ds_mask, &two->ds_mask) == 0 &&
462	    one->ds_policy == two->ds_policy &&
463	    one->ds_prefer == two->ds_prefer);
464}
465
466/* Return 1 if child is a valid subset of parent. */
467static int
468domainset_valid(const struct domainset *parent, const struct domainset *child)
469{
470	if (child->ds_policy != DOMAINSET_POLICY_PREFER)
471		return (DOMAINSET_SUBSET(&parent->ds_mask, &child->ds_mask));
472	return (DOMAINSET_ISSET(child->ds_prefer, &parent->ds_mask));
473}
474
475static int
476domainset_restrict(const struct domainset *parent,
477    const struct domainset *child)
478{
479	if (child->ds_policy != DOMAINSET_POLICY_PREFER)
480		return (DOMAINSET_OVERLAP(&parent->ds_mask, &child->ds_mask));
481	return (DOMAINSET_ISSET(child->ds_prefer, &parent->ds_mask));
482}
483
484/*
485 * Lookup or create a domainset.  The key is provided in ds_mask and
486 * ds_policy.  If the domainset does not yet exist the storage in
487 * 'domain' is used to insert.  Otherwise this storage is freed to the
488 * domainset_zone and the existing domainset is returned.
489 */
490static struct domainset *
491_domainset_create(struct domainset *domain, struct domainlist *freelist)
492{
493	struct domainset *ndomain;
494	int i, j;
495
496	KASSERT(domain->ds_cnt <= vm_ndomains,
497	    ("invalid domain count in domainset %p", domain));
498	KASSERT(domain->ds_policy != DOMAINSET_POLICY_PREFER ||
499	    domain->ds_prefer < vm_ndomains,
500	    ("invalid preferred domain in domains %p", domain));
501
502	mtx_lock_spin(&cpuset_lock);
503	LIST_FOREACH(ndomain, &cpuset_domains, ds_link)
504		if (domainset_equal(ndomain, domain))
505			break;
506	/*
507	 * If the domain does not yet exist we insert it and initialize
508	 * various iteration helpers which are not part of the key.
509	 */
510	if (ndomain == NULL) {
511		LIST_INSERT_HEAD(&cpuset_domains, domain, ds_link);
512		domain->ds_cnt = DOMAINSET_COUNT(&domain->ds_mask);
513		for (i = 0, j = 0; i < DOMAINSET_FLS(&domain->ds_mask); i++)
514			if (DOMAINSET_ISSET(i, &domain->ds_mask))
515				domain->ds_order[j++] = i;
516	}
517	mtx_unlock_spin(&cpuset_lock);
518	if (ndomain == NULL)
519		return (domain);
520	if (freelist != NULL)
521		LIST_INSERT_HEAD(freelist, domain, ds_link);
522	else
523		uma_zfree(domainset_zone, domain);
524	return (ndomain);
525
526}
527
528/*
529 * Are any of the domains in the mask empty?  If so, silently
530 * remove them and update the domainset accordingly.  If only empty
531 * domains are present, we must return failure.
532 */
533static bool
534domainset_empty_vm(struct domainset *domain)
535{
536	domainset_t empty;
537	int i, j;
538
539	DOMAINSET_ZERO(&empty);
540	for (i = 0; i < vm_ndomains; i++)
541		if (VM_DOMAIN_EMPTY(i))
542			DOMAINSET_SET(i, &empty);
543	if (DOMAINSET_SUBSET(&empty, &domain->ds_mask))
544		return (true);
545
546	/* Remove empty domains from the set and recompute. */
547	DOMAINSET_ANDNOT(&domain->ds_mask, &empty);
548	domain->ds_cnt = DOMAINSET_COUNT(&domain->ds_mask);
549	for (i = j = 0; i < DOMAINSET_FLS(&domain->ds_mask); i++)
550		if (DOMAINSET_ISSET(i, &domain->ds_mask))
551			domain->ds_order[j++] = i;
552
553	/* Convert a PREFER policy referencing an empty domain to RR. */
554	if (domain->ds_policy == DOMAINSET_POLICY_PREFER &&
555	    DOMAINSET_ISSET(domain->ds_prefer, &empty)) {
556		domain->ds_policy = DOMAINSET_POLICY_ROUNDROBIN;
557		domain->ds_prefer = -1;
558	}
559
560	return (false);
561}
562
563/*
564 * Create or lookup a domainset based on the key held in 'domain'.
565 */
566struct domainset *
567domainset_create(const struct domainset *domain)
568{
569	struct domainset *ndomain;
570
571	/*
572	 * Validate the policy.  It must specify a usable policy number with
573	 * only valid domains.  Preferred must include the preferred domain
574	 * in the mask.
575	 */
576	if (domain->ds_policy <= DOMAINSET_POLICY_INVALID ||
577	    domain->ds_policy > DOMAINSET_POLICY_MAX)
578		return (NULL);
579	if (domain->ds_policy == DOMAINSET_POLICY_PREFER &&
580	    !DOMAINSET_ISSET(domain->ds_prefer, &domain->ds_mask))
581		return (NULL);
582	if (!DOMAINSET_SUBSET(&domainset0->ds_mask, &domain->ds_mask))
583		return (NULL);
584	ndomain = uma_zalloc(domainset_zone, M_WAITOK | M_ZERO);
585	domainset_copy(domain, ndomain);
586	return _domainset_create(ndomain, NULL);
587}
588
589/*
590 * Update thread domainset pointers.
591 */
592static void
593domainset_notify(void)
594{
595	struct thread *td;
596	struct proc *p;
597
598	sx_slock(&allproc_lock);
599	FOREACH_PROC_IN_SYSTEM(p) {
600		PROC_LOCK(p);
601		if (p->p_state == PRS_NEW) {
602			PROC_UNLOCK(p);
603			continue;
604		}
605		FOREACH_THREAD_IN_PROC(p, td) {
606			thread_lock(td);
607			td->td_domain.dr_policy = td->td_cpuset->cs_domain;
608			thread_unlock(td);
609		}
610		PROC_UNLOCK(p);
611	}
612	sx_sunlock(&allproc_lock);
613	kernel_object->domain.dr_policy = cpuset_kernel->cs_domain;
614}
615
616/*
617 * Create a new set that is a subset of a parent.
618 */
619static struct domainset *
620domainset_shadow(const struct domainset *pdomain,
621    const struct domainset *domain, struct domainlist *freelist)
622{
623	struct domainset *ndomain;
624
625	ndomain = LIST_FIRST(freelist);
626	LIST_REMOVE(ndomain, ds_link);
627
628	/*
629	 * Initialize the key from the request.
630	 */
631	domainset_copy(domain, ndomain);
632
633	/*
634	 * Restrict the key by the parent.
635	 */
636	DOMAINSET_AND(&ndomain->ds_mask, &pdomain->ds_mask);
637
638	return _domainset_create(ndomain, freelist);
639}
640
641/*
642 * Recursively check for errors that would occur from applying mask to
643 * the tree of sets starting at 'set'.  Checks for sets that would become
644 * empty as well as RDONLY flags.
645 */
646static int
647cpuset_testupdate(struct cpuset *set, cpuset_t *mask, int augment_mask)
648{
649	struct cpuset *nset;
650	cpuset_t newmask;
651	int error;
652
653	mtx_assert(&cpuset_lock, MA_OWNED);
654	if (set->cs_flags & CPU_SET_RDONLY)
655		return (EPERM);
656	if (augment_mask) {
657		CPU_AND(&newmask, &set->cs_mask, mask);
658	} else
659		CPU_COPY(mask, &newmask);
660
661	if (CPU_EMPTY(&newmask))
662		return (EDEADLK);
663	error = 0;
664	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
665		if ((error = cpuset_testupdate(nset, &newmask, 1)) != 0)
666			break;
667	return (error);
668}
669
670/*
671 * Applies the mask 'mask' without checking for empty sets or permissions.
672 */
673static void
674cpuset_update(struct cpuset *set, cpuset_t *mask)
675{
676	struct cpuset *nset;
677
678	mtx_assert(&cpuset_lock, MA_OWNED);
679	CPU_AND(&set->cs_mask, &set->cs_mask, mask);
680	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
681		cpuset_update(nset, &set->cs_mask);
682
683	return;
684}
685
686/*
687 * Modify the set 'set' to use a copy of the mask provided.  Apply this new
688 * mask to restrict all children in the tree.  Checks for validity before
689 * applying the changes.
690 */
691static int
692cpuset_modify(struct cpuset *set, cpuset_t *mask)
693{
694	struct cpuset *root;
695	int error;
696
697	error = priv_check(curthread, PRIV_SCHED_CPUSET);
698	if (error)
699		return (error);
700	/*
701	 * In case we are called from within the jail,
702	 * we do not allow modifying the dedicated root
703	 * cpuset of the jail but may still allow changing
704	 * child sets, including subordinate jails'
705	 * roots.
706	 */
707	if ((set->cs_flags & CPU_SET_ROOT) != 0 &&
708	    jailed(curthread->td_ucred) &&
709	    set == curthread->td_ucred->cr_prison->pr_cpuset)
710		return (EPERM);
711	/*
712	 * Verify that we have access to this set of
713	 * cpus.
714	 */
715	if ((set->cs_flags & (CPU_SET_ROOT | CPU_SET_RDONLY)) == CPU_SET_ROOT) {
716		KASSERT(set->cs_parent != NULL,
717		    ("jail.cpuset=%d is not a proper child of parent jail's root.",
718		    set->cs_id));
719
720		/*
721		 * cpuset_getroot() cannot work here due to how top-level jail
722		 * roots are constructed.  Top-level jails are parented to
723		 * thread0's cpuset (i.e. cpuset 1) rather than the system root.
724		 */
725		root = set->cs_parent;
726	} else {
727		root = cpuset_getroot(set);
728	}
729	mtx_lock_spin(&cpuset_lock);
730	if (root && !CPU_SUBSET(&root->cs_mask, mask)) {
731		error = EINVAL;
732		goto out;
733	}
734	error = cpuset_testupdate(set, mask, 0);
735	if (error)
736		goto out;
737	CPU_COPY(mask, &set->cs_mask);
738	cpuset_update(set, mask);
739out:
740	mtx_unlock_spin(&cpuset_lock);
741
742	return (error);
743}
744
745/*
746 * Recursively check for errors that would occur from applying mask to
747 * the tree of sets starting at 'set'.  Checks for sets that would become
748 * empty as well as RDONLY flags.
749 */
750static int
751cpuset_testupdate_domain(struct cpuset *set, struct domainset *dset,
752    struct domainset *orig, int *count, int augment_mask __unused)
753{
754	struct cpuset *nset;
755	struct domainset *domain;
756	struct domainset newset;
757	int error;
758
759	mtx_assert(&cpuset_lock, MA_OWNED);
760	if (set->cs_flags & CPU_SET_RDONLY)
761		return (EPERM);
762	domain = set->cs_domain;
763	domainset_copy(domain, &newset);
764	if (!domainset_equal(domain, orig)) {
765		if (!domainset_restrict(domain, dset))
766			return (EDEADLK);
767		DOMAINSET_AND(&newset.ds_mask, &dset->ds_mask);
768		/* Count the number of domains that are changing. */
769		(*count)++;
770	}
771	error = 0;
772	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
773		if ((error = cpuset_testupdate_domain(nset, &newset, domain,
774		    count, 1)) != 0)
775			break;
776	return (error);
777}
778
779/*
780 * Applies the domainset 'domain' without checking for empty sets or permissions.
781 */
782static void
783cpuset_update_domain(struct cpuset *set, struct domainset *domain,
784    struct domainset *orig, struct domainlist *domains)
785{
786	struct cpuset *nset;
787
788	mtx_assert(&cpuset_lock, MA_OWNED);
789	/*
790	 * If this domainset has changed from the parent we must calculate
791	 * a new set.  Otherwise it simply inherits from the parent.  When
792	 * we inherit from the parent we get a new mask and policy.  If the
793	 * set is modified from the parent we keep the policy and only
794	 * update the mask.
795	 */
796	if (set->cs_domain != orig) {
797		orig = set->cs_domain;
798		set->cs_domain = domainset_shadow(domain, orig, domains);
799	} else
800		set->cs_domain = domain;
801	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
802		cpuset_update_domain(nset, set->cs_domain, orig, domains);
803
804	return;
805}
806
807/*
808 * Modify the set 'set' to use a copy of the domainset provided.  Apply this new
809 * mask to restrict all children in the tree.  Checks for validity before
810 * applying the changes.
811 */
812static int
813cpuset_modify_domain(struct cpuset *set, struct domainset *domain)
814{
815	struct domainlist domains;
816	struct domainset temp;
817	struct domainset *dset;
818	struct cpuset *root;
819	int ndomains, needed;
820	int error;
821
822	error = priv_check(curthread, PRIV_SCHED_CPUSET);
823	if (error)
824		return (error);
825	/*
826	 * In case we are called from within the jail
827	 * we do not allow modifying the dedicated root
828	 * cpuset of the jail but may still allow
829	 * changing child sets.
830	 */
831	if (jailed(curthread->td_ucred) &&
832	    set->cs_flags & CPU_SET_ROOT)
833		return (EPERM);
834	domainset_freelist_init(&domains, 0);
835	domain = domainset_create(domain);
836	ndomains = 0;
837
838	mtx_lock_spin(&cpuset_lock);
839	for (;;) {
840		root = cpuset_getroot(set);
841		dset = root->cs_domain;
842		/*
843		 * Verify that we have access to this set of domains.
844		 */
845		if (!domainset_valid(dset, domain)) {
846			error = EINVAL;
847			goto out;
848		}
849		/*
850		 * If applying prefer we keep the current set as the fallback.
851		 */
852		if (domain->ds_policy == DOMAINSET_POLICY_PREFER)
853			DOMAINSET_COPY(&set->cs_domain->ds_mask,
854			    &domain->ds_mask);
855		/*
856		 * Determine whether we can apply this set of domains and
857		 * how many new domain structures it will require.
858		 */
859		domainset_copy(domain, &temp);
860		needed = 0;
861		error = cpuset_testupdate_domain(set, &temp, set->cs_domain,
862		    &needed, 0);
863		if (error)
864			goto out;
865		if (ndomains >= needed)
866			break;
867
868		/* Dropping the lock; we'll need to re-evaluate. */
869		mtx_unlock_spin(&cpuset_lock);
870		domainset_freelist_add(&domains, needed - ndomains);
871		ndomains = needed;
872		mtx_lock_spin(&cpuset_lock);
873	}
874	dset = set->cs_domain;
875	cpuset_update_domain(set, domain, dset, &domains);
876out:
877	mtx_unlock_spin(&cpuset_lock);
878	domainset_freelist_free(&domains);
879	if (error == 0)
880		domainset_notify();
881
882	return (error);
883}
884
885/*
886 * Resolve the 'which' parameter of several cpuset APIs.
887 *
888 * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid.  Also
889 * checks for permission via p_cansched().
890 *
891 * For WHICH_SET returns a valid set with a new reference.
892 *
893 * -1 may be supplied for any argument to mean the current proc/thread or
894 * the base set of the current thread.  May fail with ESRCH/EPERM.
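 *
 * For example, cpuset_which(CPU_WHICH_PID, -1, &p, &td, &set) returns with
 * 'p' set to curproc (locked) and 'td' set to its first thread.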
895 */
896int
897cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
898    struct cpuset **setp)
899{
900	struct cpuset *set;
901	struct thread *td;
902	struct proc *p;
903	int error;
904
905	*pp = p = NULL;
906	*tdp = td = NULL;
907	*setp = set = NULL;
908	switch (which) {
909	case CPU_WHICH_PID:
910		if (id == -1) {
911			PROC_LOCK(curproc);
912			p = curproc;
913			break;
914		}
915		if ((p = pfind(id)) == NULL)
916			return (ESRCH);
917		break;
918	case CPU_WHICH_TID:
919		if (id == -1) {
920			PROC_LOCK(curproc);
921			p = curproc;
922			td = curthread;
923			break;
924		}
925		td = tdfind(id, -1);
926		if (td == NULL)
927			return (ESRCH);
928		p = td->td_proc;
929		break;
930	case CPU_WHICH_TIDPID:
931		if (id == -1) {
932			PROC_LOCK(curproc);
933			td = curthread;
934			p = curproc;
935		} else if (id > PID_MAX) {
936			td = tdfind(id, -1);
937			if (td == NULL)
938				return (ESRCH);
939			p = td->td_proc;
940		} else {
941			p = pfind(id);
942			if (p == NULL)
943				return (ESRCH);
944		}
945		break;
946	case CPU_WHICH_CPUSET:
947		if (id == -1) {
948			thread_lock(curthread);
949			set = cpuset_refbase(curthread->td_cpuset);
950			thread_unlock(curthread);
951		} else
952			set = cpuset_lookup(id, curthread);
953		if (set) {
954			*setp = set;
955			return (0);
956		}
957		return (ESRCH);
958	case CPU_WHICH_JAIL:
959	{
960		/* Find `set' for prison with given id. */
961		struct prison *pr;
962
963		sx_slock(&allprison_lock);
964		pr = prison_find_child(curthread->td_ucred->cr_prison, id);
965		sx_sunlock(&allprison_lock);
966		if (pr == NULL)
967			return (ESRCH);
968		cpuset_ref(pr->pr_cpuset);
969		*setp = pr->pr_cpuset;
970		mtx_unlock(&pr->pr_mtx);
971		return (0);
972	}
973	case CPU_WHICH_IRQ:
974	case CPU_WHICH_DOMAIN:
975		return (0);
976	default:
977		return (EINVAL);
978	}
979	error = p_cansched(curthread, p);
980	if (error) {
981		PROC_UNLOCK(p);
982		return (error);
983	}
984	if (td == NULL)
985		td = FIRST_THREAD_IN_PROC(p);
986	*pp = p;
987	*tdp = td;
988	return (0);
989}
990
991static int
992cpuset_which2(cpuwhich_t *which, id_t id, struct proc **pp, struct thread **tdp,
993    struct cpuset **setp)
994{
995
996	if (*which == CPU_WHICH_TIDPID) {
997		if (id == -1 || id > PID_MAX)
998			*which = CPU_WHICH_TID;
999		else
1000			*which = CPU_WHICH_PID;
1001	}
1002	return (cpuset_which(*which, id, pp, tdp, setp));
1003}
1004
1005static int
1006cpuset_testshadow(struct cpuset *set, const cpuset_t *mask,
1007    const struct domainset *domain)
1008{
1009	struct cpuset *parent;
1010	struct domainset *dset;
1011
1012	parent = cpuset_getbase(set);
1013	/*
1014	 * If we are restricting a cpu mask it must be a subset of the
1015	 * parent or invalid CPUs have been specified.
1016	 */
1017	if (mask != NULL && !CPU_SUBSET(&parent->cs_mask, mask))
1018		return (EINVAL);
1019
1020	/*
1021	 * If we are restricting a domain mask it must be a subset of the
1022	 * parent or invalid domains have been specified.
1023	 */
1024	dset = parent->cs_domain;
1025	if (domain != NULL && !domainset_valid(dset, domain))
1026		return (EINVAL);
1027
1028	return (0);
1029}
1030
1031/*
1032 * Create an anonymous set with the provided mask in the space provided by
1033 * 'nset'.  If the passed-in set is anonymous we use its parent, otherwise
1034 * the new set is a child of 'set'.
1035 */
1036static int
1037cpuset_shadow(struct cpuset *set, struct cpuset **nsetp,
1038   const cpuset_t *mask, const struct domainset *domain,
1039   struct setlist *cpusets, struct domainlist *domains)
1040{
1041	struct cpuset *parent;
1042	struct cpuset *nset;
1043	struct domainset *dset;
1044	struct domainset *d;
1045	int error;
1046
1047	error = cpuset_testshadow(set, mask, domain);
1048	if (error)
1049		return (error);
1050
1051	parent = cpuset_getbase(set);
1052	dset = parent->cs_domain;
1053	if (mask == NULL)
1054		mask = &set->cs_mask;
1055	if (domain != NULL)
1056		d = domainset_shadow(dset, domain, domains);
1057	else
1058		d = set->cs_domain;
1059	nset = LIST_FIRST(cpusets);
1060	error = cpuset_init(nset, parent, mask, d, CPUSET_INVALID);
1061	if (error == 0) {
1062		LIST_REMOVE(nset, cs_link);
1063		*nsetp = nset;
1064	}
1065	return (error);
1066}
1067
1068static struct cpuset *
1069cpuset_update_thread(struct thread *td, struct cpuset *nset)
1070{
1071	struct cpuset *tdset;
1072
1073	tdset = td->td_cpuset;
1074	td->td_cpuset = nset;
1075	td->td_domain.dr_policy = nset->cs_domain;
1076	sched_affinity(td);
1077
1078	return (tdset);
1079}
1080
1081static int
1082cpuset_setproc_test_maskthread(struct cpuset *tdset, cpuset_t *mask,
1083    struct domainset *domain)
1084{
1085	struct cpuset *parent;
1086
1087	parent = cpuset_getbase(tdset);
1088	if (mask == NULL)
1089		mask = &tdset->cs_mask;
1090	if (domain == NULL)
1091		domain = tdset->cs_domain;
1092	return cpuset_testshadow(parent, mask, domain);
1093}
1094
1095static int
1096cpuset_setproc_maskthread(struct cpuset *tdset, cpuset_t *mask,
1097    struct domainset *domain, struct cpuset **nsetp,
1098    struct setlist *freelist, struct domainlist *domainlist)
1099{
1100	struct cpuset *parent;
1101
1102	parent = cpuset_getbase(tdset);
1103	if (mask == NULL)
1104		mask = &tdset->cs_mask;
1105	if (domain == NULL)
1106		domain = tdset->cs_domain;
1107	return cpuset_shadow(parent, nsetp, mask, domain, freelist,
1108	    domainlist);
1109}
1110
1111static int
1112cpuset_setproc_setthread_mask(struct cpuset *tdset, struct cpuset *set,
1113    cpuset_t *mask, struct domainset *domain)
1114{
1115	struct cpuset *parent;
1116
1117	parent = cpuset_getbase(tdset);
1118
1119	/*
1120	 * If the thread restricted its mask then apply that same
1121	 * restriction to the new set, otherwise take it wholesale.
1122	 */
1123	if (CPU_CMP(&tdset->cs_mask, &parent->cs_mask) != 0) {
1124		CPU_AND(mask, &tdset->cs_mask, &set->cs_mask);
1125	} else
1126		CPU_COPY(&set->cs_mask, mask);
1127
1128	/*
1129	 * If the thread restricted the domain then we apply the
1130	 * restriction to the new set but retain the policy.
1131	 */
1132	if (tdset->cs_domain != parent->cs_domain) {
1133		domainset_copy(tdset->cs_domain, domain);
1134		DOMAINSET_AND(&domain->ds_mask, &set->cs_domain->ds_mask);
1135	} else
1136		domainset_copy(set->cs_domain, domain);
1137
1138	if (CPU_EMPTY(mask) || DOMAINSET_EMPTY(&domain->ds_mask))
1139		return (EDEADLK);
1140
1141	return (0);
1142}
1143
1144static int
1145cpuset_setproc_test_setthread(struct cpuset *tdset, struct cpuset *set)
1146{
1147	struct domainset domain;
1148	cpuset_t mask;
1149
1150	if (tdset->cs_id != CPUSET_INVALID)
1151		return (0);
1152	return cpuset_setproc_setthread_mask(tdset, set, &mask, &domain);
1153}
1154
1155static int
1156cpuset_setproc_setthread(struct cpuset *tdset, struct cpuset *set,
1157    struct cpuset **nsetp, struct setlist *freelist,
1158    struct domainlist *domainlist)
1159{
1160	struct domainset domain;
1161	cpuset_t mask;
1162	int error;
1163
1164	/*
1165	 * If we're replacing on a thread that has not constrained the
1166	 * original set we can simply accept the new set.
1167	 */
1168	if (tdset->cs_id != CPUSET_INVALID) {
1169		*nsetp = cpuset_ref(set);
1170		return (0);
1171	}
1172	error = cpuset_setproc_setthread_mask(tdset, set, &mask, &domain);
1173	if (error)
1174		return (error);
1175
1176	return cpuset_shadow(set, nsetp, &mask, &domain, freelist,
1177	    domainlist);
1178}
1179
1180static int
1181cpuset_setproc_newbase(struct thread *td, struct cpuset *set,
1182    struct cpuset *nroot, struct cpuset **nsetp,
1183    struct setlist *cpusets, struct domainlist *domainlist)
1184{
1185	struct domainset ndomain;
1186	cpuset_t nmask;
1187	struct cpuset *pbase;
1188	int error;
1189
1190	pbase = cpuset_getbase(td->td_cpuset);
1191
1192	/* Copy process mask, then further apply the new root mask. */
1193	CPU_AND(&nmask, &pbase->cs_mask, &nroot->cs_mask);
1194
1195	domainset_copy(pbase->cs_domain, &ndomain);
1196	DOMAINSET_AND(&ndomain.ds_mask, &set->cs_domain->ds_mask);
1197
1198	/* Policy is too restrictive, will not work. */
1199	if (CPU_EMPTY(&nmask) || DOMAINSET_EMPTY(&ndomain.ds_mask))
1200		return (EDEADLK);
1201
1202	/*
1203	 * Remove pbase from the freelist in advance; it'll be pushed to
1204	 * cpuset_ids on success.  We assume here that cpuset_create() will not
1205	 * touch pbase on failure, and we just enqueue it back to the freelist
1206	 * to remain in a consistent state.
1207	 */
1208	pbase = LIST_FIRST(cpusets);
1209	LIST_REMOVE(pbase, cs_link);
1210	error = cpuset_create(&pbase, set, &nmask);
1211	if (error != 0) {
1212		LIST_INSERT_HEAD(cpusets, pbase, cs_link);
1213		return (error);
1214	}
1215
1216	/* Duplicates some work from above... oh well. */
1217	pbase->cs_domain = domainset_shadow(set->cs_domain, &ndomain,
1218	    domainlist);
1219	*nsetp = pbase;
1220	return (0);
1221}
1222
1223/*
1224 * Handle four cases for updating an entire process.
1225 *
1226 * 1) Set is non-null and the process is not rebasing onto a new root.  This
1227 *    reparents all anonymous sets to the provided set and replaces all
1228 *    non-anonymous td_cpusets with the provided set.
1229 * 2) Set is non-null and the process is rebasing onto a new root.  This
1230 *    creates a new base set if the process previously had its own base set,
1231 *    then reparents all anonymous sets either to that set or the provided set
1232 *    if one was not created.  Non-anonymous sets are similarly replaced.
1233 * 3) Mask is non-null.  This replaces or creates anonymous sets for every
1234 *    thread with the existing base as a parent.
1235 * 4) domain is non-null.  This creates anonymous sets for every thread
1236 *    and replaces the domain set.
1237 *
1238 * This is overly complicated because we can't allocate while holding a
1239 * spinlock and spinlocks must be held while changing and examining thread
1240 * state.
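 *
 * For reference, within this file case 1 corresponds to the rebase == false
 * callers (e.g. kern_cpuset_setid()), case 2 to cpuset_setproc_update_set()
 * with rebase == true, and cases 3 and 4 to callers passing only a mask or
 * only a domain.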
1241 */
1242static int
1243cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask,
1244    struct domainset *domain, bool rebase)
1245{
1246	struct setlist freelist;
1247	struct setlist droplist;
1248	struct domainlist domainlist;
1249	struct cpuset *base, *nset, *nroot, *tdroot;
1250	struct thread *td;
1251	struct proc *p;
1252	int needed;
1253	int nfree;
1254	int error;
1255
1256	/*
1257	 * The algorithm requires two passes due to locking considerations.
1258	 *
1259	 * 1) Lookup the process and acquire the locks in the required order.
1260	 * 2) If enough cpusets have not been allocated release the locks and
1261	 *    allocate them.  Loop.
1262	 */
1263	cpuset_freelist_init(&freelist, 1);
1264	domainset_freelist_init(&domainlist, 1);
1265	nfree = 1;
1266	LIST_INIT(&droplist);
1267	nfree = 0;
1268	base = set;
1269	nroot = NULL;
1270	if (set != NULL)
1271		nroot = cpuset_getroot(set);
1272	for (;;) {
1273		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
1274		if (error)
1275			goto out;
1276		tdroot = cpuset_getroot(td->td_cpuset);
1277		needed = p->p_numthreads;
1278		if (set != NULL && rebase && tdroot != nroot)
1279			needed++;
1280		if (nfree >= needed)
1281			break;
1282		PROC_UNLOCK(p);
1283		if (nfree < needed) {
1284			cpuset_freelist_add(&freelist, needed - nfree);
1285			domainset_freelist_add(&domainlist, needed - nfree);
1286			nfree = needed;
1287		}
1288	}
1289	PROC_LOCK_ASSERT(p, MA_OWNED);
1290
1291	/*
1292	 * If we're changing roots and the root set is what has been specified
1293	 * as the parent, then we'll check if the process was previously using
1294	 * the root set and, if it wasn't, create a new base with the process's
1295	 * mask applied to it.
1296	 *
1297	 * If the new root is incompatible with the existing mask, then we allow
1298	 * the process to take on the new root if and only if they have
1299	 * privilege to widen their mask anyway.  Unprivileged processes get
1300	 * rejected with EDEADLK.
1301	 */
1302	if (set != NULL && rebase && nroot != tdroot) {
1303		cpusetid_t base_id, root_id;
1304
1305		root_id = td->td_ucred->cr_prison->pr_cpuset->cs_id;
1306		base_id = cpuset_getbase(td->td_cpuset)->cs_id;
1307
1308		if (base_id != root_id) {
1309			error = cpuset_setproc_newbase(td, set, nroot, &base,
1310			    &freelist, &domainlist);
1311			if (error == EDEADLK &&
1312			    priv_check(td, PRIV_SCHED_CPUSET) == 0)
1313				error = 0;
1314			if (error != 0)
1315				goto unlock_out;
1316		}
1317	}
1318
1319	/*
1320	 * Now that the appropriate locks are held and we have enough cpusets,
1321	 * make sure the operation will succeed before applying changes. The
1322	 * proc lock prevents td_cpuset from changing between calls.
1323	 */
1324	error = 0;
1325	FOREACH_THREAD_IN_PROC(p, td) {
1326		thread_lock(td);
1327		if (set != NULL)
1328			error = cpuset_setproc_test_setthread(td->td_cpuset,
1329			    base);
1330		else
1331			error = cpuset_setproc_test_maskthread(td->td_cpuset,
1332			    mask, domain);
1333		thread_unlock(td);
1334		if (error)
1335			goto unlock_out;
1336	}
1337	/*
1338	 * Replace each thread's cpuset while using deferred release.  We
1339	 * must do this because the thread lock must be held while operating
1340	 * on the thread and this limits the type of operations allowed.
1341	 */
1342	FOREACH_THREAD_IN_PROC(p, td) {
1343		thread_lock(td);
1344		if (set != NULL)
1345			error = cpuset_setproc_setthread(td->td_cpuset, base,
1346			    &nset, &freelist, &domainlist);
1347		else
1348			error = cpuset_setproc_maskthread(td->td_cpuset, mask,
1349			    domain, &nset, &freelist, &domainlist);
1350		if (error) {
1351			thread_unlock(td);
1352			break;
1353		}
1354		cpuset_rel_defer(&droplist, cpuset_update_thread(td, nset));
1355		thread_unlock(td);
1356	}
1357unlock_out:
1358	PROC_UNLOCK(p);
1359out:
1360	if (base != NULL && base != set)
1361		cpuset_rel(base);
1362	while ((nset = LIST_FIRST(&droplist)) != NULL)
1363		cpuset_rel_complete(nset);
1364	cpuset_freelist_free(&freelist);
1365	domainset_freelist_free(&domainlist);
1366	return (error);
1367}
1368
1369static int
1370bitset_strprint(char *buf, size_t bufsiz, const struct bitset *set, int setlen)
1371{
1372	size_t bytes;
1373	int i, once;
1374	char *p;
1375
1376	once = 0;
1377	p = buf;
1378	for (i = 0; i < __bitset_words(setlen); i++) {
1379		if (once != 0) {
1380			if (bufsiz < 1)
1381				return (0);
1382			*p = ',';
1383			p++;
1384			bufsiz--;
1385		} else
1386			once = 1;
1387		if (bufsiz < sizeof(__STRING(ULONG_MAX)))
1388			return (0);
1389		bytes = snprintf(p, bufsiz, "%lx", set->__bits[i]);
1390		p += bytes;
1391		bufsiz -= bytes;
1392	}
1393	return (p - buf);
1394}
1395
1396static int
1397bitset_strscan(struct bitset *set, int setlen, const char *buf)
1398{
1399	int i, ret;
1400	const char *p;
1401
1402	BIT_ZERO(setlen, set);
1403	p = buf;
1404	for (i = 0; i < __bitset_words(setlen); i++) {
1405		if (*p == ',') {
1406			p++;
1407			continue;
1408		}
1409		ret = sscanf(p, "%lx", &set->__bits[i]);
1410		if (ret == 0 || ret == -1)
1411			break;
1412		while (isxdigit(*p))
1413			p++;
1414	}
1415	return (p - buf);
1416}
1417
1418/*
1419 * Return a string representing a valid layout for a cpuset_t object.
1420 * It expects an incoming buffer at least as large as CPUSETBUFSIZ.
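 *
 * For example, with the default 256-bit cpuset_t and 64-bit longs this
 * renders a set containing only CPU 0 as "1,0,0,0" (lowest word first).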
1421 */
1422char *
1423cpusetobj_strprint(char *buf, const cpuset_t *set)
1424{
1425
1426	bitset_strprint(buf, CPUSETBUFSIZ, (const struct bitset *)set,
1427	    CPU_SETSIZE);
1428	return (buf);
1429}
1430
1431/*
1432 * Build a valid cpuset_t object from a string representation.
1433 * It expects an incoming buffer at least as large as CPUSETBUFSIZ.
1434 */
1435int
1436cpusetobj_strscan(cpuset_t *set, const char *buf)
1437{
1438	char p;
1439
1440	if (strlen(buf) > CPUSETBUFSIZ - 1)
1441		return (-1);
1442
1443	p = buf[bitset_strscan((struct bitset *)set, CPU_SETSIZE, buf)];
1444	if (p != '\0')
1445		return (-1);
1446
1447	return (0);
1448}
1449
1450/*
1451 * Handle a domainset specifier in the sysctl tree.  A pointer to a pointer to
1452 * a domainset is in arg1.  If the user specifies a valid domainset the
1453 * pointer is updated.
1454 *
1455 * Format is:
1456 * hex mask word 0,hex mask word 1,...:decimal policy:decimal preferred
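 *
 * e.g. "3:1:-1" selects domains 0 and 1 with policy 1 and no preferred
 * domain.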
1457 */
1458int
1459sysctl_handle_domainset(SYSCTL_HANDLER_ARGS)
1460{
1461	char buf[DOMAINSETBUFSIZ];
1462	struct domainset *dset;
1463	struct domainset key;
1464	int policy, prefer, error;
1465	char *p;
1466
1467	dset = *(struct domainset **)arg1;
1468	error = 0;
1469
1470	if (dset != NULL) {
1471		p = buf + bitset_strprint(buf, DOMAINSETBUFSIZ,
1472		    (const struct bitset *)&dset->ds_mask, DOMAINSET_SETSIZE);
1473		sprintf(p, ":%d:%d", dset->ds_policy, dset->ds_prefer);
1474	} else
1475		sprintf(buf, "<NULL>");
1476	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
1477	if (error != 0 || req->newptr == NULL)
1478		return (error);
1479
1480	/*
1481	 * Read in and validate the string.
1482	 */
1483	memset(&key, 0, sizeof(key));
1484	p = &buf[bitset_strscan((struct bitset *)&key.ds_mask,
1485	    DOMAINSET_SETSIZE, buf)];
1486	if (p == buf)
1487		return (EINVAL);
1488	if (sscanf(p, ":%d:%d", &policy, &prefer) != 2)
1489		return (EINVAL);
1490	key.ds_policy = policy;
1491	key.ds_prefer = prefer;
1492
1493	/* domainset_create() validates the policy. */
1494	dset = domainset_create(&key);
1495	if (dset == NULL)
1496		return (EINVAL);
1497	*(struct domainset **)arg1 = dset;
1498
1499	return (error);
1500}
1501
1502/*
1503 * Apply an anonymous mask or a domain to a single thread.
1504 */
1505static int
1506_cpuset_setthread(lwpid_t id, cpuset_t *mask, struct domainset *domain)
1507{
1508	struct setlist cpusets;
1509	struct domainlist domainlist;
1510	struct cpuset *nset;
1511	struct cpuset *set;
1512	struct thread *td;
1513	struct proc *p;
1514	int error;
1515
1516	cpuset_freelist_init(&cpusets, 1);
1517	domainset_freelist_init(&domainlist, domain != NULL);
1518	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
1519	if (error)
1520		goto out;
1521	set = NULL;
1522	thread_lock(td);
1523	error = cpuset_shadow(td->td_cpuset, &nset, mask, domain,
1524	    &cpusets, &domainlist);
1525	if (error == 0)
1526		set = cpuset_update_thread(td, nset);
1527	thread_unlock(td);
1528	PROC_UNLOCK(p);
1529	if (set)
1530		cpuset_rel(set);
1531out:
1532	cpuset_freelist_free(&cpusets);
1533	domainset_freelist_free(&domainlist);
1534	return (error);
1535}
1536
1537/*
1538 * Apply an anonymous mask to a single thread.
1539 */
1540int
1541cpuset_setthread(lwpid_t id, cpuset_t *mask)
1542{
1543
1544	return _cpuset_setthread(id, mask, NULL);
1545}
1546
1547/*
1548 * Apply a new cpumask to the ithread.
1549 */
1550int
1551cpuset_setithread(lwpid_t id, int cpu)
1552{
1553	cpuset_t mask;
1554
1555	CPU_ZERO(&mask);
1556	if (cpu == NOCPU)
1557		CPU_COPY(cpuset_root, &mask);
1558	else
1559		CPU_SET(cpu, &mask);
1560	return _cpuset_setthread(id, &mask, NULL);
1561}
1562
1563/*
1564 * Initialize static domainsets after NUMA information is available.  This is
1565 * called before memory allocators are initialized.
1566 */
1567void
1568domainset_init(void)
1569{
1570	struct domainset *dset;
1571	int i;
1572
1573	dset = &domainset_firsttouch;
1574	DOMAINSET_COPY(&all_domains, &dset->ds_mask);
1575	dset->ds_policy = DOMAINSET_POLICY_FIRSTTOUCH;
1576	dset->ds_prefer = -1;
1577	_domainset_create(dset, NULL);
1578
1579	dset = &domainset_interleave;
1580	DOMAINSET_COPY(&all_domains, &dset->ds_mask);
1581	dset->ds_policy = DOMAINSET_POLICY_INTERLEAVE;
1582	dset->ds_prefer = -1;
1583	_domainset_create(dset, NULL);
1584
1585	dset = &domainset_roundrobin;
1586	DOMAINSET_COPY(&all_domains, &dset->ds_mask);
1587	dset->ds_policy = DOMAINSET_POLICY_ROUNDROBIN;
1588	dset->ds_prefer = -1;
1589	_domainset_create(dset, NULL);
1590
1591	for (i = 0; i < vm_ndomains; i++) {
1592		dset = &domainset_fixed[i];
1593		DOMAINSET_ZERO(&dset->ds_mask);
1594		DOMAINSET_SET(i, &dset->ds_mask);
1595		dset->ds_policy = DOMAINSET_POLICY_ROUNDROBIN;
1596		_domainset_create(dset, NULL);
1597
1598		dset = &domainset_prefer[i];
1599		DOMAINSET_COPY(&all_domains, &dset->ds_mask);
1600		dset->ds_policy = DOMAINSET_POLICY_PREFER;
1601		dset->ds_prefer = i;
1602		_domainset_create(dset, NULL);
1603	}
1604}
1605
1606/*
1607 * Define the domainsets for cpusets 0, 1, and 2.
1608 */
1609void
1610domainset_zero(void)
1611{
1612	struct domainset *dset, *tmp;
1613
1614	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);
1615
1616	domainset0 = &domainset_firsttouch;
1617	curthread->td_domain.dr_policy = domainset0;
1618
1619	domainset2 = &domainset_interleave;
1620	kernel_object->domain.dr_policy = domainset2;
1621
1622	/* Remove empty domains from the global policies. */
1623	LIST_FOREACH_SAFE(dset, &cpuset_domains, ds_link, tmp)
1624		if (domainset_empty_vm(dset))
1625			LIST_REMOVE(dset, ds_link);
1626}
1627
1628/*
1629 * Creates system-wide cpusets and the cpuset for thread0 including three
1630 * sets:
1631 *
1632 * 0 - The root set which should represent all valid processors in the
1633 *     system.  This set is immutable.
1634 * 1 - The default set which all processes are a member of until changed.
1635 *     This allows an administrator to move all threads off of given cpus to
1636 *     dedicate them to high priority tasks or save power etc.
1637 * 2 - The kernel set which allows restriction and policy to be applied only
1638 *     to kernel threads and the kernel_object.
1639 */
1640struct cpuset *
1641cpuset_thread0(void)
1642{
1643	struct cpuset *set;
1644	int i;
1645	int error __unused;
1646
1647	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
1648	    NULL, NULL, UMA_ALIGN_CACHE, 0);
1649	domainset_zone = uma_zcreate("domainset", sizeof(struct domainset),
1650	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
1651
1652	/*
1653	 * Create the root system set (0) for the whole machine.  Doesn't use
1654	 * cpuset_create() due to NULL parent.
1655	 */
1656	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
1657	CPU_COPY(&all_cpus, &set->cs_mask);
1658	LIST_INIT(&set->cs_children);
1659	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
1660	refcount_init(&set->cs_ref, 1);
1661	set->cs_flags = CPU_SET_ROOT | CPU_SET_RDONLY;
1662	set->cs_domain = domainset0;
1663	cpuset_zero = set;
1664	cpuset_root = &set->cs_mask;
1665
1666	/*
1667	 * Now derive a default (1), modifiable set from that to give out.
1668	 */
1669	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
1670	error = cpuset_init(set, cpuset_zero, NULL, NULL, 1);
1671	KASSERT(error == 0, ("Error creating default set: %d\n", error));
1672	cpuset_default = set;
1673	/*
1674	 * Create the kernel set (2).
1675	 */
1676	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
1677	error = cpuset_init(set, cpuset_zero, NULL, NULL, 2);
1678	KASSERT(error == 0, ("Error creating kernel set: %d\n", error));
1679	set->cs_domain = domainset2;
1680	cpuset_kernel = set;
1681
1682	/*
1683	 * Initialize the unit allocator.  IDs 0, 1, and 2 are allocated above.
1684	 */
1685	cpuset_unr = new_unrhdr(3, INT_MAX, NULL);
1686
1687	/*
1688	 * If MD code has not initialized per-domain cpusets, place all
1689	 * CPUs in domain 0.
1690	 */
1691	for (i = 0; i < MAXMEMDOM; i++)
1692		if (!CPU_EMPTY(&cpuset_domain[i]))
1693			goto domains_set;
1694	CPU_COPY(&all_cpus, &cpuset_domain[0]);
1695domains_set:
1696
1697	return (cpuset_default);
1698}
1699
1700void
1701cpuset_kernthread(struct thread *td)
1702{
1703	struct cpuset *set;
1704
1705	thread_lock(td);
1706	set = td->td_cpuset;
1707	td->td_cpuset = cpuset_ref(cpuset_kernel);
1708	thread_unlock(td);
1709	cpuset_rel(set);
1710}
1711
1712/*
1713 * Create a cpuset as cpuset_create() does, but mark
1714 * the new 'set' as root.
1715 *
1716 * We are not going to reparent the td to it.  Use cpuset_setproc_update_set()
1717 * for that.
1718 *
1719 * In case of no error, returns the set in *setp with a reference held.
1720 */
1721int
1722cpuset_create_root(struct prison *pr, struct cpuset **setp)
1723{
1724	struct cpuset *set;
1725	int error;
1726
1727	KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__));
1728	KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__));
1729
1730	set = NULL;
1731	error = cpuset_create(&set, pr->pr_cpuset, &pr->pr_cpuset->cs_mask);
1732	if (error)
1733		return (error);
1734
1735	KASSERT(set != NULL, ("[%s:%d] cpuset_create returned invalid data",
1736	    __func__, __LINE__));
1737
1738	/* Mark the set as root. */
1739	set->cs_flags |= CPU_SET_ROOT;
1740	*setp = set;
1741
1742	return (0);
1743}
1744
1745int
1746cpuset_setproc_update_set(struct proc *p, struct cpuset *set)
1747{
1748	int error;
1749
1750	KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__));
1751	KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__));
1752
1753	cpuset_ref(set);
1754	error = cpuset_setproc(p->p_pid, set, NULL, NULL, true);
1755	if (error)
1756		return (error);
1757	cpuset_rel(set);
1758	return (0);
1759}
1760
1761/*
1762 * In capability mode, the only accesses permitted are to the current
1763 * thread's and process's CPU and domain sets.
1764 */
1765static bool
1766cpuset_capmode_allowed(struct thread *td, cpulevel_t level, cpuwhich_t which,
1767    id_t id)
1768{
1769	if (level != CPU_LEVEL_WHICH)
1770		return (false);
1771	if (which != CPU_WHICH_TID && which != CPU_WHICH_PID &&
1772	    which != CPU_WHICH_TIDPID)
1773		return (false);
1774	if (id != -1 && which == CPU_WHICH_TIDPID &&
1775	    id != td->td_tid && id != td->td_proc->p_pid)
1776		return (false);
1777	if (id != -1 &&
1778	    !(which == CPU_WHICH_TID && id == td->td_tid) &&
1779	    !(which == CPU_WHICH_PID && id == td->td_proc->p_pid))
1780		return (false);
1781	return (true);
1782}
1783
1784/*
1785 * Check for capability violations and record them if ktrace(2) is active.
1786 */
1787static int
1788cpuset_check_capabilities(struct thread *td, cpulevel_t level, cpuwhich_t which,
1789    id_t id)
1790{
1791	if (IN_CAPABILITY_MODE(td) || CAP_TRACING(td)) {
1792		if (cpuset_capmode_allowed(td, level, which, id))
1793			return (0);
1794		if (CAP_TRACING(td))
1795			ktrcapfail(CAPFAIL_CPUSET, NULL);
1796		if (IN_CAPABILITY_MODE(td))
1797			return (ECAPMODE);
1798	}
1799	return (0);
1800}
1801
1802#if defined(__powerpc__)
1803/*
1804 * TODO: At least powerpc64 and powerpc64le kernels panic with
1805 * exception 0x480 (instruction segment exception) when copyin/copyout
1806 * are set as function pointers in the cpuset_copy_cb struct and called by
1807 * an external module (like pfsync). Tip: copyin/copyout have an ifunc
1808 * resolver function.
1809 *
1810 * Bisecting LLVM shows that the behavior changed in LLVM 10.0 with
1811 * https://reviews.llvm.org/rGdc06b0bc9ad055d06535462d91bfc2a744b2f589
1812 *
1813 * This is a hack/workaround while the problem is being discussed with the
1814 * LLVM community.
1815 */
1816static int
1817cpuset_copyin(const void *uaddr, void *kaddr, size_t len)
1818{
1819	return(copyin(uaddr, kaddr, len));
1820}
1821
1822static int
1823cpuset_copyout(const void *kaddr, void *uaddr, size_t len)
1824{
1825	return(copyout(kaddr, uaddr, len));
1826}
1827
1828static const struct cpuset_copy_cb copy_set = {
1829	.cpuset_copyin = cpuset_copyin,
1830	.cpuset_copyout = cpuset_copyout
1831};
1832#else
1833static const struct cpuset_copy_cb copy_set = {
1834        .cpuset_copyin = copyin,
1835        .cpuset_copyout = copyout
1836};
1837#endif
1838
1839#ifndef _SYS_SYSPROTO_H_
1840struct cpuset_args {
1841	cpusetid_t	*setid;
1842};
1843#endif
1844int
1845sys_cpuset(struct thread *td, struct cpuset_args *uap)
1846{
1847	struct cpuset *root;
1848	struct cpuset *set;
1849	int error;
1850
1851	thread_lock(td);
1852	root = cpuset_refroot(td->td_cpuset);
1853	thread_unlock(td);
1854	set = NULL;
1855	error = cpuset_create(&set, root, &root->cs_mask);
1856	cpuset_rel(root);
1857	if (error)
1858		return (error);
1859	error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
1860	if (error == 0)
1861		error = cpuset_setproc(-1, set, NULL, NULL, false);
1862	cpuset_rel(set);
1863	return (error);
1864}
1865
1866#ifndef _SYS_SYSPROTO_H_
1867struct cpuset_setid_args {
1868	cpuwhich_t	which;
1869	id_t		id;
1870	cpusetid_t	setid;
1871};
1872#endif
1873int
1874sys_cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
1875{
1876
1877	return (kern_cpuset_setid(td, uap->which, uap->id, uap->setid));
1878}
1879
1880int
1881kern_cpuset_setid(struct thread *td, cpuwhich_t which,
1882    id_t id, cpusetid_t setid)
1883{
1884	struct cpuset *set;
1885	int error;
1886
1887	/*
1888	 * Presently we only support per-process sets.
1889	 */
1890	if (which != CPU_WHICH_PID)
1891		return (EINVAL);
1892	set = cpuset_lookup(setid, td);
1893	if (set == NULL)
1894		return (ESRCH);
1895	error = cpuset_setproc(id, set, NULL, NULL, false);
1896	cpuset_rel(set);
1897	return (error);
1898}
1899
1900#ifndef _SYS_SYSPROTO_H_
1901struct cpuset_getid_args {
1902	cpulevel_t	level;
1903	cpuwhich_t	which;
1904	id_t		id;
1905	cpusetid_t	*setid;
1906};
1907#endif
1908int
1909sys_cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
1910{
1911
1912	return (kern_cpuset_getid(td, uap->level, uap->which, uap->id,
1913	    uap->setid));
1914}
1915
int
kern_cpuset_getid(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, cpusetid_t *setid)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpusetid_t tmpid;
	int error;

	if (level == CPU_LEVEL_WHICH && which != CPU_WHICH_CPUSET)
		return (EINVAL);
	error = cpuset_which(which, id, &p, &ttd, &set);
	if (error)
		return (error);
	switch (which) {
	case CPU_WHICH_TID:
	case CPU_WHICH_PID:
	case CPU_WHICH_TIDPID:
		thread_lock(ttd);
		set = cpuset_refbase(ttd->td_cpuset);
		thread_unlock(ttd);
		PROC_UNLOCK(p);
		break;
	case CPU_WHICH_CPUSET:
	case CPU_WHICH_JAIL:
		break;
	case CPU_WHICH_IRQ:
	case CPU_WHICH_DOMAIN:
		return (EINVAL);
	}
	switch (level) {
	case CPU_LEVEL_ROOT:
		nset = cpuset_refroot(set);
		cpuset_rel(set);
		set = nset;
		break;
	case CPU_LEVEL_CPUSET:
		break;
	case CPU_LEVEL_WHICH:
		break;
	}
	tmpid = set->cs_id;
	cpuset_rel(set);
	if (error == 0)
		error = copyout(&tmpid, setid, sizeof(tmpid));

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	cpuset_t	*mask;
};
#endif
int
sys_cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
{

	return (user_cpuset_getaffinity(td, uap->level, uap->which,
	    uap->id, uap->cpusetsize, uap->mask, &copy_set));
}

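/*
 * Compute the cpu mask of the object identified by 'which'/'id' at the
 * requested level into 'mask'.  Returns ERANGE if 'cpusetsize' bytes are
 * too few to hold the highest set cpu.
 */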
int
kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t cpusetsize, cpuset_t *mask)
{
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct proc *p;
	int error;

	error = cpuset_check_capabilities(td, level, which, id);
	if (error != 0)
		return (error);
	error = cpuset_which2(&which, id, &p, &ttd, &set);
	if (error != 0)
		return (error);
	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			return (EINVAL);
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		CPU_COPY(&nset->cs_mask, mask);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			CPU_COPY(&set->cs_mask, mask);
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
			error = intr_getaffinity(id, which, mask);
			break;
		case CPU_WHICH_DOMAIN:
			if (id < 0 || id >= MAXMEMDOM)
				error = ESRCH;
			else
				CPU_COPY(&cpuset_domain[id], mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0) {
		if (cpusetsize < howmany(CPU_FLS(mask), NBBY))
			return (ERANGE);
#ifdef KTRACE
		if (KTRPOINT(td, KTR_STRUCT))
			ktrcpuset(mask, cpusetsize);
#endif
	}
	return (error);
}

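/*
 * Userspace entry point for cpuset_getaffinity().  The user buffer may be
 * smaller or larger than the kernel cpuset_t; the mask is copied out via
 * the supplied callbacks and any user bytes beyond the kernel set size
 * are cleared.
 */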
int
user_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t cpusetsize, cpuset_t *maskp, const struct cpuset_copy_cb *cb)
{
	cpuset_t *mask;
	size_t size;
	int error;

	mask = malloc(sizeof(cpuset_t), M_TEMP, M_WAITOK | M_ZERO);
	size = min(cpusetsize, sizeof(cpuset_t));
	error = kern_cpuset_getaffinity(td, level, which, id, size, mask);
	if (error == 0) {
		error = cb->cpuset_copyout(mask, maskp, size);
		if (error != 0)
			goto out;
		if (cpusetsize > size) {
			char *end;
			char *cp;
			int rv;

			end = cp = (char *)&maskp->__bits;
			end += cpusetsize;
			cp += size;
			while (cp != end) {
				rv = subyte(cp, 0);
				if (rv == -1) {
					error = EFAULT;
					goto out;
				}
				cp++;
			}
		}
	}
out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	const cpuset_t	*mask;
};
#endif
int
sys_cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{

	return (user_cpuset_setaffinity(td, uap->level, uap->which,
	    uap->id, uap->cpusetsize, uap->mask, &copy_set));
}

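/*
 * Apply 'mask' to the object identified by 'which'/'id' at the requested
 * level.  An empty mask is rejected with EDEADLK.
 */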
int
kern_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	int error;

#ifdef KTRACE
	if (KTRPOINT(td, KTR_STRUCT))
		ktrcpuset(mask, sizeof(cpuset_t));
#endif
	error = cpuset_check_capabilities(td, level, which, id);
	if (error != 0)
		return (error);
	if (CPU_EMPTY(mask))
		return (EDEADLK);
	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(which, id, &p, &ttd, &set);
		if (error)
			break;
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
		case CPU_WHICH_TIDPID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			return (EINVAL);
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(id, NULL, mask, NULL, false);
			break;
		case CPU_WHICH_TIDPID:
			if (id > PID_MAX || id == -1)
				error = cpuset_setthread(id, mask);
			else
				error = cpuset_setproc(id, NULL, mask, NULL,
				    false);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(which, id, &p, &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
			error = intr_setaffinity(id, which, mask);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

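/*
 * Userspace entry point for cpuset_setaffinity().  Copies in up to
 * sizeof(cpuset_t) bytes of the user mask and rejects masks that set
 * bits beyond the kernel's cpuset_t.
 */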
int
user_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t cpusetsize, const cpuset_t *maskp, const struct cpuset_copy_cb *cb)
{
	cpuset_t *mask;
	int error;
	size_t size;

	size = min(cpusetsize, sizeof(cpuset_t));
	mask = malloc(sizeof(cpuset_t), M_TEMP, M_WAITOK | M_ZERO);
	error = cb->cpuset_copyin(maskp, mask, size);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (cpusetsize > sizeof(cpuset_t)) {
		const char *end, *cp;
		int val;
		end = cp = (const char *)&maskp->__bits;
		end += cpusetsize;
		cp += sizeof(cpuset_t);

		while (cp != end) {
			val = fubyte(cp);
			if (val == -1) {
				error = EFAULT;
				goto out;
			}
			if (val != 0) {
				error = EINVAL;
				goto out;
			}
			cp++;
		}
	}
	error = kern_cpuset_setaffinity(td, level, which, id, mask);

out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getdomain_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		domainsetsize;
	domainset_t	*mask;
	int 		*policy;
};
#endif
int
sys_cpuset_getdomain(struct thread *td, struct cpuset_getdomain_args *uap)
{

	return (kern_cpuset_getdomain(td, uap->level, uap->which,
	    uap->id, uap->domainsetsize, uap->mask, uap->policy, &copy_set));
}

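/*
 * Report the memory domain mask and allocation policy of the object
 * identified by 'which'/'id'.  For DOMAINSET_POLICY_PREFER only the
 * preferred domain is reported in the mask.
 */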
int
kern_cpuset_getdomain(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t domainsetsize, domainset_t *maskp, int *policyp,
    const struct cpuset_copy_cb *cb)
{
	struct domainset outset;
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct domainset *dset;
	struct proc *p;
	domainset_t *mask;
	int error;

	if (domainsetsize < sizeof(domainset_t) ||
	    domainsetsize > DOMAINSET_MAXSIZE / NBBY)
		return (ERANGE);
	error = cpuset_check_capabilities(td, level, which, id);
	if (error != 0)
		return (error);
	mask = malloc(domainsetsize, M_TEMP, M_WAITOK | M_ZERO);
	bzero(&outset, sizeof(outset));
	error = cpuset_which2(&which, id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			goto out;
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		domainset_copy(nset->cs_domain, &outset);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			domainset_copy(ttd->td_cpuset->cs_domain, &outset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				dset = ttd->td_cpuset->cs_domain;
				/* Show all domains in the proc. */
				DOMAINSET_OR(&outset.ds_mask, &dset->ds_mask);
				/* Last policy wins. */
				outset.ds_policy = dset->ds_policy;
				outset.ds_prefer = dset->ds_prefer;
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			domainset_copy(set->cs_domain, &outset);
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	/*
	 * Translate prefer into a set containing only the preferred domain,
	 * not the entire fallback set.
	 */
	if (outset.ds_policy == DOMAINSET_POLICY_PREFER) {
		DOMAINSET_ZERO(&outset.ds_mask);
		DOMAINSET_SET(outset.ds_prefer, &outset.ds_mask);
	}
	DOMAINSET_COPY(&outset.ds_mask, mask);
	if (error == 0)
		error = cb->cpuset_copyout(mask, maskp, domainsetsize);
	if (error == 0)
		if (suword32(policyp, outset.ds_policy) != 0)
			error = EFAULT;
out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setdomain_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		domainsetsize;
	domainset_t	*mask;
	int 		policy;
};
#endif
int
sys_cpuset_setdomain(struct thread *td, struct cpuset_setdomain_args *uap)
{

	return (kern_cpuset_setdomain(td, uap->level, uap->which,
	    uap->id, uap->domainsetsize, uap->mask, uap->policy, &copy_set));
}

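/*
 * Validate a user supplied domain mask and policy and apply them to the
 * object identified by 'which'/'id'.  Policies that cannot be satisfied
 * fall back to interleaving across all domains.
 */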
int
kern_cpuset_setdomain(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t domainsetsize, const domainset_t *maskp, int policy,
    const struct cpuset_copy_cb *cb)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	struct domainset domain;
	domainset_t *mask;
	int error;

	if (domainsetsize < sizeof(domainset_t) ||
	    domainsetsize > DOMAINSET_MAXSIZE / NBBY)
		return (ERANGE);
	if (policy <= DOMAINSET_POLICY_INVALID ||
	    policy > DOMAINSET_POLICY_MAX)
		return (EINVAL);
	error = cpuset_check_capabilities(td, level, which, id);
	if (error != 0)
		return (error);
	memset(&domain, 0, sizeof(domain));
	mask = malloc(domainsetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = cb->cpuset_copyin(maskp, mask, domainsetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (domainsetsize > sizeof(domainset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += domainsetsize;
		cp += sizeof(domainset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}
	}
	if (DOMAINSET_EMPTY(mask)) {
		error = EDEADLK;
		goto out;
	}
	DOMAINSET_COPY(mask, &domain.ds_mask);
	domain.ds_policy = policy;

	/*
	 * Sanitize the provided mask.
	 */
	if (!DOMAINSET_SUBSET(&all_domains, &domain.ds_mask)) {
		error = EINVAL;
		goto out;
	}

	/* Translate preferred policy into a mask and fallback. */
	if (policy == DOMAINSET_POLICY_PREFER) {
		/* Only support a single preferred domain. */
		if (DOMAINSET_COUNT(&domain.ds_mask) != 1) {
			error = EINVAL;
			goto out;
		}
		domain.ds_prefer = DOMAINSET_FFS(&domain.ds_mask) - 1;
		/* This will be constrained by domainset_shadow(). */
		DOMAINSET_COPY(&all_domains, &domain.ds_mask);
	}

	/*
	 * When given an impossible policy, fall back to interleaving
	 * across all domains.
	 */
	if (domainset_empty_vm(&domain))
		domainset_copy(domainset2, &domain);

	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(which, id, &p, &ttd, &set);
		if (error)
			break;
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
		case CPU_WHICH_TIDPID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			goto out;
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify_domain(nset, &domain);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			error = _cpuset_setthread(id, NULL, &domain);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(id, NULL, NULL, &domain, false);
			break;
		case CPU_WHICH_TIDPID:
			if (id > PID_MAX || id == -1)
				error = _cpuset_setthread(id, NULL, &domain);
			else
				error = cpuset_setproc(id, NULL, NULL, &domain,
				    false);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(which, id, &p, &ttd, &set);
			if (error == 0) {
				error = cpuset_modify_domain(set, &domain);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}

#ifdef DDB

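/*
 * Print the members of a bitset as a comma separated list, or "<none>"
 * when the set is empty.
 */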
static void
ddb_display_bitset(const struct bitset *set, int size)
{
	int bit, once;

	for (once = 0, bit = 0; bit < size; bit++) {
		if (CPU_ISSET(bit, set)) {
			if (once == 0) {
				db_printf("%d", bit);
				once = 1;
			} else
				db_printf(",%d", bit);
		}
	}
	if (once == 0)
		db_printf("<none>");
}

void
ddb_display_cpuset(const cpuset_t *set)
{
	ddb_display_bitset((const struct bitset *)set, CPU_SETSIZE);
}

static void
ddb_display_domainset(const domainset_t *set)
{
	ddb_display_bitset((const struct bitset *)set, DOMAINSET_SETSIZE);
}

DB_SHOW_COMMAND_FLAGS(cpusets, db_show_cpusets, DB_CMD_MEMSAFE)
{
	struct cpuset *set;

	LIST_FOREACH(set, &cpuset_ids, cs_link) {
		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
		    set, set->cs_id, refcount_load(&set->cs_ref), set->cs_flags,
		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
		db_printf("  cpu mask=");
		ddb_display_cpuset(&set->cs_mask);
		db_printf("\n");
		db_printf("  domain policy %d prefer %d mask=",
		    set->cs_domain->ds_policy, set->cs_domain->ds_prefer);
		ddb_display_domainset(&set->cs_domain->ds_mask);
		db_printf("\n");
		if (db_pager_quit)
			break;
	}
}

DB_SHOW_COMMAND_FLAGS(domainsets, db_show_domainsets, DB_CMD_MEMSAFE)
{
	struct domainset *set;

	LIST_FOREACH(set, &cpuset_domains, ds_link) {
		db_printf("set=%p policy %d prefer %d cnt %d\n",
		    set, set->ds_policy, set->ds_prefer, set->ds_cnt);
		db_printf("  mask =");
		ddb_display_domainset(&set->ds_mask);
		db_printf("\n");
	}
}
#endif /* DDB */
