1/*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <mach/mach_types.h>
30#include <mach/task_server.h>
31
32#include <kern/sched.h>
33#include <kern/task.h>
34#include <mach/thread_policy.h>
35#include <sys/errno.h>
36#include <sys/resource.h>
37#include <machine/limits.h>
38#include <kern/ledger.h>
39#include <kern/thread_call.h>
40#if CONFIG_EMBEDDED
41#include <kern/kalloc.h>
42#include <sys/errno.h>
43#endif /* CONFIG_EMBEDDED */
44#include <sys/kdebug.h>
45
46#if CONFIG_MEMORYSTATUS
47extern void memorystatus_on_suspend(int pid);
48extern void memorystatus_on_resume(int pid);
49#endif
50
51static int proc_apply_bgtaskpolicy_internal(task_t, int, int);
52static int proc_restore_bgtaskpolicy_internal(task_t, int, int, int);
53static int task_get_cpuusage(task_t task, uint32_t * percentagep, uint64_t * intervalp, uint64_t * deadlinep);
54int task_set_cpuusage(task_t task, uint64_t percentage, uint64_t interval, uint64_t deadline, int scope);
55static int task_clear_cpuusage_locked(task_t task);
56static int task_apply_resource_actions(task_t task, int type);
57static void task_priority(task_t task, integer_t priority, integer_t max_priority);
58static kern_return_t task_role_default_handler(task_t task, task_role_t role);
59void task_action_cpuusage(thread_call_param_t param0, thread_call_param_t param1);
60static int proc_apply_bgthreadpolicy_locked(thread_t thread, int selfset);
61static void restore_bgthreadpolicy_locked(thread_t thread, int selfset, int importance);
62static int proc_get_task_selfdiskacc_internal(task_t task, thread_t thread);
63extern void unthrottle_thread(void * uthread);
64
65#if CONFIG_EMBEDDED
static void set_thread_appbg(thread_t thread, int setbg, int importance);
67static void apply_bgthreadpolicy_external(thread_t thread);
68static void add_taskwatch_locked(task_t task, task_watch_t * twp);
69static void remove_taskwatch_locked(task_t task, task_watch_t * twp);
70static void task_watch_lock(void);
71static void task_watch_unlock(void);
72static void apply_appstate_watchers(task_t task, int setbg);
73void proc_apply_task_networkbg_internal(void *, thread_t);
74void proc_restore_task_networkbg_internal(void *, thread_t);
75int proc_pid(void * proc);
76
77typedef struct thread_watchlist {
78	thread_t thread;	/* thread being worked on for taskwatch action */
79	int	importance;	/* importance to be restored if thread is being made active */
80} thread_watchlist_t;
81
82#endif /* CONFIG_EMBEDDED */
83
84
85process_policy_t default_task_proc_policy = {0,
86					     0,
87					    TASK_POLICY_RESOURCE_ATTRIBUTE_NONE,
88					    TASK_POLICY_RESOURCE_ATTRIBUTE_NONE,
89					    TASK_POLICY_RESOURCE_ATTRIBUTE_NONE,
90					    TASK_POLICY_RESOURCE_ATTRIBUTE_NONE,
91					    TASK_POLICY_RESOURCE_ATTRIBUTE_NONE,
92					    TASK_POLICY_RESOURCE_ATTRIBUTE_NONE,
93					    TASK_POLICY_RESOURCE_ATTRIBUTE_NONE,
94					    0,
95					    TASK_POLICY_HWACCESS_CPU_ATTRIBUTE_FULLACCESS,
96					    TASK_POLICY_HWACCESS_NET_ATTRIBUTE_FULLACCESS,
97					    TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_FULLACCESS,
98					    TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_NORMAL,
99					    TASK_POLICY_BACKGROUND_ATTRIBUTE_ALL
100					    };
101
102process_policy_t default_task_null_policy = {0,
103					     0,
104					    TASK_POLICY_RESOURCE_ATTRIBUTE_NONE,
105					    TASK_POLICY_RESOURCE_ATTRIBUTE_NONE,
106					    TASK_POLICY_RESOURCE_ATTRIBUTE_NONE,
107					    TASK_POLICY_RESOURCE_ATTRIBUTE_NONE,
108					    TASK_POLICY_RESOURCE_ATTRIBUTE_NONE,
109					    TASK_POLICY_RESOURCE_ATTRIBUTE_NONE,
110					    TASK_POLICY_RESOURCE_ATTRIBUTE_NONE,
111					    0,
112					    TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_NONE,
113					    TASK_POLICY_HWACCESS_NET_ATTRIBUTE_NONE,
114					    TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_NONE,
115					    TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_NORMAL,
116					    TASK_POLICY_BACKGROUND_ATTRIBUTE_NONE
117					    };
118
119
120
/*
 * This routine should always be called with the task lock held.
 * It handles the default operations for TASK_FOREGROUND_APPLICATION
 * and TASK_BACKGROUND_APPLICATION on tasks with no special app type.
 */
126static kern_return_t
127task_role_default_handler(task_t task, task_role_t role)
128{
129	kern_return_t result = KERN_SUCCESS;
130
131	switch (task->role) {
132		case TASK_FOREGROUND_APPLICATION:
133		case TASK_BACKGROUND_APPLICATION:
134		case TASK_UNSPECIFIED:
			/* if there is no process-wide backgrounding in effect ... */
136			if ((task->ext_appliedstate.hw_bg == TASK_POLICY_BACKGROUND_ATTRIBUTE_NONE) &&
137				(task->appliedstate.hw_bg == TASK_POLICY_BACKGROUND_ATTRIBUTE_NONE)) {
138					task_priority(task,
139						((role == TASK_FOREGROUND_APPLICATION)?
140						BASEPRI_FOREGROUND: BASEPRI_BACKGROUND),
141						task->max_priority);
142			}
143			task->role = role;
144			break;
145
146		case TASK_CONTROL_APPLICATION:
147		case TASK_RENICED:
148			/* else fail silently */
149			break;
150
151		default:
152			result = KERN_INVALID_ARGUMENT;
153			break;
154	}
155	return(result);
156}
157
158
159kern_return_t
160task_policy_set(
161	task_t					task,
162	task_policy_flavor_t	flavor,
163	task_policy_t			policy_info,
164	mach_msg_type_number_t	count)
165{
166	kern_return_t		result = KERN_SUCCESS;
167	void * bsdinfo = NULL;
168	int setbg = 0;
169
170	if (task == TASK_NULL || task == kernel_task)
171		return (KERN_INVALID_ARGUMENT);
172
173	switch (flavor) {
174
175	case TASK_CATEGORY_POLICY:
176	{
177		task_category_policy_t info = (task_category_policy_t)policy_info;
178
179		if (count < TASK_CATEGORY_POLICY_COUNT)
180			return (KERN_INVALID_ARGUMENT);
181
182#if CONFIG_EMBEDDED
183		if ((current_task() == task) && (info != NULL) &&
184		    (info->role != TASK_THROTTLE_APPLICATION))
			return (KERN_INVALID_ARGUMENT);
186#endif
187
188		task_lock(task);
189		switch(info->role) {
190			case TASK_FOREGROUND_APPLICATION : {
191				if (task->ext_appliedstate.apptype == PROC_POLICY_OSX_APPTYPE_NONE) {
192					result = task_role_default_handler(task, info->role);
193				} else {
194					switch (task->ext_appliedstate.apptype) {
195#if !CONFIG_EMBEDDED
196						case PROC_POLICY_OSX_APPTYPE_TAL:
197							/* Move the app to foreground with no DarwinBG */
198							proc_restore_bgtaskpolicy_internal(task, 1, 1, BASEPRI_FOREGROUND);
199							bsdinfo = task->bsd_info;
200							setbg = 0;
201							break;
202
203						case PROC_POLICY_OSX_APPTYPE_DBCLIENT:
							/* reset the apptype so that background/foreground enforcement applies */
205							task->ext_appliedstate.apptype = PROC_POLICY_OSX_APPTYPE_NONE;
							/* internal application; move it to foreground priority */
207							proc_restore_bgtaskpolicy_internal(task, 1, 0, BASEPRI_FOREGROUND);
208							bsdinfo = task->bsd_info;
209							setbg = 0;
210							break;
211#endif /* !CONFIG_EMBEDDED */
212
213						default:
							/* app types cannot be in the CONTROL or GRAPHICS state, so it will be in the default state here */
215							task_priority(task, BASEPRI_FOREGROUND, task->max_priority);
216							break;
217
218					} /* switch (task->ext_appliedstate.apptype) */
219					task->role = TASK_FOREGROUND_APPLICATION;
220				}
221			}
222			break;
223
224			case TASK_BACKGROUND_APPLICATION : {
225				if (task->ext_appliedstate.apptype == PROC_POLICY_OSX_APPTYPE_NONE) {
226					result = task_role_default_handler(task, info->role);
227				} else  { /* apptype != PROC_POLICY_OSX_APPTYPE_NONE */
228					switch (task->ext_appliedstate.apptype) {
229#if !CONFIG_EMBEDDED
230						case PROC_POLICY_OSX_APPTYPE_TAL:
231							 /* TAL apps will get Darwin backgrounded if not already set */
232							if (task->ext_appliedstate.hw_bg == TASK_POLICY_BACKGROUND_ATTRIBUTE_NONE) {
233								proc_apply_bgtaskpolicy_internal(task, 1, 1);
234								bsdinfo = task->bsd_info;
235								setbg = 1;
236							}
237							break;
238#endif /* !CONFIG_EMBEDDED */
239						default:
240							task_priority(task, BASEPRI_BACKGROUND, task->max_priority);
241							break;
242					} /* switch (task->ext_appliedstate.apptype) */
243					task->role = TASK_BACKGROUND_APPLICATION;
244				}
245			}
246			break;
247
248		case TASK_CONTROL_APPLICATION:
			if (task != current_task() ||
250					task->sec_token.val[0] != 0)
251				result = KERN_INVALID_ARGUMENT;
252			else {
253				task_priority(task, BASEPRI_CONTROL, task->max_priority);
254				task->role = info->role;
255			}
256			break;
257
258		case TASK_GRAPHICS_SERVER:
259			if (task != current_task() ||
260					task->sec_token.val[0] != 0)
261				result = KERN_INVALID_ARGUMENT;
262			else {
263				task_priority(task, MAXPRI_RESERVED - 3, MAXPRI_RESERVED);
264				task->role = info->role;
265			}
266			break;
267		case TASK_DEFAULT_APPLICATION:
268			task_priority(task, BASEPRI_DEFAULT, MAXPRI_USER);
269			task->role = info->role;
270			break;
271
272		default :
273			result = KERN_INVALID_ARGUMENT;
274			break;
275		} /* switch (info->role) */
276
277		task_unlock(task);
278
		/* if a network backgrounding change was selected above, apply it */
280		if (bsdinfo != NULL)
281			proc_set_task_networkbg(bsdinfo, setbg);
282
283		break;
284	}
285
286	default:
287		result = KERN_INVALID_ARGUMENT;
288		break;
289	}
290
291	return (result);
292}
293
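/*
 * Update the scheduling priority of a task: clamp the requested priority to
 * [MINPRI, max_priority], record it on the task, and propagate the new
 * base/max priority to every active thread in the task.
 */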
294static void
295task_priority(
296	task_t			task,
297	integer_t		priority,
298	integer_t		max_priority)
299{
300	thread_t		thread;
301
302	task->max_priority = max_priority;
303
304	if (priority > task->max_priority)
305		priority = task->max_priority;
306	else
307	if (priority < MINPRI)
308		priority = MINPRI;
309
310	task->priority = priority;
311
312	queue_iterate(&task->threads, thread, thread_t, task_threads) {
313		thread_mtx_lock(thread);
314
315		if (thread->active)
316			thread_task_priority(thread, priority, max_priority);
317
318		thread_mtx_unlock(thread);
319	}
320}
321
322kern_return_t
323task_importance(
324	task_t				task,
325	integer_t			importance)
326{
327	if (task == TASK_NULL || task == kernel_task)
328		return (KERN_INVALID_ARGUMENT);
329
330	task_lock(task);
331
332	if (!task->active) {
333		task_unlock(task);
334
335		return (KERN_TERMINATED);
336	}
337
338	if (task->role >= TASK_CONTROL_APPLICATION) {
339		task_unlock(task);
340
341		return (KERN_INVALID_ARGUMENT);
342	}
343
344	task_priority(task, importance + BASEPRI_DEFAULT, task->max_priority);
345	task->role = TASK_RENICED;
346
347	task_unlock(task);
348
349	return (KERN_SUCCESS);
350}
351
352kern_return_t
353task_policy_get(
354	task_t					task,
355	task_policy_flavor_t	flavor,
356	task_policy_t			policy_info,
357	mach_msg_type_number_t	*count,
358	boolean_t				*get_default)
359{
360	if (task == TASK_NULL || task == kernel_task)
361		return (KERN_INVALID_ARGUMENT);
362
363	switch (flavor) {
364
365	case TASK_CATEGORY_POLICY:
366	{
367		task_category_policy_t		info = (task_category_policy_t)policy_info;
368
369		if (*count < TASK_CATEGORY_POLICY_COUNT)
370			return (KERN_INVALID_ARGUMENT);
371
372		if (*get_default)
373			info->role = TASK_UNSPECIFIED;
374		else {
375			task_lock(task);
376			info->role = task->role;
377			task_unlock(task);
378		}
379		break;
380	}
381
382	default:
383		return (KERN_INVALID_ARGUMENT);
384	}
385
386	return (KERN_SUCCESS);
387}
388
389/* task Darwin BG enforcement/settings related routines */
390int
391proc_get_task_bg_policy(task_t task)
392{
393
394	int selfset = 0;
395	int val = 0;
396
397	if (current_task() == task)
398		selfset = 1;
399
400	if (selfset == 0) {
401		val = task->ext_policystate.hw_bg;
402	} else {
403		val = task->policystate.hw_bg;
404	}
405
406	return(val);
407}
408
409
410int
411proc_get_thread_bg_policy(task_t task, uint64_t tid)
412{
413	int selfset = 0;
414	thread_t self = current_thread();
415	thread_t thread = THREAD_NULL;
416	int val = 0;
417
418	if (tid == self->thread_id)
419		selfset = 1;
420
421	if (selfset == 0)  {
422		task_lock(task);
423		thread = task_findtid(task, tid);
424		if (thread != NULL)
425			val = thread->ext_policystate.hw_bg;
426		task_unlock(task);
427	} else {
428		val = self->policystate.hw_bg;
429	}
430
431	return(val);
432}
433
434int
435proc_get_self_isbackground(void)
436{
	task_t task = current_task();
438	thread_t thread = current_thread();
439
440	if ((task->ext_appliedstate.hw_bg != TASK_POLICY_BACKGROUND_ATTRIBUTE_NONE) ||
441		(task->appliedstate.hw_bg != TASK_POLICY_BACKGROUND_ATTRIBUTE_NONE) ||
442		(thread->ext_appliedstate.hw_bg != TASK_POLICY_BACKGROUND_ATTRIBUTE_NONE) ||
443		(thread->appliedstate.hw_bg != TASK_POLICY_BACKGROUND_ATTRIBUTE_NONE))
444			return(1);
445	else
446		return(0);
447
448}
449
450int proc_get_selfthread_isbackground(void)
451{
452	thread_t thread = current_thread();
453
454	if ((thread->ext_appliedstate.hw_bg != TASK_POLICY_BACKGROUND_ATTRIBUTE_NONE) ||
455		(thread->appliedstate.hw_bg != TASK_POLICY_BACKGROUND_ATTRIBUTE_NONE))
456			return(1);
457	else
458		return(0);
459}
460
461
462int
463proc_set_bgtaskpolicy(task_t task, int intval)
464{
465
466	int selfset = 0;
467
468	if (current_task() == task)
469		selfset = 1;
470
471	task_lock(task);
472
473	if (selfset == 0) {
		/* already set? */
475		if (task->ext_policystate.hw_bg != intval)
476			task->ext_policystate.hw_bg = intval;
477	} else {
478		if (task->policystate.hw_bg != intval)
479			task->policystate.hw_bg = intval;
480	}
481
482	task_unlock(task);
483	return(0);
484}
485
/* set and apply in one call; also handles resetting NONUI state, a side effect of the setprio() task app state implementation */
487int
488proc_set_and_apply_bgtaskpolicy(task_t task, int prio)
489{
490	int error = 0;
491
492	if (prio == PRIO_DARWIN_BG) {
493		error = proc_set_bgtaskpolicy(task, TASK_POLICY_BACKGROUND_ATTRIBUTE_ALL);
494		if (error == 0) {
495			error = proc_apply_bgtaskpolicy(task);
496#if CONFIG_EMBEDDED
497			/* XXX: till SB uses newer SPIs */
498			apply_appstate_watchers(task, 1);
499#endif /* CONFIG_EMBEDDED */
500		}
501	} else {
502		error = proc_restore_bgtaskpolicy(task);
503		if (error == 0) {
			/* the prior non-UI implementation was overloaded onto the bg state, so it needs to be reset here */
505			error = proc_apply_task_gpuacc(task, TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_DEFAULT);
506#if CONFIG_EMBEDDED
507			/* XXX: till SB uses newer SPIs */
508			apply_appstate_watchers(task, 0);
509#endif /* CONFIG_EMBEDDED */
510		}
511
512	}
513
514	return(error);
515}
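/*
 * Illustrative sketch (editor's addition, not from the original source):
 * a caller that wants to toggle Darwin background state for a task based
 * on a PRIO_DARWIN_* value could use the routine above directly.
 */
#if 0
static int
example_toggle_darwinbg(task_t task, int enable)	/* hypothetical helper */
{
	/* PRIO_DARWIN_BG comes from <sys/resource.h>, included above */
	return(proc_set_and_apply_bgtaskpolicy(task, enable ? PRIO_DARWIN_BG : 0));
}
#endif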
516
517
518int
519proc_set_bgthreadpolicy(task_t task, uint64_t tid, int prio)
520{
521	int selfset = 0;
522	thread_t self = current_thread();
	thread_t thread = THREAD_NULL;

	if (tid == self->thread_id)
529		selfset = 1;
530
531	task_lock(task);
532	if (selfset == 0)  {
533		thread = task_findtid(task, tid);
534		if (thread != NULL)
535			thread->ext_policystate.hw_bg = prio;
536	} else {
537		self->policystate.hw_bg = prio;
538	}
539
540	task_unlock(task);
541
542	return(0);
543}
544
545int
546proc_set_and_apply_bgthreadpolicy(task_t task, uint64_t tid, int prio)
547{
548	int error = 0;
549
550	if (prio == PRIO_DARWIN_BG) {
551		error = proc_set_bgthreadpolicy(task, tid, TASK_POLICY_BACKGROUND_ATTRIBUTE_ALL);
552		if (error == 0)
553			error = proc_apply_bgthreadpolicy(task, tid);
554	} else {
555		error = proc_restore_bgthreadpolicy(task, tid);
556	}
557
558	return(error);
559}
560
561int
562proc_add_bgtaskpolicy(task_t task, int val)
563{
564	int selfset = 0;
565
566	if (current_task() == task)
567		selfset = 1;
568
569	task_lock(task);
570
	if (selfset == 0) {
		task->ext_policystate.hw_bg |= val;
	} else {
		task->policystate.hw_bg |= val;
	}
576
577	task_unlock(task);
578	return(0);
579}
580
581int
582proc_add_bgthreadpolicy(task_t task, uint64_t tid, int val)
583{
584	int selfset = 0;
585	thread_t self = current_thread();
	thread_t thread = THREAD_NULL;

	if (tid == self->thread_id)
592		selfset = 1;
593
594	task_lock(task);
595	if (selfset == 0)  {
596		thread = task_findtid(task, tid);
597		if (thread != NULL)
598			thread->ext_policystate.hw_bg |= val;
599	} else {
600		self->policystate.hw_bg |= val;
601	}
602
603	task_unlock(task);
604
605	return(val);
606}
607
608int
609proc_remove_bgtaskpolicy(task_t task, int intval)
610{
611	int selfset = 0;
612
613	if (current_task() == task)
614		selfset = 1;
615
616	task_lock(task);
617
	if (selfset == 0) {
		task->ext_policystate.hw_bg &= ~intval;
	} else {
		task->policystate.hw_bg &= ~intval;
	}
623
624	task_unlock(task);
625	return(0);
626}
627
628int
629proc_remove_bgthreadpolicy(task_t task, uint64_t tid, int val)
630{
631	int selfset = 0;
632	thread_t self = current_thread();
	thread_t thread = THREAD_NULL;

	if (tid == self->thread_id)
639		selfset = 1;
640
641	task_lock(task);
642	if (selfset == 0)  {
643		thread = task_findtid(task, tid);
644		if (thread != NULL)
645			thread->ext_policystate.hw_bg &= ~val;
646	} else {
647		self->policystate.hw_bg &= ~val;
648	}
649
650	task_unlock(task);
651
652	return(val);
653}
654
655int
656proc_apply_bgtask_selfpolicy(void)
657{
658	return(proc_apply_bgtaskpolicy(current_task()));
659}
660
661int
662proc_apply_bgtaskpolicy(task_t task)
663{
664	int external = 1;
665
666	if (task == current_task())
667		external = 0;
668	return(proc_apply_bgtaskpolicy_internal(task, 0, external));
669}
670
671int
672proc_apply_bgtaskpolicy_external(task_t task)
673{
674	return(proc_apply_bgtaskpolicy_internal(task, 0, 1));
675}
676
677static int
678proc_apply_bgtaskpolicy_internal(task_t task, int locked, int external)
679{
680
681	if (locked == 0)
682		task_lock(task);
683
684	/* if the process is exiting, no action to be done */
685	if (task->proc_terminate != 0)
686		goto out;
687
688	if (external != 0) {
		/* already set? */
690		if (task->ext_appliedstate.hw_bg != task->ext_policystate.hw_bg) {
691			task->ext_appliedstate.hw_bg = task->ext_policystate.hw_bg;
692			task_priority(task, MAXPRI_THROTTLE, MAXPRI_THROTTLE);
693			/* background state applied */
694		}
695	} else {
696		if (task->appliedstate.hw_bg != task->policystate.hw_bg) {
697			task->appliedstate.hw_bg = task->policystate.hw_bg;
698			task_priority(task, MAXPRI_THROTTLE, MAXPRI_THROTTLE);
699		}
700	}
701out:
702	if (locked == 0)
703		task_unlock(task);
704	return(0);
705}
706
/* apply the self backgrounding even if the thread is not the current thread */
708int
709proc_apply_workq_bgthreadpolicy(thread_t thread)
710{
711	int error;
712	task_t wqtask = TASK_NULL;
713
714	if (thread != THREAD_NULL) {
715		wqtask = thread->task;
716		task_lock(wqtask);
717		/* apply the background as selfset internal one */
718		error = proc_apply_bgthreadpolicy_locked(thread, 1);
719		task_unlock(wqtask);
720	} else
721		error = ESRCH;
722
723	return(error);
724}
725
726int
727proc_apply_bgthreadpolicy(task_t task, uint64_t tid)
728{
729	int selfset = 0, error = 0;
730	thread_t self = current_thread();
731	thread_t thread = THREAD_NULL;
732	task_t localtask = TASK_NULL;
733
734	if (tid == self->thread_id) {
735		selfset = 1;
736		localtask = current_task();
737	} else
738		localtask = task;
739
740	task_lock(localtask);
741	if (selfset != 0)  {
742		thread = self;
743	} else {
744		thread = task_findtid(localtask, tid);
745	}
746
747	error = proc_apply_bgthreadpolicy_locked(thread, selfset);
748
749	task_unlock(localtask);
750
751	return(error);
752}
753
754static int
755proc_apply_bgthreadpolicy_locked(thread_t thread, int selfset)
756{
757	int set = 0;
758	thread_precedence_policy_data_t policy;
759
760
761	if (thread != NULL) {
762		/* if the process is exiting, no action to be done */
763		if (thread->task->proc_terminate != 0)
764			goto out;
765
766		if (selfset != 0)  {
767			/* internal application */
768			if (thread->appliedstate.hw_bg != thread->policystate.hw_bg) {
769				thread->appliedstate.hw_bg = thread->policystate.hw_bg;
770				if (thread->ext_appliedstate.hw_bg == TASK_POLICY_BACKGROUND_ATTRIBUTE_NONE)
771					set = 1;
772
773			}
774		} else {
775			/* external application */
776			if (thread->ext_appliedstate.hw_bg != thread->ext_policystate.hw_bg) {
777				thread->ext_appliedstate.hw_bg = thread->ext_policystate.hw_bg;
778				if (thread->appliedstate.hw_bg == TASK_POLICY_BACKGROUND_ATTRIBUTE_NONE)
779					set = 1;
780			}
781		}
782
783		if (set != 0) {
784#if CONFIG_EMBEDDED
785		if (thread->task->ext_appliedstate.apptype == PROC_POLICY_IOS_APPLE_DAEMON) {
786			thread->saved_importance = thread->importance;
787		}
788#endif /* CONFIG_EMBEDDED */
789			/* set thread priority (we did not save previous value) */
790			policy.importance = INT_MIN;
791
792			thread_policy_set_internal(thread, THREAD_PRECEDENCE_POLICY,
793                                                   (thread_policy_t)&policy,
794                                                   THREAD_PRECEDENCE_POLICY_COUNT );
795
796		}
797	} else
798		return(ESRCH);
799
800out:
801	return(0);
802}
803
804#if CONFIG_EMBEDDED
805/* set external application of background */
806static void
807apply_bgthreadpolicy_external(thread_t thread)
808{
	int set = 0;
	thread_precedence_policy_data_t policy;
811
812	/* if the process is exiting, no action to be done */
813	if (thread->task->proc_terminate != 0)
814		return;
815
816	thread->ext_policystate.hw_bg = TASK_POLICY_BACKGROUND_ATTRIBUTE_ALL;
817
818	if (thread->ext_appliedstate.hw_bg != thread->ext_policystate.hw_bg) {
819		thread->ext_appliedstate.hw_bg = thread->ext_policystate.hw_bg;
820		if (thread->appliedstate.hw_bg == TASK_POLICY_BACKGROUND_ATTRIBUTE_NONE)
821			set = 1;
822	}
823
824	if (set != 0) {
825		/* set thread priority (we did not save previous value) */
826		policy.importance = INT_MIN;
827
828		thread_policy_set_internal(thread, THREAD_PRECEDENCE_POLICY,
829                                                   (thread_policy_t)&policy,
830                                                   THREAD_PRECEDENCE_POLICY_COUNT );
831	}
832
833}
834#endif /* CONFIG_EMBEDDED */
835
836int
837proc_apply_bgthread_selfpolicy(void)
838{
839	return(proc_apply_bgthreadpolicy(current_task(), current_thread()->thread_id));
840}
841
842
843int
844proc_restore_bgtaskpolicy(task_t task)
845{
846	int external = 1;
847
848	if (current_task() == task)
849		external = 0;
850	return(proc_restore_bgtaskpolicy_internal(task, 0, external, BASEPRI_DEFAULT));
851}
852
853static int
854proc_restore_bgtaskpolicy_internal(task_t task, int locked, int external, int pri)
855{
856	if (locked == 0)
857		task_lock(task);
858
859	/* if the process is exiting, no action to be done */
860	if (task->proc_terminate != 0)
861		goto out;
862
863	if (external != 0) {
864		task->ext_appliedstate.hw_bg = TASK_POLICY_BACKGROUND_ATTRIBUTE_NONE;
865		/* self BG in flight? */
866		if (task->appliedstate.hw_bg == TASK_POLICY_BACKGROUND_ATTRIBUTE_NONE) {
867			task_priority(task, pri, MAXPRI_USER);
868#if CONFIG_EMBEDDED
869			task->role = TASK_DEFAULT_APPLICATION;
870#endif /* CONFIG_EMBEDDED */
871		}
872	 } else {
873		task->appliedstate.hw_bg = TASK_POLICY_BACKGROUND_ATTRIBUTE_NONE;
874		/* external BG in flight? */
875		if (task->ext_appliedstate.hw_bg == TASK_POLICY_BACKGROUND_ATTRIBUTE_NONE) {
876			task_priority(task, pri, MAXPRI_USER);
877#if CONFIG_EMBEDDED
878			task->role = TASK_DEFAULT_APPLICATION;
879#endif /* CONFIG_EMBEDDED */
880		}
881	}
882out:
883	if (locked == 0)
884		task_unlock(task);
885
886	return(0);
887}
888
889/* restore the self backgrounding even if the thread is not current thread */
890int
891proc_restore_workq_bgthreadpolicy(thread_t thread)
892{
893	int error = 0;
894	task_t wqtask = TASK_NULL;
895	int importance = 0;
896
897	if (thread != THREAD_NULL) {
898		wqtask = thread->task;
899		task_lock(wqtask);
		/* remove the backgrounding and restore the default importance, as a self (internal) removal */
901#if CONFIG_EMBEDDED
902		if (thread->task->ext_appliedstate.apptype == PROC_POLICY_IOS_APPLE_DAEMON) {
			/* restore previously saved importance */
904			importance = thread->saved_importance;
905			thread->saved_importance = 0;
906		}
907#endif /* CONFIG_EMBEDDED */
908		restore_bgthreadpolicy_locked(thread, 1, importance);
909		task_unlock(wqtask);
910	} else
911		error = ESRCH;
912
913	return(error);
914}
915
916int
917proc_restore_bgthread_selfpolicy(void)
918{
919	return(proc_restore_bgthreadpolicy(current_task(), thread_tid(current_thread())));
920}
921
922int
923proc_restore_bgthreadpolicy(task_t task, uint64_t tid)
924{
925
926	int selfset = 0;
927	thread_t self = current_thread();
928	thread_t thread = THREAD_NULL;
929	int importance = 0;
930
931	if (tid == self->thread_id)
932		selfset = 1;
933
934	task_lock(task);
935	if (selfset == 0)  {
936		thread = task_findtid(task, tid);
937	} else {
938		thread = self;
939	}
940
941	if (thread != NULL) {
942#if CONFIG_EMBEDDED
943		if (thread->task->ext_appliedstate.apptype == PROC_POLICY_IOS_APPLE_DAEMON) {
			/* restore previously saved importance */
945			importance = thread->saved_importance;
946			thread->saved_importance = 0;
947		}
948#endif /* CONFIG_EMBEDDED */
949		restore_bgthreadpolicy_locked(thread, selfset, importance);
950	}
951	task_unlock(task);
952
953	if (thread != NULL)
954		return(0);
955	else
956		return(1);
957}
958
959static void
960restore_bgthreadpolicy_locked(thread_t thread, int selfset, int importance)
961{
962	thread_precedence_policy_data_t policy;
963	int reset = 0;
964
965	if (thread != NULL) {
966		/* if the process is exiting, no action to be done */
967		if (thread->task->proc_terminate != 0)
968			return;
969
970		if (selfset != 0)  {
971			thread->appliedstate.hw_bg = TASK_POLICY_BACKGROUND_ATTRIBUTE_NONE;
972			/* external BG in flight? */
973			if (thread->ext_appliedstate.hw_bg == TASK_POLICY_BACKGROUND_ATTRIBUTE_NONE)
974					reset = 1;
975
976		} else {
977			thread->ext_appliedstate.hw_bg = TASK_POLICY_BACKGROUND_ATTRIBUTE_NONE;
978			/* self BG in flight? */
979			if (thread->appliedstate.hw_bg == TASK_POLICY_BACKGROUND_ATTRIBUTE_NONE)
980					reset = 1;
981		}
982
983		if (reset != 0) {
984			/* reset thread priority (we did not save previous value) */
985			policy.importance = importance;
986			thread_policy_set_internal(thread, THREAD_PRECEDENCE_POLICY,
987                                                   (thread_policy_t)&policy,
988                                                   THREAD_PRECEDENCE_POLICY_COUNT );
989		}
990	}
991}
992
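/*
 * Record the app type on a task and apply its side effects: TAL and
 * reserved iOS app types are put into Darwin background externally,
 * DB clients are backgrounded internally, Apple iOS daemons get their
 * spawning thread self-backgrounded, and non-UI app types are denied
 * GPU access.
 */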
993void
994#if CONFIG_EMBEDDED
995proc_set_task_apptype(task_t task, int type, thread_t thread)
996#else
997proc_set_task_apptype(task_t task, int type, __unused thread_t thread)
998#endif
999{
1000#if CONFIG_EMBEDDED
1001	thread_t th = THREAD_NULL;
1002#endif /* CONFIG_EMBEDDED */
1003
1004	switch (type) {
1005#if CONFIG_EMBEDDED
1006		case PROC_POLICY_IOS_RESV1_APPTYPE:
1007			task->ext_policystate.apptype = type;
1008			task->policystate.apptype = type;
1009			proc_apply_bgtaskpolicy_external(task);
1010			/* indicate that BG is set and next foreground needs to reset */
1011			task->ext_appliedstate.apptype = type;
1012			break;
1013
1014		case PROC_POLICY_IOS_APPLE_DAEMON:
1015			task->ext_policystate.apptype = type;
1016			task->policystate.apptype = type;
1017			task->ext_appliedstate.apptype = type;
			/* posix_spawn will already have created the thread, so background it */
1019			if (thread == NULL)
1020				th = current_thread();
1021			else
1022				th = thread;
1023			if (th->appliedstate.hw_bg != TASK_POLICY_BACKGROUND_ATTRIBUTE_ALL) {
1024				/* apply self backgrounding if not already set */
1025				task_lock(th->task);
1026				proc_apply_bgthreadpolicy_locked(th, 1);
1027				task_unlock(th->task);
1028			}
1029			break;
1030
1031		case PROC_POLICY_IOS_APPTYPE:
1032			task->ext_policystate.apptype = type;
1033			task->policystate.apptype = type;
1034			break;
1035		case PROC_POLICY_IOS_NONUITYPE:
1036			task->ext_policystate.apptype = type;
1037			task->policystate.apptype = type;
1038			/* set to deny access to gpu */
1039			task->ext_appliedstate.hw_gpu = TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_NOACCESS;
1040			task->ext_policystate.hw_gpu = TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_NOACCESS;
1041			break;
1042#else /* CONFIG_EMBEDDED */
1043		case PROC_POLICY_OSX_APPTYPE_TAL:
1044			task->ext_policystate.apptype = type;
1045			task->policystate.apptype = type;
1046			proc_apply_bgtaskpolicy_external(task);
1047			/* indicate that BG is set and next foreground needs to reset */
1048			task->ext_appliedstate.apptype = type;
1049			break;
1050
1051		case PROC_POLICY_OSX_APPTYPE_DBCLIENT:
1052			task->ext_policystate.apptype = type;
1053			task->policystate.apptype = type;
1054			proc_apply_bgtaskpolicy_internal(task, 0, 0);
1055			break;
1056
1057#endif /* CONFIG_EMBEDDED */
1058
1059		default:
1060			break;
1061	}
1062}
1063
/* update the darwin background action state in the flags field for libproc */
1065#define PROC_FLAG_DARWINBG      0x8000  /* process in darwin background */
1066#define PROC_FLAG_EXT_DARWINBG  0x10000 /* process in darwin background - external enforcement */
1067#define PROC_FLAG_IOS_APPLEDAEMON  0x20000 /* process is apple ios daemon */
1068
1069int
1070proc_get_darwinbgstate(task_t task, uint32_t * flagsp)
1071{
1072	if (task->ext_appliedstate.hw_bg == TASK_POLICY_BACKGROUND_ATTRIBUTE_ALL){
1073		*flagsp |= PROC_FLAG_EXT_DARWINBG;
1074	}
1075	if (task->appliedstate.hw_bg == TASK_POLICY_BACKGROUND_ATTRIBUTE_ALL){
1076		*flagsp |= PROC_FLAG_DARWINBG;
1077	}
1078#if CONFIG_EMBEDDED
1079	if (task->ext_appliedstate.apptype == PROC_POLICY_IOS_APPLE_DAEMON) {
1080		*flagsp |= PROC_FLAG_IOS_APPLEDAEMON;
1081	}
1082#endif /* CONFIG_EMBEDDED */
1083
1084	return(0);
1085}
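/*
 * Illustrative sketch (editor's addition, not from the original source):
 * proc_get_darwinbgstate() only ORs bits into *flagsp, so callers are
 * expected to pass in a flags word that is either zero or already holds
 * the other PROC_FLAG_* bits they have accumulated.
 */
#if 0
static uint32_t
example_get_bg_flags(task_t task)		/* hypothetical helper */
{
	uint32_t flags = 0;

	proc_get_darwinbgstate(task, &flags);
	/* PROC_FLAG_DARWINBG and/or PROC_FLAG_EXT_DARWINBG may now be set */
	return(flags);
}
#endif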
1086
/*
 * HW disk access related routines; they need to return
 * IOPOL_XXX equivalents for spec_xxx/throttle updates.
 */
1091
1092int
1093proc_get_task_disacc(task_t task)
1094{
1095#if CONFIG_EMBEDDED
1096	if ((task->ext_appliedstate.hw_bg & TASK_POLICY_BACKGROUND_ATTRIBUTE_DISKTHROTTLE) != 0)
1097		return(TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_THROTTLE);
1098#else /* CONFIG_EMBEDDED */
1099	if ((task->ext_appliedstate.hw_bg & TASK_POLICY_BACKGROUND_ATTRIBUTE_DISKTHROTTLE) != 0) {
1100		/* if it is a TAL or DBClient and not self throttled, return Utility */
1101		if ((task->ext_appliedstate.apptype == PROC_POLICY_OSX_APPTYPE_TAL) || (task->ext_appliedstate.apptype == PROC_POLICY_OSX_APPTYPE_DBCLIENT)) {
			/* but any self-applied (internal) DBG throttle setting must still be honored */
1103			if ((task->ext_appliedstate.hw_disk != TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_THROTTLE) &&
1104				((task->appliedstate.hw_bg & TASK_POLICY_BACKGROUND_ATTRIBUTE_DISKTHROTTLE)!= 0) &&
1105				(task->appliedstate.hw_disk !=  TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_THROTTLE)) {
1106				return(TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_UTILITY);
1107			}  else
1108				return(TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_THROTTLE);
1109		 } else
1110			return(TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_THROTTLE);
1111	}
1112#endif /* CONFIG_EMBEDDED */
1113	if (task->ext_appliedstate.hw_disk != TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_FULLACCESS)
1114		return(task->ext_appliedstate.hw_disk);
1115	if ((task->appliedstate.hw_bg & TASK_POLICY_BACKGROUND_ATTRIBUTE_DISKTHROTTLE) != 0)
1116		return(TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_THROTTLE);
1117	if (task->appliedstate.hw_disk != TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_FULLACCESS)
1118		return(task->appliedstate.hw_disk);
1119	return(TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_FULLACCESS);
1120}
1121
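/*
 * Resolve the effective disk access policy for a thread within its task.
 * Thread-level settings are consulted first (external DarwinBG and explicit
 * hw_disk, then their internal equivalents), followed by the task-level
 * external and internal settings, falling back to FULLACCESS.
 */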
1122int
1123proc_get_task_selfdiskacc_internal(task_t task, thread_t thread)
1124{
1125	/* if the task is marked for proc_terminate, no throttling for it */
1126	if (task->proc_terminate != 0)
1127		goto out;
1128	/*
1129	 * As per defined iopolicysys behavior, thread trumps task.
1130	 * Do we need to follow that for external enforcements of BG or hw access?
1131	 * Status quo for now..
1132	 */
1133
1134	if((thread->ext_appliedstate.hw_bg & TASK_POLICY_BACKGROUND_ATTRIBUTE_DISKTHROTTLE) != 0)
1135		return(TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_THROTTLE);
1136	if (thread->ext_appliedstate.hw_disk != TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_FULLACCESS)
1137		return(thread->ext_appliedstate.hw_disk);
1138	if((thread->appliedstate.hw_bg & TASK_POLICY_BACKGROUND_ATTRIBUTE_DISKTHROTTLE) != 0)
1139		return(TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_THROTTLE);
1140	if (thread->appliedstate.hw_disk != TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_FULLACCESS)
1141		return(thread->appliedstate.hw_disk);
1142
1143#if CONFIG_EMBEDDED
1144	if ((task->ext_appliedstate.hw_bg & TASK_POLICY_BACKGROUND_ATTRIBUTE_DISKTHROTTLE) != 0)
1145		return(TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_THROTTLE);
1146#else /* CONFIG_EMBEDDED */
1147	if ((task->ext_appliedstate.hw_bg & TASK_POLICY_BACKGROUND_ATTRIBUTE_DISKTHROTTLE) != 0) {
1148		/* if it is a TAL or DBClient and not self throttled, return Utility */
1149		if ((task->ext_appliedstate.apptype == PROC_POLICY_OSX_APPTYPE_TAL) || (task->ext_appliedstate.apptype == PROC_POLICY_OSX_APPTYPE_DBCLIENT)) {
			/* but any self-applied (internal) DBG throttle setting must still be honored */
1151			if ((task->ext_appliedstate.hw_disk != TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_THROTTLE) &&
1152				((task->appliedstate.hw_bg & TASK_POLICY_BACKGROUND_ATTRIBUTE_DISKTHROTTLE)!= 0) &&
1153				(task->appliedstate.hw_disk !=  TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_THROTTLE)) {
1154				return(TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_UTILITY);
1155			}  else
1156				return(TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_THROTTLE);
1157		 } else
1158			return(TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_THROTTLE);
1159	}
1160#endif /* CONFIG_EMBEDDED */
1161	if (task->ext_appliedstate.hw_disk != TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_FULLACCESS)
1162		return(task->ext_appliedstate.hw_disk);
1163	if ((task->appliedstate.hw_bg & TASK_POLICY_BACKGROUND_ATTRIBUTE_DISKTHROTTLE) != 0)
1164		return(TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_THROTTLE);
1165	if (task->appliedstate.hw_disk != TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_FULLACCESS)
1166		return(task->appliedstate.hw_disk);
1167out:
1168	return(TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_FULLACCESS);
1169}
1170
1171
1172int
1173proc_get_task_selfdiskacc(void)
1174{
1175	return(proc_get_task_selfdiskacc_internal(current_task(), current_thread()));
1176}
1177
1178
1179int
1180proc_get_diskacc(thread_t thread)
1181{
1182	return(proc_get_task_selfdiskacc_internal(thread->task, thread));
1183}
1184
1185
1186int
1187proc_get_thread_selfdiskacc(void)
1188{
1189	thread_t thread = current_thread();
1190
1191	if((thread->ext_appliedstate.hw_bg & TASK_POLICY_BACKGROUND_ATTRIBUTE_DISKTHROTTLE) != 0)
1192		return(TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_THROTTLE);
1193	if (thread->ext_appliedstate.hw_disk != TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_FULLACCESS)
1194		return(thread->ext_appliedstate.hw_disk);
1195	if((thread->appliedstate.hw_bg & TASK_POLICY_BACKGROUND_ATTRIBUTE_DISKTHROTTLE) != 0)
1196		return(TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_THROTTLE);
1197	if (thread->appliedstate.hw_disk != TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_FULLACCESS)
1198		return(thread->appliedstate.hw_disk);
1199	return(TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_FULLACCESS);
1200}
1201
1202int
1203proc_apply_task_diskacc(task_t task, int policy)
1204{
1205	task_t self = current_task();
1206
1207	task_lock(task);
1208	if (task ==  self) {
1209		task->appliedstate.hw_disk = policy;
1210		task->policystate.hw_disk = policy;
1211	} else {
1212		task->ext_appliedstate.hw_disk = policy;
1213		task->ext_policystate.hw_disk = policy;
1214	}
1215	task_unlock(task);
1216	return(0);
1217}
1218
1219int
1220proc_apply_thread_diskacc(task_t task, uint64_t tid, int policy)
1221{
1222	thread_t thread;
1223
1224	if (tid == TID_NULL) {
1225		thread = current_thread();
1226		proc_apply_thread_selfdiskacc(policy);
1227	} else {
1228		task_lock(task);
1229		thread = task_findtid(task, tid);
1230		if (thread != NULL) {
1231			thread->ext_appliedstate.hw_disk = policy;
1232			thread->ext_policystate.hw_disk = policy;
1233		}
1234		task_unlock(task);
1235	}
	if (thread != NULL)
		return(0);
	else
		return(ESRCH);
1240}
1241
1242void
1243proc_task_remove_throttle(task_t task)
1244{
1245	thread_t	thread;
1246	int importance = 0;
1247
1248	task_lock(task);
1249
1250
	/* remove the process-wide internal DBG application */
1252	proc_restore_bgtaskpolicy_internal(task, 1, 0, BASEPRI_DEFAULT);
	/* remove the process-wide external DBG application */
1254	proc_restore_bgtaskpolicy_internal(task, 1, 1, BASEPRI_DEFAULT);
1255
1256	for (thread  = (thread_t)queue_first(&task->threads);
1257			!queue_end(&task->threads, (queue_entry_t)thread); ) {
1258#if CONFIG_EMBEDDED
1259		if (thread->task->ext_appliedstate.apptype == PROC_POLICY_IOS_APPLE_DAEMON) {
			/* restore previously saved importance */
1261			importance = thread->saved_importance;
1262			thread->saved_importance = 0;
1263		}
1264#endif /* CONFIG_EMBEDDED */
1265		/* remove thread level internal DBG application */
1266		restore_bgthreadpolicy_locked(thread, 1, importance);
1267		/* remove thread level external DBG application */
1268		restore_bgthreadpolicy_locked(thread, 0, importance);
1269		/* reset thread io policy */
1270		thread->ext_appliedstate.hw_disk = TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_FULLACCESS;
1271		thread->appliedstate.hw_disk = TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_FULLACCESS;
1272		unthrottle_thread(thread->uthread);
1273		thread = (thread_t)queue_next(&thread->task_threads);
1274	}
1275
1276	/* reset task iopolicy */
1277	task->ext_appliedstate.hw_disk = TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_FULLACCESS;
1278	task->appliedstate.hw_disk = TASK_POLICY_HWACCESS_DISK_ATTRIBUTE_FULLACCESS;
1279	task->proc_terminate = 1;
1280
1281	task_unlock(task);
1282}
1283
1284
1285
1286int
1287proc_apply_thread_selfdiskacc(int policy)
1288{
1289	task_t task = current_task();
1290	thread_t thread = current_thread();
1291
1292	task_lock(task);
1293	thread->appliedstate.hw_disk = policy;
1294	thread->policystate.hw_disk = policy;
1295	task_unlock(task);
1296	return(0);
1297}
1298
1299int
1300proc_denyinherit_policy(__unused task_t task)
1301{
1302	return(0);
1303}
1304
1305int
1306proc_denyselfset_policy(__unused task_t task)
1307{
1308	return(0);
1309}
1310
1311/* HW GPU access related routines */
1312int
1313proc_get_task_selfgpuacc_deny(void)
1314{
1315	task_t task = current_task();
1316#ifdef NOTYET
1317	thread_t thread = current_thread();
1318#endif /* NOTYET */
1319
1320	if (((task->ext_appliedstate.hw_bg & TASK_POLICY_BACKGROUND_ATTRIBUTE_NOGPU) != 0) || (task->ext_appliedstate.hw_gpu == TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_NOACCESS))
1321		return(TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_NOACCESS);
1322	if (((task->appliedstate.hw_bg & TASK_POLICY_BACKGROUND_ATTRIBUTE_NOGPU) != 0) || (task->appliedstate.hw_gpu == TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_NOACCESS))
1323		return(TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_NOACCESS);
1324#ifdef NOTYET
1325	/*
	 * Since background dispatch items running in a thread can also be
	 * denied access, we need to make sure there are no unintended
	 * consequences of background dispatch usage. So until this is
	 * hashed out, disable thread-level checking.
1330	 */
1331	if (((thread->ext_appliedstate.hw_bg & TASK_POLICY_BACKGROUND_ATTRIBUTE_NOGPU) != 0) || (thread->ext_appliedstate.hw_gpu == TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_NOACCESS))
1332		return(TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_NOACCESS);
1333	if (((thread->appliedstate.hw_bg & TASK_POLICY_BACKGROUND_ATTRIBUTE_NOGPU) != 0) || (thread->appliedstate.hw_gpu == TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_NOACCESS))
1334		return(TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_NOACCESS);
1335
1336#endif /* NOTYET */
1337	return(TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_FULLACCESS);
1338}
1339
1340int
1341proc_apply_task_gpuacc(task_t task, int policy)
1342{
1343
1344	task_t self = current_task();
1345
1346	task_lock(task);
1347	if (task ==  self) {
1348		task->appliedstate.hw_gpu = policy;
1349		task->policystate.hw_gpu = policy;
1350	} else {
1351		task->ext_appliedstate.hw_gpu = policy;
1352		task->ext_policystate.hw_gpu = policy;
1353	}
1354	task_unlock(task);
1355
1356	return(0);
1357}
1358
/* Resource usage, CPU related routines */
1360int
1361proc_get_task_ruse_cpu(task_t task, uint32_t * policyp, uint32_t * percentagep, uint64_t * intervalp, uint64_t * deadlinep)
1362{
1363
1364	int error = 0;
1365
1366	task_lock(task);
1367	if (task != current_task()) {
1368		*policyp = task->ext_policystate.ru_cpu;
1369	} else {
1370		*policyp = task->policystate.ru_cpu;
1371	}
1372
1373	error = task_get_cpuusage(task, percentagep, intervalp, deadlinep);
1374
1375	return(error);
1376}
1377
1378/*
1379 * Currently supported configurations for CPU limits.
1380 *
1381 * 					Deadline-based CPU limit    	Percentage-based CPU limit
1382 * PROC_POLICY_RSRCACT_THROTTLE		ENOTSUP				Task-wide scope only
1383 * PROC_POLICY_RSRCACT_SUSPEND		Task-wide scope only		ENOTSUP
1384 * PROC_POLICY_RSRCACT_TERMINATE	Task-wide scope only		ENOTSUP
1385 * PROC_POLICY_RSRCACT_NOTIFY_KQ	Task-wide scope only		ENOTSUP
1386 * PROC_POLICY_RSRCACT_NOTIFY_EXC	ENOTSUP				Per-thread scope only
1387 *
1388 * A deadline-based CPU limit is actually a simple wallclock timer - the requested action is performed
1389 * after the specified amount of wallclock time has elapsed.
1390 *
1391 * A percentage-based CPU limit performs the requested action after the specified amount of actual CPU time
1392 * has been consumed -- regardless of how much wallclock time has elapsed -- by either the task as an
1393 * aggregate entity (so-called "Task-wide" or "Proc-wide" scope, whereby the CPU time consumed by all threads
 * in the task is added together), or by any one thread in the task (so-called "per-thread" scope).
1395 *
1396 * We support either deadline != 0 OR percentage != 0, but not both. The original intention in having them
1397 * share an API was to use actual CPU time as the basis of the deadline-based limit (as in: perform an action
1398 * after I have used some amount of CPU time; this is different than the recurring percentage/interval model)
1399 * but the potential consumer of the API at the time was insisting on wallclock time instead.
1400 *
1401 * Currently, requesting notification via an exception is the only way to get per-thread scope for a
1402 * CPU limit. All other types of notifications force task-wide scope for the limit.
1403 */
1404int
1405proc_set_task_ruse_cpu(task_t task, uint32_t policy, uint32_t percentage, uint64_t interval, uint64_t deadline)
1406{
1407	int error = 0;
1408	int scope;
1409
1410 	/*
1411 	 * Enforce the matrix of supported configurations for policy, percentage, and deadline.
1412 	 */
1413 	switch (policy) {
1414 	// If no policy is explicitly given, the default is to throttle.
1415 	case TASK_POLICY_RESOURCE_ATTRIBUTE_NONE:
1416	case TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE:
1417		if (deadline != 0)
1418			return (ENOTSUP);
1419		scope = TASK_RUSECPU_FLAGS_PROC_LIMIT;
1420		break;
1421	case TASK_POLICY_RESOURCE_ATTRIBUTE_SUSPEND:
1422	case TASK_POLICY_RESOURCE_ATTRIBUTE_TERMINATE:
1423	case TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_KQ:
1424		if (percentage != 0)
1425			return (ENOTSUP);
1426		scope = TASK_RUSECPU_FLAGS_DEADLINE;
1427		break;
1428 	case TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC:
1429		if (deadline != 0)
1430			return (ENOTSUP);
1431		scope = TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
1432		break;
1433	default:
1434		return (EINVAL);
1435	}
1436
1437	task_lock(task);
1438	if (task != current_task()) {
1439		task->ext_policystate.ru_cpu = policy;
1440	} else {
1441		task->policystate.ru_cpu = policy;
1442	}
1443	error = task_set_cpuusage(task, percentage, interval, deadline, scope);
1444	task_unlock(task);
1445	return(error);
1446}
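/*
 * Illustrative sketches (editor's addition, not from the original source),
 * following the configuration matrix documented above.  These are two
 * independent examples; a real caller would pick one policy.  The caller
 * must not already hold the task lock.
 */
#if 0
static void
example_set_cpu_limits(task_t task)		/* hypothetical helper */
{
	/* throttle the whole task once it uses 50% of each (1 second) interval */
	(void) proc_set_task_ruse_cpu(task, TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE,
			50, NSEC_PER_SEC, 0);

	/* deliver an exception if any single thread exceeds 80% CPU (per-thread scope) */
	(void) proc_set_task_ruse_cpu(task, TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC,
			80, 0, 0);
}
#endif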
1447
1448int
1449proc_clear_task_ruse_cpu(task_t task)
1450{
1451	int error = 0;
1452	int action;
1453	void * bsdinfo = NULL;
1454
1455	task_lock(task);
1456	if (task != current_task()) {
1457		task->ext_policystate.ru_cpu = TASK_POLICY_RESOURCE_ATTRIBUTE_DEFAULT;
1458	} else {
1459		task->policystate.ru_cpu = TASK_POLICY_RESOURCE_ATTRIBUTE_DEFAULT;
1460	}
1461
1462	error = task_clear_cpuusage_locked(task);
1463	if (error != 0)
1464		goto out;
1465
1466	action = task->ext_appliedstate.ru_cpu;
1467	if (task->ext_appliedstate.ru_cpu != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
1468		/* reset action */
1469		task->ext_appliedstate.ru_cpu = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE;
1470	}
1471	if (action != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
1472		bsdinfo = task->bsd_info;
1473		task_unlock(task);
1474		proc_restore_resource_actions(bsdinfo, TASK_POLICY_CPU_RESOURCE_USAGE, action);
1475		goto out1;
1476	}
1477
1478out:
1479	task_unlock(task);
1480out1:
1481	return(error);
1482
1483}
1484
1485/* used to apply resource limit related actions */
1486static int
1487task_apply_resource_actions(task_t task, int type)
1488{
1489	int action = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE;
1490	void * bsdinfo = NULL;
1491
1492	switch (type) {
1493		case TASK_POLICY_CPU_RESOURCE_USAGE:
1494			break;
1495		case TASK_POLICY_WIREDMEM_RESOURCE_USAGE:
1496		case TASK_POLICY_VIRTUALMEM_RESOURCE_USAGE:
1497		case TASK_POLICY_DISK_RESOURCE_USAGE:
1498		case TASK_POLICY_NETWORK_RESOURCE_USAGE:
1499		case TASK_POLICY_POWER_RESOURCE_USAGE:
1500			return(0);
1501
1502		default:
1503			return(1);
1504	};
1505
1506	/* only cpu actions for now */
1507	task_lock(task);
1508
1509	if (task->ext_appliedstate.ru_cpu == TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
1510		/* apply action */
1511		task->ext_appliedstate.ru_cpu = task->ext_policystate.ru_cpu;
1512		action = task->ext_appliedstate.ru_cpu;
1513	} else {
1514		action = task->ext_appliedstate.ru_cpu;
1515	}
1516
1517	if (action != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) {
1518		bsdinfo = task->bsd_info;
1519		task_unlock(task);
1520		proc_apply_resource_actions(bsdinfo, TASK_POLICY_CPU_RESOURCE_USAGE, action);
1521	} else
1522		task_unlock(task);
1523
1524	return(0);
1525}
1526
1527/* For ledger hookups */
1528static int
1529task_get_cpuusage(task_t task, uint32_t * percentagep, uint64_t * intervalp, uint64_t * deadlinep)
1530{
1531	*percentagep = task->rusage_cpu_percentage;
1532	*intervalp = task->rusage_cpu_interval;
1533	*deadlinep = task->rusage_cpu_deadline;
1534
1535	return(0);
1536}
1537
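/*
 * Install a CPU usage limit on a task.  The caller must hold the task lock
 * (asserted below); for a deadline-based limit the lock may be dropped and
 * re-taken while any previously armed callout is cancelled.
 */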
1538int
1539task_set_cpuusage(task_t task, uint64_t percentage, uint64_t interval, uint64_t deadline, int scope)
1540{
1541	uint64_t abstime = 0;
1542	uint64_t save_abstime = 0;
1543	uint64_t limittime = 0;
1544	thread_t thread;
1545
1546	lck_mtx_assert(&task->lock, LCK_MTX_ASSERT_OWNED);
1547
1548	/* By default, refill once per second */
1549	if (interval == 0)
1550		interval = NSEC_PER_SEC;
1551
1552	if (percentage != 0) {
1553		if (percentage > 100)
1554			percentage = 100;
1555		limittime = (interval * percentage)/ 100;
1556		nanoseconds_to_absolutetime(limittime, &abstime);
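		/*
		 * e.g. percentage = 50 with the default 1s interval gives
		 * limittime = 500000000 ns, i.e. half a second of CPU time
		 * per refill period, converted to mach absolute time above.
		 */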
1557		if (scope == TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
1558			/*
1559			 * A per-thread CPU limit on a task generates an exception
1560			 * (LEDGER_ACTION_EXCEPTION) if any one thread in the task
1561			 * exceeds the limit.
1562			 */
1563			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
1564			task->rusage_cpu_perthr_percentage = percentage;
1565			task->rusage_cpu_perthr_interval = interval;
1566			queue_iterate(&task->threads, thread, thread_t, task_threads) {
1567				set_astledger(thread);
1568			}
1569		} else if (scope == TASK_RUSECPU_FLAGS_PROC_LIMIT) {
1570			/*
1571			 * Currently, a proc-wide CPU limit always blocks if the limit is
1572			 * exceeded (LEDGER_ACTION_BLOCK).
1573			 */
1574			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_PROC_LIMIT;
1575			task->rusage_cpu_percentage = percentage;
1576			task->rusage_cpu_interval = interval;
1577
1578			ledger_set_limit(task->ledger, task_ledgers.cpu_time, abstime);
1579			ledger_set_period(task->ledger, task_ledgers.cpu_time, interval);
1580			ledger_set_action(task->ledger, task_ledgers.cpu_time, LEDGER_ACTION_BLOCK);
1581		}
1582	}
1583
1584	if (deadline != 0) {
1585		assert(scope == TASK_RUSECPU_FLAGS_DEADLINE);
1586
		/* if already in use, cancel and wait for it to clean out */
1588		if (task->rusage_cpu_callt != NULL) {
1589			task_unlock(task);
1590			thread_call_cancel_wait(task->rusage_cpu_callt);
1591			task_lock(task);
1592		}
1593		if (task->rusage_cpu_callt == NULL) {
1594			task->rusage_cpu_callt = thread_call_allocate_with_priority(task_action_cpuusage, (thread_call_param_t)task, THREAD_CALL_PRIORITY_KERNEL);
1595		}
1596		/* setup callout */
1597		if (task->rusage_cpu_callt != 0) {
1598			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_DEADLINE;
1599			task->rusage_cpu_deadline = deadline;
1600
1601			nanoseconds_to_absolutetime(deadline, &abstime);
1602			save_abstime = abstime;
1603			clock_absolutetime_interval_to_deadline(save_abstime, &abstime);
1604			thread_call_enter_delayed(task->rusage_cpu_callt, abstime);
1605		}
1606	}
1607
1608	return(0);
1609}
1610
1611int
1612task_clear_cpuusage(task_t task)
1613{
1614	int retval = 0;
1615
1616	task_lock(task);
1617	retval = task_clear_cpuusage_locked(task);
1618	task_unlock(task);
1619
1620	return(retval);
1621}
1622
1623int
1624task_clear_cpuusage_locked(task_t task)
1625{
1626	thread_call_t savecallt;
1627	thread_t thread;
1628
1629	/* cancel percentage handling if set */
1630	if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PROC_LIMIT) {
1631		task->rusage_cpu_flags &= ~TASK_RUSECPU_FLAGS_PROC_LIMIT;
1632		ledger_set_limit(task->ledger, task_ledgers.cpu_time, LEDGER_LIMIT_INFINITY);
1633		task->rusage_cpu_percentage = 0;
1634		task->rusage_cpu_interval = 0;
1635	}
1636
1637	if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
1638		task->rusage_cpu_flags &= ~TASK_RUSECPU_FLAGS_PERTHR_LIMIT;
1639		queue_iterate(&task->threads, thread, thread_t, task_threads) {
1640			set_astledger(thread);
1641		}
1642		task->rusage_cpu_perthr_percentage = 0;
1643		task->rusage_cpu_perthr_interval = 0;
1644
1645	}
1646
1647	/* cancel deadline handling if set */
1648	if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_DEADLINE) {
1649		task->rusage_cpu_flags &= ~TASK_RUSECPU_FLAGS_DEADLINE;
1650		if (task->rusage_cpu_callt != 0) {
1651			savecallt = task->rusage_cpu_callt;
1652			task->rusage_cpu_callt = NULL;
1653			task->rusage_cpu_deadline = 0;
1654			task_unlock(task);
1655			thread_call_cancel_wait(savecallt);
1656			thread_call_free(savecallt);
1657			task_lock(task);
1658		}
1659	}
1660	return(0);
1661}
1662
/* called by the ledger unit to enforce an action when resource usage criteria are met */
1664void
1665task_action_cpuusage(thread_call_param_t param0, __unused thread_call_param_t param1)
1666{
1667	task_t task = (task_t)param0;
1668	(void)task_apply_resource_actions(task, TASK_POLICY_CPU_RESOURCE_USAGE);
1669	return;
1670}
1671
1672#if CONFIG_EMBEDDED
1673/* return the appstate of a task */
1674int
1675proc_lf_getappstate(task_t task)
1676{
1677	return(task->appstate);
1678
1679}
1680
1681
/* set the appstate of a task and apply the appropriate actions */
1683int
1684proc_lf_setappstate(task_t task, int state)
1685{
1686	int ret = 0, oldstate;
1687	kern_return_t kret = KERN_SUCCESS;
1688	int applywatch = 0, setbg = 0, setnetbg = 0;
1689	int sethib_suspend = 0, sethib_resume=0;
1690
1691	if (state == TASK_APPSTATE_NONE)
1692		goto out;
1693
1694	/* valid states? */
1695	switch (state) {
1696		case TASK_APPSTATE_ACTIVE:
1697		case TASK_APPSTATE_BACKGROUND:
1698		case TASK_APPSTATE_NONUI:
1699		case TASK_APPSTATE_INACTIVE:
1700			break;
1701		default:
1702			ret = EINVAL;
1703			goto out;
1704
1705	}
1706
1707	task_lock(task);
1708	oldstate = task->appstate;
1709	if (oldstate == state) {
1710		/* no changes */
1711		goto out1;
1712	}
1713
1714	switch(oldstate) {
1715		case TASK_APPSTATE_ACTIVE:
1716			switch(state) {
1717				case TASK_APPSTATE_BACKGROUND:
					/* moving from active to app background */
1719					task->ext_policystate.hw_bg = TASK_POLICY_BACKGROUND_ATTRIBUTE_ALL;
1720					proc_apply_bgtaskpolicy_internal(task, 1, 1);
1721					/* watchers need update */
1722					applywatch = 1;
1723					setbg = 1;
1724					/* set network part */
1725					setnetbg = 1;
1726					break;
1727
1728				case TASK_APPSTATE_NONUI:
1729					/* set no graphics */
1730					task->ext_policystate.hw_gpu = TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_NOACCESS;
1731					task->ext_appliedstate.hw_gpu = TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_NOACCESS;
1732					break;
1733
1734				case TASK_APPSTATE_INACTIVE:
1735					/* suspend the process */
1736					kret = task_pidsuspend_locked(task);
1737					if (kret != KERN_SUCCESS)
1738						ret = EINVAL;
1739					else
1740						sethib_suspend = 1;
1741
1742					break;
1743			}
1744			break;
1745
1746		case TASK_APPSTATE_BACKGROUND:
			switch(state) {
				case TASK_APPSTATE_ACTIVE:
					/* remove app background */
					ret = proc_restore_bgtaskpolicy_internal(task, 1, 1, BASEPRI_DEFAULT);
					/* going from BG to active */
					/* watchers need update */
					applywatch = 1;
					setbg = 0;
					/* set network part */
					setnetbg = 1;
					break;
1758
1759				case TASK_APPSTATE_NONUI:
1760					/* remove app background + no graphics */
1761					task->ext_policystate.hw_gpu = TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_NOACCESS;
1762					task->ext_appliedstate.hw_gpu = TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_NOACCESS;
1763					ret = proc_restore_bgtaskpolicy_internal(task, 1, 1, BASEPRI_DEFAULT);
1764					break;
1765
1766				case TASK_APPSTATE_INACTIVE:
1767					/* suspend and then remove app background */
1768					kret = task_pidsuspend_locked(task);
1769					if (kret != KERN_SUCCESS) {
1770						ret = EINVAL;
1771					} else {
1772						ret = proc_restore_bgtaskpolicy_internal(task, 1, 1, BASEPRI_DEFAULT);
1773						sethib_suspend = 1;
1774					}
1775
1776					break;
1777
1778			}
1779			break;
1780
1781		case TASK_APPSTATE_NONUI:
1782			switch(state) {
1783				case TASK_APPSTATE_ACTIVE:
1784					/* restore graphics access */
					task->ext_policystate.hw_gpu = TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_DEFAULT;
1786					task->ext_appliedstate.hw_gpu = TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_DEFAULT;
1787					break;
1788
1789				case TASK_APPSTATE_BACKGROUND:
1790					/* set app background */
1791					task->ext_policystate.hw_bg = TASK_POLICY_BACKGROUND_ATTRIBUTE_ALL;
1792
1793					ret = proc_apply_bgtaskpolicy_internal(task, 1, 1);
1794					if (ret == 0) {
1795						task->ext_policystate.hw_gpu = TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_DEFAULT;
1796						task->ext_appliedstate.hw_gpu = TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_DEFAULT;
1797					}
1798					/* watchers need update */
1799					applywatch = 1;
1800					setbg = 1;
1801					/* set network part */
1802					setnetbg = 1;
1803					break;
1804
1805				case TASK_APPSTATE_INACTIVE:
1806					/* suspend & restore graphics access */
1807					kret = task_pidsuspend_locked(task);
1808					if (kret != KERN_SUCCESS) {
1809						ret = EINVAL;
1810					} else {
1811						ret = proc_restore_bgtaskpolicy_internal(task, 1, 1, BASEPRI_DEFAULT);
1812						task->ext_policystate.hw_gpu = TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_DEFAULT;
1813						task->ext_appliedstate.hw_gpu = TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_DEFAULT;
1814						sethib_suspend = 1;
1815					}
1816					break;
1817			}
1818			break;
1819
1820		case TASK_APPSTATE_INACTIVE:
1821			switch(state) {
1822				case TASK_APPSTATE_ACTIVE:
1823					/* resume process */
1824					/* going from inactive to active */
1825					break;
1826
1827				case TASK_APPSTATE_BACKGROUND:
1828					task->ext_policystate.hw_bg = TASK_POLICY_BACKGROUND_ATTRIBUTE_ALL;
1829					ret = proc_apply_bgtaskpolicy_internal(task, 1, 1);
1830					/* put in app background & resume process */
1831					/* watchers need update */
1832					applywatch = 1;
1833					setbg = 1;
1834					/* set network part */
1835					setnetbg = 1;
1836					break;
1837
1838				case TASK_APPSTATE_NONUI:
1839					/* remove graphics access and resume */
1840					task->ext_policystate.hw_gpu = TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_NOACCESS;
1841					task->ext_appliedstate.hw_gpu = TASK_POLICY_HWACCESS_GPU_ATTRIBUTE_NOACCESS;
1842					break;
1843			}
		/* task_pidresume() drops the task lock internally, so no locked variant is needed */
1845			task_unlock(task);
1846			kret = task_pidresume(task);
1847			task_lock(task);
1848			sethib_resume = 1;
1849			break;
1850	}
1851	/* set the new app state on the task */
1852	task->appstate = state;
1853out1:
1854	task_unlock(task);
1855	if (setnetbg != 0) {
1856		/* apply network background */
1857		if (setbg != 0)
1858			proc_apply_task_networkbg_internal(task->bsd_info, NULL);
1859		else
1860			proc_restore_task_networkbg_internal(task->bsd_info, NULL);
1861	}
1862#if CONFIG_MEMORYSTATUS
1863	if (sethib_suspend != 0)
1864			memorystatus_on_suspend(proc_pid(task->bsd_info));
1865	if (sethib_resume != 0)
1866			memorystatus_on_resume(proc_pid(task->bsd_info));
1867#endif /* CONFIG_MEMORYSTATUS */
1868	/* if watchers need update, safe point to do that */
1869	if (applywatch != 0)
1870		apply_appstate_watchers(task, setbg);
1871
1872out:
1873	return(ret);
1874}
1875
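/*
 * task_watch_lock()/task_watch_unlock():
 * task_watch_mtx guards the per-task watcher state (task->task_watchers,
 * task->num_taskwatchers) as well as the watchapplying flag used below to
 * serialize appliers.  All add/remove/iterate operations on watcher entries
 * are expected to run under this lock.
 */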
1876static void
1877task_watch_lock(void)
1878{
1879	lck_mtx_lock(&task_watch_mtx);
1880}
1881
1882static void
1883task_watch_unlock(void)
1884{
1885	lck_mtx_unlock(&task_watch_mtx);
1886}
1887
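/*
 * add_taskwatch_locked()/remove_taskwatch_locked():
 * enqueue or dequeue a task_watch_t on the task's watcher list and keep
 * num_taskwatchers in sync.  Callers must already hold task_watch_lock().
 */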
1888static void
1889add_taskwatch_locked(task_t task, task_watch_t * twp)
1890{
1891	queue_enter(&task->task_watchers, twp, task_watch_t *, tw_links);
1892	task->num_taskwatchers++;
1893
1894}
1895
1896static void
1897remove_taskwatch_locked(task_t task, task_watch_t * twp)
1898{
1899	queue_remove(&task->task_watchers, twp, task_watch_t *, tw_links);
1900	task->num_taskwatchers--;
1901}
1902
1903
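/*
 * proc_lf_pidbind(curtask, tid, target_task, bind):
 * bind (bind != 0) or unbind (bind == 0) a thread of the calling task to the
 * app state of target_task.  tid == 0, or the caller's own tid, selects the
 * current thread.  On bind, a task_watch_t is allocated to hold one task and
 * one thread reference; if the watched task is already in
 * TASK_APPSTATE_BACKGROUND, the thread is backgrounded immediately.  On
 * unbind, the watch entry is torn down and the saved importance is restored.
 * Returns 0, ESRCH, ENOMEM or EBUSY.
 *
 * Illustrative call pattern (hypothetical caller; the pid/tid plumbing from
 * user space is not shown here):
 *
 *	ret = proc_lf_pidbind(current_task(), 0, watched_task, 1);	/+ bind self +/
 *	...
 *	ret = proc_lf_pidbind(current_task(), 0, watched_task, 0);	/+ unbind +/
 */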
1904int
1905proc_lf_pidbind(task_t curtask, uint64_t tid, task_t target_task, int bind)
1906{
1907	thread_t self = current_thread();
1908	thread_t target_thread = NULL;
1909	int selfset = 0, ret = 0, setbg = 0;
1910	task_watch_t *twp = NULL;
1911	task_t task = TASK_NULL;
1912
1913
1914	if ((tid == 0) || (tid == self->thread_id)) {
1915		selfset = 1;
1916		target_thread = self;
1917		thread_reference(target_thread);
1918	} else {
1919		task_lock(curtask);
1920		target_thread = task_findtid(curtask, tid);
1921		if (target_thread != NULL)
1922			thread_reference(target_thread);
		else {
			task_unlock(curtask);
			ret = ESRCH;
			goto out;
		}
1927
1928		task_unlock(curtask);
1929	}
1930
1931	if (bind != 0) {
1932		/* task is still active ? */
1933		task_lock(target_task);
1934		if (target_task->active == 0) {
1935			task_unlock(target_task);
1936			ret = ESRCH;
1937			goto out;
1938		}
1939		task_unlock(target_task);
1940
1941		twp = (task_watch_t *)kalloc(sizeof(task_watch_t));
		if (twp == NULL) {
			/* allocation failed before task_watch_lock() is taken, so nothing to unlock */
			ret = ENOMEM;
			goto out;
		}
1947
1948		bzero(twp, sizeof(task_watch_t));
1949
1950		task_watch_lock();
1951
1952		if (target_thread->taskwatch != NULL){
1953			/* already bound to another task */
1954			task_watch_unlock();
1955
1956			kfree(twp, sizeof(task_watch_t));
1957			ret = EBUSY;
1958			goto out;
1959		}
1960
1961		task_reference(target_task);
1962
1963		twp->tw_task = target_task;		/* holds the task reference */
1964		twp->tw_thread = target_thread;		/* holds the thread reference */
1965		twp->tw_state = target_task->appstate;
1966		twp->tw_importance = target_thread->importance;
1967
1968		add_taskwatch_locked(target_task, twp);
1969
1970		target_thread->taskwatch = twp;
1971
1972		if (target_task->appstate == TASK_APPSTATE_BACKGROUND)
1973			setbg = 1;
1974
1975		task_watch_unlock();
1976
1977		if (setbg != 0) {
1978			set_thread_appbg(target_thread, setbg, INT_MIN);
1979		}
1980
1981		/* retain the thread reference as it is in twp */
1982		target_thread = NULL;
1983	} else {
1984		/* unbind */
1985		task_watch_lock();
1986		if ((twp = target_thread->taskwatch) != NULL) {
1987			task = twp->tw_task;
1988			target_thread->taskwatch = NULL;
1989			remove_taskwatch_locked(task, twp);
1990
1991			task_watch_unlock();
1992
1993			task_deallocate(task);			/* drop task ref in twp */
1994			set_thread_appbg(target_thread, 0, twp->tw_importance);
1995			thread_deallocate(target_thread);	/* drop thread ref in twp */
1996			kfree(twp, sizeof(task_watch_t));
1997		} else {
1998			task_watch_unlock();
			ret = 0;		/* return success if it is not already bound */
2000			goto out;
2001		}
2002	}
2003out:
2004	if (target_thread != NULL)
2005		thread_deallocate(target_thread);	/* drop thread ref acquired in this routine */
2006	return(ret);
2007}
2008
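/*
 * set_thread_appbg():
 * apply (setbg != 0) or remove (setbg == 0) the application-background policy
 * on a single watcher thread, including the network background state of the
 * owning proc.  The importance argument is only consulted on restore, where
 * it is handed back to restore_bgthreadpolicy_locked().
 */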
2009static void
set_thread_appbg(thread_t thread, int setbg, int importance)
2011{
2012	/* TBD: ensure the proc for network is fine */
2013	if (setbg == 0) {
2014		restore_bgthreadpolicy_locked(thread, 0, importance);
2015		proc_restore_task_networkbg_internal(thread->task->bsd_info, thread);
2016	 } else {
2017		apply_bgthreadpolicy_external(thread);
2018		proc_apply_task_networkbg_internal(thread->task->bsd_info, thread);
2019	}
2020}
2021
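/*
 * apply_appstate_watchers():
 * propagate a task's new background state to every thread watching it.  The
 * watcher list is snapshotted into a temporary array under task_watch_lock(),
 * then set_thread_appbg() is applied outside the lock; task->watchapplying
 * serializes concurrent appliers, and the routine retries if the watcher
 * count changes while the allocation runs unlocked.
 */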
2022static void
2023apply_appstate_watchers(task_t task, int setbg)
2024{
2025	int numwatchers = 0, i, j;
2026	thread_watchlist_t * threadlist;
2027	task_watch_t * twp;
2028
2029retry:
2030	/* if no watchers on the list return */
2031	if ((numwatchers = task->num_taskwatchers) == 0)
2032		return;
2033
2034	threadlist = (thread_watchlist_t *)kalloc(numwatchers*sizeof(thread_watchlist_t));
2035	if (threadlist == NULL)
2036		return;
2037
2038	bzero(threadlist, numwatchers*sizeof(thread_watchlist_t));
2039
2040	task_watch_lock();
	/* serialize application of app-state changes */
2042	if (task->watchapplying != 0) {
2043		lck_mtx_sleep(&task_watch_mtx, LCK_SLEEP_DEFAULT, &task->watchapplying, THREAD_UNINT);
2044		task_watch_unlock();
2045		kfree(threadlist, numwatchers*sizeof(thread_watchlist_t));
2046		goto retry;
2047	}
2048
2049	if (numwatchers != task->num_taskwatchers) {
2050		task_watch_unlock();
2051		kfree(threadlist, numwatchers*sizeof(thread_watchlist_t));
2052		goto retry;
2053	}
2054
2055	task->watchapplying = 1;
2056	i = 0;
2057	queue_iterate(&task->task_watchers, twp, task_watch_t *, tw_links) {
2058
2059		threadlist[i].thread = twp->tw_thread;
2060		thread_reference(threadlist[i].thread);
2061		if (setbg != 0) {
2062			twp->tw_importance = twp->tw_thread->importance;
2063			threadlist[i].importance = INT_MIN;
2064		} else
2065			threadlist[i].importance = twp->tw_importance;
2066		i++;
		if (i >= numwatchers)	/* threadlist has numwatchers slots; guard against overflow */
			break;
2069	}
2070	task_watch_unlock();
2071
2072	for (j = 0; j< i; j++) {
2073		set_thread_appbg(threadlist[j].thread, setbg, threadlist[j].importance);
2074		thread_deallocate(threadlist[j].thread);
2075	}
2076	kfree(threadlist, numwatchers*sizeof(thread_watchlist_t));
2077
2078
2079	task_watch_lock();
2080	task->watchapplying = 0;
2081	thread_wakeup_one(&task->watchapplying);
2082	task_watch_unlock();
2083}
2084
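/*
 * thead_remove_taskwatch():
 * detach and free the watch entry of a thread (presumably on thread
 * termination), dropping the task and thread references held by the entry
 * and restoring the thread's saved importance.
 */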
2085void
2086thead_remove_taskwatch(thread_t thread)
2087{
2088	task_watch_t * twp;
2089	int importance = 0;
2090
2091	task_watch_lock();
2092	if ((twp = thread->taskwatch) != NULL) {
2093		thread->taskwatch = NULL;
2094		remove_taskwatch_locked(twp->tw_task, twp);
2095	}
2096	task_watch_unlock();
2097	if (twp != NULL) {
2098		thread_deallocate(twp->tw_thread);
2099		task_deallocate(twp->tw_task);
2100		importance = twp->tw_importance;
2101		kfree(twp, sizeof(task_watch_t));
2102		/* remove the thread and networkbg */
2103		set_thread_appbg(thread, 0, importance);
2104	}
2105}
2106
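/*
 * task_removewatchers():
 * drain the entire watcher list of a task (presumably during task teardown).
 * Entries are dequeued in a batch under task_watch_lock(), then each watcher
 * thread is un-backgrounded and the references held by its task_watch_t are
 * dropped outside the lock.
 */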
2107void
2108task_removewatchers(task_t task)
2109{
2110	int numwatchers = 0, i, j;
2111	task_watch_t ** twplist = NULL;
2112	task_watch_t * twp = NULL;
2113
2114retry:
2115	if ((numwatchers = task->num_taskwatchers) == 0)
2116		return;
2117
2118	twplist = (task_watch_t **)kalloc(numwatchers*sizeof(task_watch_t *));
2119	if (twplist == NULL)
2120		return;
2121
2122	bzero(twplist, numwatchers*sizeof(task_watch_t *));
2123
2124	task_watch_lock();
2125	if (task->num_taskwatchers == 0) {
2126		task_watch_unlock();
2127		goto out;
2128	}
2129
2130	if (numwatchers != task->num_taskwatchers) {
2131		task_watch_unlock();
2132		kfree(twplist, numwatchers*sizeof(task_watch_t *));
2133		numwatchers = 0;
2134		goto retry;
2135	}
2136
2137	i = 0;
2138	while((twp = (task_watch_t *)dequeue_head(&task->task_watchers)) != NULL)
2139	{
2140		twplist[i] = twp;
2141		task->num_taskwatchers--;
2142
		/*
		 * Since the linkage is removed and the thread state cleanup is already set up,
		 * remove the reference from the thread.
		 */
2147		twp->tw_thread->taskwatch = NULL;	/* removed linkage, clear thread holding ref */
2148		i++;
		if ((task->num_taskwatchers == 0) || (i >= numwatchers))	/* twplist has numwatchers slots */
			break;
2151	}
2152
2153	task_watch_unlock();
2154
2155	for (j = 0; j< i; j++) {
2156
2157		twp = twplist[j];
2158		/* remove thread and network bg */
2159		set_thread_appbg(twp->tw_thread, 0, twp->tw_importance);
2160		thread_deallocate(twp->tw_thread);
2161		task_deallocate(twp->tw_task);
2162		kfree(twp, sizeof(task_watch_t));
2163	}
2164
2165out:
2166	kfree(twplist, numwatchers*sizeof(task_watch_t *));
2167
2168}
2169#endif /* CONFIG_EMBEDDED */
2170
2171
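/*
 * proc_disable_task_apptype():
 * turn off the foreground/background handling installed for an application
 * type (TAL or DBCLIENT on non-embedded builds).  The task's applied apptype
 * is reset to PROC_POLICY_OSX_APPTYPE_NONE, the background task policy is
 * restored up to a maximum priority derived from the task's current role,
 * and the proc's network background state is cleared.  Returns EINVAL if the
 * requested policy_subtype does not match the task's configured apptype.
 */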
2172int
2173proc_disable_task_apptype(task_t task, int policy_subtype)
2174{
2175	void * bsdinfo = NULL;
2176	int ret = 0;
2177	int setbg = 0;
2178#if !CONFIG_EMBEDDED
2179	int maxpri = BASEPRI_DEFAULT;
2180#endif /* !CONFIG_EMBEDDED */
2181
2182	task_lock(task);
2183
2184	if (task->ext_policystate.apptype != policy_subtype) {
2185		ret = EINVAL;
2186		goto out;
2187	}
2188
2189#if !CONFIG_EMBEDDED
2190	switch (task->role) {
2191		case TASK_FOREGROUND_APPLICATION:
2192			maxpri = BASEPRI_FOREGROUND;
2193			break;
2194		case TASK_BACKGROUND_APPLICATION:
2195			maxpri = BASEPRI_BACKGROUND;
2196			break;
2197		default:
			maxpri = BASEPRI_DEFAULT;
			break;
2199	}
2200
2201
2202#endif /* !CONFIG_EMBEDDED */
2203
2204	/* TAL apps are cleared with BG handling on first foreground application */
2205	if (task->ext_appliedstate.apptype != PROC_POLICY_OSX_APPTYPE_NONE) {
2206			switch (task->ext_appliedstate.apptype) {
2207#if !CONFIG_EMBEDDED
2208				case PROC_POLICY_OSX_APPTYPE_TAL:
2209					/* disable foreground/background handling */
2210					task->ext_appliedstate.apptype = PROC_POLICY_OSX_APPTYPE_NONE;
2211					/* external BG application removal */
2212					proc_restore_bgtaskpolicy_internal(task, 1, 1, maxpri);
2213					bsdinfo = task->bsd_info;
2214					setbg = 0;
2215					break;
2216
2217				case PROC_POLICY_OSX_APPTYPE_DBCLIENT:
2218					/* disable foreground/background handling */
2219					task->ext_appliedstate.apptype = PROC_POLICY_OSX_APPTYPE_NONE;
2220					/* internal BG application removal */
2221					proc_restore_bgtaskpolicy_internal(task, 1, 0, maxpri);
2222					bsdinfo = task->bsd_info;
2223					setbg = 0;
2224					break;
2225
2226#endif /* !CONFIG_EMBEDDED */
2227				default:
2228					ret = EINVAL;
2229					break;
2230			}
2231
2232	} else {
2233		ret = EINVAL;
2234	}
2235
2236out:
2237	task_unlock(task);
2238	/* if backgrounding action ... */
2239	if (bsdinfo != NULL)
2240		proc_set_task_networkbg(bsdinfo, setbg);
2241
2242	return(ret);
2243}
2244
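/*
 * proc_enable_task_apptype():
 * re-arm the foreground/background handling for a previously configured
 * application type (currently only PROC_POLICY_OSX_APPTYPE_TAL).  If the
 * task is in the TASK_BACKGROUND_APPLICATION role, the background task
 * policy and network background state are applied immediately.  Returns
 * EINVAL if the subtype does not match or an apptype policy is still active.
 *
 * A hypothetical caller would pair it with proc_disable_task_apptype(), e.g.:
 *	proc_disable_task_apptype(task, PROC_POLICY_OSX_APPTYPE_TAL);
 *	...
 *	proc_enable_task_apptype(task, PROC_POLICY_OSX_APPTYPE_TAL);
 */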
2245int
2246proc_enable_task_apptype(task_t task, int policy_subtype)
2247{
2248	void * bsdinfo = NULL;
2249	int setbg = 0;
2250	int ret = 0;
2251
2252	task_lock(task);
2253
2254	if (task->ext_policystate.apptype != policy_subtype) {
2255		ret = EINVAL;
2256		goto out;
2257	}
2258
2259	if (task->ext_appliedstate.apptype == PROC_POLICY_OSX_APPTYPE_NONE) {
2260		switch (task->ext_policystate.apptype) {
2261#if !CONFIG_EMBEDDED
2262			case PROC_POLICY_OSX_APPTYPE_TAL:
2263			 	 /* TAL policy is activated again */
2264				task->ext_appliedstate.apptype = task->ext_policystate.apptype;
				if (task->role == TASK_BACKGROUND_APPLICATION) {
					proc_apply_bgtaskpolicy_internal(task, 1, 1);
					bsdinfo = task->bsd_info;
					setbg = 1;
				}
2272				ret = 0;
2273				break;
2274#endif /* !CONFIG_EMBEDDED */
2275			default:
2276				ret = EINVAL;
2277		}
2278	} else
2279		ret = EINVAL;
2280
2281out:
2282	task_unlock(task);
2283	/* if backgrounding action ... */
2284	if (bsdinfo != NULL)
2285		proc_set_task_networkbg(bsdinfo, setbg);
2286
2287	return(ret);
2288}
2289
2290#if CONFIG_EMBEDDED
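/*
 * proc_setthread_saved_importance():
 * for an Apple daemon whose thread is currently backgrounded, stash the
 * requested importance in thread->saved_importance so it can be applied when
 * the background policy is lifted.  Returns 1 if the value was saved, 0
 * otherwise (the caller presumably applies the importance directly).
 */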
2291int
2292proc_setthread_saved_importance(thread_t thread, int importance)
2293{
2294	if ((thread->task->ext_appliedstate.apptype == PROC_POLICY_IOS_APPLE_DAEMON)  &&
2295		(thread->appliedstate.hw_bg == TASK_POLICY_BACKGROUND_ATTRIBUTE_ALL))
2296	{
		/* the thread is still backgrounded; save the importance for restore time */
2298		thread->saved_importance = importance;
2299
2300		return(1);
2301	} else
2302		return(0);
2303}
2304#endif /* CONFIG_EMBEDDED */
2305