/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/machine/vm_param.h>
#include <mach/task.h>

#include <kern/kern_types.h>
#include <kern/ledger.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/ast.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h> /* last */
#include <sys/resource.h>

#undef thread_should_halt

/* BSD KERN COMPONENT INTERFACE */

task_t	bsd_init_task = TASK_NULL;
char	init_task_failure_data[1024];
extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
kern_return_t get_signalact(task_t, thread_t *, int);
int get_vmsubmap_entries(vm_map_t, vm_object_offset_t, vm_object_offset_t);
int fill_task_rusage(task_t task, rusage_info_current *ri);
int fill_task_io_rusage(task_t task, rusage_info_current *ri);
int fill_task_qos_rusage(task_t task, rusage_info_current *ri);
void fill_task_billed_usage(task_t task, rusage_info_current *ri);
/*
 * Return the BSD proc pointer (if any) attached to the task.
 */
void  *get_bsdtask_info(task_t t)
{
	return(t->bsd_info);
}

/*
 * Return the BSD proc pointer for the thread's task, or NULL if
 * the thread has no task.
 */
void *get_bsdthreadtask_info(thread_t th)
{
	return(th->task != TASK_NULL ? th->task->bsd_info : NULL);
}

/*
 * Attach a BSD proc pointer to the task.
 */
void set_bsdtask_info(task_t t, void *v)
{
	t->bsd_info = v;
}

/*
 * Return the uthread attached to the thread.
 */
void *get_bsdthread_info(thread_t th)
{
	return(th->uthread);
}

/*
 * XXX
 */
int get_thread_lock_count(thread_t th);		/* forced forward */
int get_thread_lock_count(thread_t th)
{
	return(th->mutex_count);
}

/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t get_firstthread(task_t task)
{
	thread_t	thread = (thread_t)(void *)queue_first(&task->threads);

	if (queue_end(&task->threads, (queue_entry_t)thread))
		thread = THREAD_NULL;

	if (!task->active)
		return (THREAD_NULL);

	return (thread);
}

kern_return_t
get_signalact(
	task_t		task,
	thread_t	*result_out,
	int			setast)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_t		inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		thread_mtx_lock(inc);
		if (inc->active &&
				(inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result_out)
		*result_out = thread;

	if (thread) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}
	else
		result = KERN_FAILURE;

	task_unlock(task);

	return (result);
}
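
/*
 * Illustrative sketch (not compiled): a signal-delivery path that needs some
 * active, non-aborted thread in a task could use get_signalact() to select
 * one and post the BSD AST in a single call.  The caller shown here is
 * hypothetical; only the get_signalact() contract is taken from this file.
 *
 *	thread_t thr;
 *
 *	if (get_signalact(task, &thr, 1) == KERN_SUCCESS) {
 *		// thr was active and not aborted when selected, and
 *		// act_set_astbsd() has already been called on it.
 *	}
 */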

kern_return_t
check_actforsig(
	task_t			task,
	thread_t		thread,
	int				setast)
{
	kern_return_t	result = KERN_FAILURE;
	thread_t		inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)(void *)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
					(inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return (result);
}

ledger_t  get_task_ledger(task_t t)
{
	return(t->ledger);
}

/*
 * This is only safe to call from a thread executing in the task's
 * context, or if the task is locked.  Otherwise, the map could be
 * switched for the task (and freed) before we return it here.
 */
vm_map_t  get_task_map(task_t t)
{
	return(t->map);
}

vm_map_t  get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL)
		return VM_MAP_NULL;

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference_swap(m);
	task_unlock(t);
	return m;
}
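
/*
 * Illustrative sketch (not compiled): a caller that must keep the map valid
 * across a blocking operation should take a reference here and drop it with
 * vm_map_deallocate() when finished, rather than using get_task_map().
 *
 *	vm_map_t map = get_task_map_reference(task);
 *
 *	if (map != VM_MAP_NULL) {
 *		// ... inspect or operate on the map; it cannot be freed ...
 *		vm_map_deallocate(map);
 *	}
 */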

/*
 * Return the task's IPC space.
 */
ipc_space_t  get_task_ipcspace(task_t t)
{
	return(t->itk_space);
}

int get_task_numactivethreads(task_t task)
{
	thread_t	inc;
	int num_active_thr = 0;
	task_lock(task);

	for (inc = (thread_t)(void *)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); inc = (thread_t)(void *)queue_next(&inc->task_threads))
	{
		if (inc->active)
			num_active_thr++;
	}
	task_unlock(task);
	return num_active_thr;
}

int  get_task_numacts(task_t t)
{
	return(t->thread_count);
}

/* Does the current task need the 64-bit register set for its signal handlers? */
int is_64signalregset(void)
{
	if (task_has_64BitData(current_task())) {
		return(1);
	}

	return(0);
}

/*
 * Swap in a new map for the task/thread pair; the old map reference is
 * returned.
 */
vm_map_t
swap_task_map(task_t task, thread_t thread, vm_map_t map, boolean_t doswitch)
{
	vm_map_t old_map;

	if (task != thread->task)
		panic("swap_task_map");

	task_lock(task);
	mp_disable_preemption();
	old_map = task->map;
	thread->map = task->map = map;
	if (doswitch) {
		pmap_switch(map->pmap);
	}
	mp_enable_preemption();
	task_unlock(task);

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
	inval_copy_windows(thread);
#endif

	return old_map;
}
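
/*
 * Illustrative sketch (not compiled): a caller replacing a task's address
 * space (an exec-style path; the surrounding code here is hypothetical) is
 * expected to drop the old map reference that swap_task_map() hands back.
 * The thread passed in must belong to the task, or the routine panics.
 *
 *	vm_map_t old_map;
 *
 *	old_map = swap_task_map(task, thread, new_map, TRUE);
 *	vm_map_deallocate(old_map);
 */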

/*
 * Return the pmap backing the task's address map.
 */
pmap_t  get_task_pmap(task_t t)
{
	return(t->map->pmap);
}

/*
 * Return the resident size (in bytes) of the task's address space.
 */
uint64_t get_task_resident_size(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return((uint64_t)pmap_resident_count(map->pmap) * PAGE_SIZE_64);
}

uint64_t get_task_compressed(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return((uint64_t)pmap_compressed(map->pmap) * PAGE_SIZE_64);
}

uint64_t get_task_resident_max(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return((uint64_t)pmap_resident_max(map->pmap) * PAGE_SIZE_64);
}

uint64_t get_task_purgeable_size(task_t task)
{
	vm_map_t map;
	mach_vm_size_t	volatile_virtual_size;
	mach_vm_size_t	volatile_resident_size;
	mach_vm_size_t	volatile_pmap_size;

	map = (task == kernel_task) ? kernel_map : task->map;
	vm_map_query_volatile(map, &volatile_virtual_size, &volatile_resident_size, &volatile_pmap_size);

	return((uint64_t)volatile_resident_size);
}

/*
 * Return the physical footprint of the task as accounted by its ledger.
 */
uint64_t get_task_phys_footprint(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.phys_footprint, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}

/*
 * Return the maximum physical footprint recorded by the task's ledger.
 */
uint64_t get_task_phys_footprint_max(task_t task)
{
	kern_return_t ret;
	ledger_amount_t max;

	ret = ledger_get_maximum(task->ledger, task_ledgers.phys_footprint, &max);
	if (KERN_SUCCESS == ret) {
		return max;
	}

	return 0;
}

uint64_t get_task_cpu_time(task_t task)
{
	kern_return_t ret;
	ledger_amount_t credit, debit;

	ret = ledger_get_entries(task->ledger, task_ledgers.cpu_time, &credit, &debit);
	if (KERN_SUCCESS == ret) {
		return (credit - debit);
	}

	return 0;
}
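
/*
 * Illustrative sketch (not compiled): the two routines above read a ledger
 * entry as (credit - debit).  ledger_get_balance(), used further down in this
 * file, reports the same quantity in one call; a hypothetical helper for
 * another ledger entry could look like this.
 *
 *	static uint64_t
 *	get_task_wired(task_t task)
 *	{
 *		ledger_amount_t balance = 0;
 *
 *		ledger_get_balance(task->ledger, task_ledgers.wired_mem, &balance);
 *		return (uint64_t)balance;
 *	}
 */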

/*
 * Return the pmap backing the given map.
 */
pmap_t  get_map_pmap(vm_map_t map)
{
	return(map->pmap);
}

/*
 * Return the task the thread belongs to.
 */
task_t	get_threadtask(thread_t th)
{
	return(th->task);
}

/*
 * Return the map's minimum (start) address.
 */
vm_map_offset_t
get_map_min(
	vm_map_t	map)
{
	return(vm_map_min(map));
}

/*
 * Return the map's maximum (end) address.
 */
vm_map_offset_t
get_map_max(
	vm_map_t	map)
{
	return(vm_map_max(map));
}

vm_map_size_t
get_vmmap_size(
	vm_map_t	map)
{
	return(map->size);
}

int
get_vmsubmap_entries(
	vm_map_t	map,
	vm_object_offset_t	start,
	vm_object_offset_t	end)
{
	int	total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}

int
get_vmmap_entries(
	vm_map_t	map)
{
	int	total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}

/*
 * Return the task's user stop count.
 */
int
get_task_userstop(
	task_t task)
{
	return(task->user_stop_count);
}

/*
 * Return the thread's user stop count.
 */
int
get_thread_userstop(
	thread_t th)
{
	return(th->user_stop_count);
}

/*
 * Return whether the task has been pid-suspended.
 */
boolean_t
get_task_pidsuspended(
	task_t task)
{
	return (task->pidsuspended);
}

/*
 * Return whether the task is frozen.
 */
boolean_t
get_task_frozen(
	task_t task)
{
	return (task->frozen);
}

/*
 * Return whether the thread has been aborted (and not merely abort-safely).
 */
boolean_t
thread_should_abort(
	thread_t th)
{
	return ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT);
}

/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike above, it also
 * checks to see if the thread is safely aborted.  If so, it returns
 * that fact, and clears the condition (safe aborts should only
 * have a single effect, and a poll of the abort status qualifies).
 */
boolean_t
current_thread_aborted (
		void)
{
	thread_t th = current_thread();
	spl_t s;

	if ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT &&
			(th->options & TH_OPT_INTMASK) != THREAD_UNINT)
		return (TRUE);
	if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->sched_flags & TH_SFLAG_ABORTSAFELY)
			th->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
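
/*
 * Illustrative sketch (not compiled): a long-running kernel loop working on
 * behalf of a user thread can poll current_thread_aborted() to bail out
 * early; the loop body and return value shown here are hypothetical.
 *
 *	while (work_remaining) {
 *		if (current_thread_aborted())
 *			return (EINTR);
 *		// ... process the next chunk of work ...
 *	}
 */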

/*
 * Call the given function on every thread in the task, with the task
 * locked for the duration of the walk.
 */
void
task_act_iterate_wth_args(
	task_t			task,
	void			(*func_callback)(thread_t, void *),
	void			*func_arg)
{
	thread_t	inc;

	task_lock(task);

	for (inc = (thread_t)(void *)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)(void *)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}
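
/*
 * Illustrative sketch (not compiled): a hypothetical callback that counts
 * the threads in a task using the iterator above.  The callback must match
 * the void (*)(thread_t, void *) signature and must not block on the task
 * lock, which is held for the duration of the walk.
 *
 *	static void
 *	count_thread(thread_t thread __unused, void *arg)
 *	{
 *		(*(int *)arg)++;
 *	}
 *
 *	int nthreads = 0;
 *	task_act_iterate_wth_args(task, count_thread, &nthreads);
 */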

/*
 * Request an AST_BSD on the current processor, with interrupts disabled
 * around the update.
 */
void
astbsd_on(void)
{
	boolean_t	reenable;

	reenable = ml_set_interrupts_enabled(FALSE);
	ast_on_fast(AST_BSD);
	(void)ml_set_interrupts_enabled(reenable);
}


#include <sys/bsdtask_info.h>

/*
 * Fill in a proc_taskinfo_internal structure with task-wide VM,
 * scheduling, and per-thread time statistics.
 */
void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t	tinfo;
	thread_t thread;
	uint32_t cswitch = 0, numrunning = 0;
	uint32_t syscalls_unix = 0;
	uint32_t syscalls_mach = 0;

	map = (task == kernel_task) ? kernel_map : task->map;

	ptinfo->pti_virtual_size  = map->size;
	ptinfo->pti_resident_size =
		(mach_vm_size_t)(pmap_resident_count(map->pmap))
		* PAGE_SIZE_64;

	task_lock(task);

	ptinfo->pti_policy = ((task != kernel_task) ?
					POLICY_TIMESHARE : POLICY_RR);

	tinfo.threads_user = tinfo.threads_system = 0;
	tinfo.total_user = task->total_user_time;
	tinfo.total_system = task->total_system_time;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		uint64_t    tval;
		spl_t x;

		if (thread->options & TH_OPT_IDLE_THREAD)
			continue;

		x = splsched();
		thread_lock(thread);

		if ((thread->state & TH_RUN) == TH_RUN)
			numrunning++;
		cswitch += thread->c_switch;
		tval = timer_grab(&thread->user_timer);
		tinfo.threads_user += tval;
		tinfo.total_user += tval;

		tval = timer_grab(&thread->system_timer);

		if (thread->precise_user_kernel_time) {
			tinfo.threads_system += tval;
			tinfo.total_system += tval;
		} else {
			/* system_timer may represent either sys or user */
			tinfo.threads_user += tval;
			tinfo.total_user += tval;
		}

		syscalls_unix += thread->syscalls_unix;
		syscalls_mach += thread->syscalls_mach;

		thread_unlock(thread);
		splx(x);
	}

	ptinfo->pti_total_system = tinfo.total_system;
	ptinfo->pti_total_user = tinfo.total_user;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_faults = task->faults;
	ptinfo->pti_pageins = task->pageins;
	ptinfo->pti_cow_faults = task->cow_faults;
	ptinfo->pti_messages_sent = task->messages_sent;
	ptinfo->pti_messages_received = task->messages_received;
	ptinfo->pti_syscalls_mach = task->syscalls_mach + syscalls_mach;
	ptinfo->pti_syscalls_unix = task->syscalls_unix + syscalls_unix;
	ptinfo->pti_csw = task->c_switch + cswitch;
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}
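
/*
 * Illustrative sketch (not compiled): a proc_info-style consumer would
 * supply a zeroed proc_taskinfo_internal and read the filled-in fields
 * afterwards; the surrounding caller is hypothetical.
 *
 *	struct proc_taskinfo_internal pti;
 *
 *	bzero(&pti, sizeof(pti));
 *	fill_taskprocinfo(task, &pti);
 *	// pti.pti_virtual_size, pti.pti_threadnum, ... are now valid
 */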

int
fill_taskthreadinfo(task_t task, uint64_t thaddr, int thuniqueid, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
{
	thread_t  thact;
	int err = 0;
	mach_msg_type_number_t count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;
	uint64_t addr = 0;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)thact); ) {
		addr = (thuniqueid == 0) ? thact->machine.cthread_self : thact->thread_id;
		if (addr == thaddr)
		{
			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
			ptinfo->pth_user_time = ((basic_info.user_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.user_time.microseconds * (integer_t)NSEC_PER_USEC));
			ptinfo->pth_system_time = ((basic_info.system_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.system_time.microseconds * (integer_t)NSEC_PER_USEC));

			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->priority;
			ptinfo->pth_maxpriority = thact->max_priority;

			if ((vpp != NULL) && (thact->uthread != NULL))
				bsd_threadcdir(thact->uthread, vpp, vidp);
			bsd_getthreadname(thact->uthread, ptinfo->pth_name);
			err = 0;
			goto out;
		}
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}
	err = 1;

out:
	task_unlock(task);
	return(err);
}

int
fill_taskthreadlist(task_t task, void * buffer, int thcount)
{
	int numthr = 0;
	thread_t thact;
	uint64_t * uptr;
	uint64_t  thaddr;

	uptr = (uint64_t *)buffer;

	task_lock(task);

	for (thact = (thread_t)(void *)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)thact); ) {
		thaddr = thact->machine.cthread_self;
		*uptr++ = thaddr;
		numthr++;
		if (numthr >= thcount)
			goto out;
		thact = (thread_t)(void *)queue_next(&thact->task_threads);
	}

out:
	task_unlock(task);
	return (int)(numthr * sizeof(uint64_t));
}

int
get_numthreads(task_t task)
{
	return(task->thread_count);
}

/*
 * Gather the various pieces of info about the designated task,
 * and collect it all into a single rusage_info.
 */
int
fill_task_rusage(task_t task, rusage_info_current *ri)
{
	struct task_power_info powerinfo;

	assert(task != TASK_NULL);
	task_lock(task);

	task_power_info_locked(task, &powerinfo, NULL);
	ri->ri_pkg_idle_wkups = powerinfo.task_platform_idle_wakeups;
	ri->ri_interrupt_wkups = powerinfo.task_interrupt_wakeups;
	ri->ri_user_time = powerinfo.total_user;
	ri->ri_system_time = powerinfo.total_system;

	ledger_get_balance(task->ledger, task_ledgers.phys_footprint,
	                   (ledger_amount_t *)&ri->ri_phys_footprint);
	ledger_get_balance(task->ledger, task_ledgers.phys_mem,
	                   (ledger_amount_t *)&ri->ri_resident_size);
	ledger_get_balance(task->ledger, task_ledgers.wired_mem,
	                   (ledger_amount_t *)&ri->ri_wired_size);

	ri->ri_pageins = task->pageins;

	task_unlock(task);
	return (0);
}

void
fill_task_billed_usage(task_t task __unused, rusage_info_current *ri)
{
#if CONFIG_BANK
	ri->ri_billed_system_time = bank_billed_time(task->bank_context);
	ri->ri_serviced_system_time = bank_serviced_time(task->bank_context);
#else
	ri->ri_billed_system_time = 0;
	ri->ri_serviced_system_time = 0;
#endif
}

int
fill_task_io_rusage(task_t task, rusage_info_current *ri)
{
	assert(task != TASK_NULL);
	task_lock(task);

	if (task->task_io_stats) {
		ri->ri_diskio_bytesread = task->task_io_stats->disk_reads.size;
		ri->ri_diskio_byteswritten = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
	} else {
		/* I/O stats unavailable */
		ri->ri_diskio_bytesread = 0;
		ri->ri_diskio_byteswritten = 0;
	}
	task_unlock(task);
	return (0);
}

int
fill_task_qos_rusage(task_t task, rusage_info_current *ri)
{
	thread_t thread;

	assert(task != TASK_NULL);
	task_lock(task);

	/* Roll up the QoS time of all the task's threads */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread->options & TH_OPT_IDLE_THREAD)
			continue;

		thread_mtx_lock(thread);
		thread_update_qos_cpu_time(thread, TRUE);
		thread_mtx_unlock(thread);
	}
	ri->ri_cpu_time_qos_default = task->cpu_time_qos_stats.cpu_time_qos_default;
	ri->ri_cpu_time_qos_maintenance = task->cpu_time_qos_stats.cpu_time_qos_maintenance;
	ri->ri_cpu_time_qos_background = task->cpu_time_qos_stats.cpu_time_qos_background;
	ri->ri_cpu_time_qos_utility = task->cpu_time_qos_stats.cpu_time_qos_utility;
	ri->ri_cpu_time_qos_legacy = task->cpu_time_qos_stats.cpu_time_qos_legacy;
	ri->ri_cpu_time_qos_user_initiated = task->cpu_time_qos_stats.cpu_time_qos_user_initiated;
	ri->ri_cpu_time_qos_user_interactive = task->cpu_time_qos_stats.cpu_time_qos_user_interactive;

	task_unlock(task);
	return (0);
}
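
/*
 * Illustrative sketch (not compiled): a resource-usage reporting path would
 * typically combine the fill_* helpers above on a zeroed rusage_info_current;
 * the caller shown here is hypothetical.
 *
 *	rusage_info_current ri;
 *
 *	bzero(&ri, sizeof(ri));
 *	fill_task_rusage(task, &ri);
 *	fill_task_io_rusage(task, &ri);
 *	fill_task_qos_rusage(task, &ri);
 *	fill_task_billed_usage(task, &ri);
 */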