1/*
2 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
3 *
4 * @Apple_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License").  You may not use this file except in compliance with the
9 * License.  Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
21 */
22
23
24#include <machine/spl.h>
25
26#include <sys/errno.h>
27#include <sys/param.h>
28#include <sys/systm.h>
29#include <sys/proc_internal.h>
30#include <sys/vm.h>
31#include <sys/sysctl.h>
32#include <sys/kdebug.h>
33#include <sys/sysproto.h>
34#include <sys/bsdtask_info.h>
35
36#define HZ      100
37#include <mach/clock_types.h>
38#include <mach/mach_types.h>
39#include <mach/mach_time.h>
40#include <machine/machine_routines.h>
41
42#if defined(__i386__) || defined(__x86_64__)
43#include <i386/rtclock_protos.h>
44#include <i386/mp.h>
45#include <i386/machine_routines.h>
46#endif
47
48#include <kern/clock.h>
49
50#include <kern/thread.h>
51#include <kern/task.h>
52#include <kern/debug.h>
53#include <kern/kalloc.h>
54#include <kern/cpu_data.h>
55#include <kern/assert.h>
56#include <kern/telemetry.h>
57#include <vm/vm_kern.h>
58#include <sys/lock.h>
59
60#include <sys/malloc.h>
61#include <sys/mcache.h>
62#include <sys/kauth.h>
63
64#include <sys/vnode.h>
65#include <sys/vnode_internal.h>
66#include <sys/fcntl.h>
67#include <sys/file_internal.h>
68#include <sys/ubc.h>
69#include <sys/param.h>			/* for isset() */
70
71#include <mach/mach_host.h>		/* for host_info() */
72#include <libkern/OSAtomic.h>
73
74#include <machine/pal_routines.h>
75
76/*
77 * IOP(s)
78 *
79 * https://coreoswiki.apple.com/wiki/pages/U6z3i0q9/Consistent_Logging_Implementers_Guide.html
80 *
81 * IOP(s) are auxiliary cores that want to participate in kdebug event logging.
82 * They are registered dynamically. Each is assigned a cpu_id at registration.
83 *
84 * NOTE: IOP trace events may not use the same clock hardware as "normal"
85 * cpus. There is an effort made to synchronize the IOP timebase with the
86 * AP, but it should be understood that there may be discrepancies.
87 *
 * Once registered, an IOP is permanent; it cannot be unloaded or unregistered.
89 * The current implementation depends on this for thread safety.
90 *
 * New registrations occur by allocating a kd_iop struct and assigning
92 * a provisional cpu_id of list_head->cpu_id + 1. Then a CAS to claim the
93 * list_head pointer resolves any races.
94 *
95 * You may safely walk the kd_iops list at any time, without holding locks.
96 *
97 * When allocating buffers, the current kd_iops head is captured. Any operations
98 * that depend on the buffer state (such as flushing IOP traces on reads,
99 * etc.) should use the captured list head. This will allow registrations to
100 * take place while trace is in use.
101 */
102
103typedef struct kd_iop {
104	kd_callback_t	callback;
105	uint32_t	cpu_id;
106	uint64_t	last_timestamp; /* Prevent timer rollback */
107	struct kd_iop*	next;
108} kd_iop_t;
109
110static kd_iop_t* kd_iops = NULL;
111
/* XXX should have a prototype, but Mach does not provide one */
113void task_act_iterate_wth_args(task_t, void(*)(thread_t, void *), void *);
114int cpu_number(void);	/* XXX <machine/...> include path broken */
115
116/* XXX should probably be static, but it's debugging code... */
117int kdbg_read(user_addr_t, size_t *, vnode_t, vfs_context_t);
118void kdbg_control_chud(int, void *);
119int kdbg_control(int *, u_int, user_addr_t, size_t *);
120int kdbg_getentropy (user_addr_t, size_t *, int);
121int kdbg_readcpumap(user_addr_t, size_t *);
122int kdbg_readcurcpumap(user_addr_t, size_t *);
123int kdbg_readthrmap(user_addr_t, size_t *, vnode_t, vfs_context_t);
124int kdbg_readcurthrmap(user_addr_t, size_t *);
125int kdbg_getreg(kd_regtype *);
126int kdbg_setreg(kd_regtype *);
127int kdbg_setrtcdec(kd_regtype *);
128int kdbg_setpidex(kd_regtype *);
129int kdbg_setpid(kd_regtype *);
130void kdbg_thrmap_init(void);
131int kdbg_reinit(boolean_t);
132int kdbg_bootstrap(boolean_t);
133
134int kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count, uint8_t** cpumap, uint32_t* cpumap_size);
135kd_threadmap* kdbg_thrmap_init_internal(unsigned int count, unsigned int *mapsize, unsigned int *mapcount);
136
137static int kdbg_enable_typefilter(void);
138static int kdbg_disable_typefilter(void);
139
140static int create_buffers(boolean_t);
141static void delete_buffers(void);
142
143extern void IOSleep(int);
144
145/* trace enable status */
146unsigned int kdebug_enable = 0;
147
148/* track timestamps for security server's entropy needs */
149uint64_t * 	  kd_entropy_buffer = 0;
150unsigned int      kd_entropy_bufsize = 0;
151unsigned int      kd_entropy_count  = 0;
152unsigned int      kd_entropy_indx   = 0;
153vm_offset_t       kd_entropy_buftomem = 0;
154
155#define MAX_ENTROPY_COUNT	(128 * 1024)
156
157#define SLOW_NOLOG	0x01
158#define SLOW_CHECKS	0x02
159#define SLOW_ENTROPY	0x04
160#define SLOW_CHUD	0x08
161
162#define EVENTS_PER_STORAGE_UNIT		2048
163#define MIN_STORAGE_UNITS_PER_CPU	4
164
165#define POINTER_FROM_KDS_PTR(x) (&kd_bufs[x.buffer_index].kdsb_addr[x.offset])
166
167union kds_ptr {
168	struct {
169		uint32_t buffer_index:21;
170		uint16_t offset:11;
171	};
172	uint32_t raw;
173};
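
/*
 * A kds_ptr packs the location of a storage unit into 32 bits:
 * buffer_index selects an entry in kd_bufs[] and offset selects a
 * kd_storage within that buffer (see POINTER_FROM_KDS_PTR above).
 * The all-ones raw value (KDS_PTR_NULL, defined below) terminates
 * every list built from these pointers.
 */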
174
175struct kd_storage {
176	union	kds_ptr kds_next;
177	uint32_t kds_bufindx;
178	uint32_t kds_bufcnt;
179	uint32_t kds_readlast;
180	boolean_t kds_lostevents;
181	uint64_t  kds_timestamp;
182
183	kd_buf	kds_records[EVENTS_PER_STORAGE_UNIT];
184};
185
186#define MAX_BUFFER_SIZE			(1024 * 1024 * 128)
187#define N_STORAGE_UNITS_PER_BUFFER	(MAX_BUFFER_SIZE / sizeof(struct kd_storage))
188
189struct kd_storage_buffers {
190	struct	kd_storage	*kdsb_addr;
191	uint32_t		kdsb_size;
192};
193
194#define KDS_PTR_NULL 0xffffffff
195struct kd_storage_buffers *kd_bufs = NULL;
196int	n_storage_units = 0;
197int	n_storage_buffers = 0;
198int	n_storage_threshold = 0;
199int	kds_waiter = 0;
200int	kde_waiter = 0;
201
202#pragma pack(0)
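/*
 * Per-cpu bookkeeping: kd_list_head/kd_list_tail chain the storage units
 * currently owned by this cpu (oldest at the head; events are appended at
 * the tail).  The struct is padded out to a cache line so that concurrent
 * per-cpu updates do not false-share.
 */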
203struct kd_bufinfo {
204	union  kds_ptr kd_list_head;
205	union  kds_ptr kd_list_tail;
206	boolean_t kd_lostevents;
207	uint32_t _pad;
208	uint64_t kd_prev_timebase;
209	uint32_t num_bufs;
210} __attribute__(( aligned(MAX_CPU_CACHE_LINE_SIZE) ));
211
212struct kd_ctrl_page_t {
213	union kds_ptr kds_free_list;
214	uint32_t enabled	:1;
215	uint32_t _pad0		:31;
216	int			kds_inuse_count;
217	uint32_t kdebug_flags;
218	uint32_t kdebug_slowcheck;
219	/*
220	 * The number of kd_bufinfo structs allocated may not match the current
	 * number of active cpus. We capture the iops list head at initialization,
222	 * which we could use to calculate the number of cpus we allocated data for,
223	 * unless it happens to be null. To avoid that case, we explicitly also
224	 * capture a cpu count.
225	 */
226	kd_iop_t* kdebug_iops;
227	uint32_t kdebug_cpus;
228} kd_ctrl_page = { .kds_free_list = {.raw = KDS_PTR_NULL}, .kdebug_slowcheck = SLOW_NOLOG };
229
230#pragma pack()
231
232struct kd_bufinfo *kdbip = NULL;
233
234#define KDCOPYBUF_COUNT	8192
235#define KDCOPYBUF_SIZE	(KDCOPYBUF_COUNT * sizeof(kd_buf))
236kd_buf *kdcopybuf = NULL;
237
238boolean_t kdlog_bg_trace = FALSE;
239boolean_t kdlog_bg_trace_running = FALSE;
240unsigned int bg_nkdbufs = 0;
241
242unsigned int nkdbufs = 0;
243unsigned int kdlog_beg=0;
244unsigned int kdlog_end=0;
245unsigned int kdlog_value1=0;
246unsigned int kdlog_value2=0;
247unsigned int kdlog_value3=0;
248unsigned int kdlog_value4=0;
249
250static lck_spin_t * kdw_spin_lock;
251static lck_spin_t * kds_spin_lock;
252static lck_mtx_t  * kd_trace_mtx_sysctl;
253static lck_grp_t  * kd_trace_mtx_sysctl_grp;
254static lck_attr_t * kd_trace_mtx_sysctl_attr;
255static lck_grp_attr_t   *kd_trace_mtx_sysctl_grp_attr;
256
257static lck_grp_t       *stackshot_subsys_lck_grp;
258static lck_grp_attr_t  *stackshot_subsys_lck_grp_attr;
259static lck_attr_t      *stackshot_subsys_lck_attr;
260static lck_mtx_t        stackshot_subsys_mutex;
261
262void *stackshot_snapbuf = NULL;
263
264int
265stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval);
266
267int
268stack_snapshot_from_kernel(pid_t pid, void *buf, uint32_t size, uint32_t flags, unsigned *bytesTraced);
269extern void
270kdp_snapshot_preflight(int pid, void  *tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset);
271
272extern int
273kdp_stack_snapshot_geterror(void);
274extern unsigned int
275kdp_stack_snapshot_bytes_traced(void);
276
277kd_threadmap *kd_mapptr = 0;
278unsigned int kd_mapsize = 0;
279unsigned int kd_mapcount = 0;
280
281off_t	RAW_file_offset = 0;
282int	RAW_file_written = 0;
283
284#define	RAW_FLUSH_SIZE	(2 * 1024 * 1024)
285
286pid_t global_state_pid = -1;       /* Used to control exclusive use of kd_buffer */
287
288#define DBG_FUNC_MASK	0xfffffffc
289
290/*  TODO: move to kdebug.h */
291#define CLASS_MASK      0xff000000
292#define CLASS_OFFSET    24
293#define SUBCLASS_MASK   0x00ff0000
294#define SUBCLASS_OFFSET 16
295#define CSC_MASK        0xffff0000	/*  class and subclass mask */
296#define CSC_OFFSET      SUBCLASS_OFFSET
297
298#define EXTRACT_CLASS(debugid)          ( (uint8_t) ( ((debugid) & CLASS_MASK   ) >> CLASS_OFFSET    ) )
299#define EXTRACT_SUBCLASS(debugid)       ( (uint8_t) ( ((debugid) & SUBCLASS_MASK) >> SUBCLASS_OFFSET ) )
300#define EXTRACT_CSC(debugid)            ( (uint16_t)( ((debugid) & CSC_MASK     ) >> CSC_OFFSET      ) )
301
302#define INTERRUPT	0x01050000
303#define MACH_vmfault	0x01300008
304#define BSC_SysCall	0x040c0000
305#define MACH_SysCall	0x010c0000
306#define DBG_SCALL_MASK	0xffff0000
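
/*
 * Example decomposition (MACH_vmfault, debugid 0x01300008):
 *   EXTRACT_CLASS()    -> 0x01   (DBG_MACH)
 *   EXTRACT_SUBCLASS() -> 0x30
 *   EXTRACT_CSC()      -> 0x0130
 * debugid & DBG_FUNC_MASK strips the DBG_FUNC_START/DBG_FUNC_END bits.
 */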
307
308
309/* task to string structure */
310struct tts
311{
312  task_t    task;            /* from procs task */
313  pid_t     pid;             /* from procs p_pid  */
314  char      task_comm[20];   /* from procs p_comm */
315};
316
317typedef struct tts tts_t;
318
319struct krt
320{
321	kd_threadmap *map;    /* pointer to the map buffer */
322	int count;
323	int maxcount;
324	struct tts *atts;
325};
326
327typedef struct krt krt_t;
328
329/* This is for the CHUD toolkit call */
330typedef void (*kd_chudhook_fn) (uint32_t debugid, uintptr_t arg1,
331				uintptr_t arg2, uintptr_t arg3,
332				uintptr_t arg4, uintptr_t arg5);
333
334volatile kd_chudhook_fn kdebug_chudhook = 0;   /* pointer to CHUD toolkit function */
335
336__private_extern__ void stackshot_lock_init( void );
337
338static uint8_t *type_filter_bitmap;
339
340static uint32_t
341kdbg_cpu_count(boolean_t early_trace)
342{
343	if (early_trace) {
344		/*
		 * we've started tracing before IOKit has even
346		 * started running... just use the static max value
347		 */
348		return max_ncpus;
349	}
350
351	host_basic_info_data_t hinfo;
352	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
353	host_info((host_t)1 /* BSD_HOST */, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
354	assert(hinfo.logical_cpu_max > 0);
355	return hinfo.logical_cpu_max;
356}
357
358#if MACH_ASSERT
359static boolean_t
360kdbg_iop_list_is_valid(kd_iop_t* iop)
361{
	if (iop) {
		/* Is list sorted by cpu_id? */
		kd_iop_t* temp = iop;
		do {
			assert(!temp->next || temp->next->cpu_id == temp->cpu_id - 1);
			assert(temp->next || (temp->cpu_id == kdbg_cpu_count(FALSE) || temp->cpu_id == kdbg_cpu_count(TRUE)));
		} while ((temp = temp->next));

		/* Does each entry have a function and a name? */
		temp = iop;
		do {
			assert(temp->callback.func);
			assert(strlen(temp->callback.iop_name) < sizeof(temp->callback.iop_name));
		} while ((temp = temp->next));
	}

	return TRUE;
379}
380
381static boolean_t
382kdbg_iop_list_contains_cpu_id(kd_iop_t* list, uint32_t cpu_id)
383{
384	while (list) {
385		if (list->cpu_id == cpu_id)
386			return TRUE;
387		list = list->next;
388	}
389
390	return FALSE;
391}
392
393/*
394 * This is a temporary workaround for <rdar://problem/13512084>
395 *
396 * DO NOT CALL IN RELEASE BUILD, LEAKS ADDRESS INFORMATION!
397 */
398static boolean_t
399kdbg_iop_list_check_for_timestamp_rollback(kd_iop_t* list, uint32_t cpu_id, uint64_t timestamp)
400{
401	while (list) {
402		if (list->cpu_id == cpu_id) {
403			if (list->last_timestamp > timestamp) {
404				kprintf("%s is sending trace events that have gone backwards in time. Run the following command: \"symbols -2 -lookup 0x%p\" and file a radar against the matching kext.\n", list->callback.iop_name, (void*)list->callback.func);
405			}
406			/* Unconditional set mitigates syslog spam */
407			list->last_timestamp = timestamp;
408			return TRUE;
409		}
410		list = list->next;
411	}
412
413	return FALSE;
414}
415#endif /* MACH_ASSERT */
416
417static void
418kdbg_iop_list_callback(kd_iop_t* iop, kd_callback_type type, void* arg)
419{
420	while (iop) {
421		iop->callback.func(iop->callback.context, type, arg);
422		iop = iop->next;
423	}
424}
425
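/*
 * Flip tracing on or off.  The enable bits and the SLOW_NOLOG fast-path
 * flag are updated together under kds_spin_lock with interrupts disabled;
 * registered IOPs are then notified outside the lock (KD_CALLBACK_KDEBUG_ENABLED
 * on enable, or KD_CALLBACK_KDEBUG_DISABLED followed by a KD_CALLBACK_SYNC_FLUSH
 * on disable, so IOP events are not left behind in their buffers).
 */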
426static void
427kdbg_set_tracing_enabled(boolean_t enabled, uint32_t trace_type)
428{
429	int s = ml_set_interrupts_enabled(FALSE);
430	lck_spin_lock(kds_spin_lock);
431
432	if (enabled) {
433		kdebug_enable |= trace_type;
434		kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;
435		kd_ctrl_page.enabled = 1;
436	} else {
437		kdebug_enable &= ~(KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT);
438		kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
439		kd_ctrl_page.enabled = 0;
440	}
441	lck_spin_unlock(kds_spin_lock);
442	ml_set_interrupts_enabled(s);
443
444	if (enabled) {
445		kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_KDEBUG_ENABLED, NULL);
446	} else {
447		/*
448		 * If you do not flush the IOP trace buffers, they can linger
449		 * for a considerable period; consider code which disables and
450		 * deallocates without a final sync flush.
451		 */
452		kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_KDEBUG_DISABLED, NULL);
453		kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_SYNC_FLUSH, NULL);
454	}
455}
456
457static void
458kdbg_set_flags(int slowflag, int enableflag, boolean_t enabled)
459{
460	int s = ml_set_interrupts_enabled(FALSE);
461	lck_spin_lock(kds_spin_lock);
462
463	if (enabled) {
464		kd_ctrl_page.kdebug_slowcheck |= slowflag;
465		kdebug_enable |= enableflag;
466	} else {
467		kd_ctrl_page.kdebug_slowcheck &= ~slowflag;
468		kdebug_enable &= ~enableflag;
469	}
470
471	lck_spin_unlock(kds_spin_lock);
472	ml_set_interrupts_enabled(s);
473}
474
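/*
 * disable_wrap()/enable_wrap() bracket operations that need a stable view
 * of the buffers (such as reading them out): disable_wrap saves the current
 * slowcheck/flags and sets KDBG_NOWRAP so storage units are not stolen in
 * the meantime; enable_wrap restores the previous state and re-marks
 * KDBG_WRAPPED if events were lost in the interim.
 */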
475void
476disable_wrap(uint32_t *old_slowcheck, uint32_t *old_flags)
477{
478	int s = ml_set_interrupts_enabled(FALSE);
479	lck_spin_lock(kds_spin_lock);
480
481	*old_slowcheck = kd_ctrl_page.kdebug_slowcheck;
482	*old_flags = kd_ctrl_page.kdebug_flags;
483
484	kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
485	kd_ctrl_page.kdebug_flags |= KDBG_NOWRAP;
486
487	lck_spin_unlock(kds_spin_lock);
488	ml_set_interrupts_enabled(s);
489}
490
491void
492enable_wrap(uint32_t old_slowcheck, boolean_t lostevents)
493{
494	int s = ml_set_interrupts_enabled(FALSE);
495	lck_spin_lock(kds_spin_lock);
496
497	kd_ctrl_page.kdebug_flags &= ~KDBG_NOWRAP;
498
499	if ( !(old_slowcheck & SLOW_NOLOG))
500		kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;
501
502	if (lostevents == TRUE)
503		kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;
504
505	lck_spin_unlock(kds_spin_lock);
506	ml_set_interrupts_enabled(s);
507}
508
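/*
 * Allocate the per-cpu bufinfo array and carve nkdbufs worth of events into
 * fixed-size storage units (at least MIN_STORAGE_UNITS_PER_CPU per cpu).
 * Units are grouped into kd_storage_buffers allocations of at most
 * MAX_BUFFER_SIZE each, and every unit is threaded onto the global free
 * list.  Returns 0 on success or ENOSPC if any allocation fails; partial
 * allocations are torn down via delete_buffers().
 */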
509static int
510create_buffers(boolean_t early_trace)
511{
512        int	i;
513	int	p_buffer_size;
514	int	f_buffer_size;
515	int	f_buffers;
516	int	error = 0;
517
518	/*
519	 * For the duration of this allocation, trace code will only reference
520	 * kdebug_iops. Any iops registered after this enabling will not be
521	 * messaged until the buffers are reallocated.
522	 *
523	 * TLDR; Must read kd_iops once and only once!
524	 */
525	kd_ctrl_page.kdebug_iops = kd_iops;
526
527	assert(kdbg_iop_list_is_valid(kd_ctrl_page.kdebug_iops));
528
529	/*
	 * If the list is valid, it is sorted newest -> oldest. Each iop entry
	 * has a cpu_id of "the older entry's cpu_id + 1", so the highest cpu_id
	 * belongs to the list head, and the cpu count is the head's cpu_id + 1.
533	 */
534
535	kd_ctrl_page.kdebug_cpus = kd_ctrl_page.kdebug_iops ? kd_ctrl_page.kdebug_iops->cpu_id + 1 : kdbg_cpu_count(early_trace);
536
537	if (kmem_alloc(kernel_map, (vm_offset_t *)&kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus) != KERN_SUCCESS) {
538		error = ENOSPC;
539		goto out;
540	}
541
542	if (nkdbufs < (kd_ctrl_page.kdebug_cpus * EVENTS_PER_STORAGE_UNIT * MIN_STORAGE_UNITS_PER_CPU))
543		n_storage_units = kd_ctrl_page.kdebug_cpus * MIN_STORAGE_UNITS_PER_CPU;
544	else
545		n_storage_units = nkdbufs / EVENTS_PER_STORAGE_UNIT;
546
547	nkdbufs = n_storage_units * EVENTS_PER_STORAGE_UNIT;
548
549	f_buffers = n_storage_units / N_STORAGE_UNITS_PER_BUFFER;
550	n_storage_buffers = f_buffers;
551
552	f_buffer_size = N_STORAGE_UNITS_PER_BUFFER * sizeof(struct kd_storage);
553	p_buffer_size = (n_storage_units % N_STORAGE_UNITS_PER_BUFFER) * sizeof(struct kd_storage);
554
555	if (p_buffer_size)
556		n_storage_buffers++;
557
558	kd_bufs = NULL;
559
560	if (kdcopybuf == 0) {
561	        if (kmem_alloc(kernel_map, (vm_offset_t *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE) != KERN_SUCCESS) {
562			error = ENOSPC;
563			goto out;
564		}
565	}
566	if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers))) != KERN_SUCCESS) {
567		error = ENOSPC;
568		goto out;
569	}
570	bzero(kd_bufs, n_storage_buffers * sizeof(struct kd_storage_buffers));
571
572	for (i = 0; i < f_buffers; i++) {
573		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)f_buffer_size) != KERN_SUCCESS) {
574			error = ENOSPC;
575			goto out;
576		}
577		bzero(kd_bufs[i].kdsb_addr, f_buffer_size);
578
579		kd_bufs[i].kdsb_size = f_buffer_size;
580	}
581	if (p_buffer_size) {
582		if (kmem_alloc(kernel_map, (vm_offset_t *)&kd_bufs[i].kdsb_addr, (vm_size_t)p_buffer_size) != KERN_SUCCESS) {
583			error = ENOSPC;
584			goto out;
585		}
586		bzero(kd_bufs[i].kdsb_addr, p_buffer_size);
587
588		kd_bufs[i].kdsb_size = p_buffer_size;
589	}
590	n_storage_units = 0;
591
592	for (i = 0; i < n_storage_buffers; i++) {
593		struct kd_storage *kds;
594		int	n_elements;
595		int	n;
596
597		n_elements = kd_bufs[i].kdsb_size / sizeof(struct kd_storage);
598		kds = kd_bufs[i].kdsb_addr;
599
600		for (n = 0; n < n_elements; n++) {
601			kds[n].kds_next.buffer_index = kd_ctrl_page.kds_free_list.buffer_index;
602			kds[n].kds_next.offset = kd_ctrl_page.kds_free_list.offset;
603
604			kd_ctrl_page.kds_free_list.buffer_index = i;
605			kd_ctrl_page.kds_free_list.offset = n;
606		}
607		n_storage_units += n_elements;
608	}
609
610	bzero((char *)kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus);
611
612	for (i = 0; i < (int)kd_ctrl_page.kdebug_cpus; i++) {
613		kdbip[i].kd_list_head.raw = KDS_PTR_NULL;
614		kdbip[i].kd_list_tail.raw = KDS_PTR_NULL;
615		kdbip[i].kd_lostevents = FALSE;
616		kdbip[i].num_bufs = 0;
617	}
618
619	kd_ctrl_page.kdebug_flags |= KDBG_BUFINIT;
620
621	kd_ctrl_page.kds_inuse_count = 0;
622	n_storage_threshold = n_storage_units / 2;
623out:
624	if (error)
625		delete_buffers();
626
627	return(error);
628}
629
630static void
631delete_buffers(void)
632{
633	int i;
634
635	if (kd_bufs) {
636		for (i = 0; i < n_storage_buffers; i++) {
637			if (kd_bufs[i].kdsb_addr) {
638				kmem_free(kernel_map, (vm_offset_t)kd_bufs[i].kdsb_addr, (vm_size_t)kd_bufs[i].kdsb_size);
639			}
640		}
641		kmem_free(kernel_map, (vm_offset_t)kd_bufs, (vm_size_t)(n_storage_buffers * sizeof(struct kd_storage_buffers)));
642
643		kd_bufs = NULL;
644		n_storage_buffers = 0;
645	}
646	if (kdcopybuf) {
647		kmem_free(kernel_map, (vm_offset_t)kdcopybuf, KDCOPYBUF_SIZE);
648
649		kdcopybuf = NULL;
650	}
651	kd_ctrl_page.kds_free_list.raw = KDS_PTR_NULL;
652
653	if (kdbip) {
654		kmem_free(kernel_map, (vm_offset_t)kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus);
655
656		kdbip = NULL;
657	}
658        kd_ctrl_page.kdebug_iops = NULL;
659	kd_ctrl_page.kdebug_cpus = 0;
660	kd_ctrl_page.kdebug_flags &= ~KDBG_BUFINIT;
661}
662
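/*
 * Return the storage unit at the head of 'cpu's list to the global free
 * list.  The check against kd_list_head under the lock handles the case
 * where the unit has already been stolen by a wrapping writer elsewhere.
 */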
663void
664release_storage_unit(int cpu, uint32_t kdsp_raw)
665{
666	int s = 0;
667	struct	kd_storage *kdsp_actual;
668	struct kd_bufinfo *kdbp;
669	union kds_ptr kdsp;
670
671	kdsp.raw = kdsp_raw;
672
673	s = ml_set_interrupts_enabled(FALSE);
674	lck_spin_lock(kds_spin_lock);
675
676	kdbp = &kdbip[cpu];
677
678	if (kdsp.raw == kdbp->kd_list_head.raw) {
679		/*
680		 * it's possible for the storage unit pointed to
681		 * by kdsp to have already been stolen... so
682		 * check to see if it's still the head of the list
683		 * now that we're behind the lock that protects
684		 * adding and removing from the queue...
685		 * since we only ever release and steal units from
686		 * that position, if it's no longer the head
		 * we have nothing to do in this context
688		 */
689		kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
690		kdbp->kd_list_head = kdsp_actual->kds_next;
691
692		kdsp_actual->kds_next = kd_ctrl_page.kds_free_list;
693		kd_ctrl_page.kds_free_list = kdsp;
694
695		kd_ctrl_page.kds_inuse_count--;
696	}
697	lck_spin_unlock(kds_spin_lock);
698	ml_set_interrupts_enabled(s);
699}
700
701
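/*
 * Attach a fresh storage unit to 'cpu's list: take one from the free list
 * if possible, otherwise (when wrapping is allowed) steal the unit with the
 * oldest first-event timestamp from whichever cpu owns it, marking lost
 * events along the way.  Returns FALSE only when no unit could be obtained,
 * i.e. wrapping is disabled or no stealable victim exists (the latter also
 * disables tracing); in that case the caller drops the event.
 */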
702boolean_t
703allocate_storage_unit(int cpu)
704{
705	union	kds_ptr kdsp;
706	struct	kd_storage *kdsp_actual, *kdsp_next_actual;
707	struct  kd_bufinfo *kdbp, *kdbp_vict, *kdbp_try;
708	uint64_t	oldest_ts, ts;
709	boolean_t	retval = TRUE;
710	int			s = 0;
711
712	s = ml_set_interrupts_enabled(FALSE);
713	lck_spin_lock(kds_spin_lock);
714
715	kdbp = &kdbip[cpu];
716
	/* If someone beat us to the allocation, return success */
718	if (kdbp->kd_list_tail.raw != KDS_PTR_NULL) {
719		kdsp_actual = POINTER_FROM_KDS_PTR(kdbp->kd_list_tail);
720
721		if (kdsp_actual->kds_bufindx < EVENTS_PER_STORAGE_UNIT)
722			goto out;
723	}
724
725	if ((kdsp = kd_ctrl_page.kds_free_list).raw != KDS_PTR_NULL) {
726		kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
727		kd_ctrl_page.kds_free_list = kdsp_actual->kds_next;
728
729		kd_ctrl_page.kds_inuse_count++;
730	} else {
731		if (kd_ctrl_page.kdebug_flags & KDBG_NOWRAP) {
732			kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
733			kdbp->kd_lostevents = TRUE;
734			retval = FALSE;
735			goto out;
736		}
737		kdbp_vict = NULL;
738		oldest_ts = (uint64_t)-1;
739
740		for (kdbp_try = &kdbip[0]; kdbp_try < &kdbip[kd_ctrl_page.kdebug_cpus]; kdbp_try++) {
741
742			if (kdbp_try->kd_list_head.raw == KDS_PTR_NULL) {
743				/*
744				 * no storage unit to steal
745				 */
746				continue;
747			}
748
749			kdsp_actual = POINTER_FROM_KDS_PTR(kdbp_try->kd_list_head);
750
751			if (kdsp_actual->kds_bufcnt < EVENTS_PER_STORAGE_UNIT) {
752				/*
753				 * make sure we don't steal the storage unit
754				 * being actively recorded to...  need to
755				 * move on because we don't want an out-of-order
756				 * set of events showing up later
757				 */
758				continue;
759			}
760			ts = kdbg_get_timestamp(&kdsp_actual->kds_records[0]);
761
762			if (ts < oldest_ts) {
763				/*
764				 * when 'wrapping', we want to steal the
765				 * storage unit that has the 'earliest' time
766				 * associated with it (first event time)
767				 */
768				oldest_ts = ts;
769				kdbp_vict = kdbp_try;
770			}
771		}
772		if (kdbp_vict == NULL) {
773			kdebug_enable = 0;
774			kd_ctrl_page.enabled = 0;
775			retval = FALSE;
776			goto out;
777		}
778		kdsp = kdbp_vict->kd_list_head;
779		kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
780		kdbp_vict->kd_list_head = kdsp_actual->kds_next;
781
782		if (kdbp_vict->kd_list_head.raw != KDS_PTR_NULL) {
783			kdsp_next_actual = POINTER_FROM_KDS_PTR(kdbp_vict->kd_list_head);
784			kdsp_next_actual->kds_lostevents = TRUE;
785		} else
786			kdbp_vict->kd_lostevents = TRUE;
787
788		kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED;
789	}
790	kdsp_actual->kds_timestamp = mach_absolute_time();
791	kdsp_actual->kds_next.raw = KDS_PTR_NULL;
792	kdsp_actual->kds_bufcnt	  = 0;
793	kdsp_actual->kds_readlast = 0;
794
795	kdsp_actual->kds_lostevents = kdbp->kd_lostevents;
796	kdbp->kd_lostevents = FALSE;
797	kdsp_actual->kds_bufindx  = 0;
798
799	if (kdbp->kd_list_head.raw == KDS_PTR_NULL)
800		kdbp->kd_list_head = kdsp;
801	else
802		POINTER_FROM_KDS_PTR(kdbp->kd_list_tail)->kds_next = kdsp;
803	kdbp->kd_list_tail = kdsp;
804out:
805	lck_spin_unlock(kds_spin_lock);
806	ml_set_interrupts_enabled(s);
807
808	return (retval);
809}
810
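/*
 * Register an IOP with the trace subsystem.  The new entry is pushed onto
 * the lock-free kd_iops list with a provisional cpu_id of the current
 * head's cpu_id + 1 (or kdbg_cpu_count(FALSE) for the first registration);
 * the CAS on kd_iops resolves races between concurrent registrations.
 * Returns the assigned cpu_id, or 0 if the allocation fails.
 *
 * Illustrative caller (sketch only; the driver-side names are hypothetical):
 *
 *	kd_callback_t cb = {
 *		.func     = my_iop_trace_callback,
 *		.context  = my_iop_state,
 *		.iop_name = "MyIOP"
 *	};
 *	uint32_t my_cpu_id = kernel_debug_register_callback(cb);
 *	// later: kernel_debug_enter(my_cpu_id, debugid, timestamp, ...);
 */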
811int
812kernel_debug_register_callback(kd_callback_t callback)
813{
814	kd_iop_t* iop;
815	if (kmem_alloc(kernel_map, (vm_offset_t *)&iop, sizeof(kd_iop_t)) == KERN_SUCCESS) {
816		memcpy(&iop->callback, &callback, sizeof(kd_callback_t));
817
818		/*
819		 * <rdar://problem/13351477> Some IOP clients are not providing a name.
820		 *
821		 * Remove when fixed.
822		 */
823		{
824			boolean_t is_valid_name = FALSE;
825			for (uint32_t length=0; length<sizeof(callback.iop_name); ++length) {
826				/* This is roughly isprintable(c) */
827				if (callback.iop_name[length] > 0x20 && callback.iop_name[length] < 0x7F)
828					continue;
829				if (callback.iop_name[length] == 0) {
830					if (length)
831						is_valid_name = TRUE;
832					break;
833				}
834			}
835
836			if (!is_valid_name) {
837				strlcpy(iop->callback.iop_name, "IOP-???", sizeof(iop->callback.iop_name));
838			}
839		}
840
841		iop->last_timestamp = 0;
842
843		do {
844			/*
			 * We use two pieces of state: the old list head
			 * pointer and the value of old_list_head->cpu_id.
847			 * If we read kd_iops more than once, it can change
848			 * between reads.
849			 *
850			 * TLDR; Must not read kd_iops more than once per loop.
851			 */
852			iop->next = kd_iops;
853			iop->cpu_id = iop->next ? (iop->next->cpu_id+1) : kdbg_cpu_count(FALSE);
854
855			/*
856			 * Header says OSCompareAndSwapPtr has a memory barrier
857			 */
858		} while (!OSCompareAndSwapPtr(iop->next, iop, (void* volatile*)&kd_iops));
859
860		return iop->cpu_id;
861	}
862
863	return 0;
864}
865
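/*
 * Event-entry path for IOPs.  'coreid' is the cpu_id handed back by
 * kernel_debug_register_callback(), and 'timestamp' is supplied by the IOP
 * in the AP timebase as best it can manage (see the IOP notes at the top
 * of this file).  Filtering here is a subset of kernel_debug_internal():
 * the typefilter, range and value checks apply, but there are no pid
 * checks, since there is no meaningful current process.
 */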
866void
867kernel_debug_enter(
868	uint32_t	coreid,
869	uint32_t	debugid,
870	uint64_t	timestamp,
871	uintptr_t	arg1,
872	uintptr_t	arg2,
873	uintptr_t	arg3,
874	uintptr_t	arg4,
875	uintptr_t	threadid
876	)
877{
878	uint32_t	bindx;
879	kd_buf		*kd;
880	struct kd_bufinfo *kdbp;
881	struct kd_storage *kdsp_actual;
882	union  kds_ptr kds_raw;
883
884	if (kd_ctrl_page.kdebug_slowcheck) {
885
886		if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT)))
887			goto out1;
888
889		if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
890			if (isset(type_filter_bitmap, EXTRACT_CSC(debugid)))
891				goto record_event;
892			goto out1;
893		}
894		else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
895			if (debugid >= kdlog_beg && debugid <= kdlog_end)
896				goto record_event;
897			goto out1;
898		}
899		else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
900			if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
901				(debugid & DBG_FUNC_MASK) != kdlog_value2 &&
902				(debugid & DBG_FUNC_MASK) != kdlog_value3 &&
903				(debugid & DBG_FUNC_MASK) != kdlog_value4)
904				goto out1;
905		}
906	}
907
908record_event:
909	assert(kdbg_iop_list_contains_cpu_id(kd_ctrl_page.kdebug_iops, coreid));
910	/* Remove when <rdar://problem/13512084> is closed. */
911	assert(kdbg_iop_list_check_for_timestamp_rollback(kd_ctrl_page.kdebug_iops, coreid, timestamp));
912
913	disable_preemption();
914
915	if (kd_ctrl_page.enabled == 0)
916		goto out;
917
918	kdbp = &kdbip[coreid];
919	timestamp &= KDBG_TIMESTAMP_MASK;
920
921retry_q:
922	kds_raw = kdbp->kd_list_tail;
923
924	if (kds_raw.raw != KDS_PTR_NULL) {
925		kdsp_actual = POINTER_FROM_KDS_PTR(kds_raw);
926		bindx = kdsp_actual->kds_bufindx;
927	} else
928		kdsp_actual = NULL;
929
930	if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
931		if (allocate_storage_unit(coreid) == FALSE) {
932			/*
933			 * this can only happen if wrapping
934			 * has been disabled
935			 */
936			goto out;
937		}
938		goto retry_q;
939	}
940	if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
941		goto retry_q;
942
943	// IOP entries can be allocated before xnu allocates and inits the buffer
944	if (timestamp < kdsp_actual->kds_timestamp)
945		kdsp_actual->kds_timestamp = timestamp;
946
947	kd = &kdsp_actual->kds_records[bindx];
948
949	kd->debugid = debugid;
950	kd->arg1 = arg1;
951	kd->arg2 = arg2;
952	kd->arg3 = arg3;
953	kd->arg4 = arg4;
954	kd->arg5 = threadid;
955
956	kdbg_set_timestamp_and_cpu(kd, timestamp, coreid);
957
958	OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
959out:
960	enable_preemption();
961out1:
962	if ((kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold)) {
963		boolean_t need_kds_wakeup = FALSE;
964		int	s;
965
966		/*
967		 * try to take the lock here to synchronize with the
968		 * waiter entering the blocked state... use the try
969		 * mode to prevent deadlocks caused by re-entering this
970		 * routine due to various trace points triggered in the
971		 * lck_spin_sleep_xxxx routines used to actually enter
972		 * our wait condition... no problem if we fail,
973		 * there will be lots of additional events coming in that
974		 * will eventually succeed in grabbing this lock
975		 */
976		s = ml_set_interrupts_enabled(FALSE);
977
978		if (lck_spin_try_lock(kdw_spin_lock)) {
979
980			if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
981				kds_waiter = 0;
982				need_kds_wakeup = TRUE;
983			}
984			lck_spin_unlock(kdw_spin_lock);
985
986			ml_set_interrupts_enabled(s);
987
988			if (need_kds_wakeup == TRUE)
989				wakeup(&kds_waiter);
990		}
991	}
992}
993
994
995
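/*
 * Common event-entry path for the APs (kernel_debug, kernel_debug1 and the
 * kdebug_trace syscall all funnel through here).  The slow path handles the
 * CHUD hook, entropy collection, pid include/exclude and the
 * typefilter/range/value checks; the fast path reserves a slot in the
 * current cpu's tail storage unit with a lock-free compare-and-swap on
 * kds_bufindx and fills in the record.
 */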
996void
997kernel_debug_internal(
998	uint32_t	debugid,
999	uintptr_t	arg1,
1000	uintptr_t	arg2,
1001	uintptr_t	arg3,
1002	uintptr_t	arg4,
1003	uintptr_t	arg5,
1004	int		entropy_flag);
1005
1006__attribute__((always_inline)) void
1007kernel_debug_internal(
1008	uint32_t	debugid,
1009	uintptr_t	arg1,
1010	uintptr_t	arg2,
1011	uintptr_t	arg3,
1012	uintptr_t	arg4,
1013	uintptr_t	arg5,
1014	int		entropy_flag)
1015{
1016	struct proc 	*curproc;
1017	uint64_t 	now;
1018	uint32_t	bindx;
1019	boolean_t	s;
1020	kd_buf		*kd;
1021	int		cpu;
1022	struct kd_bufinfo *kdbp;
1023	struct kd_storage *kdsp_actual;
1024	union  kds_ptr kds_raw;
1025
1026
1027
1028	if (kd_ctrl_page.kdebug_slowcheck) {
1029
1030		if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
1031			kd_chudhook_fn chudhook;
1032			/*
1033			 * Mask interrupts to minimize the interval across
1034			 * which the driver providing the hook could be
1035			 * unloaded.
1036			 */
1037			s = ml_set_interrupts_enabled(FALSE);
1038			chudhook = kdebug_chudhook;
1039			if (chudhook)
1040				chudhook(debugid, arg1, arg2, arg3, arg4, arg5);
1041			ml_set_interrupts_enabled(s);
1042		}
1043		if ((kdebug_enable & KDEBUG_ENABLE_ENTROPY) && entropy_flag) {
1044
1045			now = mach_absolute_time();
1046
1047			s = ml_set_interrupts_enabled(FALSE);
1048			lck_spin_lock(kds_spin_lock);
1049
1050			if (kdebug_enable & KDEBUG_ENABLE_ENTROPY) {
1051
1052				if (kd_entropy_indx < kd_entropy_count)	{
1053					kd_entropy_buffer[kd_entropy_indx] = now;
1054					kd_entropy_indx++;
1055				}
1056				if (kd_entropy_indx == kd_entropy_count) {
1057					/*
1058					 * Disable entropy collection
1059					 */
1060					kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
1061					kd_ctrl_page.kdebug_slowcheck &= ~SLOW_ENTROPY;
1062				}
1063			}
1064			lck_spin_unlock(kds_spin_lock);
1065			ml_set_interrupts_enabled(s);
1066		}
1067		if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT)))
1068			goto out1;
1069
1070		if ( !ml_at_interrupt_context()) {
1071			if (kd_ctrl_page.kdebug_flags & KDBG_PIDCHECK) {
1072				/*
1073				 * If kdebug flag is not set for current proc, return
1074				 */
1075				curproc = current_proc();
1076
1077				if ((curproc && !(curproc->p_kdebug)) &&
1078				    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
1079				      (debugid >> 24 != DBG_TRACE))
1080					goto out1;
1081			}
1082			else if (kd_ctrl_page.kdebug_flags & KDBG_PIDEXCLUDE) {
1083				/*
1084				 * If kdebug flag is set for current proc, return
1085				 */
1086				curproc = current_proc();
1087
1088				if ((curproc && curproc->p_kdebug) &&
1089				    ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) &&
1090				      (debugid >> 24 != DBG_TRACE))
1091					goto out1;
1092			}
1093		}
1094
1095		if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
1096			/* Always record trace system info */
1097			if (EXTRACT_CLASS(debugid) == DBG_TRACE)
1098				goto record_event;
1099
1100			if (isset(type_filter_bitmap, EXTRACT_CSC(debugid)))
1101				goto record_event;
1102			goto out1;
1103		}
1104		else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) {
1105			/* Always record trace system info */
1106			if (EXTRACT_CLASS(debugid) == DBG_TRACE)
1107				goto record_event;
1108
1109			if (debugid < kdlog_beg || debugid > kdlog_end)
1110				goto out1;
1111		}
1112		else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) {
1113			/* Always record trace system info */
1114			if (EXTRACT_CLASS(debugid) == DBG_TRACE)
1115				goto record_event;
1116
1117			if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
1118			    (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
1119			    (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
1120			    (debugid & DBG_FUNC_MASK) != kdlog_value4)
1121				goto out1;
1122		}
1123	}
1124record_event:
1125	disable_preemption();
1126
1127	if (kd_ctrl_page.enabled == 0)
1128		goto out;
1129
1130	cpu = cpu_number();
1131	kdbp = &kdbip[cpu];
1132retry_q:
1133	kds_raw = kdbp->kd_list_tail;
1134
1135	if (kds_raw.raw != KDS_PTR_NULL) {
1136		kdsp_actual = POINTER_FROM_KDS_PTR(kds_raw);
1137		bindx = kdsp_actual->kds_bufindx;
1138	} else
1139		kdsp_actual = NULL;
1140
1141	if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) {
1142		if (allocate_storage_unit(cpu) == FALSE) {
1143			/*
1144			 * this can only happen if wrapping
1145			 * has been disabled
1146			 */
1147			goto out;
1148		}
1149		goto retry_q;
1150	}
1151	now = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
1152
1153	if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx))
1154		goto retry_q;
1155
1156	kd = &kdsp_actual->kds_records[bindx];
1157
1158	kd->debugid = debugid;
1159	kd->arg1 = arg1;
1160	kd->arg2 = arg2;
1161	kd->arg3 = arg3;
1162	kd->arg4 = arg4;
1163	kd->arg5 = arg5;
1164
1165	kdbg_set_timestamp_and_cpu(kd, now, cpu);
1166
1167	OSAddAtomic(1, &kdsp_actual->kds_bufcnt);
1168out:
1169	enable_preemption();
1170out1:
1171	if ((kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) ||
1172	    (kde_waiter && kd_entropy_indx >= kd_entropy_count)) {
1173		uint32_t	etype;
1174		uint32_t	stype;
1175
1176		etype = debugid & DBG_FUNC_MASK;
1177		stype = debugid & DBG_SCALL_MASK;
1178
1179		if (etype == INTERRUPT || etype == MACH_vmfault ||
1180		    stype == BSC_SysCall || stype == MACH_SysCall) {
1181
1182			boolean_t need_kds_wakeup = FALSE;
1183			boolean_t need_kde_wakeup = FALSE;
1184
1185			/*
1186			 * try to take the lock here to synchronize with the
1187			 * waiter entering the blocked state... use the try
1188			 * mode to prevent deadlocks caused by re-entering this
1189			 * routine due to various trace points triggered in the
1190			 * lck_spin_sleep_xxxx routines used to actually enter
1191			 * one of our 2 wait conditions... no problem if we fail,
1192			 * there will be lots of additional events coming in that
1193			 * will eventually succeed in grabbing this lock
1194			 */
1195			s = ml_set_interrupts_enabled(FALSE);
1196
1197			if (lck_spin_try_lock(kdw_spin_lock)) {
1198
1199				if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) {
1200					kds_waiter = 0;
1201					need_kds_wakeup = TRUE;
1202				}
1203				if (kde_waiter && kd_entropy_indx >= kd_entropy_count) {
1204					kde_waiter = 0;
1205					need_kde_wakeup = TRUE;
1206				}
1207				lck_spin_unlock(kdw_spin_lock);
1208			}
1209			ml_set_interrupts_enabled(s);
1210
1211			if (need_kds_wakeup == TRUE)
1212				wakeup(&kds_waiter);
1213			if (need_kde_wakeup == TRUE)
1214				wakeup(&kde_waiter);
1215		}
1216	}
1217}
1218
1219void
1220kernel_debug(
1221	uint32_t	debugid,
1222	uintptr_t	arg1,
1223	uintptr_t	arg2,
1224	uintptr_t	arg3,
1225	uintptr_t	arg4,
1226	__unused uintptr_t arg5)
1227{
1228	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, (uintptr_t)thread_tid(current_thread()), 1);
1229}
1230
1231void
1232kernel_debug1(
1233	uint32_t	debugid,
1234	uintptr_t	arg1,
1235	uintptr_t	arg2,
1236	uintptr_t	arg3,
1237	uintptr_t	arg4,
1238	uintptr_t	arg5)
1239{
1240	kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5, 1);
1241}
1242
1243/*
1244 * Support syscall SYS_kdebug_trace
1245 */
1246int
1247kdebug_trace(__unused struct proc *p, struct kdebug_trace_args *uap, __unused int32_t *retval)
1248{
1249	if ( __probable(kdebug_enable == 0) )
1250		return(EINVAL);
1251
1252	kernel_debug_internal(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, (uintptr_t)thread_tid(current_thread()), 0);
1253
1254	return(0);
1255}
1256
1257
1258static void
1259kdbg_lock_init(void)
1260{
1261	if (kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT)
1262		return;
1263
1264	/*
1265	 * allocate lock group attribute and group
1266	 */
1267	kd_trace_mtx_sysctl_grp_attr = lck_grp_attr_alloc_init();
1268	kd_trace_mtx_sysctl_grp = lck_grp_alloc_init("kdebug", kd_trace_mtx_sysctl_grp_attr);
1269
1270	/*
1271	 * allocate the lock attribute
1272	 */
1273	kd_trace_mtx_sysctl_attr = lck_attr_alloc_init();
1274
1275
1276	/*
1277	 * allocate and initialize mutex's
1278	 */
1279	kd_trace_mtx_sysctl = lck_mtx_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
1280	kds_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
1281	kdw_spin_lock = lck_spin_alloc_init(kd_trace_mtx_sysctl_grp, kd_trace_mtx_sysctl_attr);
1282
1283	kd_ctrl_page.kdebug_flags |= KDBG_LOCKINIT;
1284}
1285
1286
1287int
1288kdbg_bootstrap(boolean_t early_trace)
1289{
1290        kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED;
1291
1292	return (create_buffers(early_trace));
1293}
1294
1295int
1296kdbg_reinit(boolean_t early_trace)
1297{
1298	int ret = 0;
1299
1300	/*
	 * Disable trace collecting.
	 * First make sure we're not in
	 * the middle of cutting a trace.
1304	 */
1305	kdbg_set_tracing_enabled(FALSE, KDEBUG_ENABLE_TRACE);
1306
1307	/*
1308	 * make sure the SLOW_NOLOG is seen
1309	 * by everyone that might be trying
1310	 * to cut a trace..
1311	 */
1312	IOSleep(100);
1313
1314	delete_buffers();
1315
1316	if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) {
1317		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
1318		kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
1319		kd_mapsize = 0;
1320		kd_mapptr = (kd_threadmap *) 0;
1321		kd_mapcount = 0;
1322	}
1323	ret = kdbg_bootstrap(early_trace);
1324
1325	RAW_file_offset = 0;
1326	RAW_file_written = 0;
1327
1328	return(ret);
1329}
1330
1331void
1332kdbg_trace_data(struct proc *proc, long *arg_pid)
1333{
1334	if (!proc)
1335		*arg_pid = 0;
1336	else
1337		*arg_pid = proc->p_pid;
1338}
1339
1340
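/*
 * Pack the first bytes of the proc's p_comm (zero-filled) into four
 * long-sized trace arguments so the command name can be emitted as the
 * payload of a trace event.
 */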
1341void
1342kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
1343{
1344	char *dbg_nameptr;
1345	int dbg_namelen;
1346	long dbg_parms[4];
1347
1348	if (!proc) {
1349		*arg1 = 0;
1350		*arg2 = 0;
1351		*arg3 = 0;
1352		*arg4 = 0;
1353		return;
1354	}
1355	/*
1356	 * Collect the pathname for tracing
1357	 */
1358	dbg_nameptr = proc->p_comm;
1359	dbg_namelen = (int)strlen(proc->p_comm);
1360	dbg_parms[0]=0L;
1361	dbg_parms[1]=0L;
1362	dbg_parms[2]=0L;
1363	dbg_parms[3]=0L;
1364
1365	if(dbg_namelen > (int)sizeof(dbg_parms))
1366		dbg_namelen = (int)sizeof(dbg_parms);
1367
1368	strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen);
1369
1370	*arg1=dbg_parms[0];
1371	*arg2=dbg_parms[1];
1372	*arg3=dbg_parms[2];
1373	*arg4=dbg_parms[3];
1374}
1375
1376static void
1377kdbg_resolve_map(thread_t th_act, void *opaque)
1378{
1379	kd_threadmap *mapptr;
1380	krt_t *t = (krt_t *)opaque;
1381
1382	if (t->count < t->maxcount) {
1383		mapptr = &t->map[t->count];
1384		mapptr->thread  = (uintptr_t)thread_tid(th_act);
1385
1386		(void) strlcpy (mapptr->command, t->atts->task_comm,
1387				sizeof(t->atts->task_comm));
1388		/*
1389		 * Some kernel threads have no associated pid.
1390		 * We still need to mark the entry as valid.
1391		 */
1392		if (t->atts->pid)
1393			mapptr->valid = t->atts->pid;
1394		else
1395			mapptr->valid = 1;
1396
1397		t->count++;
1398	}
1399}
1400
1401/*
1402 *
1403 * Writes a cpumap for the given iops_list/cpu_count to the provided buffer.
1404 *
1405 * You may provide a buffer and size, or if you set the buffer to NULL, a
1406 * buffer of sufficient size will be allocated.
1407 *
1408 * If you provide a buffer and it is too small, sets cpumap_size to the number
1409 * of bytes required and returns EINVAL.
1410 *
1411 * On success, if you provided a buffer, cpumap_size is set to the number of
1412 * bytes written. If you did not provide a buffer, cpumap is set to the newly
1413 * allocated buffer and cpumap_size is set to the number of bytes allocated.
1414 *
1415 * NOTE: It may seem redundant to pass both iops and a cpu_count.
1416 *
1417 * We may be reporting data from "now", or from the "past".
1418 *
1419 * The "now" data would be for something like kdbg_readcurcpumap().
1420 * The "past" data would be for kdbg_readcpumap().
1421 *
1422 * If we do not pass both iops and cpu_count, and iops is NULL, this function
1423 * will need to read "now" state to get the number of cpus, which would be in
1424 * error if we were reporting "past" state.
1425 */
1426
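/*
 * Typical allocate-on-demand call, as used by kdbg_readcpumap() below
 * (sketch only; error handling elided):
 *
 *	uint8_t  *cpumap = NULL;
 *	uint32_t  cpumap_size = 0;
 *	kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops,
 *	    kd_ctrl_page.kdebug_cpus, &cpumap, &cpumap_size);
 *	...
 *	kmem_free(kernel_map, (vm_offset_t)cpumap, cpumap_size);
 */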
1427int
1428kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count, uint8_t** cpumap, uint32_t* cpumap_size)
1429{
1430	assert(cpumap);
1431	assert(cpumap_size);
1432	assert(cpu_count);
1433	assert(!iops || iops->cpu_id + 1 == cpu_count);
1434
1435	uint32_t bytes_needed = sizeof(kd_cpumap_header) + cpu_count * sizeof(kd_cpumap);
1436	uint32_t bytes_available = *cpumap_size;
1437	*cpumap_size = bytes_needed;
1438
1439	if (*cpumap == NULL) {
1440		if (kmem_alloc(kernel_map, (vm_offset_t*)cpumap, (vm_size_t)*cpumap_size) != KERN_SUCCESS) {
1441			return ENOMEM;
1442		}
1443	} else if (bytes_available < bytes_needed) {
1444		return EINVAL;
1445	}
1446
1447	kd_cpumap_header* header = (kd_cpumap_header*)(uintptr_t)*cpumap;
1448
1449	header->version_no = RAW_VERSION1;
1450	header->cpu_count = cpu_count;
1451
1452	kd_cpumap* cpus = (kd_cpumap*)&header[1];
1453
1454	int32_t index = cpu_count - 1;
1455	while (iops) {
1456		cpus[index].cpu_id = iops->cpu_id;
1457		cpus[index].flags = KDBG_CPUMAP_IS_IOP;
1458		bzero(cpus[index].name, sizeof(cpus->name));
1459		strlcpy(cpus[index].name, iops->callback.iop_name, sizeof(cpus->name));
1460
1461		iops = iops->next;
1462		index--;
1463	}
1464
1465	while (index >= 0) {
1466		cpus[index].cpu_id = index;
1467		cpus[index].flags = 0;
1468		bzero(cpus[index].name, sizeof(cpus->name));
1469		strlcpy(cpus[index].name, "AP", sizeof(cpus->name));
1470
1471		index--;
1472	}
1473
1474	return KERN_SUCCESS;
1475}
1476
1477void
1478kdbg_thrmap_init(void)
1479{
1480        if (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT)
1481		return;
1482
1483	kd_mapptr = kdbg_thrmap_init_internal(0, &kd_mapsize, &kd_mapcount);
1484
1485	if (kd_mapptr)
1486		kd_ctrl_page.kdebug_flags |= KDBG_MAPINIT;
1487}
1488
1489
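/*
 * Build a snapshot of thread -> {pid, command} mappings for all live tasks.
 * 'count' is the caller's capacity in kd_threadmap entries (0 means no
 * limit); if the estimated map would not fit, NULL is returned.  On success
 * the returned buffer is described by *mapsize / *mapcount and is
 * eventually released with kmem_free().
 */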
1490kd_threadmap* kdbg_thrmap_init_internal(unsigned int count, unsigned int *mapsize, unsigned int *mapcount)
1491{
1492	kd_threadmap	*mapptr;
1493	struct proc	*p;
1494	struct krt	akrt;
1495	int		tts_count;    /* number of task-to-string structures */
1496	struct tts	*tts_mapptr;
1497	unsigned int	tts_mapsize = 0;
1498	int		i;
1499	vm_offset_t	kaddr;
1500
1501	/*
1502	 * need to use PROC_SCANPROCLIST with proc_iterate
1503	 */
1504	proc_list_lock();
1505
1506	/*
1507	 * Calculate the sizes of map buffers
1508	 */
1509	for (p = allproc.lh_first, *mapcount=0, tts_count=0; p; p = p->p_list.le_next) {
1510		*mapcount += get_task_numacts((task_t)p->task);
1511		tts_count++;
1512	}
1513	proc_list_unlock();
1514
1515	/*
1516	 * The proc count could change during buffer allocation,
1517	 * so introduce a small fudge factor to bump up the
1518	 * buffer sizes. This gives new tasks some chance of
	 * making it into the tables.  Bump up by 25%.
1520	 */
1521	*mapcount += *mapcount/4;
1522	tts_count += tts_count/4;
1523
1524	*mapsize = *mapcount * sizeof(kd_threadmap);
1525
1526	if (count && count < *mapcount)
1527		return (0);
1528
1529	if ((kmem_alloc(kernel_map, &kaddr, (vm_size_t)*mapsize) == KERN_SUCCESS)) {
1530		bzero((void *)kaddr, *mapsize);
1531		mapptr = (kd_threadmap *)kaddr;
1532	} else
1533		return (0);
1534
1535	tts_mapsize = tts_count * sizeof(struct tts);
1536
1537	if ((kmem_alloc(kernel_map, &kaddr, (vm_size_t)tts_mapsize) == KERN_SUCCESS)) {
1538		bzero((void *)kaddr, tts_mapsize);
1539		tts_mapptr = (struct tts *)kaddr;
1540	} else {
1541		kmem_free(kernel_map, (vm_offset_t)mapptr, *mapsize);
1542
1543		return (0);
1544	}
1545	/*
1546	 * We need to save the procs command string
1547	 * and take a reference for each task associated
1548	 * with a valid process
1549	 */
1550
1551	proc_list_lock();
1552
1553	/*
1554	 * should use proc_iterate
1555	 */
1556	for (p = allproc.lh_first, i=0; p && i < tts_count; p = p->p_list.le_next) {
1557		if (p->p_lflag & P_LEXIT)
1558			continue;
1559
1560		if (p->task) {
1561			task_reference(p->task);
1562			tts_mapptr[i].task = p->task;
1563			tts_mapptr[i].pid  = p->p_pid;
1564			(void)strlcpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm));
1565			i++;
1566		}
1567	}
1568	tts_count = i;
1569
1570	proc_list_unlock();
1571
1572	/*
1573	 * Initialize thread map data
1574	 */
1575	akrt.map = mapptr;
1576	akrt.count = 0;
1577	akrt.maxcount = *mapcount;
1578
1579	for (i = 0; i < tts_count; i++) {
1580		akrt.atts = &tts_mapptr[i];
1581		task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
1582		task_deallocate((task_t) tts_mapptr[i].task);
1583	}
1584	kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
1585
1586	*mapcount = akrt.count;
1587
1588	return (mapptr);
1589}
1590
1591static void
1592kdbg_clear(void)
1593{
	/*
	 * Clean up the trace buffer.
	 * First make sure we're not in
	 * the middle of cutting a trace.
1598	 */
1599	kdbg_set_tracing_enabled(FALSE, KDEBUG_ENABLE_TRACE);
1600
1601	/*
1602	 * make sure the SLOW_NOLOG is seen
1603	 * by everyone that might be trying
1604	 * to cut a trace..
1605	 */
1606	IOSleep(100);
1607
1608        global_state_pid = -1;
1609	kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1610	kd_ctrl_page.kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
1611	kd_ctrl_page.kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);
1612
1613	kdbg_disable_typefilter();
1614
1615	delete_buffers();
1616	nkdbufs	= 0;
1617
1618	/* Clean up the thread map buffer */
1619	kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
1620	if (kd_mapptr) {
1621		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
1622		kd_mapptr = (kd_threadmap *) 0;
1623	}
1624	kd_mapsize = 0;
1625	kd_mapcount = 0;
1626
1627	RAW_file_offset = 0;
1628	RAW_file_written = 0;
1629}
1630
1631int
1632kdbg_setpid(kd_regtype *kdr)
1633{
1634	pid_t pid;
1635	int flag, ret=0;
1636	struct proc *p;
1637
1638	pid = (pid_t)kdr->value1;
1639	flag = (int)kdr->value2;
1640
1641	if (pid > 0) {
1642		if ((p = proc_find(pid)) == NULL)
1643			ret = ESRCH;
1644		else {
1645			if (flag == 1) {
1646				/*
1647				 * turn on pid check for this and all pids
1648				 */
1649				kd_ctrl_page.kdebug_flags |= KDBG_PIDCHECK;
1650				kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
1651				kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1652
1653				p->p_kdebug = 1;
1654			} else {
1655				/*
1656				 * turn off pid check for this pid value
1657				 * Don't turn off all pid checking though
1658				 *
1659				 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
1660				 */
1661				p->p_kdebug = 0;
1662			}
1663			proc_rele(p);
1664		}
1665	}
1666	else
1667		ret = EINVAL;
1668
1669	return(ret);
1670}
1671
1672/* This is for pid exclusion in the trace buffer */
1673int
1674kdbg_setpidex(kd_regtype *kdr)
1675{
1676	pid_t pid;
1677	int flag, ret=0;
1678	struct proc *p;
1679
1680	pid = (pid_t)kdr->value1;
1681	flag = (int)kdr->value2;
1682
1683	if (pid > 0) {
1684		if ((p = proc_find(pid)) == NULL)
1685			ret = ESRCH;
1686		else {
1687			if (flag == 1) {
1688				/*
1689				 * turn on pid exclusion
1690				 */
1691				kd_ctrl_page.kdebug_flags |= KDBG_PIDEXCLUDE;
1692				kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK;
1693				kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1694
1695				p->p_kdebug = 1;
1696			}
1697			else {
1698				/*
1699				 * turn off pid exclusion for this pid value
1700				 * Don't turn off all pid exclusion though
1701				 *
1702				 * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE;
1703				 */
1704				p->p_kdebug = 0;
1705			}
1706			proc_rele(p);
1707		}
1708	} else
1709		ret = EINVAL;
1710
1711	return(ret);
1712}
1713
1714
1715/*
1716 * This is for setting a maximum decrementer value
1717 */
1718int
1719kdbg_setrtcdec(kd_regtype *kdr)
1720{
1721	int ret = 0;
1722	natural_t decval;
1723
1724	decval = (natural_t)kdr->value1;
1725
1726	if (decval && decval < KDBG_MINRTCDEC)
1727		ret = EINVAL;
1728	else
1729		ret = ENOTSUP;
1730
1731	return(ret);
1732}
1733
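/*
 * Switch to typefilter-based filtering: allocate a zeroed bitmap indexed by
 * class/subclass (EXTRACT_CSC), clear the range/value check modes and turn
 * on KDBG_TYPEFILTER_CHECK.  Until the bitmap is populated, only DBG_TRACE
 * class events get through (see kernel_debug_internal()).
 */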
1734int
1735kdbg_enable_typefilter(void)
1736{
1737	if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) {
1738		/* free the old filter */
1739		kdbg_disable_typefilter();
1740	}
1741
1742	if (kmem_alloc(kernel_map, (vm_offset_t *)&type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE) != KERN_SUCCESS) {
1743		return ENOSPC;
1744	}
1745
1746	bzero(type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);
1747
1748	/* Turn off range and value checks */
1749	kd_ctrl_page.kdebug_flags &= ~(KDBG_RANGECHECK | KDBG_VALCHECK);
1750
1751	/* Enable filter checking */
1752	kd_ctrl_page.kdebug_flags |= KDBG_TYPEFILTER_CHECK;
1753	kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1754	return 0;
1755}
1756
1757int
1758kdbg_disable_typefilter(void)
1759{
1760	/*  Disable filter checking */
1761	kd_ctrl_page.kdebug_flags &= ~KDBG_TYPEFILTER_CHECK;
1762
1763	/*  Turn off slow checks unless pid checks are using them */
1764	if ( (kd_ctrl_page.kdebug_flags & (KDBG_PIDCHECK | KDBG_PIDEXCLUDE)) )
1765		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1766	else
1767		kdbg_set_flags(SLOW_CHECKS, 0, FALSE);
1768
1769	if(type_filter_bitmap == NULL)
1770		return 0;
1771
1772	vm_offset_t old_bitmap = (vm_offset_t)type_filter_bitmap;
1773	type_filter_bitmap = NULL;
1774
1775	kmem_free(kernel_map, old_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE);
1776	return 0;
1777}
1778
1779int
1780kdbg_setreg(kd_regtype * kdr)
1781{
1782	int ret=0;
1783	unsigned int val_1, val_2, val;
1784	switch (kdr->type) {
1785
1786	case KDBG_CLASSTYPE :
1787		val_1 = (kdr->value1 & 0xff);
1788		val_2 = (kdr->value2 & 0xff);
1789		kdlog_beg = (val_1<<24);
1790		kdlog_end = (val_2<<24);
1791		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1792		kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
1793		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
1794		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1795		break;
1796	case KDBG_SUBCLSTYPE :
1797		val_1 = (kdr->value1 & 0xff);
1798		val_2 = (kdr->value2 & 0xff);
1799		val = val_2 + 1;
1800		kdlog_beg = ((val_1<<24) | (val_2 << 16));
1801		kdlog_end = ((val_1<<24) | (val << 16));
1802		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1803		kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
1804		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
1805		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1806		break;
1807	case KDBG_RANGETYPE :
1808		kdlog_beg = (kdr->value1);
1809		kdlog_end = (kdr->value2);
1810		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1811		kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
1812		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
1813		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1814		break;
1815	case KDBG_VALCHECK:
1816		kdlog_value1 = (kdr->value1);
1817		kdlog_value2 = (kdr->value2);
1818		kdlog_value3 = (kdr->value3);
1819		kdlog_value4 = (kdr->value4);
1820		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1821		kd_ctrl_page.kdebug_flags &= ~KDBG_RANGECHECK;    /* Turn off range check */
1822		kd_ctrl_page.kdebug_flags |= KDBG_VALCHECK;       /* Turn on specific value check  */
1823		kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1824		break;
1825	case KDBG_TYPENONE :
1826		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1827
1828		if ( (kd_ctrl_page.kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK   |
1829						   KDBG_PIDCHECK   | KDBG_PIDEXCLUDE |
1830						   KDBG_TYPEFILTER_CHECK)) )
1831			kdbg_set_flags(SLOW_CHECKS, 0, TRUE);
1832		else
1833			kdbg_set_flags(SLOW_CHECKS, 0, FALSE);
1834
1835		kdlog_beg = 0;
1836		kdlog_end = 0;
1837		break;
1838	default :
1839		ret = EINVAL;
1840		break;
1841	}
1842	return(ret);
1843}
1844
1845int
1846kdbg_getreg(__unused kd_regtype * kdr)
1847{
1848#if 0
1849	int i,j, ret=0;
1850	unsigned int val_1, val_2, val;
1851
1852	switch (kdr->type) {
1853	case KDBG_CLASSTYPE :
1854		val_1 = (kdr->value1 & 0xff);
1855		val_2 = val_1 + 1;
1856		kdlog_beg = (val_1<<24);
1857		kdlog_end = (val_2<<24);
1858		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1859		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
1860		break;
1861	case KDBG_SUBCLSTYPE :
1862		val_1 = (kdr->value1 & 0xff);
1863		val_2 = (kdr->value2 & 0xff);
1864		val = val_2 + 1;
1865		kdlog_beg = ((val_1<<24) | (val_2 << 16));
1866		kdlog_end = ((val_1<<24) | (val << 16));
1867		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1868		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
1869		break;
1870	case KDBG_RANGETYPE :
1871		kdlog_beg = (kdr->value1);
1872		kdlog_end = (kdr->value2);
1873		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1874		kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
1875		break;
1876	case KDBG_TYPENONE :
1877		kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
1878		kdlog_beg = 0;
1879		kdlog_end = 0;
1880		break;
1881	default :
1882		ret = EINVAL;
1883		break;
1884	}
1885#endif /* 0 */
1886	return(EINVAL);
1887}
1888
1889int
1890kdbg_readcpumap(user_addr_t user_cpumap, size_t *user_cpumap_size)
1891{
1892	uint8_t* cpumap = NULL;
1893	uint32_t cpumap_size = 0;
1894	int ret = KERN_SUCCESS;
1895
1896	if (kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) {
1897		if (kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, &cpumap, &cpumap_size) == KERN_SUCCESS) {
1898			if (user_cpumap) {
1899				size_t bytes_to_copy = (*user_cpumap_size >= cpumap_size) ? cpumap_size : *user_cpumap_size;
1900				if (copyout(cpumap, user_cpumap, (size_t)bytes_to_copy)) {
1901					ret = EFAULT;
1902				}
1903			}
1904			*user_cpumap_size = cpumap_size;
1905			kmem_free(kernel_map, (vm_offset_t)cpumap, cpumap_size);
1906		} else
1907			ret = EINVAL;
1908	} else
1909		ret = EINVAL;
1910
1911	return (ret);
1912}
1913
1914int
1915kdbg_readcurthrmap(user_addr_t buffer, size_t *bufsize)
1916{
1917	kd_threadmap *mapptr;
1918	unsigned int mapsize;
1919	unsigned int mapcount;
1920	unsigned int count = 0;
1921	int ret = 0;
1922
1923	count = *bufsize/sizeof(kd_threadmap);
1924	*bufsize = 0;
1925
1926	if ( (mapptr = kdbg_thrmap_init_internal(count, &mapsize, &mapcount)) ) {
1927		if (copyout(mapptr, buffer, mapcount * sizeof(kd_threadmap)))
1928			ret = EFAULT;
1929		else
1930			*bufsize = (mapcount * sizeof(kd_threadmap));
1931
1932		kmem_free(kernel_map, (vm_offset_t)mapptr, mapsize);
1933	} else
1934		ret = EINVAL;
1935
1936	return (ret);
1937}
1938
1939int
1940kdbg_readthrmap(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
1941{
1942	int avail = *number;
1943	int ret = 0;
1944	uint32_t count = 0;
1945	unsigned int mapsize;
1946
1947	count = avail/sizeof (kd_threadmap);
1948
1949	mapsize = kd_mapcount * sizeof(kd_threadmap);
1950
1951	if (count && (count <= kd_mapcount))
1952	{
1953		if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
1954		{
1955			if (*number < mapsize)
1956				ret = EINVAL;
1957			else
1958			{
1959				if (vp)
1960				{
1961					RAW_header	header;
1962					clock_sec_t	secs;
1963					clock_usec_t	usecs;
1964					char	*pad_buf;
1965					uint32_t pad_size;
1966					uint32_t extra_thread_count = 0;
1967					uint32_t cpumap_size;
1968
1969					/*
1970					 * To write a RAW_VERSION1+ file, we
1971					 * must embed a cpumap in the "padding"
1972					 * used to page align the events folloing
1973					 * the threadmap. If the threadmap happens
1974					 * to not require enough padding, we
1975					 * artificially increase its footprint
1976					 * until it needs enough padding.
1977					 */
1978
1979					pad_size = PAGE_SIZE - ((sizeof(RAW_header) + (count * sizeof(kd_threadmap))) & PAGE_MASK_64);
1980					cpumap_size = sizeof(kd_cpumap_header) + kd_ctrl_page.kdebug_cpus * sizeof(kd_cpumap);
1981
1982					if (cpumap_size > pad_size) {
						/* Force an overflow onto the next page so we get a full page of padding */
1984						extra_thread_count = (pad_size / sizeof(kd_threadmap)) + 1;
1985					}
1986
1987					header.version_no = RAW_VERSION1;
1988					header.thread_count = count + extra_thread_count;
1989
1990					clock_get_calendar_microtime(&secs, &usecs);
1991					header.TOD_secs = secs;
1992					header.TOD_usecs = usecs;
1993
1994					ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)&header, sizeof(RAW_header), RAW_file_offset,
1995						      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
1996					if (ret)
1997						goto write_error;
1998					RAW_file_offset += sizeof(RAW_header);
1999
2000					ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, mapsize, RAW_file_offset,
2001						      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2002					if (ret)
2003						goto write_error;
2004					RAW_file_offset += mapsize;
2005
2006					if (extra_thread_count) {
2007						pad_size = extra_thread_count * sizeof(kd_threadmap);
2008						pad_buf = (char *)kalloc(pad_size);
2009						memset(pad_buf, 0, pad_size);
2010
2011						ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
2012							      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2013						kfree(pad_buf, pad_size);
2014
2015						if (ret)
2016							goto write_error;
2017						RAW_file_offset += pad_size;
2018
2019					}
2020
2021					pad_size = PAGE_SIZE - (RAW_file_offset & PAGE_MASK_64);
2022					if (pad_size) {
2023						pad_buf = (char *)kalloc(pad_size);
2024						memset(pad_buf, 0, pad_size);
2025
2026						/*
2027						 * embed a cpumap in the padding bytes.
2028						 * older code will skip this.
2029						 * newer code will know how to read it.
2030						 */
2031						uint32_t temp = pad_size;
2032						if (kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, (uint8_t**)&pad_buf, &temp) != KERN_SUCCESS) {
2033							memset(pad_buf, 0, pad_size);
2034						}
2035
2036						ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset,
2037							      UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2038						kfree(pad_buf, pad_size);
2039
2040						if (ret)
2041							goto write_error;
2042						RAW_file_offset += pad_size;
2043					}
2044					RAW_file_written += sizeof(RAW_header) + mapsize + pad_size;
2045
2046				} else {
2047					if (copyout(kd_mapptr, buffer, mapsize))
2048						ret = EINVAL;
2049				}
2050			}
2051		}
2052		else
2053			ret = EINVAL;
2054	}
2055	else
2056		ret = EINVAL;
2057
2058	if (ret && vp)
2059	{
2060		count = 0;
2061
2062		vn_rdwr(UIO_WRITE, vp, (caddr_t)&count, sizeof(uint32_t), RAW_file_offset,
2063			UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2064		RAW_file_offset += sizeof(uint32_t);
2065		RAW_file_written += sizeof(uint32_t);
2066	}
2067write_error:
2068	if ((kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
2069	{
2070		kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
2071		kd_ctrl_page.kdebug_flags &= ~KDBG_MAPINIT;
2072		kd_mapsize = 0;
2073		kd_mapptr = (kd_threadmap *) 0;
2074		kd_mapcount = 0;
2075	}
2076	return(ret);
2077}
2078
2079int
2080kdbg_getentropy (user_addr_t buffer, size_t *number, int ms_timeout)
2081{
2082	int avail = *number;
2083	int ret = 0;
2084	int s;
2085	u_int64_t abstime;
2086	u_int64_t ns;
2087	int wait_result = THREAD_AWAKENED;
2088
2089
2090	if (kd_entropy_buffer)
2091		return(EBUSY);
2092
2093	if (ms_timeout < 0)
2094		return(EINVAL);
2095
2096	kd_entropy_count = avail/sizeof(uint64_t);
2097
2098	if (kd_entropy_count > MAX_ENTROPY_COUNT || kd_entropy_count == 0) {
2099		/*
2100		 * Enforce maximum entropy entries
2101		 */
2102		return(EINVAL);
2103	}
2104	kd_entropy_bufsize = kd_entropy_count * sizeof(uint64_t);
2105
2106	/*
2107	 * allocate entropy buffer
2108	 */
2109	if (kmem_alloc(kernel_map, &kd_entropy_buftomem, (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS) {
2110		kd_entropy_buffer = (uint64_t *) kd_entropy_buftomem;
2111	} else {
2112		kd_entropy_buffer = (uint64_t *) 0;
2113		kd_entropy_count = 0;
2114
2115		return (ENOMEM);
2116	}
2117	kd_entropy_indx = 0;
2118
2119	KERNEL_DEBUG_CONSTANT(0xbbbbf000 | DBG_FUNC_START, ms_timeout, kd_entropy_count, 0, 0, 0);
2120
2121	/*
2122	 * Enable entropy sampling
2123	 */
2124	kdbg_set_flags(SLOW_ENTROPY, KDEBUG_ENABLE_ENTROPY, TRUE);
2125
2126	if (ms_timeout) {
2127		ns = (u_int64_t)ms_timeout * (u_int64_t)(1000 * 1000);
2128		nanoseconds_to_absolutetime(ns,  &abstime );
2129		clock_absolutetime_interval_to_deadline( abstime, &abstime );
2130	} else
2131		abstime = 0;
2132
2133	s = ml_set_interrupts_enabled(FALSE);
2134	lck_spin_lock(kdw_spin_lock);
2135
2136	while (wait_result == THREAD_AWAKENED && kd_entropy_indx < kd_entropy_count) {
2137
2138		kde_waiter = 1;
2139
2140		if (abstime) {
2141			/*
2142			 * wait for the specified timeout or
2143			 * until we've hit our sample limit
2144			 */
2145			wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kde_waiter, THREAD_ABORTSAFE, abstime);
2146		} else {
2147			/*
2148			 * wait until we've hit our sample limit
2149			 */
2150			wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kde_waiter, THREAD_ABORTSAFE);
2151		}
2152		kde_waiter = 0;
2153	}
2154	lck_spin_unlock(kdw_spin_lock);
2155	ml_set_interrupts_enabled(s);
2156
2157	/*
2158	 * Disable entropy sampling
2159	 */
2160	kdbg_set_flags(SLOW_ENTROPY, KDEBUG_ENABLE_ENTROPY, FALSE);
2161
2162	KERNEL_DEBUG_CONSTANT(0xbbbbf000 | DBG_FUNC_END, ms_timeout, kd_entropy_indx, 0, 0, 0);
2163
2164	*number = 0;
2165	ret = 0;
2166
2167	if (kd_entropy_indx > 0) {
2168		/*
2169		 * copyout the buffer
2170		 */
2171		if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(uint64_t)))
2172			ret = EINVAL;
2173		else
2174			*number = kd_entropy_indx * sizeof(uint64_t);
2175	}
2176	/*
2177	 * Always cleanup
2178	 */
2179	kd_entropy_count = 0;
2180	kd_entropy_indx = 0;
2181	kd_entropy_buftomem = 0;
2182	kmem_free(kernel_map, (vm_offset_t)kd_entropy_buffer, kd_entropy_bufsize);
2183	kd_entropy_buffer = (uint64_t *) 0;
2184
2185	return(ret);
2186}
2187
2188
2189static int
2190kdbg_set_nkdbufs(unsigned int value)
2191{
2192        /*
2193	 * We allow a maximum buffer size of 50% of either ram or max mapped address, whichever is smaller
2194	 * 'value' is the desired number of trace entries
2195	 */
2196        unsigned int max_entries = (sane_size/2) / sizeof(kd_buf);
2197
2198	if (value <= max_entries)
2199		return (value);
2200	else
2201		return (max_entries);
2202}
2203
2204
2205static int
2206kdbg_enable_bg_trace(void)
2207{
2208	int ret = 0;
2209
2210	if (kdlog_bg_trace == TRUE && kdlog_bg_trace_running == FALSE && n_storage_buffers == 0) {
2211		nkdbufs = bg_nkdbufs;
2212		ret = kdbg_reinit(FALSE);
2213		if (0 == ret) {
2214			kdbg_set_tracing_enabled(TRUE, KDEBUG_ENABLE_TRACE);
2215			kdlog_bg_trace_running = TRUE;
2216		}
2217	}
2218	return ret;
2219}
2220
2221static void
2222kdbg_disable_bg_trace(void)
2223{
2224	if (kdlog_bg_trace_running == TRUE) {
2225		kdlog_bg_trace_running = FALSE;
2226		kdbg_clear();
2227	}
2228}
2229
2230
2231
2232/*
2233 * This function is provided for the CHUD toolkit only.
2234 *    int val:
2235 *        zero disables kdebug_chudhook function call
2236 *        non-zero enables kdebug_chudhook function call
2237 *    char *fn:
2238 *        address of the enabled kdebug_chudhook function
2239*/
2240
2241void
2242kdbg_control_chud(int val, void *fn)
2243{
2244	kdbg_lock_init();
2245
2246	if (val) {
2247		/* enable chudhook */
2248		kdebug_chudhook = fn;
2249		kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, TRUE);
2250	}
2251	else {
2252		/* disable chudhook */
2253		kdbg_set_flags(SLOW_CHUD, KDEBUG_ENABLE_CHUD, FALSE);
2254		kdebug_chudhook = 0;
2255	}
2256}
2257
2258
2259int
2260kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep)
2261{
2262	int ret = 0;
2263	size_t size = *sizep;
2264	unsigned int value = 0;
2265	kd_regtype kd_Reg;
2266	kbufinfo_t kd_bufinfo;
2267	pid_t curpid;
2268	proc_t p, curproc;
2269
2270	if (name[0] == KERN_KDGETENTROPY ||
2271		name[0] == KERN_KDWRITETR ||
2272		name[0] == KERN_KDWRITEMAP ||
2273		name[0] == KERN_KDEFLAGS ||
2274		name[0] == KERN_KDDFLAGS ||
2275		name[0] == KERN_KDENABLE ||
2276		name[0] == KERN_KDENABLE_BG_TRACE ||
2277		name[0] == KERN_KDSETBUF) {
2278
2279		if ( namelen < 2 )
2280			return(EINVAL);
2281		value = name[1];
2282	}
2283
2284	kdbg_lock_init();
2285
2286	if ( !(kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT))
2287		return(ENOSPC);
2288
2289	lck_mtx_lock(kd_trace_mtx_sysctl);
2290
2291	switch(name[0]) {
2292		case KERN_KDGETBUF:
2293			/*
2294			 * Does not alter the global_state_pid
2295			 * This is a passive request.
2296			 */
2297			if (size < sizeof(kd_bufinfo.nkdbufs)) {
2298				/*
2299				 * There is not enough room to return even
2300				 * the first element of the info structure.
2301				 */
2302				ret = EINVAL;
2303				goto out;
2304			}
2305			kd_bufinfo.nkdbufs = nkdbufs;
2306			kd_bufinfo.nkdthreads = kd_mapcount;
2307
2308			if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) )
2309				kd_bufinfo.nolog = 1;
2310			else
2311				kd_bufinfo.nolog = 0;
2312
2313			kd_bufinfo.flags = kd_ctrl_page.kdebug_flags;
2314#if defined(__LP64__)
2315			kd_bufinfo.flags |= KDBG_LP64;
2316#endif
2317			kd_bufinfo.bufid = global_state_pid;
2318
2319			if (size >= sizeof(kd_bufinfo)) {
2320				/*
2321				 * Provide all the info we have
2322				 */
2323				if (copyout(&kd_bufinfo, where, sizeof(kd_bufinfo)))
2324					ret = EINVAL;
2325			} else {
2326				/*
2327				 * For backwards compatibility, only provide
2328				 * as much info as there is room for.
2329				 */
2330				if (copyout(&kd_bufinfo, where, size))
2331					ret = EINVAL;
2332			}
2333			goto out;
2334			break;
2335
2336		case KERN_KDGETENTROPY:
2337			if (kd_entropy_buffer)
2338				ret = EBUSY;
2339			else
2340				ret = kdbg_getentropy(where, sizep, value);
2341			goto out;
2342			break;
2343
2344		case KERN_KDENABLE_BG_TRACE:
2345			bg_nkdbufs = kdbg_set_nkdbufs(value);
2346			kdlog_bg_trace = TRUE;
2347			ret = kdbg_enable_bg_trace();
2348			goto out;
2349			break;
2350
2351		case KERN_KDDISABLE_BG_TRACE:
2352			kdlog_bg_trace = FALSE;
2353			kdbg_disable_bg_trace();
2354			goto out;
2355			break;
2356	}
2357
2358	if ((curproc = current_proc()) != NULL)
2359		curpid = curproc->p_pid;
2360	else {
2361		ret = ESRCH;
2362		goto out;
2363	}
2364	if (global_state_pid == -1)
2365		global_state_pid = curpid;
2366	else if (global_state_pid != curpid) {
2367		if ((p = proc_find(global_state_pid)) == NULL) {
2368			/*
2369			 * The global pid no longer exists
2370			 */
2371			global_state_pid = curpid;
2372		} else {
2373			/*
2374			 * The global pid exists, deny this request
2375			 */
2376			proc_rele(p);
2377
2378			ret = EBUSY;
2379			goto out;
2380		}
2381	}
2382
2383	switch(name[0]) {
2384		case KERN_KDEFLAGS:
2385			kdbg_disable_bg_trace();
2386
2387			value &= KDBG_USERFLAGS;
2388			kd_ctrl_page.kdebug_flags |= value;
2389			break;
2390		case KERN_KDDFLAGS:
2391			kdbg_disable_bg_trace();
2392
2393			value &= KDBG_USERFLAGS;
2394			kd_ctrl_page.kdebug_flags &= ~value;
2395			break;
2396		case KERN_KDENABLE:
2397			/*
2398			 * Enable tracing mechanism.  Two types:
2399			 * KDEBUG_TRACE is the standard one,
2400			 * and KDEBUG_PPT which is a carefully
2401			 * chosen subset to avoid performance impact.
2402			 */
2403			if (value) {
2404				/*
2405				 * enable only if buffer is initialized
2406				 */
2407				if (!(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) ||
2408				    !(value == KDEBUG_ENABLE_TRACE || value == KDEBUG_ENABLE_PPT)) {
2409					ret = EINVAL;
2410					break;
2411				}
2412				kdbg_thrmap_init();
2413
2414				kdbg_set_tracing_enabled(TRUE, value);
2415			}
2416			else
2417			{
2418				kdbg_set_tracing_enabled(FALSE, 0);
2419			}
2420			break;
2421		case KERN_KDSETBUF:
2422			kdbg_disable_bg_trace();
2423
2424			nkdbufs = kdbg_set_nkdbufs(value);
2425			break;
2426		case KERN_KDSETUP:
2427			kdbg_disable_bg_trace();
2428
2429			ret = kdbg_reinit(FALSE);
2430			break;
2431		case KERN_KDREMOVE:
2432			kdbg_clear();
2433			ret = kdbg_enable_bg_trace();
2434			break;
2435		case KERN_KDSETREG:
			if (size < sizeof(kd_regtype)) {
2437				ret = EINVAL;
2438				break;
2439			}
2440			if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
2441				ret = EINVAL;
2442				break;
2443			}
2444			kdbg_disable_bg_trace();
2445
2446			ret = kdbg_setreg(&kd_Reg);
2447			break;
2448		case KERN_KDGETREG:
2449			if (size < sizeof(kd_regtype)) {
2450				ret = EINVAL;
2451				break;
2452			}
2453			ret = kdbg_getreg(&kd_Reg);
2454			if (copyout(&kd_Reg, where, sizeof(kd_regtype))) {
2455				ret = EINVAL;
2456			}
2457			kdbg_disable_bg_trace();
2458
2459			break;
2460		case KERN_KDREADTR:
2461			ret = kdbg_read(where, sizep, NULL, NULL);
2462			break;
2463		case KERN_KDWRITETR:
2464		case KERN_KDWRITEMAP:
2465		{
2466			struct	vfs_context context;
2467			struct	fileproc *fp;
2468			size_t	number;
2469			vnode_t	vp;
2470			int	fd;
2471
2472			kdbg_disable_bg_trace();
2473
2474			if (name[0] == KERN_KDWRITETR) {
2475				int s;
2476				int wait_result = THREAD_AWAKENED;
2477				u_int64_t abstime;
2478				u_int64_t ns;
2479
2480				if (*sizep) {
2481					ns = ((u_int64_t)*sizep) * (u_int64_t)(1000 * 1000);
2482					nanoseconds_to_absolutetime(ns,  &abstime );
2483					clock_absolutetime_interval_to_deadline( abstime, &abstime );
2484				} else
2485					abstime = 0;
2486
2487				s = ml_set_interrupts_enabled(FALSE);
2488				lck_spin_lock(kdw_spin_lock);
2489
2490				while (wait_result == THREAD_AWAKENED && kd_ctrl_page.kds_inuse_count < n_storage_threshold) {
2491
2492					kds_waiter = 1;
2493
2494					if (abstime)
2495						wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);
2496					else
2497						wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);
2498
2499					kds_waiter = 0;
2500				}
2501				lck_spin_unlock(kdw_spin_lock);
2502				ml_set_interrupts_enabled(s);
2503			}
2504			p = current_proc();
2505			fd = value;
2506
2507			proc_fdlock(p);
2508			if ( (ret = fp_lookup(p, fd, &fp, 1)) ) {
2509				proc_fdunlock(p);
2510				break;
2511			}
2512			context.vc_thread = current_thread();
2513			context.vc_ucred = fp->f_fglob->fg_cred;
2514
2515			if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) {
2516				fp_drop(p, fd, fp, 1);
2517				proc_fdunlock(p);
2518
2519				ret = EBADF;
2520				break;
2521			}
2522			vp = (struct vnode *)fp->f_fglob->fg_data;
2523			proc_fdunlock(p);
2524
2525			if ((ret = vnode_getwithref(vp)) == 0) {
2526
2527				if (name[0] == KERN_KDWRITETR) {
2528					number = nkdbufs * sizeof(kd_buf);
2529
2530					KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 3)) | DBG_FUNC_START, 0, 0, 0, 0, 0);
2531					ret = kdbg_read(0, &number, vp, &context);
2532					KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 3)) | DBG_FUNC_END, number, 0, 0, 0, 0);
2533
2534					*sizep = number;
2535				} else {
2536					number = kd_mapcount * sizeof(kd_threadmap);
2537					kdbg_readthrmap(0, &number, vp, &context);
2538				}
2539				vnode_put(vp);
2540			}
2541			fp_drop(p, fd, fp, 0);
2542
2543			break;
2544		}
2545		case KERN_KDBUFWAIT:
2546		{
2547			/* WRITETR lite -- just block until there's data */
2548			int s;
2549			int wait_result = THREAD_AWAKENED;
2550			u_int64_t abstime;
2551			u_int64_t ns;
2552			size_t	number = 0;
2553
2554			kdbg_disable_bg_trace();
2555
2556
2557			if (*sizep) {
2558				ns = ((u_int64_t)*sizep) * (u_int64_t)(1000 * 1000);
2559				nanoseconds_to_absolutetime(ns,  &abstime );
2560				clock_absolutetime_interval_to_deadline( abstime, &abstime );
2561			} else
2562				abstime = 0;
2563
2564			s = ml_set_interrupts_enabled(FALSE);
			if ( !s )
2566				panic("trying to wait with interrupts off");
2567			lck_spin_lock(kdw_spin_lock);
2568
			/* drop the mutex so we don't exclude others from
			 * accessing the trace buffers
			 */
2572			lck_mtx_unlock(kd_trace_mtx_sysctl);
2573
2574			while (wait_result == THREAD_AWAKENED &&
2575				kd_ctrl_page.kds_inuse_count < n_storage_threshold) {
2576
2577				kds_waiter = 1;
2578
2579				if (abstime)
2580					wait_result = lck_spin_sleep_deadline(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE, abstime);
2581				else
2582					wait_result = lck_spin_sleep(kdw_spin_lock, 0, &kds_waiter, THREAD_ABORTSAFE);
2583
2584				kds_waiter = 0;
2585			}
2586
2587			/* check the count under the spinlock */
2588			number = (kd_ctrl_page.kds_inuse_count >= n_storage_threshold);
2589
2590			lck_spin_unlock(kdw_spin_lock);
2591			ml_set_interrupts_enabled(s);
2592
2593			/* pick the mutex back up again */
2594			lck_mtx_lock(kd_trace_mtx_sysctl);
2595
2596			/* write out whether we've exceeded the threshold */
2597			*sizep = number;
2598			break;
2599		}
2600		case KERN_KDPIDTR:
2601			if (size < sizeof(kd_regtype)) {
2602				ret = EINVAL;
2603				break;
2604			}
2605			if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
2606				ret = EINVAL;
2607				break;
2608			}
2609			kdbg_disable_bg_trace();
2610
2611			ret = kdbg_setpid(&kd_Reg);
2612			break;
2613		case KERN_KDPIDEX:
2614			if (size < sizeof(kd_regtype)) {
2615				ret = EINVAL;
2616				break;
2617			}
2618			if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
2619				ret = EINVAL;
2620				break;
2621			}
2622			kdbg_disable_bg_trace();
2623
2624			ret = kdbg_setpidex(&kd_Reg);
2625			break;
2626		case KERN_KDCPUMAP:
2627			ret = kdbg_readcpumap(where, sizep);
2628			break;
2629		case KERN_KDTHRMAP:
2630			ret = kdbg_readthrmap(where, sizep, NULL, NULL);
2631			break;
2632		case KERN_KDREADCURTHRMAP:
2633			ret = kdbg_readcurthrmap(where, sizep);
2634			break;
2635		case KERN_KDSETRTCDEC:
2636			if (size < sizeof(kd_regtype)) {
2637				ret = EINVAL;
2638				break;
2639			}
2640			if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
2641				ret = EINVAL;
2642				break;
2643			}
2644			kdbg_disable_bg_trace();
2645
2646			ret = kdbg_setrtcdec(&kd_Reg);
2647			break;
2648		case KERN_KDSET_TYPEFILTER:
2649			kdbg_disable_bg_trace();
2650
2651			if ((kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) == 0){
2652				if ((ret = kdbg_enable_typefilter()))
2653					break;
2654			}
2655
2656			if (size != KDBG_TYPEFILTER_BITMAP_SIZE) {
2657				ret = EINVAL;
2658				break;
2659			}
2660
2661			if (copyin(where, type_filter_bitmap, KDBG_TYPEFILTER_BITMAP_SIZE)) {
2662				ret = EINVAL;
2663				break;
2664			}
2665			kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_TYPEFILTER_CHANGED, type_filter_bitmap);
2666			break;
2667		default:
2668			ret = EINVAL;
2669	}
2670out:
2671	lck_mtx_unlock(kd_trace_mtx_sysctl);
2672
2673	return(ret);
2674}
2675
2676
2677/*
2678 * This code can run for the most part concurrently with kernel_debug_internal()...
2679 * 'release_storage_unit' will take the kds_spin_lock which may cause us to briefly
2680 * synchronize with the recording side of this puzzle... otherwise, we are able to
2681 * move through the lists w/o use of any locks
2682 */
2683int
2684kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx)
2685{
2686	unsigned int count;
2687	unsigned int cpu, min_cpu;
2688	uint64_t  mintime, t, barrier = 0;
2689	int error = 0;
2690	kd_buf *tempbuf;
2691	uint32_t rcursor;
2692	kd_buf lostevent;
2693	union kds_ptr kdsp;
2694	struct kd_storage *kdsp_actual;
2695	struct kd_bufinfo *kdbp;
2696	struct kd_bufinfo *min_kdbp;
2697	uint32_t tempbuf_count;
2698	uint32_t tempbuf_number;
2699	uint32_t old_kdebug_flags;
2700	uint32_t old_kdebug_slowcheck;
2701	boolean_t lostevents = FALSE;
2702	boolean_t out_of_events = FALSE;
2703
2704	count = *number/sizeof(kd_buf);
2705	*number = 0;
2706
2707	if (count == 0 || !(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0)
2708		return EINVAL;
2709
2710	memset(&lostevent, 0, sizeof(lostevent));
2711	lostevent.debugid = TRACEDBG_CODE(DBG_TRACE_INFO, 2);
2712
	/* Capture a timestamp. Only sort events that have occurred before the timestamp.
	 * Since the IOPs are being flushed here, it's possible that events occur on the AP
	 * while running live tracing. If we are disabled, no new events should
	 * occur on the AP.
	 */
2718
2719	if (kd_ctrl_page.enabled)
2720	{
		// use a non-zero timestamp as the sort barrier
2722		barrier = mach_absolute_time() & KDBG_TIMESTAMP_MASK;
2723	}
2724
2725	// Request each IOP to provide us with up to date entries before merging buffers together.
2726	kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_SYNC_FLUSH, NULL);
2727
2728	/*
2729	 * because we hold kd_trace_mtx_sysctl, no other control threads can
2730	 * be playing with kdebug_flags... the code that cuts new events could
2731	 * be running, but it grabs kds_spin_lock if it needs to acquire a new
2732	 * storage chunk which is where it examines kdebug_flags... it its adding
2733	 * to the same chunk we're reading from, no problem...
2734	 */
2735
2736	disable_wrap(&old_kdebug_slowcheck, &old_kdebug_flags);
2737
2738	if (count > nkdbufs)
2739		count = nkdbufs;
2740
2741	if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
2742	        tempbuf_count = KDCOPYBUF_COUNT;
2743
2744	while (count) {
2745		tempbuf = kdcopybuf;
2746		tempbuf_number = 0;
2747
		// While there is space in the copy buffer
2749		while (tempbuf_count) {
2750			mintime = 0xffffffffffffffffULL;
2751			min_kdbp = NULL;
2752			min_cpu = 0;
2753
2754			// Check all CPUs
2755			for (cpu = 0, kdbp = &kdbip[0]; cpu < kd_ctrl_page.kdebug_cpus; cpu++, kdbp++) {
2756
2757				// Find one with raw data
2758				if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL)
2759				        continue;
2760				/* Debugging aid: maintain a copy of the "kdsp"
2761				 * index.
2762				 */
2763				volatile union kds_ptr kdsp_shadow;
2764
2765				kdsp_shadow = kdsp;
2766
2767				// Get from cpu data to buffer header to buffer
2768				kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
2769
2770				volatile struct kd_storage *kdsp_actual_shadow;
2771
2772				kdsp_actual_shadow = kdsp_actual;
2773
				// See if there is any data left in this buffer
2775				rcursor = kdsp_actual->kds_readlast;
2776
2777				if (rcursor == kdsp_actual->kds_bufindx)
2778					continue;
2779
2780				t = kdbg_get_timestamp(&kdsp_actual->kds_records[rcursor]);
2781
2782				if ((t > barrier) && (barrier > 0)) {
2783					/*
2784					 * Need to wait to flush iop again before we
2785					 * sort any more data from the buffers
2786					*/
2787					out_of_events = TRUE;
2788					break;
2789				}
2790				if (t < kdsp_actual->kds_timestamp) {
2791					/*
2792					 * indicates we've not yet completed filling
2793					 * in this event...
2794					 * this should only occur when we're looking
2795					 * at the buf that the record head is utilizing
2796					 * we'll pick these events up on the next
2797					 * call to kdbg_read
2798					 * we bail at this point so that we don't
2799					 * get an out-of-order timestream by continuing
2800					 * to read events from the other CPUs' timestream(s)
2801					 */
2802					out_of_events = TRUE;
2803					break;
2804				}
2805				if (t < mintime) {
2806				        mintime = t;
2807					min_kdbp = kdbp;
2808					min_cpu = cpu;
2809				}
2810			}
2811			if (min_kdbp == NULL || out_of_events == TRUE) {
2812				/*
2813				 * all buffers ran empty
2814				 */
2815				out_of_events = TRUE;
2816				break;
2817			}
2818
2819			// Get data
2820			kdsp = min_kdbp->kd_list_head;
2821			kdsp_actual = POINTER_FROM_KDS_PTR(kdsp);
2822
2823			if (kdsp_actual->kds_lostevents == TRUE) {
2824				kdbg_set_timestamp_and_cpu(&lostevent, kdsp_actual->kds_records[kdsp_actual->kds_readlast].timestamp, min_cpu);
2825				*tempbuf = lostevent;
2826
2827				kdsp_actual->kds_lostevents = FALSE;
2828				lostevents = TRUE;
2829
2830				goto nextevent;
2831			}
2832
2833			// Copy into buffer
2834			*tempbuf = kdsp_actual->kds_records[kdsp_actual->kds_readlast++];
2835
2836			if (kdsp_actual->kds_readlast == EVENTS_PER_STORAGE_UNIT)
2837				release_storage_unit(min_cpu, kdsp.raw);
2838
2839			/*
2840			 * Watch for out of order timestamps
2841			 */
2842			if (mintime < min_kdbp->kd_prev_timebase) {
2843				/*
2844				 * if so, use the previous timestamp + 1 cycle
2845				 */
2846				min_kdbp->kd_prev_timebase++;
2847				kdbg_set_timestamp_and_cpu(tempbuf, min_kdbp->kd_prev_timebase, kdbg_get_cpu(tempbuf));
2848			} else
2849				min_kdbp->kd_prev_timebase = mintime;
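			/*
			 * Example (illustrative): if kd_prev_timebase is 1000 and
			 * mintime comes back as 998 (e.g. an IOP event with a slightly
			 * skewed timebase), the event is emitted with timestamp 1001
			 * and kd_prev_timebase advances to 1001, keeping this buffer's
			 * contribution to the merged stream monotonic.
			 */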
2850nextevent:
2851			tempbuf_count--;
2852			tempbuf_number++;
2853			tempbuf++;
2854
2855			if ((RAW_file_written += sizeof(kd_buf)) >= RAW_FLUSH_SIZE)
2856				break;
2857		}
2858		if (tempbuf_number) {
2859
2860			if (vp) {
2861				error = vn_rdwr(UIO_WRITE, vp, (caddr_t)kdcopybuf, tempbuf_number * sizeof(kd_buf), RAW_file_offset,
2862						UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx));
2863
2864				RAW_file_offset += (tempbuf_number * sizeof(kd_buf));
2865
2866				if (RAW_file_written >= RAW_FLUSH_SIZE) {
2867					cluster_push(vp, 0);
2868
2869					RAW_file_written = 0;
2870				}
2871			} else {
2872				error = copyout(kdcopybuf, buffer, tempbuf_number * sizeof(kd_buf));
2873				buffer += (tempbuf_number * sizeof(kd_buf));
2874			}
2875			if (error) {
2876				*number = 0;
2877				error = EINVAL;
2878				break;
2879			}
2880			count   -= tempbuf_number;
2881			*number += tempbuf_number;
2882		}
2883		if (out_of_events == TRUE)
2884		       /*
2885			* all trace buffers are empty
2886			*/
2887		        break;
2888
2889		if ((tempbuf_count = count) > KDCOPYBUF_COUNT)
2890		        tempbuf_count = KDCOPYBUF_COUNT;
2891	}
2892	if ( !(old_kdebug_flags & KDBG_NOWRAP)) {
2893		enable_wrap(old_kdebug_slowcheck, lostevents);
2894	}
2895	return (error);
2896}
2897
2898
unsigned char *getProcName(struct proc *proc);
unsigned char *getProcName(struct proc *proc)
{
	return (unsigned char *) &proc->p_comm;	/* Return pointer to the proc name */
}
2905
2906#define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
2907#define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)
2908#if defined(__i386__) || defined (__x86_64__)
2909#define TRAP_DEBUGGER __asm__ volatile("int3");
2910#else
2911#error No TRAP_DEBUGGER definition for this architecture
2912#endif
2913
2914#define SANE_TRACEBUF_SIZE (8 * 1024 * 1024)
2915#define SANE_BOOTPROFILE_TRACEBUF_SIZE (64 * 1024 * 1024)
2916
2917/* Initialize the mutex governing access to the stack snapshot subsystem */
2918__private_extern__ void
2919stackshot_lock_init( void )
2920{
2921	stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init();
2922
2923	stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr);
2924
2925	stackshot_subsys_lck_attr = lck_attr_alloc_init();
2926
2927	lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr);
2928}
2929
2930/*
2931 * stack_snapshot:   Obtains a coherent set of stack traces for all threads
2932 *		     on the system, tracing both kernel and user stacks
2933 *		     where available. Uses machine specific trace routines
2934 *		     for ppc, ppc64 and x86.
2935 * Inputs:	     uap->pid - process id of process to be traced, or -1
2936 *		     for the entire system
2937 *		     uap->tracebuf - address of the user space destination
2938 *		     buffer
2939 *		     uap->tracebuf_size - size of the user space trace buffer
2940 *		     uap->options - various options, including the maximum
2941 *		     number of frames to trace.
2942 * Outputs:	     EPERM if the caller is not privileged
2943 *		     EINVAL if the supplied trace buffer isn't sanely sized
2944 *		     ENOMEM if we don't have enough memory to satisfy the
2945 *		     request
2946 *		     ENOENT if the target pid isn't found
2947 *		     ENOSPC if the supplied buffer is insufficient
2948 *		     *retval contains the number of bytes traced, if successful
2949 *		     and -1 otherwise. If the request failed due to
2950 *		     tracebuffer exhaustion, we copyout as much as possible.
2951 */
2952int
2953stack_snapshot(struct proc *p, register struct stack_snapshot_args *uap, int32_t *retval) {
2954	int error = 0;
2955
2956	if ((error = suser(kauth_cred_get(), &p->p_acflag)))
2957                return(error);
2958
2959	return stack_snapshot2(uap->pid, uap->tracebuf, uap->tracebuf_size,
2960	    uap->flags, uap->dispatch_offset, retval);
2961}
2962
2963int
2964stack_snapshot_from_kernel(pid_t pid, void *buf, uint32_t size, uint32_t flags, unsigned *bytesTraced)
2965{
2966	int error = 0;
2967	boolean_t istate;
2968
2969	if ((buf == NULL) || (size <= 0) || (bytesTraced == NULL)) {
2970		return -1;
2971	}
2972
	/* cap an individual stackshot to SANE_TRACEBUF_SIZE */
2974	if (size > SANE_TRACEBUF_SIZE) {
2975		size = SANE_TRACEBUF_SIZE;
2976	}
2977
2978/* Serialize tracing */
2979	STACKSHOT_SUBSYS_LOCK();
2980	istate = ml_set_interrupts_enabled(FALSE);
2981
2982
2983/* Preload trace parameters*/
2984	kdp_snapshot_preflight(pid, buf, size, flags, 0);
2985
2986/* Trap to the debugger to obtain a coherent stack snapshot; this populates
2987 * the trace buffer
2988 */
2989	TRAP_DEBUGGER;
2990
2991	ml_set_interrupts_enabled(istate);
2992
2993	*bytesTraced = kdp_stack_snapshot_bytes_traced();
2994
2995	error = kdp_stack_snapshot_geterror();
2996
2997	STACKSHOT_SUBSYS_UNLOCK();
2998
	return error;
}
3002
3003int
3004stack_snapshot2(pid_t pid, user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, uint32_t dispatch_offset, int32_t *retval)
3005{
3006	boolean_t istate;
3007	int error = 0;
3008	unsigned bytesTraced = 0;
3009
3010#if CONFIG_TELEMETRY
3011	if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_ENABLE) {
3012		telemetry_global_ctl(1);
3013		*retval = 0;
3014		return (0);
3015	} else if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_DISABLE) {
3016		telemetry_global_ctl(0);
3017		*retval = 0;
3018		return (0);
3019	}
3020#endif
3021
3022	*retval = -1;
3023/* Serialize tracing */
3024	STACKSHOT_SUBSYS_LOCK();
3025
3026	if (tracebuf_size <= 0) {
3027		error = EINVAL;
3028		goto error_exit;
3029	}
3030
3031#if CONFIG_TELEMETRY
3032	if (flags & STACKSHOT_GET_MICROSTACKSHOT) {
3033
3034		if (tracebuf_size > SANE_TRACEBUF_SIZE) {
3035			error = EINVAL;
3036			goto error_exit;
3037		}
3038
3039		bytesTraced = tracebuf_size;
3040		error = telemetry_gather(tracebuf, &bytesTraced,
3041		                         (flags & STACKSHOT_SET_MICROSTACKSHOT_MARK) ? TRUE : FALSE);
3042		if (error == KERN_NO_SPACE) {
3043			error = ENOSPC;
3044		}
3045
3046		*retval = (int)bytesTraced;
3047		goto error_exit;
3048	}
3049
3050	if (flags & STACKSHOT_GET_BOOT_PROFILE) {
3051
3052		if (tracebuf_size > SANE_BOOTPROFILE_TRACEBUF_SIZE) {
3053			error = EINVAL;
3054			goto error_exit;
3055		}
3056
3057		bytesTraced = tracebuf_size;
3058		error = bootprofile_gather(tracebuf, &bytesTraced);
3059		if (error == KERN_NO_SPACE) {
3060			error = ENOSPC;
3061		}
3062
3063		*retval = (int)bytesTraced;
3064		goto error_exit;
3065	}
3066#endif
3067
3068	if (tracebuf_size > SANE_TRACEBUF_SIZE) {
3069		error = EINVAL;
3070		goto error_exit;
3071	}
3072
3073	assert(stackshot_snapbuf == NULL);
3074	if (kmem_alloc_kobject(kernel_map, (vm_offset_t *)&stackshot_snapbuf, tracebuf_size) != KERN_SUCCESS) {
3075		error = ENOMEM;
3076		goto error_exit;
3077	}
3078
3079	if (panic_active()) {
3080		error = ENOMEM;
3081		goto error_exit;
3082	}
3083
3084	istate = ml_set_interrupts_enabled(FALSE);
3085/* Preload trace parameters*/
3086	kdp_snapshot_preflight(pid, stackshot_snapbuf, tracebuf_size, flags, dispatch_offset);
3087
3088/* Trap to the debugger to obtain a coherent stack snapshot; this populates
3089 * the trace buffer
3090 */
3091
3092	TRAP_DEBUGGER;
3093
3094	ml_set_interrupts_enabled(istate);
3095
3096	bytesTraced = kdp_stack_snapshot_bytes_traced();
3097
3098	if (bytesTraced > 0) {
3099		if ((error = copyout(stackshot_snapbuf, tracebuf,
3100			((bytesTraced < tracebuf_size) ?
3101			    bytesTraced : tracebuf_size))))
3102			goto error_exit;
3103		*retval = bytesTraced;
3104	}
3105	else {
3106		error = ENOENT;
3107		goto error_exit;
3108	}
3109
3110	error = kdp_stack_snapshot_geterror();
3111	if (error == -1) {
3112		error = ENOSPC;
3113		*retval = -1;
3114		goto error_exit;
3115	}
3116
3117error_exit:
3118	if (stackshot_snapbuf != NULL)
3119		kmem_free(kernel_map, (vm_offset_t) stackshot_snapbuf, tracebuf_size);
3120	stackshot_snapbuf = NULL;
3121	STACKSHOT_SUBSYS_UNLOCK();
3122	return error;
3123}
3124
3125void
start_kern_tracing(unsigned int new_nkdbufs, boolean_t need_map)
{
3128	if (!new_nkdbufs)
3129		return;
3130	nkdbufs = kdbg_set_nkdbufs(new_nkdbufs);
3131	kdbg_lock_init();
3132
3133	if (0 == kdbg_reinit(TRUE)) {
3134
3135		if (need_map == TRUE) {
3136			uint32_t old1, old2;
3137
3138			kdbg_thrmap_init();
3139
3140			disable_wrap(&old1, &old2);
3141		}
3142		kdbg_set_tracing_enabled(TRUE, KDEBUG_ENABLE_TRACE);
3143
3144#if defined(__i386__) || defined(__x86_64__)
3145		uint64_t now = mach_absolute_time();
3146
		KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 1)) | DBG_FUNC_NONE,
				      (uint32_t)(tsc_rebase_abs_time >> 32), (uint32_t)tsc_rebase_abs_time,
				      (uint32_t)(now >> 32), (uint32_t)now,
				      0);
3151#endif
3152		printf("kernel tracing started\n");
3153	} else {
3154		printf("error from kdbg_reinit,kernel tracing not started\n");
3155	}
3156}
3157
3158void
3159kdbg_dump_trace_to_file(const char *filename)
3160{
3161	vfs_context_t	ctx;
3162	vnode_t		vp;
3163	int		error;
3164	size_t		number;
3165
3166
3167	if ( !(kdebug_enable & KDEBUG_ENABLE_TRACE))
3168		return;
3169
3170        if (global_state_pid != -1) {
3171		if ((proc_find(global_state_pid)) != NULL) {
3172			/*
3173			 * The global pid exists, we're running
3174			 * due to fs_usage, latency, etc...
3175			 * don't cut the panic/shutdown trace file
3176			 * Disable tracing from this point to avoid
3177			 * perturbing state.
3178			 */
3179			kdebug_enable = 0;
3180			kd_ctrl_page.enabled = 0;
3181			return;
3182		}
3183	}
3184	KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_INFO, 0)) | DBG_FUNC_NONE, 0, 0, 0, 0, 0);
3185
3186	kdebug_enable = 0;
3187	kd_ctrl_page.enabled = 0;
3188
3189	ctx = vfs_context_kernel();
3190
3191	if ((error = vnode_open(filename, (O_CREAT | FWRITE | O_NOFOLLOW), 0600, 0, &vp, ctx)))
3192		return;
3193
3194	number = kd_mapcount * sizeof(kd_threadmap);
3195	kdbg_readthrmap(0, &number, vp, ctx);
3196
3197	number = nkdbufs*sizeof(kd_buf);
3198	kdbg_read(0, &number, vp, ctx);
3199
3200	vnode_close(vp, FWRITE, ctx);
3201
3202	sync(current_proc(), (void *)NULL, (int *)NULL);
3203}
3204
/* Helper function for filling in the BSD name for an address space.
 * Defined here because the machine bindings know only Mach threads
 * and nothing about BSD processes.
 *
 * FIXME: need to grab a lock during this?
 */
3211void kdbg_get_task_name(char* name_buf, int len, task_t task)
3212{
3213	proc_t proc;
3214
3215	/* Note: we can't use thread->task (and functions that rely on it) here
3216	 * because it hasn't been initialized yet when this function is called.
3217	 * We use the explicitly-passed task parameter instead.
3218	 */
3219	proc = get_bsdtask_info(task);
3220	if (proc != PROC_NULL)
3221		snprintf(name_buf, len, "%s/%d", proc->p_comm, proc->p_pid);
3222	else
3223		snprintf(name_buf, len, "%p [!bsd]", task);
3224}
3225