1/*
2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <mach/mach_types.h>
30#include <mach/vm_param.h>
31#include <sys/appleapiopts.h>
32#include <kern/debug.h>
33#include <uuid/uuid.h>
34
35#include <kdp/kdp_internal.h>
36#include <kdp/kdp_private.h>
37#include <kdp/kdp_core.h>
38#include <kdp/kdp_dyld.h>
39
40#include <libsa/types.h>
41#include <libkern/version.h>
42
43#include <string.h> /* bcopy */
44
45#include <kern/processor.h>
46#include <kern/thread.h>
47#include <kern/clock.h>
48#include <vm/vm_map.h>
49#include <vm/vm_kern.h>
50#include <vm/vm_pageout.h>
51#include <vm/vm_shared_region.h>
52#include <libkern/OSKextLibPrivate.h>
53
54extern int count_busy_buffers(void);   /* must track with declaration in bsd/sys/buf_internal.h */
55
56#define DO_ALIGN	1	/* align all packet data accesses */
57
58#define KDP_TEST_HARNESS 0
59#if KDP_TEST_HARNESS
60#define dprintf(x) kprintf x
61#else
62#define dprintf(x)
63#endif
64
/*
 * Request dispatch table, indexed by (request - KDP_CONNECT).
 * Slot order must match the protocol's request numbering exactly;
 * request numbers with no implementation are routed to kdp_unknown.
 */
static kdp_dispatch_t
    dispatch_table[KDP_INVALID_REQUEST-KDP_CONNECT] =
    {
/* 0 */	kdp_connect,
/* 1 */	kdp_disconnect,
/* 2 */	kdp_hostinfo,
/* 3 */	kdp_version,
/* 4 */	kdp_maxbytes,
/* 5 */	kdp_readmem,
/* 6 */	kdp_writemem,
/* 7 */	kdp_readregs,
/* 8 */	kdp_writeregs,
/* 9 */ kdp_unknown,
/* A */ kdp_unknown,
/* B */	kdp_suspend,
/* C */	kdp_resumecpus,
/* D */	kdp_unknown,
/* E */ kdp_unknown,
/* F */ kdp_breakpoint_set,
/*10 */ kdp_breakpoint_remove,
/*11 */	kdp_regions,
/*12 */ kdp_reattach,
/*13 */ kdp_reboot,
/*14 */ kdp_readmem64,
/*15 */ kdp_writemem64,
/*16 */ kdp_breakpoint64_set,
/*17 */ kdp_breakpoint64_remove,
/*18 */ kdp_kernelversion,
/*19 */ kdp_readphysmem64,
/*1A */ kdp_writephysmem64,
/*1B */ kdp_readioport,
/*1C */ kdp_writeioport,
/*1D */ kdp_readmsr64,
/*1E */ kdp_writemsr64,
/*1F */ kdp_dumpinfo,
    };
101
/* Global KDP session state: connection flag, ports, sequence numbers, key. */
kdp_glob_t	kdp;

#define MAX_BREAKPOINTS 100

/*
 * Version 11 of the KDP Protocol adds support for 64-bit wide memory
 * addresses (read/write and breakpoints) as well as a dedicated
 * kernelversion request. Version 12 adds read/writing of physical
 * memory with 64-bit wide memory addresses.
 */
#define KDP_VERSION 12

/*
 * Bookkeeping for one planted breakpoint: the address it lives at and
 * the original instruction bytes to restore when it is removed.
 * address == 0 marks a free slot.
 */
typedef struct{
	mach_vm_address_t	address;
	uint32_t	bytesused;
	uint8_t		oldbytes[MAX_BREAKINSN_BYTES];
} kdp_breakpoint_record_t;

static kdp_breakpoint_record_t breakpoint_list[MAX_BREAKPOINTS];
/* Lazily set non-zero the first time a breakpoint is planted. */
static unsigned int breakpoints_initialized = 0;

/* When set, the target spins after disconnect waiting for a reattach. */
int reattach_wait = 0;
int noresume_on_disconnect = 0;
extern unsigned int return_on_panic;

typedef struct thread_snapshot *thread_snapshot_t;
typedef struct task_snapshot *task_snapshot_t;

/* Machine-layer and BSD-layer helpers used by the stackshot code below. */
extern int
machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);
extern int
machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);
extern int
proc_pid(void *p);
extern uint64_t
proc_uniqueid(void *p);
extern uint64_t
proc_was_throttled(void *p);
extern uint64_t
proc_did_throttle(void *p);

extern void
proc_name_kdp(task_t  task, char *buf, int size);

extern void
kdp_snapshot_postflight(void);

static int
pid_from_task(task_t task);

static uint64_t
proc_uniqueid_from_task(task_t task);

kdp_error_t
kdp_set_breakpoint_internal(
							   mach_vm_address_t	address
							   );

kdp_error_t
kdp_remove_breakpoint_internal(
							   mach_vm_address_t	address
							   );


int
kdp_stackshot(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t trace_flags, uint32_t dispatch_offset, uint32_t *pbytesTraced);

boolean_t kdp_copyin(pmap_t, uint64_t, void *, size_t);
extern void bcopy_phys(addr64_t, addr64_t, vm_size_t);
171
172boolean_t
173kdp_packet(
174    unsigned char	*pkt,
175    int			*len,
176    unsigned short	*reply_port
177)
178{
179    static unsigned	aligned_pkt[1538/sizeof(unsigned)+1]; // max ether pkt
180    kdp_pkt_t		*rd = (kdp_pkt_t *)&aligned_pkt;
181    size_t		plen = *len;
182    kdp_req_t		req;
183    boolean_t		ret;
184
185#if DO_ALIGN
186    bcopy((char *)pkt, (char *)rd, sizeof(aligned_pkt));
187#else
188    rd = (kdp_pkt_t *)pkt;
189#endif
190    if (plen < sizeof (rd->hdr) || rd->hdr.len != plen) {
191	printf("kdp_packet bad len pkt %lu hdr %d\n", plen, rd->hdr.len);
192
193	return (FALSE);
194    }
195
196    if (rd->hdr.is_reply) {
197	printf("kdp_packet reply recvd req %x seq %x\n",
198	    rd->hdr.request, rd->hdr.seq);
199
200	return (FALSE);
201    }
202
203    req = rd->hdr.request;
204    if (req >= KDP_INVALID_REQUEST) {
205	printf("kdp_packet bad request %x len %d seq %x key %x\n",
206	    rd->hdr.request, rd->hdr.len, rd->hdr.seq, rd->hdr.key);
207
208	return (FALSE);
209    }
210
211    ret = ((*dispatch_table[req - KDP_CONNECT])(rd, len, reply_port));
212#if DO_ALIGN
213    bcopy((char *)rd, (char *) pkt, *len);
214#endif
215    return ret;
216}
217
218static boolean_t
219kdp_unknown(
220    kdp_pkt_t		*pkt,
221    __unused int	*len,
222    __unused unsigned short	*reply_port
223)
224{
225    kdp_pkt_t		*rd = (kdp_pkt_t *)pkt;
226
227    printf("kdp_unknown request %x len %d seq %x key %x\n",
228	rd->hdr.request, rd->hdr.len, rd->hdr.seq, rd->hdr.key);
229
230    return (FALSE);
231}
232
233static boolean_t
234kdp_connect(
235    kdp_pkt_t		*pkt,
236    int			*len,
237    unsigned short	*reply_port
238)
239{
240    kdp_connect_req_t	*rq = &pkt->connect_req;
241    size_t		plen = *len;
242    kdp_connect_reply_t	*rp = &pkt->connect_reply;
243    uint16_t            rport, eport;
244    uint32_t            key;
245    uint8_t             seq;
246
247    if (plen < sizeof (*rq))
248	return (FALSE);
249
250    dprintf(("kdp_connect seq %x greeting %s\n", rq->hdr.seq, rq->greeting));
251
252    rport = rq->req_reply_port;
253    eport = rq->exc_note_port;
254    key   = rq->hdr.key;
255    seq   = rq->hdr.seq;
256    if (kdp.is_conn) {
257	if ((seq == kdp.conn_seq) &&	/* duplicate request */
258            (rport == kdp.reply_port) &&
259            (eport == kdp.exception_port) &&
260            (key == kdp.session_key))
261	    rp->error = KDPERR_NO_ERROR;
262	else
263	    rp->error = KDPERR_ALREADY_CONNECTED;
264    }
265    else {
266	kdp.reply_port     = rport;
267	kdp.exception_port = eport;
268	kdp.is_conn        = TRUE;
269	kdp.conn_seq       = seq;
270        kdp.session_key    = key;
271
272	rp->error = KDPERR_NO_ERROR;
273    }
274
275    rp->hdr.is_reply = 1;
276    rp->hdr.len = sizeof (*rp);
277
278    *reply_port = rport;
279    *len = rp->hdr.len;
280
281    if (current_debugger == KDP_CUR_DB)
282    	active_debugger=1;
283
284    return (TRUE);
285}
286
/*
 * KDP_DISCONNECT: tear down the current session and reset all connection
 * state. If we are in a panic (and not configured to return) or a
 * no-resume disconnect was requested, set reattach_wait so the target
 * keeps spinning for a fresh attach instead of resuming.
 */
static boolean_t
kdp_disconnect(
    kdp_pkt_t		*pkt,
    int			*len,
    unsigned short	*reply_port
)
{
    kdp_disconnect_req_t	*rq = &pkt->disconnect_req;
    size_t			plen = *len;
    kdp_disconnect_reply_t	*rp = &pkt->disconnect_reply;

    if (plen < sizeof (*rq))
	return (FALSE);

    if (!kdp.is_conn)
	return (FALSE);

    dprintf(("kdp_disconnect\n"));

    /* Capture the reply port before clearing the session state. */
    *reply_port = kdp.reply_port;

    kdp.reply_port = kdp.exception_port = 0;
    kdp.is_halted = kdp.is_conn = FALSE;
    kdp.exception_seq = kdp.conn_seq = 0;
    kdp.session_key = 0;

    if ((panicstr != NULL) && (return_on_panic == 0))
	reattach_wait = 1;

    if (noresume_on_disconnect == 1) {
	reattach_wait = 1;
	noresume_on_disconnect = 0;	/* one-shot flag */
    }

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    *len = rp->hdr.len;

    if (current_debugger == KDP_CUR_DB)
    	active_debugger=0;

    return (TRUE);
}
331
332static boolean_t
333kdp_reattach(
334    kdp_pkt_t		*pkt,
335    int			*len,
336    unsigned short	*reply_port
337)
338{
339    kdp_reattach_req_t            *rq = &pkt->reattach_req;
340
341    kdp.is_conn = TRUE;
342    kdp_disconnect(pkt, len, reply_port);
343    *reply_port = rq->req_reply_port;
344    reattach_wait = 1;
345    return (TRUE);
346}
347
348static boolean_t
349kdp_hostinfo(
350    kdp_pkt_t		*pkt,
351    int			*len,
352    unsigned short	*reply_port
353)
354{
355    kdp_hostinfo_req_t	*rq = &pkt->hostinfo_req;
356    size_t		plen = *len;
357    kdp_hostinfo_reply_t *rp = &pkt->hostinfo_reply;
358
359    if (plen < sizeof (*rq))
360	return (FALSE);
361
362    dprintf(("kdp_hostinfo\n"));
363
364    rp->hdr.is_reply = 1;
365    rp->hdr.len = sizeof (*rp);
366
367    kdp_machine_hostinfo(&rp->hostinfo);
368
369    *reply_port = kdp.reply_port;
370    *len = rp->hdr.len;
371
372    return (TRUE);
373}
374
375static boolean_t
376kdp_kernelversion(
377    kdp_pkt_t		*pkt,
378    int			*len,
379    unsigned short	*reply_port
380)
381{
382    kdp_kernelversion_req_t	*rq = &pkt->kernelversion_req;
383    size_t		plen = *len;
384    kdp_kernelversion_reply_t *rp = &pkt->kernelversion_reply;
385	size_t		slen;
386
387    if (plen < sizeof (*rq))
388		return (FALSE);
389
390    rp->hdr.is_reply = 1;
391    rp->hdr.len = sizeof (*rp);
392
393    dprintf(("kdp_kernelversion\n"));
394    slen = strlcpy(rp->version, kdp_kernelversion_string, MAX_KDP_DATA_SIZE);
395
396    rp->hdr.len += slen + 1; /* strlcpy returns the amount copied with NUL */
397
398    *reply_port = kdp.reply_port;
399    *len = rp->hdr.len;
400
401    return (TRUE);
402}
403
404static boolean_t
405kdp_suspend(
406    kdp_pkt_t		*pkt,
407    int			*len,
408    unsigned short	*reply_port
409)
410{
411    kdp_suspend_req_t	*rq = &pkt->suspend_req;
412    size_t		plen = *len;
413    kdp_suspend_reply_t *rp = &pkt->suspend_reply;
414
415    if (plen < sizeof (*rq))
416	return (FALSE);
417
418    rp->hdr.is_reply = 1;
419    rp->hdr.len = sizeof (*rp);
420
421    dprintf(("kdp_suspend\n"));
422
423    kdp.is_halted = TRUE;
424
425    *reply_port = kdp.reply_port;
426    *len = rp->hdr.len;
427
428    return (TRUE);
429}
430
431static boolean_t
432kdp_resumecpus(
433    kdp_pkt_t		*pkt,
434    int			*len,
435    unsigned short	*reply_port
436)
437{
438    kdp_resumecpus_req_t	*rq = &pkt->resumecpus_req;
439    size_t			plen = *len;
440    kdp_resumecpus_reply_t 	*rp = &pkt->resumecpus_reply;
441
442    if (plen < sizeof (*rq))
443	return (FALSE);
444
445    rp->hdr.is_reply = 1;
446    rp->hdr.len = sizeof (*rp);
447
448    dprintf(("kdp_resumecpus %x\n", rq->cpu_mask));
449
450    kdp.is_halted = FALSE;
451
452    *reply_port = kdp.reply_port;
453    *len = rp->hdr.len;
454
455    return (TRUE);
456}
457
458static boolean_t
459kdp_writemem(
460    kdp_pkt_t		*pkt,
461    int			*len,
462    unsigned short	*reply_port
463)
464{
465    kdp_writemem_req_t	*rq = &pkt->writemem_req;
466    size_t		plen = *len;
467    kdp_writemem_reply_t *rp = &pkt->writemem_reply;
468    mach_vm_size_t 	cnt;
469
470    if (plen < sizeof (*rq))
471	return (FALSE);
472
473    if (rq->nbytes > MAX_KDP_DATA_SIZE)
474	rp->error = KDPERR_BAD_NBYTES;
475    else {
476	dprintf(("kdp_writemem addr %x size %d\n", rq->address, rq->nbytes));
477	cnt = kdp_machine_vm_write((caddr_t)rq->data, (mach_vm_address_t)rq->address, rq->nbytes);
478	rp->error = KDPERR_ACCESS(rq->nbytes, cnt);
479	dprintf(("  cnt %lld error %d\n", cnt, rp->error));
480    }
481
482    rp->hdr.is_reply = 1;
483    rp->hdr.len = sizeof (*rp);
484
485    *reply_port = kdp.reply_port;
486    *len = rp->hdr.len;
487
488    return (TRUE);
489}
490
491static boolean_t
492kdp_writemem64(
493    kdp_pkt_t		*pkt,
494    int			*len,
495    unsigned short	*reply_port
496)
497{
498    kdp_writemem64_req_t	*rq = &pkt->writemem64_req;
499    size_t		plen = *len;
500    kdp_writemem64_reply_t *rp = &pkt->writemem64_reply;
501    mach_vm_size_t 		cnt;
502
503    if (plen < sizeof (*rq))
504		return (FALSE);
505
506    if (rq->nbytes > MAX_KDP_DATA_SIZE)
507	rp->error = KDPERR_BAD_NBYTES;
508    else {
509	dprintf(("kdp_writemem64 addr %llx size %d\n", rq->address, rq->nbytes));
510	cnt = kdp_machine_vm_write((caddr_t)rq->data, (mach_vm_address_t)rq->address, (mach_vm_size_t)rq->nbytes);
511	rp->error = KDPERR_ACCESS(rq->nbytes, cnt);
512	dprintf(("  cnt %lld error %d\n", cnt, rp->error));
513    }
514
515    rp->hdr.is_reply = 1;
516    rp->hdr.len = sizeof (*rp);
517
518    *reply_port = kdp.reply_port;
519    *len = rp->hdr.len;
520
521    return (TRUE);
522}
523
/*
 * KDP_WRITEPHYSMEM64: write a client-supplied byte block to a 64-bit
 * physical address, executed on the requested logical cpu.
 */
static boolean_t
kdp_writephysmem64(
    kdp_pkt_t		*pkt,
    int			*len,
    unsigned short	*reply_port
)
{
    kdp_writephysmem64_req_t	*rq = &pkt->writephysmem64_req;
    size_t		plen = *len;
    kdp_writephysmem64_reply_t *rp = &pkt->writephysmem64_reply;
    mach_vm_size_t 		cnt;
    unsigned int		size;

    if (plen < sizeof (*rq))
	return (FALSE);

    size = rq->nbytes;
    if (size > MAX_KDP_DATA_SIZE)
	rp->error = KDPERR_BAD_NBYTES;
    else {
	dprintf(("kdp_writephysmem64 addr %llx size %d\n", rq->address, size));
	cnt = kdp_machine_phys_write(rq, rq->data, rq->lcpu);
	rp->error = KDPERR_ACCESS(size, cnt); /* error unless all bytes landed */
	dprintf(("  cnt %lld error %d\n", cnt, rp->error));
    }

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
558
559static boolean_t
560kdp_readmem(
561    kdp_pkt_t		*pkt,
562    int			*len,
563    unsigned short	*reply_port
564)
565{
566    kdp_readmem_req_t	*rq = &pkt->readmem_req;
567    size_t		plen = *len;
568    kdp_readmem_reply_t *rp = &pkt->readmem_reply;
569    mach_vm_size_t	cnt;
570    unsigned int	size;
571
572    if (plen < sizeof (*rq))
573	return (FALSE);
574
575    rp->hdr.is_reply = 1;
576    rp->hdr.len = sizeof (*rp);
577
578    size = rq->nbytes;
579    if (size > MAX_KDP_DATA_SIZE)
580	rp->error = KDPERR_BAD_NBYTES;
581    else {
582	dprintf(("kdp_readmem addr %x size %d\n", rq->address, size));
583	cnt = kdp_machine_vm_read((mach_vm_address_t)rq->address, (caddr_t)rp->data, rq->nbytes);
584	rp->error = KDPERR_ACCESS(size, cnt);
585	dprintf(("  cnt %lld error %d\n", cnt, rp->error));
586
587	rp->hdr.len += cnt;
588    }
589
590    *reply_port = kdp.reply_port;
591    *len = rp->hdr.len;
592
593    return (TRUE);
594}
595
596static boolean_t
597kdp_readmem64(
598    kdp_pkt_t		*pkt,
599    int			*len,
600    unsigned short	*reply_port
601)
602{
603    kdp_readmem64_req_t	*rq = &pkt->readmem64_req;
604    size_t		plen = *len;
605    kdp_readmem64_reply_t *rp = &pkt->readmem64_reply;
606    mach_vm_size_t	cnt;
607    unsigned int	size;
608
609    if (plen < sizeof (*rq))
610		return (FALSE);
611
612    rp->hdr.is_reply = 1;
613    rp->hdr.len = sizeof (*rp);
614
615    size = rq->nbytes;
616    if (size > MAX_KDP_DATA_SIZE)
617	rp->error = KDPERR_BAD_NBYTES;
618    else {
619	dprintf(("kdp_readmem64 addr %llx size %d\n", rq->address, size));
620	cnt = kdp_machine_vm_read((mach_vm_address_t)rq->address, (caddr_t)rp->data, rq->nbytes);
621	rp->error = KDPERR_ACCESS(size, cnt);
622	dprintf(("  cnt %lld error %d\n", cnt, rp->error));
623
624	rp->hdr.len += cnt;
625    }
626
627    *reply_port = kdp.reply_port;
628    *len = rp->hdr.len;
629
630    return (TRUE);
631}
632
/*
 * KDP_READPHYSMEM64: read from a 64-bit physical address into the reply
 * payload, executed on the requested logical cpu. Bytes actually read
 * follow the reply header (hdr.len grows by cnt).
 */
static boolean_t
kdp_readphysmem64(
    kdp_pkt_t		*pkt,
    int			*len,
    unsigned short	*reply_port
)
{
    kdp_readphysmem64_req_t	*rq = &pkt->readphysmem64_req;
    size_t		plen = *len;
    kdp_readphysmem64_reply_t *rp = &pkt->readphysmem64_reply;
    mach_vm_size_t	cnt;
    unsigned int	size;

    if (plen < sizeof (*rq))
	return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    size = rq->nbytes;
    if (size > MAX_KDP_DATA_SIZE)
	rp->error = KDPERR_BAD_NBYTES;
    else {
	dprintf(("kdp_readphysmem64 addr %llx size %d\n", rq->address, size));
	cnt = kdp_machine_phys_read(rq, rp->data, rq->lcpu);
	rp->error = KDPERR_ACCESS(size, cnt); /* error unless all bytes read */
	dprintf(("  cnt %lld error %d\n", cnt, rp->error));

	rp->hdr.len += cnt;
    }

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
669
670static boolean_t
671kdp_maxbytes(
672    kdp_pkt_t		*pkt,
673    int			*len,
674    unsigned short	*reply_port
675)
676{
677    kdp_maxbytes_req_t	*rq = &pkt->maxbytes_req;
678    size_t		plen = *len;
679    kdp_maxbytes_reply_t *rp = &pkt->maxbytes_reply;
680
681    if (plen < sizeof (*rq))
682	return (FALSE);
683
684    rp->hdr.is_reply = 1;
685    rp->hdr.len = sizeof (*rp);
686
687    dprintf(("kdp_maxbytes\n"));
688
689    rp->max_bytes = MAX_KDP_DATA_SIZE;
690
691    *reply_port = kdp.reply_port;
692    *len = rp->hdr.len;
693
694    return (TRUE);
695}
696
697static boolean_t
698kdp_version(
699    kdp_pkt_t		*pkt,
700    int			*len,
701    unsigned short	*reply_port
702)
703{
704    kdp_version_req_t	*rq = &pkt->version_req;
705    size_t		plen = *len;
706    kdp_version_reply_t *rp = &pkt->version_reply;
707
708    if (plen < sizeof (*rq))
709	return (FALSE);
710
711    rp->hdr.is_reply = 1;
712    rp->hdr.len = sizeof (*rp);
713
714    dprintf(("kdp_version\n"));
715
716    rp->version = KDP_VERSION;
717    if (!(kdp_flag & KDP_BP_DIS))
718      rp->feature = KDP_FEATURE_BP;
719    else
720      rp->feature = 0;
721
722    *reply_port = kdp.reply_port;
723    *len = rp->hdr.len;
724
725    return (TRUE);
726}
727
/*
 * KDP_REGIONS: report the memory regions the debugger may access.
 * This implementation advertises a single catch-all region covering
 * [0, 0xffffffff) with full protections rather than walking real maps.
 */
static boolean_t
kdp_regions(
    kdp_pkt_t		*pkt,
    int			*len,
    unsigned short	*reply_port
)
{
    kdp_regions_req_t	*rq = &pkt->regions_req;
    size_t		plen = *len;
    kdp_regions_reply_t *rp = &pkt->regions_reply;
    kdp_region_t	*r;

    if (plen < sizeof (*rq))
	return (FALSE);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    dprintf(("kdp_regions\n"));

    r = rp->regions;
    rp->nregions = 0;

    /* One all-encompassing region. */
    r->address = 0;
    r->nbytes = 0xffffffff;

    r->protection = VM_PROT_ALL; r++; rp->nregions++;

    rp->hdr.len += rp->nregions * sizeof (kdp_region_t);

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
763
/*
 * KDP_WRITEREGS: install a register set of the given flavor on a cpu.
 * The payload size is inferred from the packet header length minus the
 * KDP header and the flavor word.
 * NOTE(review): size is derived from hdr.len with no further range
 * validation before it reaches the machine layer — confirm the machine
 * backend bounds-checks it against the flavor's register-set size.
 */
static boolean_t
kdp_writeregs(
    kdp_pkt_t		*pkt,
    int			*len,
    unsigned short	*reply_port
)
{
    kdp_writeregs_req_t	*rq = &pkt->writeregs_req;
    size_t		plen = *len;
    int			size;
    kdp_writeregs_reply_t *rp = &pkt->writeregs_reply;

    if (plen < sizeof (*rq))
	return (FALSE);

    size = rq->hdr.len - (unsigned)sizeof(kdp_hdr_t) - (unsigned)sizeof(unsigned int);
    rp->error = kdp_machine_write_regs(rq->cpu, rq->flavor, rq->data, &size);

    rp->hdr.is_reply = 1;
    rp->hdr.len = sizeof (*rp);

    *reply_port = kdp.reply_port;
    *len = rp->hdr.len;

    return (TRUE);
}
790
791static boolean_t
792kdp_readregs(
793    kdp_pkt_t		*pkt,
794    int			*len,
795    unsigned short	*reply_port
796)
797{
798    kdp_readregs_req_t	*rq = &pkt->readregs_req;
799    size_t		plen = *len;
800    kdp_readregs_reply_t *rp = &pkt->readregs_reply;
801    int			size;
802
803    if (plen < sizeof (*rq))
804	return (FALSE);
805
806    rp->hdr.is_reply = 1;
807    rp->hdr.len = sizeof (*rp);
808
809    rp->error = kdp_machine_read_regs(rq->cpu, rq->flavor, rp->data, &size);
810    rp->hdr.len += size;
811
812    *reply_port = kdp.reply_port;
813    *len = rp->hdr.len;
814
815    return (TRUE);
816}
817
818
819boolean_t
820kdp_breakpoint_set(
821    kdp_pkt_t		*pkt,
822    int			*len,
823    unsigned short	*reply_port
824)
825{
826	kdp_breakpoint_req_t	*rq = &pkt->breakpoint_req;
827	kdp_breakpoint_reply_t *rp = &pkt->breakpoint_reply;
828	size_t		plen = *len;
829	kdp_error_t	kerr;
830
831	if (plen < sizeof (*rq))
832		return (FALSE);
833
834	dprintf(("kdp_breakpoint_set %x\n", rq->address));
835
836	kerr = kdp_set_breakpoint_internal((mach_vm_address_t)rq->address);
837
838	rp->error = kerr;
839
840	rp->hdr.is_reply = 1;
841	rp->hdr.len = sizeof (*rp);
842	*reply_port = kdp.reply_port;
843	*len = rp->hdr.len;
844
845	return (TRUE);
846}
847
848boolean_t
849kdp_breakpoint64_set(
850    kdp_pkt_t		*pkt,
851    int			*len,
852    unsigned short	*reply_port
853)
854{
855	kdp_breakpoint64_req_t	*rq = &pkt->breakpoint64_req;
856	kdp_breakpoint64_reply_t *rp = &pkt->breakpoint64_reply;
857	size_t		plen = *len;
858	kdp_error_t	kerr;
859
860	if (plen < sizeof (*rq))
861		return (FALSE);
862
863	dprintf(("kdp_breakpoint64_set %llx\n", rq->address));
864
865	kerr = kdp_set_breakpoint_internal((mach_vm_address_t)rq->address);
866
867	rp->error = kerr;
868
869	rp->hdr.is_reply = 1;
870	rp->hdr.len = sizeof (*rp);
871	*reply_port = kdp.reply_port;
872	*len = rp->hdr.len;
873
874	return (TRUE);
875}
876
877boolean_t
878kdp_breakpoint_remove(
879    kdp_pkt_t		*pkt,
880    int			*len,
881    unsigned short	*reply_port
882)
883{
884	kdp_breakpoint_req_t	*rq = &pkt->breakpoint_req;
885	kdp_breakpoint_reply_t *rp = &pkt->breakpoint_reply;
886	size_t		plen = *len;
887	kdp_error_t	kerr;
888	if (plen < sizeof (*rq))
889		return (FALSE);
890
891	dprintf(("kdp_breakpoint_remove %x\n", rq->address));
892
893	kerr = kdp_remove_breakpoint_internal((mach_vm_address_t)rq->address);
894
895	rp->error = kerr;
896
897	rp->hdr.is_reply = 1;
898	rp->hdr.len = sizeof (*rp);
899	*reply_port = kdp.reply_port;
900	*len = rp->hdr.len;
901
902	return (TRUE);
903}
904
905boolean_t
906kdp_breakpoint64_remove(
907    kdp_pkt_t		*pkt,
908    int			*len,
909    unsigned short	*reply_port
910)
911{
912	kdp_breakpoint64_req_t	*rq = &pkt->breakpoint64_req;
913	kdp_breakpoint64_reply_t *rp = &pkt->breakpoint64_reply;
914	size_t		plen = *len;
915	kdp_error_t	kerr;
916
917	if (plen < sizeof (*rq))
918		return (FALSE);
919
920	dprintf(("kdp_breakpoint64_remove %llx\n", rq->address));
921
922	kerr = kdp_remove_breakpoint_internal((mach_vm_address_t)rq->address);
923
924	rp->error = kerr;
925
926	rp->hdr.is_reply = 1;
927	rp->hdr.len = sizeof (*rp);
928	*reply_port = kdp.reply_port;
929	*len = rp->hdr.len;
930
931	return (TRUE);
932}
933
934
935kdp_error_t
936kdp_set_breakpoint_internal(
937    mach_vm_address_t	address
938)
939{
940
941	uint8_t		breakinstr[MAX_BREAKINSN_BYTES], oldinstr[MAX_BREAKINSN_BYTES];
942	uint32_t	breakinstrsize = sizeof(breakinstr);
943	mach_vm_size_t	cnt;
944	int			i;
945
946	kdp_machine_get_breakinsn(breakinstr, &breakinstrsize);
947
948	if(breakpoints_initialized == 0)
949    {
950		for(i=0;(i < MAX_BREAKPOINTS); breakpoint_list[i].address=0, i++);
951		breakpoints_initialized++;
952    }
953
954	cnt = kdp_machine_vm_read(address, (caddr_t)&oldinstr, (mach_vm_size_t)breakinstrsize);
955
956	if (0 == memcmp(oldinstr, breakinstr, breakinstrsize)) {
957		printf("A trap was already set at that address, not setting new breakpoint\n");
958
959		return KDPERR_BREAKPOINT_ALREADY_SET;
960	}
961
962	for(i=0;(i < MAX_BREAKPOINTS) && (breakpoint_list[i].address != 0); i++);
963
964	if (i == MAX_BREAKPOINTS) {
965		return KDPERR_MAX_BREAKPOINTS;
966	}
967
968	breakpoint_list[i].address =  address;
969	memcpy(breakpoint_list[i].oldbytes, oldinstr, breakinstrsize);
970	breakpoint_list[i].bytesused =  breakinstrsize;
971
972	cnt = kdp_machine_vm_write((caddr_t)&breakinstr, address, breakinstrsize);
973
974	return KDPERR_NO_ERROR;
975}
976
977kdp_error_t
978kdp_remove_breakpoint_internal(
979    mach_vm_address_t	address
980)
981{
982	mach_vm_size_t	cnt;
983	int		i;
984
985	for(i=0;(i < MAX_BREAKPOINTS) && (breakpoint_list[i].address != address); i++);
986
987	if (i == MAX_BREAKPOINTS)
988	{
989		return KDPERR_BREAKPOINT_NOT_FOUND;
990	}
991
992	breakpoint_list[i].address = 0;
993	cnt = kdp_machine_vm_write((caddr_t)&breakpoint_list[i].oldbytes, address, breakpoint_list[i].bytesused);
994
995	return KDPERR_NO_ERROR;
996}
997
998boolean_t
999kdp_remove_all_breakpoints(void)
1000{
1001	int i;
1002	boolean_t breakpoint_found = FALSE;
1003
1004	if (breakpoints_initialized)
1005	{
1006		for(i=0;i < MAX_BREAKPOINTS; i++)
1007		{
1008			if (breakpoint_list[i].address)
1009			{
1010				kdp_machine_vm_write((caddr_t)&(breakpoint_list[i].oldbytes), (mach_vm_address_t)breakpoint_list[i].address, (mach_vm_size_t)breakpoint_list[i].bytesused);
1011				breakpoint_found = TRUE;
1012				breakpoint_list[i].address = 0;
1013			}
1014		}
1015
1016		if (breakpoint_found)
1017			printf("kdp_remove_all_breakpoints: found extant breakpoints, removing them.\n");
1018	}
1019	return breakpoint_found;
1020}
1021
1022boolean_t
1023kdp_reboot(
1024	__unused kdp_pkt_t *pkt,
1025	__unused int	*len,
1026	__unused unsigned short *reply_port
1027)
1028{
1029	dprintf(("kdp_reboot\n"));
1030
1031	kdp_machine_reboot();
1032
1033	return (TRUE); // no, not really, we won't return
1034}
1035
1036#define MAX_FRAMES 1000
1037
1038static int pid_from_task(task_t task)
1039{
1040	int pid = -1;
1041
1042	if (task->bsd_info)
1043		pid = proc_pid(task->bsd_info);
1044
1045	return pid;
1046}
1047
1048static uint64_t
1049proc_uniqueid_from_task(task_t task)
1050{
1051	uint64_t uniqueid = ~(0ULL);
1052
1053	if (task->bsd_info)
1054		uniqueid = proc_uniqueid(task->bsd_info);
1055
1056	return uniqueid;
1057}
1058
1059static uint64_t
1060proc_was_throttled_from_task(task_t task)
1061{
1062	uint64_t was_throttled = 0;
1063
1064	if (task->bsd_info)
1065		was_throttled = proc_was_throttled(task->bsd_info);
1066
1067	return was_throttled;
1068}
1069
1070static uint64_t
1071proc_did_throttle_from_task(task_t task)
1072{
1073	uint64_t did_throttle = 0;
1074
1075	if (task->bsd_info)
1076		did_throttle = proc_did_throttle(task->bsd_info);
1077
1078	return did_throttle;
1079}
1080
/*
 * Copy 'size' bytes from user virtual address 'uaddr' in pmap 'p' into
 * the kernel buffer 'dest', one physical-page-bounded chunk at a time.
 * Returns TRUE only if every byte was copied; stops early (returning
 * FALSE) at the first chunk whose source or destination cannot be
 * resolved to a valid physical page.
 */
boolean_t
kdp_copyin(pmap_t p, uint64_t uaddr, void *dest, size_t size) {
	size_t rem = size;
	char *kvaddr = dest;

	while (rem) {
		/* Resolve source (user) and destination (kernel) physical addresses. */
		ppnum_t upn = pmap_find_phys(p, uaddr);
		uint64_t phys_src = ptoa_64(upn) | (uaddr & PAGE_MASK);
		uint64_t phys_dest = kvtophys((vm_offset_t)kvaddr);
		/* Clamp the chunk so it crosses neither page boundary. */
		uint64_t src_rem = PAGE_SIZE - (phys_src & PAGE_MASK);
		uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK);
		size_t cur_size = (uint32_t) MIN(src_rem, dst_rem);
		cur_size = MIN(cur_size, rem);

		if (upn && pmap_valid_page(upn) && phys_dest) {
			bcopy_phys(phys_src, phys_dest, cur_size);
		}
		else
			break;	/* unmapped or invalid page: abandon the copy */
		uaddr += cur_size;
		kvaddr += cur_size;
		rem -= cur_size;
	}
	return (rem == 0);
}
1106
1107
/*
 * Fill in the memory/I-O statistics section of a stackshot.
 * Aggregates per-processor compression counters, samples the global VM
 * page counters, and queries the VM pressure monitor; the pressure
 * fields are zeroed (with pages_wanted_reclaimed_valid = 0) when that
 * query fails.
 */
static void
kdp_mem_and_io_snapshot(struct mem_and_io_snapshot *memio_snap)
{
  unsigned int pages_reclaimed;
  unsigned int pages_wanted;
  kern_return_t kErr;

  processor_t processor;
  vm_statistics64_t stat;
  vm_statistics64_data_t host_vm_stat;

  /* Start from the first processor's counters... */
  processor = processor_list;
  stat = &PROCESSOR_DATA(processor, vm_stat);
  host_vm_stat = *stat;

  if (processor_count > 1) {
    simple_lock(&processor_list_lock);

    /* ...then fold in the compression counters of the remaining cpus. */
    while ((processor = processor->processor_list) != NULL) {
      stat = &PROCESSOR_DATA(processor, vm_stat);
      host_vm_stat.compressions += stat->compressions;
      host_vm_stat.decompressions += stat->decompressions;
    }

    simple_unlock(&processor_list_lock);
  }

  memio_snap->snapshot_magic = STACKSHOT_MEM_AND_IO_SNAPSHOT_MAGIC;
  memio_snap->free_pages = vm_page_free_count;
  memio_snap->active_pages = vm_page_active_count;
  memio_snap->inactive_pages = vm_page_inactive_count;
  memio_snap->purgeable_pages = vm_page_purgeable_count;
  memio_snap->wired_pages = vm_page_wire_count;
  memio_snap->speculative_pages = vm_page_speculative_count;
  memio_snap->throttled_pages = vm_page_throttled_count;
  memio_snap->busy_buffer_count = count_busy_buffers();
  memio_snap->filebacked_pages = vm_page_external_count;
  memio_snap->compressions = (uint32_t)host_vm_stat.compressions;
  memio_snap->decompressions = (uint32_t)host_vm_stat.decompressions;
  memio_snap->compressor_size = VM_PAGE_COMPRESSOR_COUNT;
  kErr = mach_vm_pressure_monitor(FALSE, VM_PRESSURE_TIME_WINDOW, &pages_reclaimed, &pages_wanted);
  if ( ! kErr ) {
	memio_snap->pages_wanted = (uint32_t)pages_wanted;
	memio_snap->pages_reclaimed = (uint32_t)pages_reclaimed;
	memio_snap->pages_wanted_reclaimed_valid = 1;
  } else {
	memio_snap->pages_wanted = 0;
	memio_snap->pages_reclaimed = 0;
	memio_snap->pages_wanted_reclaimed_valid = 0;
  }
}
1159
1160
1161
1162/*
1163 * Method for grabbing timer values safely, in the sense that no infinite loop will occur
1164 * Certain flavors of the timer_grab function, which would seem to be the thing to use,
1165 * can loop infinitely if called while the timer is in the process of being updated.
1166 * Unfortunately, it is (rarely) possible to get inconsistent top and bottom halves of
1167 * the timer using this method. This seems insoluble, since stackshot runs in a context
1168 * where the timer might be half-updated, and has no way of yielding control just long
1169 * enough to finish the update.
1170 */
1171
static uint64_t safe_grab_timer_value(struct timer *t)
{
#if   defined(__LP64__)
  return t->all_bits;	/* single 64-bit load */
#else
  /* 32-bit: assemble the value from the two halves; see the caveat in
   * the block comment above about possible torn reads. */
  uint64_t time = t->high_bits;    /* endian independent grab */
  time = (time << 32) | t->low_bits;
  return time;
#endif
}
1182
/*
 * kdp_stackshot
 *
 * Walk every task (or just the one matching 'pid') and emit a stream of
 * snapshot records into 'tracebuf': an optional mem_and_io_snapshot,
 * then per task a task_snapshot, an optional UUID load-info array, and
 * one thread_snapshot (plus stack frames) per thread.
 *
 * Runs in debugger context: no locks are taken, and every kernel
 * structure is probed with ml_validate_nofault() before it is touched.
 *
 *   pid             target BSD pid, or -1 to snapshot all tasks
 *   tracebuf        destination buffer for the snapshot records
 *   tracebuf_size   size of tracebuf in bytes
 *   trace_flags     STACKSHOT_* option bits
 *   dispatch_offset offset of the serial number within a user dispatch
 *                   queue structure (supplied by the caller)
 *   pbytesTraced    out: number of bytes actually written to tracebuf
 *
 * Returns 0 on success, -1 if the buffer is exhausted mid-walk; a task
 * or thread pointer failing validation also aborts (error stays 0 in
 * that path, but *pbytesTraced reflects what was written so far).
 */
int
kdp_stackshot(int pid, void *tracebuf, uint32_t tracebuf_size, uint32_t trace_flags, uint32_t dispatch_offset, uint32_t *pbytesTraced)
{
	char *tracepos = (char *) tracebuf;	/* current write cursor */
	char *tracebound = tracepos + tracebuf_size;	/* one past end of buffer */
	uint32_t tracebytes = 0;
	int error = 0;

	task_t task = TASK_NULL;
	thread_t thread = THREAD_NULL;
	thread_snapshot_t tsnap = NULL;
	unsigned framesize = 2 * sizeof(vm_offset_t);

	/* Walk the active 'tasks' list first, then 'terminated_tasks'. */
	queue_head_t *task_list = &tasks;
	boolean_t is_active_list = TRUE;

	boolean_t dispatch_p = ((trace_flags & STACKSHOT_GET_DQ) != 0);
	boolean_t save_loadinfo_p = ((trace_flags & STACKSHOT_SAVE_LOADINFO) != 0);
	boolean_t save_kextloadinfo_p = ((trace_flags & STACKSHOT_SAVE_KEXT_LOADINFO) != 0);
	boolean_t save_userframes_p = ((trace_flags & STACKSHOT_SAVE_KERNEL_FRAMES_ONLY) == 0);

	/* Optional global memory/IO statistics record, emitted first. */
	if(trace_flags & STACKSHOT_GET_GLOBAL_MEM_STATS) {
	  if(tracepos + sizeof(struct mem_and_io_snapshot) > tracebound) {
	    error = -1;
	    goto error_exit;
	  }
	  kdp_mem_and_io_snapshot((struct mem_and_io_snapshot *)tracepos);
	  tracepos += sizeof(struct mem_and_io_snapshot);
	}

walk_list:
	queue_iterate(task_list, task, task_t, tasks) {
		if ((task == NULL) || !ml_validate_nofault((vm_offset_t) task, sizeof(struct task)))
			goto error_exit;

		int task_pid = pid_from_task(task);
		uint64_t task_uniqueid = proc_uniqueid_from_task(task);
		boolean_t task64 = task_has_64BitAddr(task);

		if (!task->active) {
			/*
			 * Not interested in terminated tasks without threads, and
			 * at the moment, stackshot can't handle a task  without a name.
			 */
			if (queue_empty(&task->threads) || task_pid == -1) {
				continue;
			}
		}

		/* Trace everything, unless a process was specified */
		if ((pid == -1) || (pid == task_pid)) {
			task_snapshot_t task_snap;
			uint32_t uuid_info_count = 0;
			mach_vm_address_t uuid_info_addr = 0;
			/* Validate map and pmap once; gates every copyin below. */
			boolean_t have_map = (task->map != NULL) &&
				(ml_validate_nofault((vm_offset_t)(task->map), sizeof(struct _vm_map)));
			boolean_t have_pmap = have_map && (task->map->pmap != NULL) &&
				(ml_validate_nofault((vm_offset_t)(task->map->pmap), sizeof(struct pmap)));
			uint64_t shared_cache_base_address = 0;

			if (have_pmap && task->active && save_loadinfo_p && task_pid > 0) {
				// Read the dyld_all_image_infos struct from the task memory to get UUID array count and location
				if (task64) {
					struct user64_dyld_all_image_infos task_image_infos;
					if (kdp_copyin(task->map->pmap, task->all_image_info_addr, &task_image_infos, sizeof(struct user64_dyld_all_image_infos))) {
						uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
						uuid_info_addr = task_image_infos.uuidArray;
					}
				} else {
					struct user32_dyld_all_image_infos task_image_infos;
					if (kdp_copyin(task->map->pmap, task->all_image_info_addr, &task_image_infos, sizeof(struct user32_dyld_all_image_infos))) {
						uuid_info_count = task_image_infos.uuidArrayCount;
						uuid_info_addr = task_image_infos.uuidArray;
					}
				}

				// If we get a NULL uuid_info_addr (which can happen when we catch dyld in the middle of updating
				// this data structure), we zero the uuid_info_count so that we won't even try to save load info
				// for this task.
				if (!uuid_info_addr) {
					uuid_info_count = 0;
				}
			}

			/* Kernel task (pid 0): load info is the loaded-kext summary list. */
			if (have_pmap && save_kextloadinfo_p && task_pid == 0) {
				if (ml_validate_nofault((vm_offset_t)(gLoadedKextSummaries), sizeof(OSKextLoadedKextSummaryHeader))) {
					uuid_info_count = gLoadedKextSummaries->numSummaries + 1; /* include main kernel UUID */
				}
			}

			if (tracepos + sizeof(struct task_snapshot) > tracebound) {
				error = -1;
				goto error_exit;
			}

			task_snap = (task_snapshot_t) tracepos;
			task_snap->snapshot_magic = STACKSHOT_TASK_SNAPSHOT_MAGIC;
			task_snap->pid = task_pid;
			task_snap->uniqueid = task_uniqueid;
			task_snap->nloadinfos = uuid_info_count;
			/* Add the BSD process identifiers */
			if (task_pid != -1)
				proc_name_kdp(task, task_snap->p_comm, sizeof(task_snap->p_comm));
			else
				task_snap->p_comm[0] = '\0';
			task_snap->ss_flags = 0;
			if (task64)
				task_snap->ss_flags |= kUser64_p;
			if (task64 && task_pid == 0)
				task_snap->ss_flags |= kKernel64_p;
			if (!task->active)
				task_snap->ss_flags |= kTerminatedSnapshot;
			if(task->pidsuspended) task_snap->ss_flags |= kPidSuspended;
			if(task->frozen) task_snap->ss_flags |= kFrozen;

			if (task->effective_policy.darwinbg ==  1) {
				task_snap->ss_flags |= kTaskDarwinBG;
			}

			if (task->effective_policy.t_sup_active == 1)
				task_snap->ss_flags |= kTaskIsSuppressed;

			/* NOTE(review): 0xFF in the upper 16 bits appears to tag the
			 * QoS value as explicitly set — confirm against consumer. */
			task_snap->latency_qos = (task->effective_policy.t_latency_qos == LATENCY_QOS_TIER_UNSPECIFIED) ?
			                         LATENCY_QOS_TIER_UNSPECIFIED : ((0xFF << 16) | task->effective_policy.t_latency_qos);

			task_snap->suspend_count = task->suspend_count;
			task_snap->task_size = have_pmap ? pmap_resident_count(task->map->pmap) : 0;
			task_snap->faults = task->faults;
			task_snap->pageins = task->pageins;
			task_snap->cow_faults = task->cow_faults;

			task_snap->user_time_in_terminated_threads = task->total_user_time;
			task_snap->system_time_in_terminated_threads = task->total_system_time;
			/*
			 * The throttling counters are maintained as 64-bit counters in the proc
			 * structure. However, we reserve 32-bits (each) for them in the task_snapshot
			 * struct to save space and since we do not expect them to overflow 32-bits. If we
			 * find these values overflowing in the future, the fix would be to simply
			 * upgrade these counters to 64-bit in the task_snapshot struct
			 */
			task_snap->was_throttled = (uint32_t) proc_was_throttled_from_task(task);
			task_snap->did_throttle = (uint32_t) proc_did_throttle_from_task(task);

			/* Copy the bytes mapped at the shared-cache base into
			 * shared_cache_identifier; zero it if unavailable. */
			if (task->shared_region && ml_validate_nofault((vm_offset_t)task->shared_region,
														   sizeof(struct vm_shared_region))) {
				struct vm_shared_region *sr = task->shared_region;

				shared_cache_base_address = sr->sr_base_address + sr->sr_first_mapping;
			}
			if (!shared_cache_base_address
				|| !kdp_copyin(task->map->pmap, shared_cache_base_address, task_snap->shared_cache_identifier, sizeof(task_snap->shared_cache_identifier))) {
				memset(task_snap->shared_cache_identifier, 0x0, sizeof(task_snap->shared_cache_identifier));
			}
			if (task->shared_region) {
				/*
				 * No refcounting here, but we are in debugger
				 * context, so that should be safe.
				 */
				task_snap->shared_cache_slide = task->shared_region->sr_slide_info.slide;
			} else {
				task_snap->shared_cache_slide = 0;
			}

			tracepos += sizeof(struct task_snapshot);

			/* User task: append the dyld UUID info array from user memory. */
			if (task_pid > 0 && uuid_info_count > 0) {
				uint32_t uuid_info_size = (uint32_t)(task64 ? sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info));
				uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;

				if (tracepos + uuid_info_array_size > tracebound) {
					error = -1;
					goto error_exit;
				}

				// Copy in the UUID info array
				// It may be nonresident, in which case just fix up nloadinfos to 0 in the task_snap
				if (have_pmap && !kdp_copyin(task->map->pmap, uuid_info_addr, tracepos, uuid_info_array_size))
					task_snap->nloadinfos = 0;
				else
					tracepos += uuid_info_array_size;
			} else if (task_pid == 0 && uuid_info_count > 0) {
				/* Kernel task: emit the kernel UUID then one entry per kext. */
				uint32_t uuid_info_size = (uint32_t)sizeof(kernel_uuid_info);
				uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size;
				kernel_uuid_info *output_uuids;

				if (tracepos + uuid_info_array_size > tracebound) {
					error = -1;
					goto error_exit;
				}

				output_uuids = (kernel_uuid_info *)tracepos;

				/* do/while(0) gives the partial-failure paths a 'break'. */
				do {

					if (!kernel_uuid || !ml_validate_nofault((vm_offset_t)kernel_uuid, sizeof(uuid_t))) {
						/* Kernel UUID not found or inaccessible */
						task_snap->nloadinfos = 0;
						break;
					}

					/* Entry 0 is the main kernel; addresses are unslid
					 * before being exposed outside the kernel. */
					output_uuids[0].imageLoadAddress = (uintptr_t)VM_KERNEL_UNSLIDE(vm_kernel_stext);
					memcpy(&output_uuids[0].imageUUID, kernel_uuid, sizeof(uuid_t));

					if (ml_validate_nofault((vm_offset_t)(&gLoadedKextSummaries->summaries[0]),
											gLoadedKextSummaries->entry_size * gLoadedKextSummaries->numSummaries)) {
						uint32_t kexti;

						for (kexti=0 ; kexti < gLoadedKextSummaries->numSummaries; kexti++) {
							output_uuids[1+kexti].imageLoadAddress = (uintptr_t)VM_KERNEL_UNSLIDE(gLoadedKextSummaries->summaries[kexti].address);
							memcpy(&output_uuids[1+kexti].imageUUID, &gLoadedKextSummaries->summaries[kexti].uuid, sizeof(uuid_t));
						}

						tracepos += uuid_info_array_size;
					} else {
						/* kext summary invalid, but kernel UUID was copied */
						task_snap->nloadinfos = 1;
						tracepos += uuid_info_size;
						break;
					}
				} while(0);
			}

			/* One thread_snapshot (plus optional trailing data) per thread. */
			queue_iterate(&task->threads, thread, thread_t, task_threads){
				uint64_t tval;

				if ((thread == NULL) || !ml_validate_nofault((vm_offset_t) thread, sizeof(struct thread)))
					goto error_exit;

				/* NOTE(review): 4x headroom presumably covers the header
				 * plus trailing dispatch serial; frames are bounds-checked
				 * separately by the machine_trace routines — confirm. */
				if (((tracepos + 4 * sizeof(struct thread_snapshot)) > tracebound)) {
					error = -1;
					goto error_exit;
				}
                if (!save_userframes_p && thread->kernel_stack == 0)
                    continue;

				/* Populate the thread snapshot header */
				tsnap = (thread_snapshot_t) tracepos;
				tsnap->thread_id = thread_tid(thread);
				tsnap->state = thread->state;
				tsnap->priority = thread->priority;
				tsnap->sched_pri = thread->sched_pri;
				tsnap->sched_flags = thread->sched_flags;
				tsnap->wait_event = VM_KERNEL_UNSLIDE(thread->wait_event);
				tsnap->continuation = VM_KERNEL_UNSLIDE(thread->continuation);
				tval = safe_grab_timer_value(&thread->user_timer);
				tsnap->user_time = tval;
				tval = safe_grab_timer_value(&thread->system_timer);
				if (thread->precise_user_kernel_time) {
					tsnap->system_time = tval;
				} else {
					/* Without precise accounting, report combined time as
					 * user time and zero system time. */
					tsnap->user_time += tval;
					tsnap->system_time = 0;
				}
				tsnap->snapshot_magic = STACKSHOT_THREAD_SNAPSHOT_MAGIC;
				/* tsnap still points at the header; tracepos now marks
				 * where trailing data (dispatch serial, frames) goes. */
				tracepos += sizeof(struct thread_snapshot);
				tsnap->ss_flags = 0;

				if (thread->effective_policy.darwinbg) {
					tsnap->ss_flags |= kThreadDarwinBG;
				}

				/* Optionally chase thread -> dispatch queue -> serial number
				 * through user memory and append the serial after the header. */
				if (dispatch_p && (task != kernel_task) && (task->active) && have_pmap) {
					uint64_t dqkeyaddr = thread_dispatchqaddr(thread);
					if (dqkeyaddr != 0) {
						uint64_t dqaddr = 0;
						if (kdp_copyin(task->map->pmap, dqkeyaddr, &dqaddr, (task64 ? 8 : 4)) && (dqaddr != 0)) {
							uint64_t dqserialnumaddr = dqaddr + dispatch_offset;
							uint64_t dqserialnum = 0;
							if (kdp_copyin(task->map->pmap, dqserialnumaddr, &dqserialnum, (task64 ? 8 : 4))) {
								tsnap->ss_flags |= kHasDispatchSerial;
								/* dqserialnum was zero-initialized, so all 8
								 * bytes are defined even for 32-bit tasks. */
								*(uint64_t *)tracepos = dqserialnum;
								tracepos += 8;
							}
						}
					}
				}
/* Call through to the machine specific trace routines
 * Frames are added past the snapshot header.
 */
				tracebytes = 0;
				if (thread->kernel_stack != 0) {
#if defined(__LP64__)
					tracebytes = machine_trace_thread64(thread, tracepos, tracebound, MAX_FRAMES, FALSE);
					tsnap->ss_flags |= kKernel64_p;
					framesize = 16;
#else
					tracebytes = machine_trace_thread(thread, tracepos, tracebound, MAX_FRAMES, FALSE);
					framesize = 8;
#endif
				}
				tsnap->nkern_frames = tracebytes/framesize;
				tracepos += tracebytes;
				tracebytes = 0;
				/* Trace user stack, if any */
				if (save_userframes_p && task->active && thread->task->map != kernel_map) {
					/* 64-bit task? */
					if (task_has_64BitAddr(thread->task)) {
						tracebytes = machine_trace_thread64(thread, tracepos, tracebound, MAX_FRAMES, TRUE);
						tsnap->ss_flags |= kUser64_p;
						framesize = 16;
					}
					else {
						tracebytes = machine_trace_thread(thread, tracepos, tracebound, MAX_FRAMES, TRUE);
						framesize = 8;
					}
				}
				tsnap->nuser_frames = tracebytes/framesize;
				tracepos += tracebytes;
				tracebytes = 0;
			}
		}
	}

	/* Finished the active list; make a second pass over terminated tasks. */
	if (is_active_list) {
		is_active_list = FALSE;
		task_list = &terminated_tasks;
		goto walk_list;
	}

error_exit:
	/* Release stack snapshot wait indicator */
	kdp_snapshot_postflight();

	*pbytesTraced = (uint32_t)(tracepos - (char *) tracebuf);

	return error;
}
1510
1511static boolean_t
1512kdp_readioport(
1513    kdp_pkt_t		*pkt,
1514    int			*len,
1515    unsigned short	*reply_port
1516	       )
1517{
1518	kdp_readioport_req_t   *rq = &pkt->readioport_req;
1519	kdp_readioport_reply_t *rp = &pkt->readioport_reply;
1520	size_t plen = *len;
1521
1522	if (plen < sizeof (*rq))
1523		return (FALSE);
1524
1525	rp->hdr.is_reply = 1;
1526	rp->hdr.len = sizeof (*rp);
1527
1528	if (rq->nbytes > MAX_KDP_DATA_SIZE)
1529		rp->error = KDPERR_BAD_NBYTES;
1530	else {
1531#if KDP_TEST_HARNESS
1532                uint16_t addr = rq->address;
1533#endif
1534		uint16_t size = rq->nbytes;
1535		dprintf(("kdp_readioport addr %x size %d\n", addr, size));
1536
1537		rp->error = kdp_machine_ioport_read(rq, rp->data, rq->lcpu);
1538		if (rp->error == KDPERR_NO_ERROR)
1539			rp->hdr.len += size;
1540	}
1541
1542	*reply_port = kdp.reply_port;
1543	*len = rp->hdr.len;
1544
1545	return (TRUE);
1546}
1547
1548static boolean_t
1549kdp_writeioport(
1550	kdp_pkt_t	*pkt,
1551	int		*len,
1552	unsigned short	*reply_port
1553                )
1554{
1555	kdp_writeioport_req_t   *rq = &pkt->writeioport_req;
1556	kdp_writeioport_reply_t *rp = &pkt->writeioport_reply;
1557	size_t	plen = *len;
1558
1559	if (plen < sizeof (*rq))
1560		return (FALSE);
1561
1562	if (rq->nbytes > MAX_KDP_DATA_SIZE)
1563		rp->error = KDPERR_BAD_NBYTES;
1564	else {
1565		dprintf(("kdp_writeioport addr %x size %d\n", rq->address,
1566			rq->nbytes));
1567
1568		rp->error = kdp_machine_ioport_write(rq, rq->data, rq->lcpu);
1569	}
1570
1571	rp->hdr.is_reply = 1;
1572	rp->hdr.len = sizeof (*rp);
1573
1574	*reply_port = kdp.reply_port;
1575	*len = rp->hdr.len;
1576
1577	return (TRUE);
1578}
1579
1580static boolean_t
1581kdp_readmsr64(
1582	kdp_pkt_t		*pkt,
1583	int			*len,
1584	unsigned short	*reply_port
1585)
1586{
1587	kdp_readmsr64_req_t   *rq = &pkt->readmsr64_req;
1588	kdp_readmsr64_reply_t *rp = &pkt->readmsr64_reply;
1589	size_t plen = *len;
1590
1591	if (plen < sizeof (*rq))
1592		return (FALSE);
1593
1594	rp->hdr.is_reply = 1;
1595	rp->hdr.len = sizeof (*rp);
1596
1597	dprintf(("kdp_readmsr64 lcpu %x addr %x\n", rq->lcpu, rq->address));
1598	rp->error = kdp_machine_msr64_read(rq, rp->data, rq->lcpu);
1599	if (rp->error == KDPERR_NO_ERROR)
1600		rp->hdr.len += sizeof(uint64_t);
1601
1602	*reply_port = kdp.reply_port;
1603	*len = rp->hdr.len;
1604
1605	return (TRUE);
1606}
1607
1608static boolean_t
1609kdp_writemsr64(
1610	kdp_pkt_t	*pkt,
1611	int		*len,
1612	unsigned short	*reply_port
1613	       )
1614{
1615	kdp_writemsr64_req_t   *rq = &pkt->writemsr64_req;
1616	kdp_writemsr64_reply_t *rp = &pkt->writemsr64_reply;
1617	size_t	plen = *len;
1618
1619	if (plen < sizeof (*rq))
1620		return (FALSE);
1621
1622	dprintf(("kdp_writemsr64 lcpu %x addr %x\n", rq->lcpu, rq->address));
1623	rp->error = kdp_machine_msr64_write(rq, rq->data, rq->lcpu);
1624
1625	rp->hdr.is_reply = 1;
1626	rp->hdr.len = sizeof (*rp);
1627
1628	*reply_port = kdp.reply_port;
1629	*len = rp->hdr.len;
1630
1631	return (TRUE);
1632}
1633
1634static boolean_t
1635kdp_dumpinfo(
1636	kdp_pkt_t	*pkt,
1637	int		*len,
1638	unsigned short	*reply_port
1639	       )
1640{
1641	kdp_dumpinfo_req_t   *rq = &pkt->dumpinfo_req;
1642	kdp_dumpinfo_reply_t *rp = &pkt->dumpinfo_reply;
1643	size_t	plen = *len;
1644
1645	if (plen < sizeof (*rq))
1646		return (FALSE);
1647
1648	dprintf(("kdp_dumpinfo file=%s destip=%s routerip=%s\n", rq->name, rq->destip, rq->routerip));
1649	rp->hdr.is_reply = 1;
1650	rp->hdr.len = sizeof (*rp);
1651
1652        if ((rq->type & KDP_DUMPINFO_MASK) != KDP_DUMPINFO_GETINFO) {
1653            kdp_set_dump_info(rq->type, rq->name, rq->destip, rq->routerip,
1654                                rq->port);
1655        }
1656
1657        /* gather some stats for reply */
1658        kdp_get_dump_info(&rp->type, rp->name, rp->destip, rp->routerip,
1659                          &rp->port);
1660
1661	*reply_port = kdp.reply_port;
1662	*len = rp->hdr.len;
1663
1664	return (TRUE);
1665}
1666