/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_assert.h>
#include <mach_kdp.h>

#include <kern/cpu_number.h>
#include <kern/kalloc.h>
#include <kern/spl.h>
#include <kern/thread.h>
#include <kern/assert.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/clock.h>
#include <kern/telemetry.h>
#include <kern/ecc.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <stdarg.h>
#if !(MACH_KDP && CONFIG_KDP_INTERACTIVE_DEBUGGING)
#include <kdp/kdp_udp.h>
#endif

#if defined(__i386__) || defined(__x86_64__)
#include <i386/cpu_threads.h>
#include <i386/pmCPU.h>
#endif

#include <IOKit/IOPlatformExpert.h>
#include <machine/pal_routines.h>

#include <sys/kdebug.h>
#include <libkern/OSKextLibPrivate.h>
#include <libkern/OSAtomic.h>
#include <libkern/kernel_mach_header.h>
#include <uuid/uuid.h>

#if (defined(__arm64__) || defined(NAND_PANIC_DEVICE)) && !defined(LEGACY_PANIC_LOGS)
#include <pexpert/pexpert.h> /* For gPanicBase */
#endif

unsigned int	halt_in_debugger = 0;
unsigned int	switch_debugger = 0;
unsigned int	current_debugger = 0;
unsigned int	active_debugger = 0;
unsigned int	debug_mode = 0;
unsigned int	disable_debug_output = TRUE;
unsigned int	systemLogDiags = FALSE;
unsigned int	panicDebugging = FALSE;
unsigned int	logPanicDataToScreen = FALSE;

int mach_assert = 1;

const char		*panicstr = (char *) 0;
decl_simple_lock_data(,panic_lock)
int			paniccpu;
volatile int		panicwait;
volatile unsigned int	nestedpanic = 0;
unsigned int		panic_is_inited = 0;
unsigned int		return_on_panic = 0;
unsigned long		panic_caller;

#define DEBUG_BUF_SIZE (3 * PAGE_SIZE)

/* debug_buf is linked directly with the iBoot panic region on ARM64 targets */
#if (defined(__arm64__) || defined(NAND_PANIC_DEVICE)) && !defined(LEGACY_PANIC_LOGS)
char *debug_buf_addr = NULL;
char *debug_buf_ptr = NULL;
unsigned int debug_buf_size = 0;
#else
char debug_buf[DEBUG_BUF_SIZE];
__used char *debug_buf_addr = debug_buf;
char *debug_buf_ptr = debug_buf;
unsigned int debug_buf_size = sizeof(debug_buf);
#endif

static char model_name[64];
unsigned char *kernel_uuid;
/* uuid_string_t */ char kernel_uuid_string[37];

static spl_t panic_prologue(const char *str);
static void panic_epilogue(spl_t s);

struct pasc {
	unsigned a: 7;
	unsigned b: 7;
	unsigned c: 7;
	unsigned d: 7;
	unsigned e: 7;
	unsigned f: 7;
	unsigned g: 7;
	unsigned h: 7;
} __attribute__((packed));

typedef struct pasc pasc_t;
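
/*
 * Note: eight 7-bit fields make 56 bits, so with __attribute__((packed))
 * the struct occupies exactly 7 bytes (sizeof(pasc_t) == 7); packA() below
 * relies on this to fold every 8 input bytes into 7 output bytes.
 */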

/* Prevent CPP from breaking the definition below */
#if CONFIG_NO_PANIC_STRINGS
#undef Assert
#endif

void __attribute__((noinline))
Assert(
	const char	*file,
	int		line,
	const char	*expression
      )
{
	int saved_return_on_panic;

	if (!mach_assert) {
		return;
	}

	saved_return_on_panic = return_on_panic;

	/*
	 * If we don't have a debugger configured, returning from an
	 * assert is a bad, bad idea; there is no guarantee that we
	 * didn't simply assert before we were able to restart the
	 * platform.
	 */
	if (current_debugger != NO_CUR_DB)
		return_on_panic = 1;

	panic_plain("%s:%d Assertion failed: %s", file, line, expression);

	return_on_panic = saved_return_on_panic;
}
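
/*
 * For reference, a minimal sketch of how call sites reach Assert(): the
 * assert() macro in kern/assert.h expands to something of roughly this
 * shape when MACH_ASSERT is configured (illustrative, not the verbatim
 * definition):
 *
 *	#define assert(ex)	\
 *		((ex) ? (void)0 : Assert(__FILE__, __LINE__, # ex))
 */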

/*
 *	Carefully use the panic_lock.  There's always a chance that
 *	somehow we'll call panic before getting to initialize the
 *	panic_lock -- in this case, we'll assume that the world is
 *	in uniprocessor mode and just avoid using the panic lock.
 */
#define	PANIC_LOCK()							\
MACRO_BEGIN								\
	if (panic_is_inited)						\
		simple_lock(&panic_lock);				\
MACRO_END

#define	PANIC_UNLOCK()							\
MACRO_BEGIN								\
	if (panic_is_inited)						\
		simple_unlock(&panic_lock);				\
MACRO_END

void
panic_init(void)
{
	unsigned long uuidlen = 0;
	void *uuid;

	uuid = getuuidfromheader(&_mh_execute_header, &uuidlen);
	if ((uuid != NULL) && (uuidlen == sizeof(uuid_t))) {
		kernel_uuid = uuid;
		uuid_unparse_upper(*(uuid_t *)uuid, kernel_uuid_string);
	}

	simple_lock_init(&panic_lock, 0);
	panic_is_inited = 1;
	panic_caller = 0;
}

void
debug_log_init(void)
{
	if (debug_buf_size != 0)
		return;
#if (defined(__arm64__) || defined(NAND_PANIC_DEVICE)) && !defined(LEGACY_PANIC_LOGS)
	if (!gPanicBase) {
		printf("debug_log_init: error, gPanicBase is not initialized\n");
		return;
	}
	/* Shift the debug buffer start and size by 8 bytes to reserve room for the magic header and CRC value */
	debug_buf_addr = (char*)gPanicBase + 8;
	debug_buf_ptr = debug_buf_addr;
	debug_buf_size = gPanicSize - 8;
#else
	debug_buf_addr = debug_buf;
	debug_buf_ptr = debug_buf;
	debug_buf_size = sizeof(debug_buf);
#endif
}
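
/*
 * For orientation, the panic-region layout assumed by debug_log_init()
 * on the iBoot-backed path (offsets inferred from the 8-byte shift above;
 * the ordering of the two header fields is an assumption, not a spec):
 *
 *	gPanicBase + 0:	magic header                (4 bytes)
 *	gPanicBase + 4:	CRC of the panic log        (4 bytes)
 *	gPanicBase + 8:	debug_buf_addr, with debug_buf_size = gPanicSize - 8
 */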

#if defined(__i386__) || defined(__x86_64__)
#define panic_stop()	pmCPUHalt(PM_HALT_PANIC)
#define panic_safe()	pmSafeMode(x86_lcpu(), PM_SAFE_FL_SAFE)
#define panic_normal()	pmSafeMode(x86_lcpu(), PM_SAFE_FL_NORMAL)
#else
#define panic_stop()	{ while (1) ; }
#define panic_safe()
#define panic_normal()
#endif

/*
 * Prevent CPP from breaking the definition below,
 * since all clients get a #define to prepend line numbers
 */
#undef panic

void _consume_panic_args(int a __unused, ...)
{
	panic("panic");
}

extern unsigned int write_trace_on_panic;

static spl_t
panic_prologue(const char *str)
{
	spl_t	s;

	if (write_trace_on_panic && kdebug_enable) {
		if (get_preemption_level() == 0 && !ml_at_interrupt_context()) {
			ml_set_interrupts_enabled(TRUE);
			kdbg_dump_trace_to_file("/var/tmp/panic.trace");
		}
	}

	s = splhigh();
	disable_preemption();

#if	defined(__i386__) || defined(__x86_64__)
	/* Attempt to display the unparsed panic string */
	const char *tstr = str;

	kprintf("Panic initiated, string: ");
	while (tstr && *tstr)
		kprintf("%c", *tstr++);
	kprintf("\n");
#endif

	panic_safe();

	if (logPanicDataToScreen)
		disable_debug_output = FALSE;

	debug_mode = TRUE;

restart:
	PANIC_LOCK();

	if (panicstr) {
		if (cpu_number() != paniccpu) {
			PANIC_UNLOCK();
			/*
			 * Wait until the message has been printed to identify
			 * the cpu that panicked first.
			 */
			while (panicwait)
				continue;
			goto restart;
		} else {
			nestedpanic += 1;
			PANIC_UNLOCK();
			Debugger("double panic");
			printf("double panic: We are hanging here...\n");
			panic_stop();
			/* NOTREACHED */
		}
	}
	panicstr = str;
	paniccpu = cpu_number();
	panicwait = 1;

	PANIC_UNLOCK();
	return(s);
}

static void
panic_epilogue(spl_t	s)
{
	/*
	 * Release panicstr so that we can handle other panics normally.
	 */
	PANIC_LOCK();
	panicstr = (char *)0;
	PANIC_UNLOCK();

	if (return_on_panic) {
		panic_normal();
		enable_preemption();
		splx(s);
		return;
	}
	kdb_printf("panic: We are hanging here...\n");
	panic_stop();
	/* NOTREACHED */
}
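
/*
 * Overall flow, as a sketch of how the pieces above fit together
 * (comments only; this mirrors the code in panic() below):
 *
 *	panic(fmt, ...)
 *	    s = panic_prologue(fmt);	// claim panic_lock / paniccpu
 *	    kdb_printf(...);		// format the panic string
 *	    panicwait = 0;		// let other cpus into Debugger()
 *	    Debugger("panic");
 *	    panic_epilogue(s);		// clear panicstr; halt or return
 */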

void
panic(const char *str, ...)
{
	va_list	listp;
	spl_t	s;

	/* panic_caller is initialized to 0.  If set, don't change it */
	if (!panic_caller)
		panic_caller = (unsigned long)(char *)__builtin_return_address(0);

	s = panic_prologue(str);
	kdb_printf("panic(cpu %d caller 0x%lx): ", (unsigned) paniccpu, panic_caller);
	if (str) {
		va_start(listp, str);
		_doprnt(str, &listp, consdebug_putc, 0);
		va_end(listp);
	}
	kdb_printf("\n");

	/*
	 * Release the panicwait indicator so that other cpus may call Debugger().
	 */
	panicwait = 0;
	Debugger("panic");
	panic_epilogue(s);
}
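
/*
 * Typical call site, for illustration only (a hypothetical caller; real
 * clients reach this function through the macro noted above #undef panic):
 *
 *	if (map == VM_MAP_NULL)
 *		panic("vm_map_enter: null map (caller 0x%lx)",
 *		    (unsigned long)__builtin_return_address(0));
 */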

void
panic_context(unsigned int reason, void *ctx, const char *str, ...)
{
	va_list	listp;
	spl_t	s;

	/* panic_caller is initialized to 0.  If set, don't change it */
	if (!panic_caller)
		panic_caller = (unsigned long)(char *)__builtin_return_address(0);

	s = panic_prologue(str);
	kdb_printf("panic(cpu %d caller 0x%lx): ", (unsigned) paniccpu, panic_caller);
	if (str) {
		va_start(listp, str);
		_doprnt(str, &listp, consdebug_putc, 0);
		va_end(listp);
	}
	kdb_printf("\n");

	/*
	 * Release the panicwait indicator so that other cpus may call Debugger().
	 */
	panicwait = 0;
	DebuggerWithContext(reason, ctx, "panic");
	panic_epilogue(s);
}

void
log(__unused int level, char *fmt, ...)
{
	va_list	listp;

#ifdef lint
	level++;
#endif /* lint */
#ifdef	MACH_BSD
	disable_preemption();
	va_start(listp, fmt);
	_doprnt(fmt, &listp, conslog_putc, 0);
	va_end(listp);
	enable_preemption();
#endif
}

void
debug_putc(char c)
{
	if ((debug_buf_size != 0) &&
	    ((debug_buf_ptr - debug_buf_addr) < (int)debug_buf_size)) {
		*debug_buf_ptr = c;
		debug_buf_ptr++;
	}
}
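
/*
 * A sketch of the assumed output fan-out during a panic (consdebug_putc
 * is defined elsewhere; the exact routing is an assumption, not verified
 * in this file):
 *
 *	panic() -> _doprnt(str, &listp, consdebug_putc, 0)
 *	               -> console output
 *	               -> debug_putc(c)	// appended to the panic log buffer
 */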

/* In-place packing routines -- inefficient, but they're called at most once.
 * Assumes "buflen" is a multiple of 8.
 */

int packA(char *inbuf, uint32_t length, uint32_t buflen)
{
	unsigned int i, j = 0;
	pasc_t pack;

	length = MIN(((length + 7) & ~7), buflen);

	for (i = 0; i < length; i += 8) {
		pack.a = inbuf[i];
		pack.b = inbuf[i+1];
		pack.c = inbuf[i+2];
		pack.d = inbuf[i+3];
		pack.e = inbuf[i+4];
		pack.f = inbuf[i+5];
		pack.g = inbuf[i+6];
		pack.h = inbuf[i+7];
		bcopy((char *) &pack, inbuf + j, 7);
		j += 7;
	}
	return j;
}

void unpackA(char *inbuf, uint32_t length)
{
	pasc_t packs;
	unsigned i = 0;
	length = (length * 8)/7;

	while (i < length) {
		packs = *(pasc_t *)&inbuf[i];
		bcopy(&inbuf[i+7], &inbuf[i+8], MAX(0, (int) (length - i - 8)));
		inbuf[i++] = packs.a;
		inbuf[i++] = packs.b;
		inbuf[i++] = packs.c;
		inbuf[i++] = packs.d;
		inbuf[i++] = packs.e;
		inbuf[i++] = packs.f;
		inbuf[i++] = packs.g;
		inbuf[i++] = packs.h;
	}
}
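
/*
 * A minimal round-trip sketch of the routines above (compiled out; the
 * buffer contents and helper name are hypothetical).  Packing folds each
 * 8-byte group of 7-bit ASCII into 7 bytes, so 24 input bytes pack to 21
 * and unpack back to 24.
 */
#if 0
static void pack_roundtrip_example(void)
{
	char buf[24] = "panic log sample text..";	/* length is a multiple of 8 */
	int packed;

	packed = packA(buf, sizeof(buf), sizeof(buf));	/* returns 21 */
	unpackA(buf, packed);				/* restores all 24 bytes */
}
#endif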

extern void *proc_name_address(void *p);

static void
panic_display_process_name(void) {
	char proc_name[32] = "Unknown";
	task_t ctask = 0;
	void *cbsd_info = 0;

	if (ml_nofault_copy((vm_offset_t)&current_thread()->task, (vm_offset_t) &ctask, sizeof(task_t)) == sizeof(task_t))
		if (ml_nofault_copy((vm_offset_t)&ctask->bsd_info, (vm_offset_t)&cbsd_info, sizeof(&ctask->bsd_info)) == sizeof(&ctask->bsd_info))
			if (cbsd_info && (ml_nofault_copy((vm_offset_t) proc_name_address(cbsd_info), (vm_offset_t) &proc_name, sizeof(proc_name)) > 0))
				proc_name[sizeof(proc_name) - 1] = '\0';
	kdb_printf("\nBSD process name corresponding to current thread: %s\n", proc_name);
}

unsigned	panic_active(void) {
	return (panicstr != (char *) 0);
}

void populate_model_name(char *model_string) {
	strlcpy(model_name, model_string, sizeof(model_name));
}

static void panic_display_model_name(void) {
	char tmp_model_name[sizeof(model_name)];

	if (ml_nofault_copy((vm_offset_t) &model_name, (vm_offset_t) &tmp_model_name, sizeof(model_name)) != sizeof(model_name))
		return;

	tmp_model_name[sizeof(tmp_model_name) - 1] = '\0';

	if (tmp_model_name[0] != 0)
		kdb_printf("System model name: %s\n", tmp_model_name);
}

static void panic_display_kernel_uuid(void) {
	char tmp_kernel_uuid[sizeof(kernel_uuid_string)];

	if (ml_nofault_copy((vm_offset_t) &kernel_uuid_string, (vm_offset_t) &tmp_kernel_uuid, sizeof(kernel_uuid_string)) != sizeof(kernel_uuid_string))
		return;

	if (tmp_kernel_uuid[0] != '\0')
		kdb_printf("Kernel UUID: %s\n", tmp_kernel_uuid);
}

void panic_display_kernel_aslr(void) {
	if (vm_kernel_slide) {
		kdb_printf("Kernel slide:     0x%016lx\n", (unsigned long) vm_kernel_slide);
		kdb_printf("Kernel text base: %p\n", (void *) vm_kernel_stext);
	}
}
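
/*
 * For illustration: given the two lines printed above, an address from a
 * panic backtrace can be un-slid for symbolication against the on-disk
 * kernel (sketch; "pc" is a hypothetical slid kernel text address):
 *
 *	unslid_pc = pc - vm_kernel_slide;
 */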

void panic_display_hibb(void) {
#if defined(__i386__) || defined (__x86_64__)
	kdb_printf("__HIB  text base: %p\n", (void *) vm_hib_base);
#endif
}

static void panic_display_uptime(void) {
	uint64_t	uptime;
	absolutetime_to_nanoseconds(mach_absolute_time(), &uptime);

	kdb_printf("\nSystem uptime in nanoseconds: %llu\n", uptime);
}

extern const char version[];
extern char osversion[];

static volatile uint32_t config_displayed = 0;

__private_extern__ void panic_display_system_configuration(void) {

	panic_display_process_name();
	/* Display the configuration only once, even if panic is reentered */
	if (OSCompareAndSwap(0, 1, &config_displayed)) {
		char buf[256];
		if (strlcpy(buf, PE_boot_args(), sizeof(buf)))
			kdb_printf("Boot args: %s\n", buf);
		kdb_printf("\nMac OS version:\n%s\n",
		    (osversion[0] != 0) ? osversion : "Not yet set");
		kdb_printf("\nKernel version:\n%s\n", version);
		panic_display_kernel_uuid();
		panic_display_kernel_aslr();
		panic_display_hibb();
		panic_display_pal_info();
		panic_display_model_name();
		panic_display_uptime();
		panic_display_zprint();
#if CONFIG_ZLEAKS
		panic_display_ztrace();
#endif /* CONFIG_ZLEAKS */
		kext_dump_panic_lists(&kdb_log);
	}
}

extern zone_t		first_zone;
extern unsigned int	num_zones, stack_total;
extern unsigned long long stack_allocs;

#if defined(__i386__) || defined (__x86_64__)
extern unsigned int	inuse_ptepages_count;
extern long long alloc_ptepages_count;
#endif

extern boolean_t	panic_include_zprint;

__private_extern__ void panic_display_zprint(void)
{
	if (panic_include_zprint == TRUE) {

		unsigned int	i;
		struct zone	zone_copy;

		if (first_zone != NULL) {
			if (ml_nofault_copy((vm_offset_t)first_zone, (vm_offset_t)&zone_copy, sizeof(struct zone)) == sizeof(struct zone)) {
				for (i = 0; i < num_zones; i++) {
					/* Report only zones larger than 1MB */
					if (zone_copy.cur_size > (1024 * 1024)) {
						kdb_printf("%.20s:%lu\n", zone_copy.zone_name, (uintptr_t)zone_copy.cur_size);
					}

					if (zone_copy.next_zone == NULL) {
						break;
					}

					if (ml_nofault_copy((vm_offset_t)zone_copy.next_zone, (vm_offset_t)&zone_copy, sizeof(struct zone)) != sizeof(struct zone)) {
						break;
					}
				}
			}
		}

		kdb_printf("Kernel Stacks:%lu\n", (uintptr_t)(kernel_stack_size * stack_total));

#if defined(__i386__) || defined (__x86_64__)
		kdb_printf("PageTables:%lu\n", (uintptr_t)(PAGE_SIZE * inuse_ptepages_count));
#endif

		kdb_printf("Kalloc.Large:%lu\n", (uintptr_t)kalloc_large_total);
	}
}

#if CONFIG_ECC_LOGGING
__private_extern__ void panic_display_ecc_errors(void)
{
	uint32_t count = ecc_log_get_correction_count();

	if (count > 0) {
		kdb_printf("ECC Corrections:%u\n", count);
	}
}
#endif /* CONFIG_ECC_LOGGING */

#if CONFIG_ZLEAKS
extern boolean_t	panic_include_ztrace;
extern struct ztrace* top_ztrace;
/*
 * Prints the backtrace most suspected of being a leaker, if we panicked in the zone allocator.
 * top_ztrace and panic_include_ztrace come from osfmk/kern/zalloc.c
 */
__private_extern__ void panic_display_ztrace(void)
{
	if (panic_include_ztrace == TRUE) {
		unsigned int i = 0;
		struct ztrace top_ztrace_copy;

		/* Make sure not to trip another panic if there's something wrong with memory */
		if (ml_nofault_copy((vm_offset_t)top_ztrace, (vm_offset_t)&top_ztrace_copy, sizeof(struct ztrace)) == sizeof(struct ztrace)) {
			kdb_printf("\nBacktrace suspected of leaking: (outstanding bytes: %lu)\n", (uintptr_t)top_ztrace_copy.zt_size);
			/* Print the backtrace addresses */
			for (i = 0; (i < top_ztrace_copy.zt_depth && i < MAX_ZTRACE_DEPTH); i++) {
				kdb_printf("%p\n", top_ztrace_copy.zt_stack[i]);
			}
			/* Print any kexts in that backtrace, along with their link addresses so we can properly blame them */
			kmod_panic_dump((vm_offset_t *)&top_ztrace_copy.zt_stack[0], top_ztrace_copy.zt_depth);
		} else {
			kdb_printf("\nCan't access top_ztrace...\n");
		}
		kdb_printf("\n");
	}
}
#endif /* CONFIG_ZLEAKS */

#if !(MACH_KDP && CONFIG_KDP_INTERACTIVE_DEBUGGING)
static struct kdp_ether_addr kdp_current_mac_address = {{0, 0, 0, 0, 0, 0}};

/* XXX ugly forward declares to stop warnings */
void *kdp_get_interface(void);
void kdp_set_ip_and_mac_addresses(struct kdp_in_addr *, struct kdp_ether_addr *);
void kdp_set_gateway_mac(void *);
void kdp_set_interface(void *);
void kdp_register_send_receive(void *, void *);
void kdp_unregister_send_receive(void *, void *);
void kdp_snapshot_preflight(int, void *, uint32_t, uint32_t);
int kdp_stack_snapshot_geterror(void);
int kdp_stack_snapshot_bytes_traced(void);

void *
kdp_get_interface(void)
{
	return (void *)0;
}

unsigned int
kdp_get_ip_address(void)
{ return 0; }

struct kdp_ether_addr
kdp_get_mac_addr(void)
{
	return kdp_current_mac_address;
}

void
kdp_set_ip_and_mac_addresses(
	__unused struct kdp_in_addr          *ipaddr,
	__unused struct kdp_ether_addr       *macaddr)
{}

void
kdp_set_gateway_mac(__unused void *gatewaymac)
{}

void
kdp_set_interface(__unused void *ifp)
{}

void
kdp_register_send_receive(__unused void *send, __unused void *receive)
{}

void
kdp_unregister_send_receive(__unused void *send, __unused void *receive)
{}

void kdp_register_link(__unused kdp_link_t link, __unused kdp_mode_t mode)
{}

void kdp_unregister_link(__unused kdp_link_t link, __unused kdp_mode_t mode)
{}

#endif

#if !CONFIG_TELEMETRY
int telemetry_gather(user_addr_t buffer __unused, uint32_t *length __unused, boolean_t mark __unused)
{
	return KERN_NOT_SUPPORTED;
}
#endif