1/*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 *	Copyright (C) 1988, 1989,  NeXT, Inc.
30 *
31 *	File:	kern/mach_loader.c
32 *	Author:	Avadis Tevanian, Jr.
33 *
34 *	Mach object file loader (kernel version, for now).
35 *
36 * 21-Jul-88  Avadis Tevanian, Jr. (avie) at NeXT
37 *	Started.
38 */
39
40#include <sys/param.h>
41#include <sys/vnode_internal.h>
42#include <sys/uio.h>
43#include <sys/namei.h>
44#include <sys/proc_internal.h>
45#include <sys/kauth.h>
46#include <sys/stat.h>
47#include <sys/malloc.h>
48#include <sys/mount_internal.h>
49#include <sys/fcntl.h>
50#include <sys/ubc_internal.h>
51#include <sys/imgact.h>
52#include <sys/codesign.h>
53
54#include <mach/mach_types.h>
55#include <mach/vm_map.h>	/* vm_allocate() */
56#include <mach/mach_vm.h>	/* mach_vm_allocate() */
57#include <mach/vm_statistics.h>
58#include <mach/task.h>
59#include <mach/thread_act.h>
60
61#include <machine/vmparam.h>
62#include <machine/exec.h>
63#include <machine/pal_routines.h>
64
65#include <kern/kern_types.h>
66#include <kern/cpu_number.h>
67#include <kern/mach_loader.h>
68#include <kern/mach_fat.h>
69#include <kern/kalloc.h>
70#include <kern/task.h>
71#include <kern/thread.h>
72#include <kern/page_decrypt.h>
73
74#include <mach-o/fat.h>
75#include <mach-o/loader.h>
76
77#include <vm/pmap.h>
78#include <vm/vm_map.h>
79#include <vm/vm_kern.h>
80#include <vm/vm_pager.h>
81#include <vm/vnode_pager.h>
82#include <vm/vm_protos.h>
83#include <IOKit/IOReturn.h>	/* for kIOReturnNotPrivileged */
84
85/*
86 * XXX vm/pmap.h should not treat these prototypes as MACH_KERNEL_PRIVATE
87 * when KERNEL is defined.
88 */
89extern pmap_t	pmap_create(ledger_t ledger, vm_map_size_t size,
90				boolean_t is_64bit);
91
92/* XXX should have prototypes in a shared header file */
93extern int	get_map_nentries(vm_map_t);
94
95extern kern_return_t	memory_object_signed(memory_object_control_t control,
96					     boolean_t is_signed);
97
98/* An empty load_result_t */
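/*
 * Note: min_vm_addr and max_vm_addr start out inverted (MAX/MIN) so that the
 * first segment processed by load_segment() narrows them to the real range.
 */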
99static load_result_t load_result_null = {
100	.mach_header = MACH_VM_MIN_ADDRESS,
101	.entry_point = MACH_VM_MIN_ADDRESS,
102	.user_stack = MACH_VM_MIN_ADDRESS,
103	.user_stack_size = 0,
104	.all_image_info_addr = MACH_VM_MIN_ADDRESS,
105	.all_image_info_size = 0,
106	.thread_count = 0,
107	.unixproc = 0,
108	.dynlinker = 0,
109	.needs_dynlinker = 0,
110	.prog_allocated_stack = 0,
111	.prog_stack_size = 0,
112	.validentry = 0,
113	.csflags = 0,
114	.uuid = { 0 },
115	.min_vm_addr = MACH_VM_MAX_ADDRESS,
116	.max_vm_addr = MACH_VM_MIN_ADDRESS
117};
118
119/*
120 * Prototypes of static functions.
121 */
122static load_return_t
123parse_machfile(
124	struct vnode		*vp,
125	vm_map_t		map,
126	thread_t		thread,
127	struct mach_header	*header,
128	off_t			file_offset,
129	off_t			macho_size,
130	int			depth,
131	int64_t			slide,
132	int64_t			dyld_slide,
133	load_result_t		*result
134);
135
136static load_return_t
137load_segment(
138	struct load_command		*lcp,
139	uint32_t			filetype,
140	void				*control,
141	off_t				pager_offset,
142	off_t				macho_size,
143	struct vnode			*vp,
144	vm_map_t			map,
145	int64_t				slide,
146	load_result_t			*result
147);
148
149static load_return_t
150load_uuid(
151	struct uuid_command		*uulp,
152	char				*command_end,
153	load_result_t			*result
154);
155
156static load_return_t
157load_code_signature(
158	struct linkedit_data_command	*lcp,
159	struct vnode			*vp,
160	off_t				macho_offset,
161	off_t				macho_size,
162	cpu_type_t			cputype,
163	load_result_t			*result);
164
165#if CONFIG_CODE_DECRYPTION
166static load_return_t
167set_code_unprotect(
168	struct encryption_info_command	*lcp,
169	caddr_t				addr,
170	vm_map_t			map,
171	int64_t				slide,
172	struct vnode		*vp,
173	cpu_type_t			cputype,
174	cpu_subtype_t		cpusubtype);
175#endif
176
177static
178load_return_t
179load_main(
180	struct entry_point_command	*epc,
181	thread_t		thread,
182	int64_t				slide,
183	load_result_t		*result
184);
185
186static load_return_t
187load_unixthread(
188	struct thread_command	*tcp,
189	thread_t			thread,
190	int64_t				slide,
191	load_result_t			*result
192);
193
194static load_return_t
195load_threadstate(
196	thread_t		thread,
197	uint32_t	*ts,
198	uint32_t	total_size
199);
200
201static load_return_t
202load_threadstack(
203	thread_t		thread,
204	uint32_t	*ts,
205	uint32_t	total_size,
206	mach_vm_offset_t	*user_stack,
207	int				*customstack
208);
209
210static load_return_t
211load_threadentry(
212	thread_t		thread,
213	uint32_t	*ts,
214	uint32_t	total_size,
215	mach_vm_offset_t	*entry_point
216);
217
218static load_return_t
219load_dylinker(
220	struct dylinker_command	*lcp,
221	integer_t		archbits,
222	vm_map_t				map,
223	thread_t			thread,
224	int						depth,
225	int64_t			slide,
226	load_result_t			*result
227);
228
229struct macho_data;
230
231static load_return_t
232get_macho_vnode(
233	char				*path,
234	integer_t		archbits,
235	struct mach_header	*mach_header,
236	off_t			*file_offset,
237	off_t			*macho_size,
238	struct macho_data	*macho_data,
239	struct vnode		**vpp
240);
241
242static inline void
243widen_segment_command(const struct segment_command *scp32,
244    struct segment_command_64 *scp)
245{
246	scp->cmd = scp32->cmd;
247	scp->cmdsize = scp32->cmdsize;
248	bcopy(scp32->segname, scp->segname, sizeof(scp->segname));
249	scp->vmaddr = scp32->vmaddr;
250	scp->vmsize = scp32->vmsize;
251	scp->fileoff = scp32->fileoff;
252	scp->filesize = scp32->filesize;
253	scp->maxprot = scp32->maxprot;
254	scp->initprot = scp32->initprot;
255	scp->nsects = scp32->nsects;
256	scp->flags = scp32->flags;
257}
258
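/*
 * Record the address and size of the "__all_image_info" section of dyld's
 * __DATA segment in the load result; load_segment() calls this only for
 * MH_DYLINKER images that have not yet reported one.
 */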
259static void
260note_all_image_info_section(const struct segment_command_64 *scp,
261    boolean_t is64, size_t section_size, const void *sections,
262    int64_t slide, load_result_t *result)
263{
264	const union {
265		struct section s32;
266		struct section_64 s64;
267	} *sectionp;
268	unsigned int i;
269
270	if (strncmp(scp->segname, "__DATA", sizeof(scp->segname)) != 0)
271		return;
272	for (i = 0; i < scp->nsects; ++i) {
273		sectionp = (const void *)
274		    ((const char *)sections + section_size * i);
275		if (0 == strncmp(sectionp->s64.sectname, "__all_image_info",
276		    sizeof(sectionp->s64.sectname))) {
277			result->all_image_info_addr =
278			    is64 ? sectionp->s64.addr : sectionp->s32.addr;
279			result->all_image_info_addr += slide;
280			result->all_image_info_size =
281			    is64 ? sectionp->s64.size : sectionp->s32.size;
282			return;
283		}
284	}
285}
286
287load_return_t
288load_machfile(
289	struct image_params	*imgp,
290	struct mach_header	*header,
291	thread_t 		thread,
292	vm_map_t 		new_map,
293	load_result_t		*result
294)
295{
296	struct vnode		*vp = imgp->ip_vp;
297	off_t			file_offset = imgp->ip_arch_offset;
298	off_t			macho_size = imgp->ip_arch_size;
299	off_t			file_size = imgp->ip_vattr->va_data_size;
300
301	pmap_t			pmap = 0;	/* protected by create_map */
302	vm_map_t		map;
303	vm_map_t		old_map;
304	task_t			old_task = TASK_NULL; /* protected by create_map */
305	load_result_t		myresult;
306	load_return_t		lret;
307	boolean_t create_map = FALSE;
308	int spawn = (imgp->ip_flags & IMGPF_SPAWN);
309	task_t task = current_task();
310	proc_t p = current_proc();
311	mach_vm_offset_t	aslr_offset = 0;
312	mach_vm_offset_t	dyld_aslr_offset = 0;
313	kern_return_t 		kret;
314
315	if (macho_size > file_size) {
316		return(LOAD_BADMACHO);
317	}
318
319	if (new_map == VM_MAP_NULL) {
320		create_map = TRUE;
321		old_task = current_task();
322	}
323
	/*
	 * If we are spawning, we have already created backing objects for the
	 * process, including a non-lazily created task map.  So we are going
	 * to switch out the task map for one appropriate to the bitness of
	 * the image being loaded.
	 */
330	if (spawn) {
331		create_map = TRUE;
332		old_task = get_threadtask(thread);
333	}
334
335	if (create_map) {
336		pmap = pmap_create(get_task_ledger(task), (vm_map_size_t) 0,
337				(imgp->ip_flags & IMGPF_IS_64BIT));
338		pal_switch_pmap(thread, pmap, imgp->ip_flags & IMGPF_IS_64BIT);
339		map = vm_map_create(pmap,
340				0,
341				vm_compute_max_offset((imgp->ip_flags & IMGPF_IS_64BIT)),
342				TRUE);
343	} else
344		map = new_map;
345
346#ifndef	CONFIG_ENFORCE_SIGNED_CODE
	/* Disabling NX turns off faulting for executable pages, which
	 * makes it possible to circumvent Code Signing Enforcement. The
	 * per-process flag (CS_ENFORCEMENT) is not set yet, but we can
	 * use the global flag.
	 */
352	if ( !cs_enforcement(NULL) && (header->flags & MH_ALLOW_STACK_EXECUTION) )
353	        vm_map_disable_NX(map);
354#endif
355
	/* Forcibly disallow execution from data pages even if the arch
	 * normally permits it. */
358	if ((header->flags & MH_NO_HEAP_EXECUTION) && !(imgp->ip_flags & IMGPF_ALLOW_DATA_EXEC))
359		vm_map_disallow_data_exec(map);
360
361	/*
362	 * Compute a random offset for ASLR, and an independent random offset for dyld.
363	 */
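	/*
	 * Each slide is a random number of pages in [0, max_slide_pages),
	 * converted to a byte offset with the map's page shift.
	 */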
364	if (!(imgp->ip_flags & IMGPF_DISABLE_ASLR)) {
365		uint64_t max_slide_pages;
366
367		max_slide_pages = vm_map_get_max_aslr_slide_pages(map);
368
369		aslr_offset = random();
370		aslr_offset %= max_slide_pages;
371		aslr_offset <<= vm_map_page_shift(map);
372
373		dyld_aslr_offset = random();
374		dyld_aslr_offset %= max_slide_pages;
375		dyld_aslr_offset <<= vm_map_page_shift(map);
376	}
377
378	if (!result)
379		result = &myresult;
380
381	*result = load_result_null;
382
383	lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
384	                      0, (int64_t)aslr_offset, (int64_t)dyld_aslr_offset, result);
385
386	if (lret != LOAD_SUCCESS) {
387		if (create_map) {
388			vm_map_deallocate(map);	/* will lose pmap reference too */
389		}
390		return(lret);
391	}
392
393	/*
394	 * For 64-bit users, check for presence of a 4GB page zero
395	 * which will enable the kernel to share the user's address space
396	 * and hence avoid TLB flushes on kernel entry/exit
397	 */
398
399	if ((imgp->ip_flags & IMGPF_IS_64BIT) &&
400	     vm_map_has_4GB_pagezero(map)) {
401		vm_map_set_4GB_pagezero(map);
402	}
403	/*
404	 *	Commit to new map.
405	 *
	 *	Swap the new map for the old, which consumes our new map
	 *	reference and leaves us responsible for the old_map reference.
	 *	That lets us get off the pmap associated with the old map
	 *	before we release it.
410	 */
411
412	 if (create_map) {
		/*
		 * If this is an exec, then we are going to destroy the old
		 * task, and it's correct to halt it; if it's spawn, the
		 * task is not yet running, and halting it makes no sense.
		 */
418	 	if (!spawn) {
419			/*
420			 * Mark the task as halting and start the other
421			 * threads towards terminating themselves.  Then
422			 * make sure any threads waiting for a process
423			 * transition get informed that we are committed to
424			 * this transition, and then finally complete the
425			 * task halting (wait for threads and then cleanup
426			 * task resources).
427			 *
428			 * NOTE: task_start_halt() makes sure that no new
429			 * threads are created in the task during the transition.
430			 * We need to mark the workqueue as exiting before we
431			 * wait for threads to terminate (at the end of which
432			 * we no longer have a prohibition on thread creation).
433			 *
434			 * Finally, clean up any lingering workqueue data structures
435			 * that may have been left behind by the workqueue threads
436			 * as they exited (and then clean up the work queue itself).
437			 */
438			kret = task_start_halt(task);
439			if (kret != KERN_SUCCESS) {
440				return(kret);
441			}
442			proc_transcommit(p, 0);
443			workqueue_mark_exiting(p);
444			task_complete_halt(task);
445			workqueue_exit(p);
446		}
447		old_map = swap_task_map(old_task, thread, map, !spawn);
448		vm_map_clear_4GB_pagezero(old_map);
449		vm_map_deallocate(old_map);
450	}
451	return(LOAD_SUCCESS);
452}
453
/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * that is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is currently constrained to 32
 * bits in the file format itself.  We read the commands section into a
 * kernel buffer, and then parse it in order to process the mach-o file
 * format load_command segment(s).  We are only interested in a subset of
 * the total set of possible commands.  If "map"==VM_MAP_NULL or
 * "thread"==THREAD_NULL, do not make permanent VM modifications,
 * just preflight the parse.
 */
465static
466load_return_t
467parse_machfile(
468	struct vnode 		*vp,
469	vm_map_t		map,
470	thread_t		thread,
471	struct mach_header	*header,
472	off_t			file_offset,
473	off_t			macho_size,
474	int			depth,
475	int64_t			aslr_offset,
476	int64_t			dyld_aslr_offset,
477	load_result_t		*result
478)
479{
480	uint32_t		ncmds;
481	struct load_command	*lcp;
482	struct dylinker_command	*dlp = 0;
483	integer_t		dlarchbits = 0;
484	void *			control;
485	load_return_t		ret = LOAD_SUCCESS;
486	caddr_t			addr;
487	void *			kl_addr;
488	vm_size_t		size,kl_size;
489	size_t			offset;
490	size_t			oldoffset;	/* for overflow check */
491	int			pass;
492	proc_t			p = current_proc();		/* XXXX */
493	int			error;
494	int resid=0;
495	size_t			mach_header_sz = sizeof(struct mach_header);
496	boolean_t		abi64;
497	boolean_t		got_code_signatures = FALSE;
498	int64_t			slide = 0;
499
500	if (header->magic == MH_MAGIC_64 ||
501	    header->magic == MH_CIGAM_64) {
502	    	mach_header_sz = sizeof(struct mach_header_64);
503	}
504
505	/*
506	 *	Break infinite recursion
507	 */
508	if (depth > 6) {
509		return(LOAD_FAILURE);
510	}
511
512	depth++;
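	/*
	 * After the increment, depth is 1 for the main executable and 2 when
	 * we are parsing the dynamic linker on behalf of load_dylinker().
	 */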
513
514	/*
515	 *	Check to see if right machine type.
516	 */
517	if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) ||
518	    !grade_binary(header->cputype,
519	    	header->cpusubtype & ~CPU_SUBTYPE_MASK))
520		return(LOAD_BADARCH);
521
522	abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);
523
524	switch (header->filetype) {
525
526	case MH_OBJECT:
527	case MH_EXECUTE:
528	case MH_PRELOAD:
529		if (depth != 1) {
530			return (LOAD_FAILURE);
531		}
532		break;
533
534	case MH_FVMLIB:
535	case MH_DYLIB:
536		if (depth == 1) {
537			return (LOAD_FAILURE);
538		}
539		break;
540
541	case MH_DYLINKER:
542		if (depth != 2) {
543			return (LOAD_FAILURE);
544		}
545		break;
546
547	default:
548		return (LOAD_FAILURE);
549	}
550
551	/*
552	 *	Get the pager for the file.
553	 */
554	control = ubc_getobject(vp, UBC_FLAGS_NONE);
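	/*
	 * "control" is the memory object backing the vnode; load_segment()
	 * hands it to vm_map_enter_mem_object_control() so that segments are
	 * mapped from the file's pager rather than read into anonymous memory.
	 */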
555
556	/*
557	 *	Map portion that must be accessible directly into
558	 *	kernel's map.
559	 */
560	if ((off_t)(mach_header_sz + header->sizeofcmds) > macho_size)
561		return(LOAD_BADMACHO);
562
563	/*
	 *	Round size of Mach-O commands up to page boundary.
565	 */
566	size = round_page(mach_header_sz + header->sizeofcmds);
567	if (size <= 0)
568		return(LOAD_BADMACHO);
569
570	/*
571	 * Map the load commands into kernel memory.
572	 */
573	addr = 0;
574	kl_size = size;
575	kl_addr = kalloc(size);
576	addr = (caddr_t)kl_addr;
577	if (addr == NULL)
578		return(LOAD_NOSPACE);
579
580	error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
581	    UIO_SYSSPACE, 0, kauth_cred_get(), &resid, p);
582	if (error) {
583		if (kl_addr )
584			kfree(kl_addr, kl_size);
585		return(LOAD_IOERROR);
586	}
587
588	/*
589	 *	For PIE and dyld, slide everything by the ASLR offset.
590	 */
591	if ((header->flags & MH_PIE) || (header->filetype == MH_DYLINKER)) {
592		slide = aslr_offset;
593	}
594
595	 /*
596	 *  Scan through the commands, processing each one as necessary.
597	 *  We parse in three passes through the headers:
598	 *  1: thread state, uuid, code signature
599	 *  2: segments
600	 *  3: dyld, encryption, check entry point
601	 */
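	/*
	 *  Pass ordering matters: segments must be mapped (pass 2) before the
	 *  entry point check and set_code_unprotect() in pass 3, since both
	 *  operate on the final, slid segment layout.
	 */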
602
603	for (pass = 1; pass <= 3; pass++) {
604
605		/*
		 * Check that the entry point is contained in an executable segment
607		 */
608		if ((pass == 3) && (result->validentry == 0)) {
609			thread_state_initialize(thread);
610			ret = LOAD_FAILURE;
611			break;
612		}
613
614		/*
615		 * Loop through each of the load_commands indicated by the
616		 * Mach-O header; if an absurd value is provided, we just
617		 * run off the end of the reserved section by incrementing
618		 * the offset too far, so we are implicitly fail-safe.
619		 */
620		offset = mach_header_sz;
621		ncmds = header->ncmds;
622
623		while (ncmds--) {
624			/*
625			 *	Get a pointer to the command.
626			 */
627			lcp = (struct load_command *)(addr + offset);
628			oldoffset = offset;
629			offset += lcp->cmdsize;
630
631			/*
632			 * Perform prevalidation of the struct load_command
633			 * before we attempt to use its contents.  Invalid
634			 * values are ones which result in an overflow, or
635			 * which can not possibly be valid commands, or which
636			 * straddle or exist past the reserved section at the
637			 * start of the image.
638			 */
639			if (oldoffset > offset ||
640			    lcp->cmdsize < sizeof(struct load_command) ||
641			    offset > header->sizeofcmds + mach_header_sz) {
642				ret = LOAD_BADMACHO;
643				break;
644			}
645
646			/*
647			 * Act on struct load_command's for which kernel
648			 * intervention is required.
649			 */
650			switch(lcp->cmd) {
651			case LC_SEGMENT:
652				if (pass != 2)
653					break;
654
655				if (abi64) {
656					/*
657					 * Having an LC_SEGMENT command for the
658					 * wrong ABI is invalid <rdar://problem/11021230>
659					 */
660					ret = LOAD_BADMACHO;
661					break;
662				}
663
664				ret = load_segment(lcp,
665				                   header->filetype,
666				                   control,
667				                   file_offset,
668				                   macho_size,
669				                   vp,
670				                   map,
671				                   slide,
672				                   result);
673				break;
674			case LC_SEGMENT_64:
675				if (pass != 2)
676					break;
677
678				if (!abi64) {
679					/*
680					 * Having an LC_SEGMENT_64 command for the
681					 * wrong ABI is invalid <rdar://problem/11021230>
682					 */
683					ret = LOAD_BADMACHO;
684					break;
685				}
686
687				ret = load_segment(lcp,
688				                   header->filetype,
689				                   control,
690				                   file_offset,
691				                   macho_size,
692				                   vp,
693				                   map,
694				                   slide,
695				                   result);
696				break;
697			case LC_UNIXTHREAD:
698				if (pass != 1)
699					break;
700				ret = load_unixthread(
701						 (struct thread_command *) lcp,
702						 thread,
703						 slide,
704						 result);
705				break;
706			case LC_MAIN:
707				if (pass != 1)
708					break;
709				if (depth != 1)
710					break;
711				ret = load_main(
712						 (struct entry_point_command *) lcp,
713						 thread,
714						 slide,
715						 result);
716				break;
717			case LC_LOAD_DYLINKER:
718				if (pass != 3)
719					break;
720				if ((depth == 1) && (dlp == 0)) {
721					dlp = (struct dylinker_command *)lcp;
722					dlarchbits = (header->cputype & CPU_ARCH_MASK);
723				} else {
724					ret = LOAD_FAILURE;
725				}
726				break;
727			case LC_UUID:
728				if (pass == 1 && depth == 1) {
729					ret = load_uuid((struct uuid_command *) lcp,
730							(char *)addr + mach_header_sz + header->sizeofcmds,
731							result);
732				}
733				break;
734			case LC_CODE_SIGNATURE:
735				/* CODE SIGNING */
736				if (pass != 1)
737					break;
				/* pager -> uip:
				   load the signatures and store them in the
				   vnode's ubc_info (uip), then set the VM
				   object's "signed_pages" attribute
				*/
742				ret = load_code_signature(
743					(struct linkedit_data_command *) lcp,
744					vp,
745					file_offset,
746					macho_size,
747					header->cputype,
748					(depth == 1) ? result : NULL);
749				if (ret != LOAD_SUCCESS) {
750					printf("proc %d: load code signature error %d "
751					       "for file \"%s\"\n",
752					       p->p_pid, ret, vp->v_name);
753					ret = LOAD_SUCCESS; /* ignore error */
754				} else {
755					got_code_signatures = TRUE;
756				}
757				break;
758#if CONFIG_CODE_DECRYPTION
759			case LC_ENCRYPTION_INFO:
760			case LC_ENCRYPTION_INFO_64:
761				if (pass != 3)
762					break;
763				ret = set_code_unprotect(
764					(struct encryption_info_command *) lcp,
765					addr, map, slide, vp,
766					header->cputype, header->cpusubtype);
767				if (ret != LOAD_SUCCESS) {
768					printf("proc %d: set_code_unprotect() error %d "
769					       "for file \"%s\"\n",
770					       p->p_pid, ret, vp->v_name);
771					/*
772					 * Don't let the app run if it's
773					 * encrypted but we failed to set up the
774					 * decrypter. If the keys are missing it will
775					 * return LOAD_DECRYPTFAIL.
776					 */
777					 if (ret == LOAD_DECRYPTFAIL) {
778						/* failed to load due to missing FP keys */
779						proc_lock(p);
780						p->p_lflag |= P_LTERM_DECRYPTFAIL;
781						proc_unlock(p);
782					}
783					 psignal(p, SIGKILL);
784				}
785				break;
786#endif
787			default:
788				/* Other commands are ignored by the kernel */
789				ret = LOAD_SUCCESS;
790				break;
791			}
792			if (ret != LOAD_SUCCESS)
793				break;
794		}
795		if (ret != LOAD_SUCCESS)
796			break;
797	}
798	if (ret == LOAD_SUCCESS) {
799	    if (! got_code_signatures) {
800		    struct cs_blob *blob;
801		    /* no embedded signatures: look for detached ones */
802		    blob = ubc_cs_blob_get(vp, -1, file_offset);
803		    if (blob != NULL) {
804			    /* get flags to be applied to the process */
805			    result->csflags |= blob->csb_flags;
806		    }
807	    }
808
809		/* Make sure if we need dyld, we got it */
810		if (result->needs_dynlinker && !dlp) {
811			ret = LOAD_FAILURE;
812		}
813
814	    if ((ret == LOAD_SUCCESS) && (dlp != 0)) {
815		/*
816		 * load the dylinker, and slide it by the independent DYLD ASLR
817		 * offset regardless of the PIE-ness of the main binary.
818		 */
819
820		ret = load_dylinker(dlp, dlarchbits, map, thread, depth,
821		                    dyld_aslr_offset, result);
822	    }
823
824	    if((ret == LOAD_SUCCESS) && (depth == 1)) {
825			if (result->thread_count == 0) {
826				ret = LOAD_FAILURE;
827			}
828	    }
829	}
830
831	if (kl_addr )
832		kfree(kl_addr, kl_size);
833
834	return(ret);
835}
836
837#if CONFIG_CODE_DECRYPTION
838
839#define	APPLE_UNPROTECTED_HEADER_SIZE	(3 * PAGE_SIZE_64)
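/*
 * The first pages of a protected slice are left in the clear (presumably so
 * that the Mach-O header and load commands stay readable); everything past
 * APPLE_UNPROTECTED_HEADER_SIZE goes through the DSMOS page transform in
 * unprotect_segment() below.
 */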
840
841static load_return_t
842unprotect_segment(
843	uint64_t	file_off,
844	uint64_t	file_size,
845	struct vnode	*vp,
846	off_t		macho_offset,
847	vm_map_t	map,
848	vm_map_offset_t	map_addr,
849	vm_map_size_t	map_size)
850{
851	kern_return_t	kr;
852
853	/*
854	 * The first APPLE_UNPROTECTED_HEADER_SIZE bytes (from offset 0 of
855	 * this part of a Universal binary) are not protected...
856	 * The rest needs to be "transformed".
857	 */
858	if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE &&
859	    file_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) {
860		/* it's all unprotected, nothing to do... */
861		kr = KERN_SUCCESS;
862	} else {
863		if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE) {
864			/*
865			 * We start mapping in the unprotected area.
866			 * Skip the unprotected part...
867			 */
868			vm_map_offset_t	delta;
869
870			delta = APPLE_UNPROTECTED_HEADER_SIZE;
871			delta -= file_off;
872			map_addr += delta;
873			map_size -= delta;
874		}
875		/* ... transform the rest of the mapping. */
876		struct pager_crypt_info crypt_info;
877		crypt_info.page_decrypt = dsmos_page_transform;
878		crypt_info.crypt_ops = NULL;
879		crypt_info.crypt_end = NULL;
880#pragma unused(vp, macho_offset)
881		crypt_info.crypt_ops = (void *)0x2e69cf40;
882		kr = vm_map_apple_protected(map,
883					    map_addr,
884					    map_addr + map_size,
885					    &crypt_info);
886	}
887
888	if (kr != KERN_SUCCESS) {
889		return LOAD_FAILURE;
890	}
891	return LOAD_SUCCESS;
892}
893#else	/* CONFIG_CODE_DECRYPTION */
894static load_return_t
895unprotect_segment(
896	__unused	uint64_t	file_off,
897	__unused	uint64_t	file_size,
898	__unused	struct vnode	*vp,
899	__unused	off_t		macho_offset,
900	__unused	vm_map_t	map,
901	__unused	vm_map_offset_t	map_addr,
902	__unused	vm_map_size_t	map_size)
903{
904	return LOAD_SUCCESS;
905}
906#endif	/* CONFIG_CODE_DECRYPTION */
907
908static
909load_return_t
910load_segment(
911	struct load_command		*lcp,
912	uint32_t			filetype,
913	void *				control,
914	off_t				pager_offset,
915	off_t				macho_size,
916	struct vnode			*vp,
917	vm_map_t			map,
918	int64_t				slide,
919	load_result_t		*result
920)
921{
922	struct segment_command_64 segment_command, *scp;
923	kern_return_t		ret;
924	vm_map_offset_t		map_addr, map_offset;
925	vm_map_size_t		map_size, seg_size, delta_size;
926	vm_prot_t 		initprot;
927	vm_prot_t		maxprot;
928	size_t			segment_command_size, total_section_size,
929				single_section_size;
930	boolean_t		prohibit_pagezero_mapping = FALSE;
931
932	if (LC_SEGMENT_64 == lcp->cmd) {
933		segment_command_size = sizeof(struct segment_command_64);
934		single_section_size  = sizeof(struct section_64);
935	} else {
936		segment_command_size = sizeof(struct segment_command);
937		single_section_size  = sizeof(struct section);
938	}
939	if (lcp->cmdsize < segment_command_size)
940		return (LOAD_BADMACHO);
941	total_section_size = lcp->cmdsize - segment_command_size;
942
943	if (LC_SEGMENT_64 == lcp->cmd)
944		scp = (struct segment_command_64 *)lcp;
945	else {
946		scp = &segment_command;
947		widen_segment_command((struct segment_command *)lcp, scp);
948	}
949
950	/*
951	 * Make sure what we get from the file is really ours (as specified
952	 * by macho_size).
953	 */
954	if (scp->fileoff + scp->filesize < scp->fileoff ||
955	    scp->fileoff + scp->filesize > (uint64_t)macho_size)
956		return (LOAD_BADMACHO);
957	/*
958	 * Ensure that the number of sections specified would fit
959	 * within the load command size.
960	 */
961	if (total_section_size / single_section_size < scp->nsects)
962		return (LOAD_BADMACHO);
963	/*
964	 * Make sure the segment is page-aligned in the file.
965	 */
966	if ((scp->fileoff & PAGE_MASK_64) != 0)
967		return (LOAD_BADMACHO);
968
969	/*
970	 *	Round sizes to page size.
971	 */
972	seg_size = round_page_64(scp->vmsize);
973	map_size = round_page_64(scp->filesize);
974	map_addr = trunc_page_64(scp->vmaddr); /* JVXXX note that in XNU TOT this is round instead of trunc for 64 bits */
975
976	seg_size = vm_map_round_page(seg_size, vm_map_page_mask(map));
977	map_size = vm_map_round_page(map_size, vm_map_page_mask(map));
978
979	if (seg_size == 0)
		return (LOAD_SUCCESS);
981	if (map_addr == 0 &&
982	    map_size == 0 &&
983	    seg_size != 0 &&
984	    (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
985	    (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
986		/*
987		 * For PIE, extend page zero rather than moving it.  Extending
988		 * page zero keeps early allocations from falling predictably
989		 * between the end of page zero and the beginning of the first
990		 * slid segment.
991		 */
992		seg_size += slide;
993		slide = 0;
994		/* XXX (4596982) this interferes with Rosetta, so limit to 64-bit tasks */
995		if (scp->cmd == LC_SEGMENT_64) {
996		        prohibit_pagezero_mapping = TRUE;
997		}
998
999		if (prohibit_pagezero_mapping) {
1000			/*
1001			 * This is a "page zero" segment:  it starts at address 0,
1002			 * is not mapped from the binary file and is not accessible.
1003			 * User-space should never be able to access that memory, so
1004			 * make it completely off limits by raising the VM map's
1005			 * minimum offset.
1006			 */
1007			ret = vm_map_raise_min_offset(map, seg_size);
1008			if (ret != KERN_SUCCESS) {
1009				return (LOAD_FAILURE);
1010			}
1011			return (LOAD_SUCCESS);
1012		}
1013	}
1014
1015	/* If a non-zero slide was specified by the caller, apply now */
1016	map_addr += slide;
1017
1018	if (map_addr < result->min_vm_addr)
1019		result->min_vm_addr = map_addr;
1020	if (map_addr+seg_size > result->max_vm_addr)
1021		result->max_vm_addr = map_addr+seg_size;
1022
1023	if (map == VM_MAP_NULL)
1024		return (LOAD_SUCCESS);
1025
1026	map_offset = pager_offset + scp->fileoff;	/* limited to 32 bits */
1027
1028	if (map_size > 0) {
1029		initprot = (scp->initprot) & VM_PROT_ALL;
1030		maxprot = (scp->maxprot) & VM_PROT_ALL;
1031		/*
1032		 *	Map a copy of the file into the address space.
1033		 */
1034		ret = vm_map_enter_mem_object_control(map,
1035				&map_addr, map_size, (mach_vm_offset_t)0,
1036			        VM_FLAGS_FIXED,	control, map_offset, TRUE,
1037				initprot, maxprot,
1038				VM_INHERIT_DEFAULT);
1039		if (ret != KERN_SUCCESS) {
1040			return (LOAD_NOSPACE);
1041		}
1042
1043		/*
1044		 *	If the file didn't end on a page boundary,
1045		 *	we need to zero the leftover.
1046		 */
1047		delta_size = map_size - scp->filesize;
1048#if FIXME
1049		if (delta_size > 0) {
1050			mach_vm_offset_t	tmp;
1051
1052			ret = mach_vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
1053			if (ret != KERN_SUCCESS)
1054				return(LOAD_RESOURCE);
1055
1056			if (copyout(tmp, map_addr + scp->filesize,
1057								delta_size)) {
1058				(void) mach_vm_deallocate(
1059						kernel_map, tmp, delta_size);
1060				return (LOAD_FAILURE);
1061			}
1062
1063			(void) mach_vm_deallocate(kernel_map, tmp, delta_size);
1064		}
1065#endif /* FIXME */
1066	}
1067
1068	/*
1069	 *	If the virtual size of the segment is greater
1070	 *	than the size from the file, we need to allocate
1071	 *	zero fill memory for the rest.
1072	 */
1073	delta_size = seg_size - map_size;
1074	if (delta_size > 0) {
1075		mach_vm_offset_t tmp = map_addr + map_size;
1076
1077		ret = mach_vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
1078				  NULL, 0, FALSE,
1079				  scp->initprot, scp->maxprot,
1080				  VM_INHERIT_DEFAULT);
1081		if (ret != KERN_SUCCESS)
1082			return(LOAD_NOSPACE);
1083	}
1084
1085	if ( (scp->fileoff == 0) && (scp->filesize != 0) )
1086		result->mach_header = map_addr;
1087
1088	if (scp->flags & SG_PROTECTED_VERSION_1) {
1089		ret = unprotect_segment(scp->fileoff,
1090					scp->filesize,
1091					vp,
1092					pager_offset,
1093					map,
1094					map_addr,
1095					map_size);
1096	} else {
1097		ret = LOAD_SUCCESS;
1098	}
1099	if (LOAD_SUCCESS == ret && filetype == MH_DYLINKER &&
1100	    result->all_image_info_addr == MACH_VM_MIN_ADDRESS)
1101		note_all_image_info_section(scp,
1102		    LC_SEGMENT_64 == lcp->cmd, single_section_size,
1103		    (const char *)lcp + segment_command_size, slide, result);
1104
1105	if ((result->entry_point >= map_addr) && (result->entry_point < (map_addr + map_size)))
1106		result->validentry = 1;
1107
1108	return ret;
1109}
1110
1111static
1112load_return_t
1113load_uuid(
1114	struct uuid_command	*uulp,
1115	char			*command_end,
1116	load_result_t		*result
1117)
1118{
1119		/*
1120		 * We need to check the following for this command:
		 * - The command size should be at least the size of struct uuid_command
1122		 * - The UUID part of the command should be completely within the mach-o header
1123		 */
1124
1125		if ((uulp->cmdsize < sizeof(struct uuid_command)) ||
1126		    (((char *)uulp + sizeof(struct uuid_command)) > command_end)) {
1127			return (LOAD_BADMACHO);
1128		}
1129
1130		memcpy(&result->uuid[0], &uulp->uuid[0], sizeof(result->uuid));
1131		return (LOAD_SUCCESS);
1132}
1133
1134static
1135load_return_t
1136load_main(
1137	struct entry_point_command	*epc,
1138	thread_t		thread,
1139	int64_t				slide,
1140	load_result_t		*result
1141)
1142{
1143	mach_vm_offset_t addr;
1144	kern_return_t	ret;
1145
1146	if (epc->cmdsize < sizeof(*epc))
1147		return (LOAD_BADMACHO);
1148	if (result->thread_count != 0) {
		printf("load_main: already have a thread!\n");
1150		return (LOAD_FAILURE);
1151	}
1152
1153	if (thread == THREAD_NULL)
1154		return (LOAD_SUCCESS);
1155
1156	/* LC_MAIN specifies stack size but not location */
1157	if (epc->stacksize) {
1158		result->prog_stack_size = 1;
1159		result->user_stack_size = epc->stacksize;
1160	} else {
1161		result->prog_stack_size = 0;
1162		result->user_stack_size = MAXSSIZ;
1163	}
1164	result->prog_allocated_stack = 0;
1165
1166	/* use default location for stack */
1167	ret = thread_userstackdefault(thread, &addr);
1168	if (ret != KERN_SUCCESS)
1169		return(LOAD_FAILURE);
1170
1171	/* The stack slides down from the default location */
1172	result->user_stack = addr;
1173	result->user_stack -= slide;
1174
1175	/* kernel does *not* use entryoff from LC_MAIN.	 Dyld uses it. */
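	/*
	 * Setting needs_dynlinker makes parse_machfile() fail the load if no
	 * LC_LOAD_DYLINKER command follows, since only dyld consumes entryoff.
	 */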
1176	result->needs_dynlinker = TRUE;
1177	result->validentry = TRUE;
1178
1179	ret = thread_state_initialize( thread );
1180	if (ret != KERN_SUCCESS) {
1181		return(LOAD_FAILURE);
1182	}
1183
1184	result->unixproc = TRUE;
1185	result->thread_count++;
1186
1187	return(LOAD_SUCCESS);
1188}
1189
1190
1191static
1192load_return_t
1193load_unixthread(
1194	struct thread_command	*tcp,
1195	thread_t		thread,
1196	int64_t				slide,
1197	load_result_t		*result
1198)
1199{
1200	load_return_t	ret;
1201	int customstack =0;
1202	mach_vm_offset_t addr;
1203
1204	if (tcp->cmdsize < sizeof(*tcp))
1205		return (LOAD_BADMACHO);
1206	if (result->thread_count != 0) {
		printf("load_unixthread: already have a thread!\n");
1208		return (LOAD_FAILURE);
1209	}
1210
1211	if (thread == THREAD_NULL)
1212		return (LOAD_SUCCESS);
1213
1214	ret = load_threadstack(thread,
1215		       (uint32_t *)(((vm_offset_t)tcp) +
1216		       		sizeof(struct thread_command)),
1217		       tcp->cmdsize - sizeof(struct thread_command),
1218		       &addr,
1219			   &customstack);
1220	if (ret != LOAD_SUCCESS)
1221		return(ret);
1222
1223	/* LC_UNIXTHREAD optionally specifies stack size and location */
1224
1225	if (customstack) {
1226		result->prog_stack_size = 0;	/* unknown */
1227		result->prog_allocated_stack = 1;
1228	} else {
1229		result->prog_allocated_stack = 0;
1230		result->prog_stack_size = 0;
1231		result->user_stack_size = MAXSSIZ;
1232	}
1233
1234	/* The stack slides down from the default location */
1235	result->user_stack = addr;
1236	result->user_stack -= slide;
1237
1238	ret = load_threadentry(thread,
1239		       (uint32_t *)(((vm_offset_t)tcp) +
1240		       		sizeof(struct thread_command)),
1241		       tcp->cmdsize - sizeof(struct thread_command),
1242		       &addr);
1243	if (ret != LOAD_SUCCESS)
1244		return(ret);
1245
1246	result->entry_point = addr;
1247	result->entry_point += slide;
1248
1249	ret = load_threadstate(thread,
1250		       (uint32_t *)(((vm_offset_t)tcp) +
1251		       		sizeof(struct thread_command)),
1252		       tcp->cmdsize - sizeof(struct thread_command));
1253	if (ret != LOAD_SUCCESS)
1254		return (ret);
1255
1256	result->unixproc = TRUE;
1257	result->thread_count++;
1258
1259	return(LOAD_SUCCESS);
1260}
1261
1262static
1263load_return_t
1264load_threadstate(
1265	thread_t	thread,
1266	uint32_t	*ts,
1267	uint32_t	total_size
1268)
1269{
1270	kern_return_t	ret;
1271	uint32_t	size;
1272	int		flavor;
1273	uint32_t	thread_size;
1274
	ret = thread_state_initialize( thread );
	if (ret != KERN_SUCCESS) {
		return(LOAD_FAILURE);
	}

	/*
	 *	Set the new thread state; iterate through the state flavors in
	 *	the mach-o file.
	 */
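	/*
	 * Each flavor record in the command has the layout:
	 *     uint32_t  flavor;
	 *     uint32_t  count;          (number of uint32_t words that follow)
	 *     uint32_t  state[count];
	 * so a record occupies (count + 2) * sizeof(uint32_t) bytes.
	 */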
1284	while (total_size > 0) {
1285		flavor = *ts++;
1286		size = *ts++;
1287		if (UINT32_MAX-2 < size ||
1288		    UINT32_MAX/sizeof(uint32_t) < size+2)
1289			return (LOAD_BADMACHO);
1290		thread_size = (size+2)*sizeof(uint32_t);
1291		if (thread_size > total_size)
1292			return(LOAD_BADMACHO);
1293		total_size -= thread_size;
1294		/*
1295		 * Third argument is a kernel space pointer; it gets cast
1296		 * to the appropriate type in machine_thread_set_state()
1297		 * based on the value of flavor.
1298		 */
1299		ret = thread_setstatus(thread, flavor, (thread_state_t)ts, size);
1300		if (ret != KERN_SUCCESS) {
1301			return(LOAD_FAILURE);
1302		}
1303		ts += size;	/* ts is a (uint32_t *) */
1304	}
1305	return(LOAD_SUCCESS);
1306}
1307
1308static
1309load_return_t
1310load_threadstack(
1311	thread_t	thread,
1312	uint32_t	*ts,
1313	uint32_t	total_size,
1314	mach_vm_offset_t	*user_stack,
1315	int *customstack
1316)
1317{
1318	kern_return_t	ret;
1319	uint32_t	size;
1320	int		flavor;
1321	uint32_t	stack_size;
1322
1323	while (total_size > 0) {
1324		flavor = *ts++;
1325		size = *ts++;
1326		if (UINT32_MAX-2 < size ||
1327		    UINT32_MAX/sizeof(uint32_t) < size+2)
1328			return (LOAD_BADMACHO);
1329		stack_size = (size+2)*sizeof(uint32_t);
1330		if (stack_size > total_size)
1331			return(LOAD_BADMACHO);
1332		total_size -= stack_size;
1333
1334		/*
1335		 * Third argument is a kernel space pointer; it gets cast
1336		 * to the appropriate type in thread_userstack() based on
1337		 * the value of flavor.
1338		 */
1339		ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack);
1340		if (ret != KERN_SUCCESS) {
1341			return(LOAD_FAILURE);
1342		}
1343		ts += size;	/* ts is a (uint32_t *) */
1344	}
1345	return(LOAD_SUCCESS);
1346}
1347
1348static
1349load_return_t
1350load_threadentry(
1351	thread_t	thread,
1352	uint32_t	*ts,
1353	uint32_t	total_size,
1354	mach_vm_offset_t	*entry_point
1355)
1356{
1357	kern_return_t	ret;
1358	uint32_t	size;
1359	int		flavor;
1360	uint32_t	entry_size;
1361
1362	/*
1363	 *	Set the thread state.
1364	 */
1365	*entry_point = MACH_VM_MIN_ADDRESS;
1366	while (total_size > 0) {
1367		flavor = *ts++;
1368		size = *ts++;
1369		if (UINT32_MAX-2 < size ||
1370		    UINT32_MAX/sizeof(uint32_t) < size+2)
1371			return (LOAD_BADMACHO);
1372		entry_size = (size+2)*sizeof(uint32_t);
1373		if (entry_size > total_size)
1374			return(LOAD_BADMACHO);
1375		total_size -= entry_size;
1376		/*
1377		 * Third argument is a kernel space pointer; it gets cast
1378		 * to the appropriate type in thread_entrypoint() based on
1379		 * the value of flavor.
1380		 */
1381		ret = thread_entrypoint(thread, flavor, (thread_state_t)ts, size, entry_point);
1382		if (ret != KERN_SUCCESS) {
1383			return(LOAD_FAILURE);
1384		}
1385		ts += size;	/* ts is a (uint32_t *) */
1386	}
1387	return(LOAD_SUCCESS);
1388}
1389
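/*
 * Scratch data for get_macho_vnode(); load_dylinker() allocates it from the
 * heap (as part of dyld_data) to keep kernel stack usage down.  The 512-byte
 * pad makes the single header read in get_macho_vnode() large enough to cover
 * a fat header and, in practice, its architecture table as well.
 */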
1390struct macho_data {
1391	struct nameidata	__nid;
1392	union macho_vnode_header {
1393		struct mach_header	mach_header;
1394		struct fat_header	fat_header;
1395		char	__pad[512];
1396	} __header;
1397};
1398
1399static load_return_t
1400load_dylinker(
1401	struct dylinker_command	*lcp,
1402	integer_t		archbits,
1403	vm_map_t		map,
1404	thread_t	thread,
1405	int			depth,
1406	int64_t			slide,
1407	load_result_t		*result
1408)
1409{
1410	char			*name;
1411	char			*p;
1412	struct vnode		*vp = NULLVP;	/* set by get_macho_vnode() */
1413	struct mach_header	*header;
1414	off_t			file_offset = 0; /* set by get_macho_vnode() */
1415	off_t			macho_size = 0;	/* set by get_macho_vnode() */
1416	load_result_t		*myresult;
1417	kern_return_t		ret;
1418	struct macho_data	*macho_data;
1419	struct {
1420		struct mach_header	__header;
1421		load_result_t		__myresult;
1422		struct macho_data	__macho_data;
1423	} *dyld_data;
1424
1425	if (lcp->cmdsize < sizeof(*lcp))
1426		return (LOAD_BADMACHO);
1427
1428	name = (char *)lcp + lcp->name.offset;
1429	/*
1430	 *	Check for a proper null terminated string.
1431	 */
1432	p = name;
1433	do {
1434		if (p >= (char *)lcp + lcp->cmdsize)
1435			return(LOAD_BADMACHO);
1436	} while (*p++);
1437
1438	/* Allocate wad-of-data from heap to reduce excessively deep stacks */
1439
1440	MALLOC(dyld_data, void *, sizeof (*dyld_data), M_TEMP, M_WAITOK);
1441	header = &dyld_data->__header;
1442	myresult = &dyld_data->__myresult;
1443	macho_data = &dyld_data->__macho_data;
1444
1445	ret = get_macho_vnode(name, archbits, header,
1446	    &file_offset, &macho_size, macho_data, &vp);
1447	if (ret)
1448		goto novp_out;
1449
1450	*myresult = load_result_null;
1451
1452	/*
1453	 *	First try to map dyld in directly.  This should work most of
1454	 *	the time since there shouldn't normally be something already
1455	 *	mapped to its address.
1456	 */
1457
1458	ret = parse_machfile(vp, map, thread, header, file_offset,
1459	                     macho_size, depth, slide, 0, myresult);
1460
	/*
	 *	If it turned out something was in the way, then we'll take
	 *	this longer path to preflight dyld's vm ranges, then
	 *	map it at a free location in the address space.
	 */
1466
1467	if (ret == LOAD_NOSPACE) {
1468		mach_vm_offset_t	dyl_start, map_addr;
1469		mach_vm_size_t	dyl_length;
1470		int64_t			slide_amount;
1471
1472		*myresult = load_result_null;
1473
1474		/*
1475		 * Preflight parsing the Mach-O file with a NULL
1476		 * map, which will return the ranges needed for a
1477		 * subsequent map attempt (with a slide) in "myresult"
1478		 */
1479		ret = parse_machfile(vp, VM_MAP_NULL, THREAD_NULL, header,
1480		                     file_offset, macho_size, depth,
1481		                     0 /* slide */, 0, myresult);
1482
1483		if (ret != LOAD_SUCCESS) {
1484			goto out;
1485		}
1486
1487		dyl_start = myresult->min_vm_addr;
1488		dyl_length = myresult->max_vm_addr - myresult->min_vm_addr;
1489
1490		dyl_length += slide;
1491
1492		/* To find an appropriate load address, do a quick allocation */
1493		map_addr = dyl_start;
1494		ret = mach_vm_allocate(map, &map_addr, dyl_length, VM_FLAGS_ANYWHERE);
1495		if (ret != KERN_SUCCESS) {
1496			ret = LOAD_NOSPACE;
1497			goto out;
1498		}
1499
1500		ret = mach_vm_deallocate(map, map_addr, dyl_length);
1501		if (ret != KERN_SUCCESS) {
1502			ret = LOAD_NOSPACE;
1503			goto out;
1504		}
1505
1506		if (map_addr < dyl_start)
1507			slide_amount = -(int64_t)(dyl_start - map_addr);
1508		else
1509			slide_amount = (int64_t)(map_addr - dyl_start);
1510
1511		slide_amount += slide;
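		/*
		 * slide_amount is now the delta between dyld's preferred
		 * load address and the free range we just probed, plus the
		 * caller's requested ASLR slide.
		 */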
1512
1513		*myresult = load_result_null;
1514
1515		ret = parse_machfile(vp, map, thread, header,
1516		                     file_offset, macho_size, depth,
1517		                     slide_amount, 0, myresult);
1518
1519		if (ret) {
1520			goto out;
1521		}
1522	}
1523
1524	if (ret == LOAD_SUCCESS) {
1525		result->dynlinker = TRUE;
1526		result->entry_point = myresult->entry_point;
1527		result->validentry = myresult->validentry;
1528		result->all_image_info_addr = myresult->all_image_info_addr;
1529		result->all_image_info_size = myresult->all_image_info_size;
1530	}
1531out:
1532	vnode_put(vp);
1533novp_out:
1534	FREE(dyld_data, M_TEMP);
1535	return (ret);
1536
1537}
1538
1539static load_return_t
1540load_code_signature(
1541	struct linkedit_data_command	*lcp,
1542	struct vnode			*vp,
1543	off_t				macho_offset,
1544	off_t				macho_size,
1545	cpu_type_t			cputype,
1546	load_result_t			*result)
1547{
1548	int		ret;
1549	kern_return_t	kr;
1550	vm_offset_t	addr;
1551	int		resid;
1552	struct cs_blob	*blob;
1553	int		error;
1554	vm_size_t	blob_size;
1555
1556	addr = 0;
1557	blob = NULL;
1558
1559	if (lcp->cmdsize != sizeof (struct linkedit_data_command) ||
1560	    lcp->dataoff + lcp->datasize > macho_size) {
1561		ret = LOAD_BADMACHO;
1562		goto out;
1563	}
1564
1565	blob = ubc_cs_blob_get(vp, cputype, -1);
1566	if (blob != NULL &&
1567	    blob->csb_cpu_type == cputype &&
1568	    blob->csb_base_offset == macho_offset &&
1569	    blob->csb_blob_offset == lcp->dataoff &&
1570	    blob->csb_mem_size == lcp->datasize) {
		/*
		 * We already have a blob for this vnode and cputype,
		 * and it's at the same offset in the Mach-O, so skip
		 * reloading, revalidating, and comparing the blob hashes.
		 * Security will not be compromised, but we might miss
		 * out on some messagetracer info about the differences
		 * in blob content.
		 */
1579		ret = LOAD_SUCCESS;
1580		goto out;
1581	}
1582
1583	blob_size = lcp->datasize;
1584	kr = ubc_cs_blob_allocate(&addr, &blob_size);
1585	if (kr != KERN_SUCCESS) {
1586		ret = LOAD_NOSPACE;
1587		goto out;
1588	}
1589
1590	resid = 0;
1591	error = vn_rdwr(UIO_READ,
1592			vp,
1593			(caddr_t) addr,
1594			lcp->datasize,
1595			macho_offset + lcp->dataoff,
1596			UIO_SYSSPACE,
1597			0,
1598			kauth_cred_get(),
1599			&resid,
1600			current_proc());
1601	if (error || resid != 0) {
1602		ret = LOAD_IOERROR;
1603		goto out;
1604	}
1605
1606	if (ubc_cs_blob_add(vp,
1607			    cputype,
1608			    macho_offset,
1609			    addr,
1610			    lcp->dataoff,
1611			    lcp->datasize)) {
1612		ret = LOAD_FAILURE;
1613		goto out;
1614	} else {
1615		/* ubc_cs_blob_add() has consumed "addr" */
1616		addr = 0;
1617	}
1618
1619#if CHECK_CS_VALIDATION_BITMAP
1620	ubc_cs_validation_bitmap_allocate( vp );
1621#endif
1622
1623	blob = ubc_cs_blob_get(vp, cputype, -1);
1624
1625	ret = LOAD_SUCCESS;
1626out:
1627	if (result && ret == LOAD_SUCCESS) {
1628		result->csflags |= blob->csb_flags;
1629	}
1630	if (addr != 0) {
1631		ubc_cs_blob_deallocate(addr, blob_size);
1632		addr = 0;
1633	}
1634
1635	return ret;
1636}
1637
1638
1639#if CONFIG_CODE_DECRYPTION
1640
1641static load_return_t
1642set_code_unprotect(
1643		   struct encryption_info_command *eip,
1644		   caddr_t addr,
1645		   vm_map_t map,
1646		   int64_t slide,
1647		   struct vnode	*vp,
1648		   cpu_type_t cputype,
1649		   cpu_subtype_t cpusubtype)
1650{
1651	int result, len;
1652	pager_crypt_info_t crypt_info;
1653	const char * cryptname = 0;
1654	char *vpath;
1655
1656	size_t offset;
1657	struct segment_command_64 *seg64;
1658	struct segment_command *seg32;
1659	vm_map_offset_t map_offset, map_size;
1660	kern_return_t kr;
1661
1662	if (eip->cmdsize < sizeof(*eip)) return LOAD_BADMACHO;
1663
1664	switch(eip->cryptid) {
1665		case 0:
1666			/* not encrypted, just an empty load command */
1667			return LOAD_SUCCESS;
1668		case 1:
1669			cryptname="com.apple.unfree";
1670			break;
1671		case 0x10:
1672			/* some random cryptid that you could manually put into
1673			 * your binary if you want NULL */
1674			cryptname="com.apple.null";
1675			break;
1676		default:
1677			return LOAD_BADMACHO;
1678	}
1679
1680	if (map == VM_MAP_NULL) return (LOAD_SUCCESS);
1681	if (NULL == text_crypter_create) return LOAD_FAILURE;
1682
1683	MALLOC_ZONE(vpath, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
1684	if(vpath == NULL) return LOAD_FAILURE;
1685
1686	len = MAXPATHLEN;
1687	result = vn_getpath(vp, vpath, &len);
1688	if(result) {
1689		FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
1690		return LOAD_FAILURE;
1691	}
1692
1693	/* set up decrypter first */
1694	crypt_file_data_t crypt_data = {
1695		.filename = vpath,
1696		.cputype = cputype,
1697		.cpusubtype = cpusubtype};
1698	kr=text_crypter_create(&crypt_info, cryptname, (void*)&crypt_data);
1699	FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
1700
1701	if(kr) {
1702		printf("set_code_unprotect: unable to create decrypter %s, kr=%d\n",
1703		       cryptname, kr);
1704		if (kr == kIOReturnNotPrivileged) {
1705			/* text encryption returned decryption failure */
1706			return(LOAD_DECRYPTFAIL);
		} else
1708			return LOAD_RESOURCE;
1709	}
1710
1711	/* this is terrible, but we have to rescan the load commands to find the
1712	 * virtual address of this encrypted stuff. This code is gonna look like
1713	 * the dyld source one day... */
1714	struct mach_header *header = (struct mach_header *)addr;
1715	size_t mach_header_sz = sizeof(struct mach_header);
1716	if (header->magic == MH_MAGIC_64 ||
1717	    header->magic == MH_CIGAM_64) {
1718	    	mach_header_sz = sizeof(struct mach_header_64);
1719	}
1720	offset = mach_header_sz;
1721	uint32_t ncmds = header->ncmds;
1722	while (ncmds--) {
1723		/*
1724		 *	Get a pointer to the command.
1725		 */
1726		struct load_command *lcp = (struct load_command *)(addr + offset);
1727		offset += lcp->cmdsize;
1728
1729		switch(lcp->cmd) {
1730			case LC_SEGMENT_64:
1731				seg64 = (struct segment_command_64 *)lcp;
1732				if ((seg64->fileoff <= eip->cryptoff) &&
1733				    (seg64->fileoff+seg64->filesize >=
1734				     eip->cryptoff+eip->cryptsize)) {
1735					map_offset = seg64->vmaddr + eip->cryptoff - seg64->fileoff + slide;
1736					map_size = eip->cryptsize;
1737					goto remap_now;
1738				}
1739			case LC_SEGMENT:
1740				seg32 = (struct segment_command *)lcp;
1741				if ((seg32->fileoff <= eip->cryptoff) &&
1742				    (seg32->fileoff+seg32->filesize >=
1743				     eip->cryptoff+eip->cryptsize)) {
1744					map_offset = seg32->vmaddr + eip->cryptoff - seg32->fileoff + slide;
1745					map_size = eip->cryptsize;
1746					goto remap_now;
1747				}
1748		}
1749	}
1750
	/* if we get here, we did not find anything */
1752	return LOAD_BADMACHO;
1753
1754remap_now:
1755	/* now remap using the decrypter */
1756	kr = vm_map_apple_protected(map, map_offset, map_offset+map_size, &crypt_info);
1757	if(kr) {
1758		printf("set_code_unprotect(): mapping failed with %x\n", kr);
1759		crypt_info.crypt_end(crypt_info.crypt_ops);
1760		return LOAD_PROTECT;
1761	}
1762
1763	return LOAD_SUCCESS;
1764}
1765
1766#endif
1767
1768/*
1769 * This routine exists to support the load_dylinker().
1770 *
1771 * This routine has its own, separate, understanding of the FAT file format,
1772 * which is terrifically unfortunate.
1773 */
1774static
1775load_return_t
1776get_macho_vnode(
1777	char			*path,
1778	integer_t		archbits,
1779	struct mach_header	*mach_header,
1780	off_t			*file_offset,
1781	off_t			*macho_size,
1782	struct macho_data	*data,
1783	struct vnode		**vpp
1784)
1785{
1786	struct vnode		*vp;
1787	vfs_context_t		ctx = vfs_context_current();
1788	proc_t			p = vfs_context_proc(ctx);
1789	kauth_cred_t		kerncred;
1790	struct nameidata	*ndp = &data->__nid;
1791	boolean_t		is_fat;
1792	struct fat_arch		fat_arch;
1793	int			error;
1794	int resid;
1795	union macho_vnode_header *header = &data->__header;
1796	off_t fsize = (off_t)0;
1797
1798	/*
1799	 * Capture the kernel credential for use in the actual read of the
1800	 * file, since the user doing the execution may have execute rights
1801	 * but not read rights, but to exec something, we have to either map
1802	 * or read it into the new process address space, which requires
1803	 * read rights.  This is to deal with lack of common credential
1804	 * serialization code which would treat NOCRED as "serialize 'root'".
1805	 */
1806	kerncred = vfs_context_ucred(vfs_context_kernel());
1807
	/* init the namei data to point at the user's program file name */
1809	NDINIT(ndp, LOOKUP, OP_OPEN, FOLLOW | LOCKLEAF, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
1810
1811	if ((error = namei(ndp)) != 0) {
1812		if (error == ENOENT) {
1813			error = LOAD_ENOENT;
1814		} else {
1815			error = LOAD_FAILURE;
1816		}
1817		return(error);
1818	}
1819	nameidone(ndp);
1820	vp = ndp->ni_vp;
1821
1822	/* check for regular file */
1823	if (vp->v_type != VREG) {
1824		error = LOAD_PROTECT;
1825		goto bad1;
1826	}
1827
1828	/* get size */
1829	if ((error = vnode_size(vp, &fsize, ctx)) != 0) {
1830		error = LOAD_FAILURE;
1831		goto bad1;
1832	}
1833
1834	/* Check mount point */
1835	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
1836		error = LOAD_PROTECT;
1837		goto bad1;
1838	}
1839
1840	/* check access */
1841	if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_EXECUTE | KAUTH_VNODE_READ_DATA, ctx)) != 0) {
1842		error = LOAD_PROTECT;
1843		goto bad1;
1844	}
1845
1846	/* try to open it */
1847	if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
1848		error = LOAD_PROTECT;
1849		goto bad1;
1850	}
1851
1852	if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)header, sizeof (*header), 0,
1853	    UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p)) != 0) {
1854		error = LOAD_IOERROR;
1855		goto bad2;
1856	}
1857
1858	if (header->mach_header.magic == MH_MAGIC ||
1859	    header->mach_header.magic == MH_MAGIC_64) {
1860		is_fat = FALSE;
1861	} else if (header->fat_header.magic == FAT_MAGIC ||
1862	    header->fat_header.magic == FAT_CIGAM) {
1863		is_fat = TRUE;
1864	} else {
1865		error = LOAD_BADMACHO;
1866		goto bad2;
1867	}
1868
1869	if (is_fat) {
1870		/* Look up our architecture in the fat file. */
1871		error = fatfile_getarch_with_bits(vp, archbits,
1872		    (vm_offset_t)(&header->fat_header), &fat_arch);
1873		if (error != LOAD_SUCCESS)
1874			goto bad2;
1875
1876		/* Read the Mach-O header out of it */
1877		error = vn_rdwr(UIO_READ, vp, (caddr_t)&header->mach_header,
1878		    sizeof (header->mach_header), fat_arch.offset,
1879		    UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p);
1880		if (error) {
1881			error = LOAD_IOERROR;
1882			goto bad2;
1883		}
1884
1885		/* Is this really a Mach-O? */
1886		if (header->mach_header.magic != MH_MAGIC &&
1887		    header->mach_header.magic != MH_MAGIC_64) {
1888			error = LOAD_BADMACHO;
1889			goto bad2;
1890		}
1891
1892		*file_offset = fat_arch.offset;
1893		*macho_size = fat_arch.size;
1894	} else {
1895		/*
1896		 * Force get_macho_vnode() to fail if the architecture bits
1897		 * do not match the expected architecture bits.  This in
1898		 * turn causes load_dylinker() to fail for the same reason,
1899		 * so it ensures the dynamic linker and the binary are in
1900		 * lock-step.  This is potentially bad, if we ever add to
1901		 * the CPU_ARCH_* bits any bits that are desirable but not
1902		 * required, since the dynamic linker might work, but we will
1903		 * refuse to load it because of this check.
1904		 */
1905		if ((cpu_type_t)(header->mach_header.cputype & CPU_ARCH_MASK) != archbits) {
1906			error = LOAD_BADARCH;
1907			goto bad2;
1908		}
1909
1910		*file_offset = 0;
1911		*macho_size = fsize;
1912	}
1913
1914	*mach_header = header->mach_header;
1915	*vpp = vp;
1916
1917	ubc_setsize(vp, fsize);
1918	return (error);
1919
1920bad2:
1921	(void) VNOP_CLOSE(vp, FREAD, ctx);
1922bad1:
1923	vnode_put(vp);
1924	return(error);
1925}
1926