1/*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 *	Copyright (C) 1988, 1989,  NeXT, Inc.
30 *
31 *	File:	kern/mach_loader.c
32 *	Author:	Avadis Tevanian, Jr.
33 *
34 *	Mach object file loader (kernel version, for now).
35 *
36 * 21-Jul-88  Avadis Tevanian, Jr. (avie) at NeXT
37 *	Started.
38 */
39
40#include <sys/param.h>
41#include <sys/vnode_internal.h>
42#include <sys/uio.h>
43#include <sys/namei.h>
44#include <sys/proc_internal.h>
45#include <sys/kauth.h>
46#include <sys/stat.h>
47#include <sys/malloc.h>
48#include <sys/mount_internal.h>
49#include <sys/fcntl.h>
50#include <sys/ubc_internal.h>
51#include <sys/imgact.h>
52
53#include <mach/mach_types.h>
54#include <mach/vm_map.h>	/* vm_allocate() */
55#include <mach/mach_vm.h>	/* mach_vm_allocate() */
56#include <mach/vm_statistics.h>
57#include <mach/task.h>
58#include <mach/thread_act.h>
59
60#include <machine/vmparam.h>
61#include <machine/exec.h>
62#include <machine/pal_routines.h>
63
64#include <kern/kern_types.h>
65#include <kern/cpu_number.h>
66#include <kern/mach_loader.h>
67#include <kern/mach_fat.h>
68#include <kern/kalloc.h>
69#include <kern/task.h>
70#include <kern/thread.h>
71#include <kern/page_decrypt.h>
72
73#include <mach-o/fat.h>
74#include <mach-o/loader.h>
75
76#include <vm/pmap.h>
77#include <vm/vm_map.h>
78#include <vm/vm_kern.h>
79#include <vm/vm_pager.h>
80#include <vm/vnode_pager.h>
81#include <vm/vm_protos.h>
82
83/*
84 * XXX vm/pmap.h should not treat these prototypes as MACH_KERNEL_PRIVATE
85 * when KERNEL is defined.
86 */
87extern pmap_t	pmap_create(ledger_t ledger, vm_map_size_t size,
88				boolean_t is_64bit);
89
90/* XXX should have prototypes in a shared header file */
91extern int	get_map_nentries(vm_map_t);
92
93extern kern_return_t	memory_object_signed(memory_object_control_t control,
94					     boolean_t is_signed);
95
96/* An empty load_result_t */
97static load_result_t load_result_null = {
98	.mach_header = MACH_VM_MIN_ADDRESS,
99	.entry_point = MACH_VM_MIN_ADDRESS,
100	.user_stack = MACH_VM_MIN_ADDRESS,
101	.user_stack_size = 0,
102	.all_image_info_addr = MACH_VM_MIN_ADDRESS,
103	.all_image_info_size = 0,
104	.thread_count = 0,
105	.unixproc = 0,
106	.dynlinker = 0,
107	.needs_dynlinker = 0,
108	.prog_allocated_stack = 0,
109	.prog_stack_size = 0,
110	.validentry = 0,
111	.csflags = 0,
112	.uuid = { 0 },
113	.min_vm_addr = MACH_VM_MAX_ADDRESS,
114	.max_vm_addr = MACH_VM_MIN_ADDRESS
115};
116
117/*
118 * Prototypes of static functions.
119 */
120static load_return_t
121parse_machfile(
122	struct vnode		*vp,
123	vm_map_t		map,
124	thread_t		thread,
125	struct mach_header	*header,
126	off_t			file_offset,
127	off_t			macho_size,
128	int			depth,
129	int64_t			slide,
130	load_result_t		*result
131);
132
133static load_return_t
134load_segment(
135	struct load_command		*lcp,
136	uint32_t			filetype,
137	void				*control,
138	off_t				pager_offset,
139	off_t				macho_size,
140	struct vnode			*vp,
141	vm_map_t			map,
142	int64_t				slide,
143	load_result_t			*result
144);
145
146static load_return_t
147load_code_signature(
148	struct linkedit_data_command	*lcp,
149	struct vnode			*vp,
150	off_t				macho_offset,
151	off_t				macho_size,
152	cpu_type_t			cputype,
153	load_result_t			*result);
154
155#if CONFIG_CODE_DECRYPTION
156static load_return_t
157set_code_unprotect(
158	struct encryption_info_command	*lcp,
159	caddr_t				addr,
160	vm_map_t			map,
161	int64_t				slide,
162	struct vnode			*vp);
163#endif
164
165static
166load_return_t
167load_main(
168	struct entry_point_command	*epc,
169	thread_t		thread,
170	int64_t				slide,
171	load_result_t		*result
172);
173
174static load_return_t
175load_unixthread(
176	struct thread_command	*tcp,
177	thread_t			thread,
178	int64_t				slide,
179	load_result_t			*result
180);
181
182static load_return_t
183load_threadstate(
184	thread_t		thread,
185	uint32_t	*ts,
186	uint32_t	total_size
187);
188
189static load_return_t
190load_threadstack(
191	thread_t		thread,
192	uint32_t	*ts,
193	uint32_t	total_size,
194	mach_vm_offset_t	*user_stack,
195	int				*customstack
196);
197
198static load_return_t
199load_threadentry(
200	thread_t		thread,
201	uint32_t	*ts,
202	uint32_t	total_size,
203	mach_vm_offset_t	*entry_point
204);
205
206static load_return_t
207load_dylinker(
208	struct dylinker_command	*lcp,
209	integer_t		archbits,
210	vm_map_t				map,
211	thread_t			thread,
212	int						depth,
213	int64_t			slide,
214	load_result_t			*result
215);
216
217struct macho_data;
218
219static load_return_t
220get_macho_vnode(
221	char				*path,
222	integer_t		archbits,
223	struct mach_header	*mach_header,
224	off_t			*file_offset,
225	off_t			*macho_size,
226	struct macho_data	*macho_data,
227	struct vnode		**vpp
228);
229
230static inline void
231widen_segment_command(const struct segment_command *scp32,
232    struct segment_command_64 *scp)
233{
234	scp->cmd = scp32->cmd;
235	scp->cmdsize = scp32->cmdsize;
236	bcopy(scp32->segname, scp->segname, sizeof(scp->segname));
237	scp->vmaddr = scp32->vmaddr;
238	scp->vmsize = scp32->vmsize;
239	scp->fileoff = scp32->fileoff;
240	scp->filesize = scp32->filesize;
241	scp->maxprot = scp32->maxprot;
242	scp->initprot = scp32->initprot;
243	scp->nsects = scp32->nsects;
244	scp->flags = scp32->flags;
245}
246
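/*
 * Record the address and size of dyld's __DATA,__all_image_info section,
 * adjusted for any slide, in the load result; this is what later lets
 * debuggers and task_info() callers (e.g. TASK_DYLD_INFO) find the list
 * of loaded images.
 */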
247static void
248note_all_image_info_section(const struct segment_command_64 *scp,
249    boolean_t is64, size_t section_size, const void *sections,
250    int64_t slide, load_result_t *result)
251{
252	const union {
253		struct section s32;
254		struct section_64 s64;
255	} *sectionp;
256	unsigned int i;
257
258	if (strncmp(scp->segname, "__DATA", sizeof(scp->segname)) != 0)
259		return;
260	for (i = 0; i < scp->nsects; ++i) {
261		sectionp = (const void *)
262		    ((const char *)sections + section_size * i);
263		if (0 == strncmp(sectionp->s64.sectname, "__all_image_info",
264		    sizeof(sectionp->s64.sectname))) {
265			result->all_image_info_addr =
266			    is64 ? sectionp->s64.addr : sectionp->s32.addr;
267			result->all_image_info_addr += slide;
268			result->all_image_info_size =
269			    is64 ? sectionp->s64.size : sectionp->s32.size;
270			return;
271		}
272	}
273}
274
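/*
 * load_machfile() is the top-level entry point used by the Mach-O image
 * activator during exec()/posix_spawn().  A minimal sketch of a call site
 * (caller-side names such as load_return_to_errno() are assumptions, not
 * part of this file):
 *
 *	load_result_t	load_result;
 *	load_return_t	lret;
 *
 *	lret = load_machfile(imgp, mach_header, thread, map, &load_result);
 *	if (lret != LOAD_SUCCESS)
 *		return (load_return_to_errno(lret));
 *
 * On success, "load_result" describes the mapped image: entry point,
 * stack location and size, whether dyld is required, and so on.
 */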
275load_return_t
276load_machfile(
277	struct image_params	*imgp,
278	struct mach_header	*header,
279	thread_t 		thread,
280	vm_map_t 		new_map,
281	load_result_t		*result
282)
283{
284	struct vnode		*vp = imgp->ip_vp;
285	off_t			file_offset = imgp->ip_arch_offset;
286	off_t			macho_size = imgp->ip_arch_size;
287	off_t			file_size = imgp->ip_vattr->va_data_size;
288
289	pmap_t			pmap = 0;	/* protected by create_map */
290	vm_map_t		map;
291	vm_map_t		old_map;
292	task_t			old_task = TASK_NULL; /* protected by create_map */
293	load_result_t		myresult;
294	load_return_t		lret;
295	boolean_t create_map = FALSE;
296	int spawn = (imgp->ip_flags & IMGPF_SPAWN);
297	task_t task = current_task();
298	proc_t p = current_proc();
299	mach_vm_offset_t	aslr_offset = 0;
300	kern_return_t 		kret;
301
302	if (macho_size > file_size) {
303		return(LOAD_BADMACHO);
304	}
305
306	if (new_map == VM_MAP_NULL) {
307		create_map = TRUE;
308		old_task = current_task();
309	}
310
311	/*
312	 * If we are spawning, we have created backing objects for the process
313	 * already, which include non-lazily creating the task map.  So we
314	 * are going to switch out the task map with one appropriate for the
315	 * bitness of the image being loaded.
316	 */
317	if (spawn) {
318		create_map = TRUE;
319		old_task = get_threadtask(thread);
320	}
321
322	if (create_map) {
323		pmap = pmap_create(get_task_ledger(task), (vm_map_size_t) 0,
324				(imgp->ip_flags & IMGPF_IS_64BIT));
325		map = vm_map_create(pmap,
326				0,
327				vm_compute_max_offset((imgp->ip_flags & IMGPF_IS_64BIT)),
328				TRUE);
329
330	} else
331		map = new_map;
332
333#ifndef	CONFIG_ENFORCE_SIGNED_CODE
334	/* This turns off faulting for executable pages, which makes it
335	 * possible to circumvent Code Signing Enforcement. */
336	if ( (header->flags & MH_ALLOW_STACK_EXECUTION) )
337	        vm_map_disable_NX(map);
338#endif
339
340	/* Forcibly disallow execution from data pages even if the arch
341	 * normally permits it. */
342	if ((header->flags & MH_NO_HEAP_EXECUTION) && !(imgp->ip_flags & IMGPF_ALLOW_DATA_EXEC))
343		vm_map_disallow_data_exec(map);
344
345	/*
346	 * Compute a random offset for ASLR.
347	 */
348	if (!(imgp->ip_flags & IMGPF_DISABLE_ASLR)) {
349		aslr_offset = random();
350		aslr_offset %= 1 << ((imgp->ip_flags & IMGPF_IS_64BIT) ? 16 : 8);
351		aslr_offset <<= PAGE_SHIFT;
352	}
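	/*
	 * Illustration (assuming the usual 4 KB page, i.e. PAGE_SHIFT == 12):
	 * a 64-bit image draws 16 random bits, giving a page-aligned slide
	 * anywhere in [0, 256 MB); a 32-bit image draws 8 bits, giving a
	 * slide in [0, 1 MB).
	 */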
353
354	if (!result)
355		result = &myresult;
356
357	*result = load_result_null;
358
359	lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
360			      0, (int64_t)aslr_offset, result);
361
362	if (lret != LOAD_SUCCESS) {
363		if (create_map) {
364			vm_map_deallocate(map);	/* will lose pmap reference too */
365		}
366		return(lret);
367	}
368
369#if CONFIG_EMBEDDED
370	/*
371	 * Check to see if the page zero is enforced by the map->min_offset.
372	 */
373	if (vm_map_has_hard_pagezero(map, 0x1000) == FALSE) {
374		if (create_map) {
375			vm_map_deallocate(map);	/* will lose pmap reference too */
376		}
377		printf("Cannot enforce a hard page-zero for %s\n", imgp->ip_strings);
378		psignal(vfs_context_proc(imgp->ip_vfs_context), SIGKILL);
379		return (LOAD_BADMACHO);
380	}
381#else
382	/*
383	 * For 64-bit users, check for presence of a 4GB page zero
384	 * which will enable the kernel to share the user's address space
385	 * and hence avoid TLB flushes on kernel entry/exit
386	 */
387
388	if ((imgp->ip_flags & IMGPF_IS_64BIT) &&
389	     vm_map_has_4GB_pagezero(map)) {
390		vm_map_set_4GB_pagezero(map);
391	}
392#endif
393	/*
394	 *	Commit to new map.
395	 *
396	 *	Swap the new map for the old, which consumes our new map
397	 *	reference and leaves us responsible for the old_map reference.
398	 *	That lets us get off the pmap associated with it, and
399	 *	then we can release it.
400	 */
401
402	 if (create_map) {
403		/*
404		 * If this is an exec, then we are going to destroy the old
405		 * task, and it's correct to halt it; if it's spawn, the
406		 * task is not yet running, and it makes no sense.
407		 */
408	 	if (!spawn) {
409			/*
410			 * Mark the task as halting and start the other
411			 * threads towards terminating themselves.  Then
412			 * make sure any threads waiting for a process
413			 * transition get informed that we are committed to
414			 * this transition, and then finally complete the
415			 * task halting (wait for threads and then cleanup
416			 * task resources).
417			 *
418			 * NOTE: task_start_halt() makes sure that no new
419			 * threads are created in the task during the transition.
420			 * We need to mark the workqueue as exiting before we
421			 * wait for threads to terminate (at the end of which
422			 * we no longer have a prohibition on thread creation).
423			 *
424			 * Finally, clean up any lingering workqueue data structures
425			 * that may have been left behind by the workqueue threads
426			 * as they exited (and then clean up the work queue itself).
427			 */
428			kret = task_start_halt(task);
429			if (kret != KERN_SUCCESS) {
430				return(kret);
431			}
432			proc_transcommit(p, 0);
433			workqueue_mark_exiting(p);
434			task_complete_halt(task);
435			workqueue_exit(p);
436		}
437		old_map = swap_task_map(old_task, thread, map, !spawn);
438		vm_map_clear_4GB_pagezero(old_map);
439		vm_map_deallocate(old_map);
440	}
441	return(LOAD_SUCCESS);
442}
443
444/*
445 * The file size of a mach-o file is limited to 32 bits; this is because
446 * that is the limit on the kalloc() of enough bytes for a mach_header and
447 * the contents of its sizeofcmds, which is currently constrained to 32
448 * bits in the file format itself.  We read the commands section into a
449 * kernel buffer and then parse it in order to process the mach-o
450 * load_command segment(s).  We are only interested in a subset of
451 * the total set of possible commands.  If "map"==VM_MAP_NULL or
452 * "thread"==THREAD_NULL, do not make permanent VM modifications,
453 * just preflight the parse.
454 */
455static
456load_return_t
457parse_machfile(
458	struct vnode 		*vp,
459	vm_map_t		map,
460	thread_t		thread,
461	struct mach_header	*header,
462	off_t			file_offset,
463	off_t			macho_size,
464	int			depth,
465	int64_t			aslr_offset,
466	load_result_t		*result
467)
468{
469	uint32_t		ncmds;
470	struct load_command	*lcp;
471	struct dylinker_command	*dlp = 0;
472	struct uuid_command	*uulp = 0;
473	integer_t		dlarchbits = 0;
474	void *			control;
475	load_return_t		ret = LOAD_SUCCESS;
476	caddr_t			addr;
477	void *			kl_addr;
478	vm_size_t		size,kl_size;
479	size_t			offset;
480	size_t			oldoffset;	/* for overflow check */
481	int			pass;
482	proc_t			p = current_proc();		/* XXXX */
483	int			error;
484	int resid=0;
485	size_t			mach_header_sz = sizeof(struct mach_header);
486	boolean_t		abi64;
487	boolean_t		got_code_signatures = FALSE;
488	int64_t			slide = 0;
489
490	if (header->magic == MH_MAGIC_64 ||
491	    header->magic == MH_CIGAM_64) {
492	    	mach_header_sz = sizeof(struct mach_header_64);
493	}
494
495	/*
496	 *	Break infinite recursion
497	 */
498	if (depth > 6) {
499		return(LOAD_FAILURE);
500	}
501
502	depth++;
503
504	/*
505	 *	Check to see if this is the right machine type.
506	 */
507	if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != cpu_type()) ||
508	    !grade_binary(header->cputype,
509	    	header->cpusubtype & ~CPU_SUBTYPE_MASK))
510		return(LOAD_BADARCH);
511
512	abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);
513
514	switch (header->filetype) {
515
516	case MH_OBJECT:
517	case MH_EXECUTE:
518	case MH_PRELOAD:
519		if (depth != 1) {
520			return (LOAD_FAILURE);
521		}
522		break;
523
524	case MH_FVMLIB:
525	case MH_DYLIB:
526		if (depth == 1) {
527			return (LOAD_FAILURE);
528		}
529		break;
530
531	case MH_DYLINKER:
532		if (depth != 2) {
533			return (LOAD_FAILURE);
534		}
535		break;
536
537	default:
538		return (LOAD_FAILURE);
539	}
540
541	/*
542	 *	Get the pager for the file.
543	 */
544	control = ubc_getobject(vp, UBC_FLAGS_NONE);
545
546	/*
547	 *	Map the portion that must be accessible directly into
548	 *	the kernel's map.
549	 */
550	if ((off_t)(mach_header_sz + header->sizeofcmds) > macho_size)
551		return(LOAD_BADMACHO);
552
553	/*
554	 *	Round size of Mach-O commands up to page boundary.
555	 */
556	size = round_page(mach_header_sz + header->sizeofcmds);
557	if (size <= 0)
558		return(LOAD_BADMACHO);
559
560	/*
561	 * Map the load commands into kernel memory.
562	 */
563	addr = 0;
564	kl_size = size;
565	kl_addr = kalloc(size);
566	addr = (caddr_t)kl_addr;
567	if (addr == NULL)
568		return(LOAD_NOSPACE);
569
570	error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
571	    UIO_SYSSPACE, 0, kauth_cred_get(), &resid, p);
572	if (error) {
573		if (kl_addr )
574			kfree(kl_addr, kl_size);
575		return(LOAD_IOERROR);
576	}
577
578	/*
579	 *	For PIE and dyld, slide everything by the ASLR offset.
580	 */
581#ifdef DEBUG
582	kprintf("dyld_slide: 0x%016llx\n", (uint64_t)aslr_offset);
583#endif
584	if ((header->flags & MH_PIE) || (header->filetype == MH_DYLINKER)) {
585		slide = aslr_offset;
586	}
587
588	/*
589	 *	Scan through the commands, processing each one as necessary.
590	 */
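	/*
	 * The commands are walked in three passes: pass 1 picks up the
	 * thread commands (LC_UNIXTHREAD/LC_MAIN), LC_UUID and
	 * LC_CODE_SIGNATURE; pass 2 maps the LC_SEGMENT/LC_SEGMENT_64
	 * commands; pass 3 first verifies that a valid entry point was
	 * found inside an executable segment, then records the dynamic
	 * linker (LC_LOAD_DYLINKER) and handles LC_ENCRYPTION_INFO.
	 */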
591	for (pass = 1; pass <= 3; pass++) {
592
593		/*
594		 * Check that the entry point is contained in an executable segment
595		 */
596		if ((pass == 3) && (result->validentry == 0)) {
597			thread_state_initialize(thread);
598			ret = LOAD_FAILURE;
599			break;
600		}
601
602		/*
603		 * Loop through each of the load_commands indicated by the
604		 * Mach-O header; if an absurd value is provided, we just
605		 * run off the end of the reserved section by incrementing
606		 * the offset too far, so we are implicitly fail-safe.
607		 */
608		offset = mach_header_sz;
609		ncmds = header->ncmds;
610
611		while (ncmds--) {
612			/*
613			 *	Get a pointer to the command.
614			 */
615			lcp = (struct load_command *)(addr + offset);
616			oldoffset = offset;
617			offset += lcp->cmdsize;
618
619			/*
620			 * Perform prevalidation of the struct load_command
621			 * before we attempt to use its contents.  Invalid
622			 * values are ones which result in an overflow, or
623			 * which can not possibly be valid commands, or which
624			 * straddle or exist past the reserved section at the
625			 * start of the image.
626			 */
627			if (oldoffset > offset ||
628			    lcp->cmdsize < sizeof(struct load_command) ||
629			    offset > header->sizeofcmds + mach_header_sz) {
630				ret = LOAD_BADMACHO;
631				break;
632			}
633
634			/*
635			 * Act on struct load_command's for which kernel
636			 * intervention is required.
637			 */
638			switch(lcp->cmd) {
639			case LC_SEGMENT:
640			case LC_SEGMENT_64:
641				if (pass != 2)
642					break;
643				ret = load_segment(lcp,
644				    		   header->filetype,
645						   control,
646						   file_offset,
647						   macho_size,
648						   vp,
649						   map,
650						   slide,
651						   result);
652				break;
653			case LC_UNIXTHREAD:
654				if (pass != 1)
655					break;
656				ret = load_unixthread(
657						 (struct thread_command *) lcp,
658						 thread,
659						 slide,
660						 result);
661				break;
662			case LC_MAIN:
663				if (pass != 1)
664					break;
665				if (depth != 1)
666					break;
667				ret = load_main(
668						 (struct entry_point_command *) lcp,
669						 thread,
670						 slide,
671						 result);
672				break;
673			case LC_LOAD_DYLINKER:
674				if (pass != 3)
675					break;
676				if ((depth == 1) && (dlp == 0)) {
677					dlp = (struct dylinker_command *)lcp;
678					dlarchbits = (header->cputype & CPU_ARCH_MASK);
679				} else {
680					ret = LOAD_FAILURE;
681				}
682				break;
683			case LC_UUID:
684				if (pass == 1 && depth == 1) {
685					uulp = (struct uuid_command *)lcp;
686					memcpy(&result->uuid[0], &uulp->uuid[0], sizeof(result->uuid));
687				}
688				break;
689			case LC_CODE_SIGNATURE:
690				/* CODE SIGNING */
691				if (pass != 1)
692					break;
693				/* pager -> uip ->
694				   load signatures & store in uip
695				   set VM object "signed_pages"
696				*/
697				ret = load_code_signature(
698					(struct linkedit_data_command *) lcp,
699					vp,
700					file_offset,
701					macho_size,
702					header->cputype,
703					(depth == 1) ? result : NULL);
704				if (ret != LOAD_SUCCESS) {
705					printf("proc %d: load code signature error %d "
706					       "for file \"%s\"\n",
707					       p->p_pid, ret, vp->v_name);
708					ret = LOAD_SUCCESS; /* ignore error */
709				} else {
710					got_code_signatures = TRUE;
711				}
712				break;
713#if CONFIG_CODE_DECRYPTION
714#ifndef __arm__
715			case LC_ENCRYPTION_INFO:
716				if (pass != 3)
717					break;
718				ret = set_code_unprotect(
719					(struct encryption_info_command *) lcp,
720					addr, map, slide, vp);
721				if (ret != LOAD_SUCCESS) {
722					printf("proc %d: set_code_unprotect() error %d "
723					       "for file \"%s\"\n",
724					       p->p_pid, ret, vp->v_name);
725					/* Don't let the app run if it's
726					 * encrypted but we failed to set up the
727					 * decrypter */
728					 psignal(p, SIGKILL);
729				}
730				break;
731#endif
732#endif
733			default:
734				/* Other commands are ignored by the kernel */
735				ret = LOAD_SUCCESS;
736				break;
737			}
738			if (ret != LOAD_SUCCESS)
739				break;
740		}
741		if (ret != LOAD_SUCCESS)
742			break;
743	}
744	if (ret == LOAD_SUCCESS) {
745		if (! got_code_signatures) {
746			struct cs_blob *blob;
747			/* no embedded signatures: look for detached ones */
748			blob = ubc_cs_blob_get(vp, -1, file_offset);
749			if (blob != NULL) {
750				/* get flags to be applied to the process */
751				result->csflags |= blob->csb_flags;
752			}
753		}
754
755		/* Make sure if we need dyld, we got it */
756		if (result->needs_dynlinker && !dlp) {
757			ret = LOAD_FAILURE;
758		}
759
760		if ((ret == LOAD_SUCCESS) && (dlp != 0)) {
761			/* load the dylinker, and always slide it by the ASLR
762			 * offset regardless of PIE */
763			ret = load_dylinker(dlp, dlarchbits, map, thread, depth, aslr_offset, result);
764		}
765
766		if ((ret == LOAD_SUCCESS) && (depth == 1)) {
767			if (result->thread_count == 0) {
768				ret = LOAD_FAILURE;
769			}
770		}
771	}
772
773	if (kl_addr )
774		kfree(kl_addr, kl_size);
775
776	return(ret);
777}
778
779#if CONFIG_CODE_DECRYPTION
780
781#define	APPLE_UNPROTECTED_HEADER_SIZE	(3 * PAGE_SIZE_64)
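/*
 * For an SG_PROTECTED_VERSION_1 segment, the first three pages of the
 * Mach-O slice (measured from the start of that slice within the file,
 * not from the start of a fat file) are left in the clear; everything
 * past them is run through the DSMOS page transform when it is mapped.
 */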
782
783static load_return_t
784unprotect_segment(
785	uint64_t	file_off,
786	uint64_t	file_size,
787	struct vnode	*vp,
788	off_t		macho_offset,
789	vm_map_t	map,
790	vm_map_offset_t	map_addr,
791	vm_map_size_t	map_size)
792{
793#ifdef __arm__
794    return LOAD_FAILURE;
795#else
796	kern_return_t	kr;
797
798	/*
799	 * The first APPLE_UNPROTECTED_HEADER_SIZE bytes (from offset 0 of
800	 * this part of a Universal binary) are not protected...
801	 * The rest needs to be "transformed".
802	 */
803	if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE &&
804	    file_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) {
805		/* it's all unprotected, nothing to do... */
806		kr = KERN_SUCCESS;
807	} else {
808		if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE) {
809			/*
810			 * We start mapping in the unprotected area.
811			 * Skip the unprotected part...
812			 */
813			vm_map_offset_t	delta;
814
815			delta = APPLE_UNPROTECTED_HEADER_SIZE;
816			delta -= file_off;
817			map_addr += delta;
818			map_size -= delta;
819		}
820		/* ... transform the rest of the mapping. */
821		struct pager_crypt_info crypt_info;
822		crypt_info.page_decrypt = dsmos_page_transform;
823		crypt_info.crypt_ops = NULL;
824		crypt_info.crypt_end = NULL;
825#pragma unused(vp, macho_offset)
826		crypt_info.crypt_ops = (void *)0x2e69cf40;
827		kr = vm_map_apple_protected(map,
828					    map_addr,
829					    map_addr + map_size,
830					    &crypt_info);
831	}
832
833	if (kr != KERN_SUCCESS) {
834		return LOAD_FAILURE;
835	}
836	return LOAD_SUCCESS;
837#endif
838}
839#else	/* CONFIG_CODE_DECRYPTION */
840static load_return_t
841unprotect_segment(
842	__unused	uint64_t	file_off,
843	__unused	uint64_t	file_size,
844	__unused	struct vnode	*vp,
845	__unused	off_t		macho_offset,
846	__unused	vm_map_t	map,
847	__unused	vm_map_offset_t	map_addr,
848	__unused	vm_map_size_t	map_size)
849{
850	return LOAD_SUCCESS;
851}
852#endif	/* CONFIG_CODE_DECRYPTION */
853
854static
855load_return_t
856load_segment(
857	struct load_command		*lcp,
858	uint32_t			filetype,
859	void *				control,
860	off_t				pager_offset,
861	off_t				macho_size,
862	struct vnode			*vp,
863	vm_map_t			map,
864	int64_t				slide,
865	load_result_t		*result
866)
867{
868	struct segment_command_64 segment_command, *scp;
869	kern_return_t		ret;
870	vm_map_offset_t		map_addr, map_offset;
871	vm_map_size_t		map_size, seg_size, delta_size;
872	vm_prot_t 		initprot;
873	vm_prot_t		maxprot;
874	size_t			segment_command_size, total_section_size,
875				single_section_size;
876	boolean_t		prohibit_pagezero_mapping = FALSE;
877
878	if (LC_SEGMENT_64 == lcp->cmd) {
879		segment_command_size = sizeof(struct segment_command_64);
880		single_section_size  = sizeof(struct section_64);
881	} else {
882		segment_command_size = sizeof(struct segment_command);
883		single_section_size  = sizeof(struct section);
884	}
885	if (lcp->cmdsize < segment_command_size)
886		return (LOAD_BADMACHO);
887	total_section_size = lcp->cmdsize - segment_command_size;
888
889	if (LC_SEGMENT_64 == lcp->cmd)
890		scp = (struct segment_command_64 *)lcp;
891	else {
892		scp = &segment_command;
893		widen_segment_command((struct segment_command *)lcp, scp);
894	}
895
896	/*
897	 * Make sure what we get from the file is really ours (as specified
898	 * by macho_size).
899	 */
900	if (scp->fileoff + scp->filesize < scp->fileoff ||
901	    scp->fileoff + scp->filesize > (uint64_t)macho_size)
902		return (LOAD_BADMACHO);
903	/*
904	 * Ensure that the number of sections specified would fit
905	 * within the load command size.
906	 */
907	if (total_section_size / single_section_size < scp->nsects)
908		return (LOAD_BADMACHO);
909	/*
910	 * Make sure the segment is page-aligned in the file.
911	 */
912	if ((scp->fileoff & PAGE_MASK_64) != 0)
913		return (LOAD_BADMACHO);
914
915	/*
916	 *	Round sizes to page size.
917	 */
918	seg_size = round_page_64(scp->vmsize);
919	map_size = round_page_64(scp->filesize);
920	map_addr = trunc_page_64(scp->vmaddr); /* JVXXX note that in XNU TOT this is round instead of trunc for 64 bits */
921	if (seg_size == 0)
922		return (LOAD_SUCCESS);
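	/*
	 * Detect a "page zero" segment: it starts at address 0, is not
	 * backed by the file, and permits no access at all.
	 */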
923	if (map_addr == 0 &&
924	    map_size == 0 &&
925	    seg_size != 0 &&
926	    (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
927	    (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
928		/*
929		 * For PIE, extend page zero rather than moving it.  Extending
930		 * page zero keeps early allocations from falling predictably
931		 * between the end of page zero and the beginning of the first
932		 * slid segment.
933		 */
934		seg_size += slide;
935		slide = 0;
936#if CONFIG_EMBEDDED
937		prohibit_pagezero_mapping = TRUE;
938#endif
939		/* XXX (4596982) this interferes with Rosetta, so limit to 64-bit tasks */
940		if (scp->cmd == LC_SEGMENT_64) {
941		        prohibit_pagezero_mapping = TRUE;
942		}
943
944		if (prohibit_pagezero_mapping) {
945			/*
946			 * This is a "page zero" segment:  it starts at address 0,
947			 * is not mapped from the binary file and is not accessible.
948			 * User-space should never be able to access that memory, so
949			 * make it completely off limits by raising the VM map's
950			 * minimum offset.
951			 */
952			ret = vm_map_raise_min_offset(map, seg_size);
953			if (ret != KERN_SUCCESS) {
954				return (LOAD_FAILURE);
955			}
956			return (LOAD_SUCCESS);
957		}
958	}
959
960	/* If a non-zero slide was specified by the caller, apply now */
961	map_addr += slide;
962
963	if (map_addr < result->min_vm_addr)
964		result->min_vm_addr = map_addr;
965	if (map_addr+seg_size > result->max_vm_addr)
966		result->max_vm_addr = map_addr+seg_size;
967
968	if (map == VM_MAP_NULL)
969		return (LOAD_SUCCESS);
970
971	map_offset = pager_offset + scp->fileoff;	/* limited to 32 bits */
972
973	if (map_size > 0) {
974		initprot = (scp->initprot) & VM_PROT_ALL;
975		maxprot = (scp->maxprot) & VM_PROT_ALL;
976		/*
977		 *	Map a copy of the file into the address space.
978		 */
979		ret = vm_map_enter_mem_object_control(map,
980				&map_addr, map_size, (mach_vm_offset_t)0,
981			        VM_FLAGS_FIXED,	control, map_offset, TRUE,
982				initprot, maxprot,
983				VM_INHERIT_DEFAULT);
984		if (ret != KERN_SUCCESS)
985			return (LOAD_NOSPACE);
986
987		/*
988		 *	If the file didn't end on a page boundary,
989		 *	we need to zero the leftover.
990		 */
991		delta_size = map_size - scp->filesize;
992#if FIXME
993		if (delta_size > 0) {
994			mach_vm_offset_t	tmp;
995
996			ret = mach_vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
997			if (ret != KERN_SUCCESS)
998				return(LOAD_RESOURCE);
999
1000			if (copyout(tmp, map_addr + scp->filesize,
1001								delta_size)) {
1002				(void) mach_vm_deallocate(
1003						kernel_map, tmp, delta_size);
1004				return (LOAD_FAILURE);
1005			}
1006
1007			(void) mach_vm_deallocate(kernel_map, tmp, delta_size);
1008		}
1009#endif /* FIXME */
1010	}
1011
1012	/*
1013	 *	If the virtual size of the segment is greater
1014	 *	than the size from the file, we need to allocate
1015	 *	zero fill memory for the rest.
1016	 */
1017	delta_size = seg_size - map_size;
1018	if (delta_size > 0) {
1019		mach_vm_offset_t tmp = map_addr + map_size;
1020
1021		ret = mach_vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
1022				  NULL, 0, FALSE,
1023				  scp->initprot, scp->maxprot,
1024				  VM_INHERIT_DEFAULT);
1025		if (ret != KERN_SUCCESS)
1026			return(LOAD_NOSPACE);
1027	}
1028
1029	if ( (scp->fileoff == 0) && (scp->filesize != 0) )
1030		result->mach_header = map_addr;
1031
1032	if (scp->flags & SG_PROTECTED_VERSION_1) {
1033		ret = unprotect_segment(scp->fileoff,
1034					scp->filesize,
1035					vp,
1036					pager_offset,
1037					map,
1038					map_addr,
1039					map_size);
1040	} else {
1041		ret = LOAD_SUCCESS;
1042	}
1043	if (LOAD_SUCCESS == ret && filetype == MH_DYLINKER &&
1044	    result->all_image_info_addr == MACH_VM_MIN_ADDRESS)
1045		note_all_image_info_section(scp,
1046		    LC_SEGMENT_64 == lcp->cmd, single_section_size,
1047		    (const char *)lcp + segment_command_size, slide, result);
1048
1049	if ((result->entry_point >= map_addr) && (result->entry_point < (map_addr + map_size)))
1050		result->validentry = 1;
1051
1052	return ret;
1053}
1054
1055
1056
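/*
 * load_main() handles the LC_MAIN load command.  Unlike LC_UNIXTHREAD it
 * carries no register state, only an entry point offset and an optional
 * stack size, so an image using it always needs dyld to finish setting
 * up the initial thread.
 */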
1057static
1058load_return_t
1059load_main(
1060	struct entry_point_command	*epc,
1061	thread_t		thread,
1062	int64_t				slide,
1063	load_result_t		*result
1064)
1065{
1066	mach_vm_offset_t addr;
1067	kern_return_t	ret;
1068
1069	if (epc->cmdsize < sizeof(*epc))
1070		return (LOAD_BADMACHO);
1071	if (result->thread_count != 0) {
1072		printf("load_main: already have a thread!\n");
1073		return (LOAD_FAILURE);
1074	}
1075
1076	if (thread == THREAD_NULL)
1077		return (LOAD_SUCCESS);
1078
1079	/* LC_MAIN specifies stack size but not location */
1080	if (epc->stacksize) {
1081		result->prog_stack_size = 1;
1082		result->user_stack_size = epc->stacksize;
1083	} else {
1084		result->prog_stack_size = 0;
1085		result->user_stack_size = MAXSSIZ;
1086	}
1087	result->prog_allocated_stack = 0;
1088
1089	/* use default location for stack */
1090	ret = thread_userstackdefault(thread, &addr);
1091	if (ret != KERN_SUCCESS)
1092		return(LOAD_FAILURE);
1093
1094	/* The stack slides down from the default location */
1095	result->user_stack = addr;
1096	result->user_stack -= slide;
1097
1098	/* kernel does *not* use entryoff from LC_MAIN.	 Dyld uses it. */
1099	result->needs_dynlinker = TRUE;
1100	result->validentry = TRUE;
1101
1102	ret = thread_state_initialize( thread );
1103	if (ret != KERN_SUCCESS) {
1104		return(LOAD_FAILURE);
1105	}
1106
1107	result->unixproc = TRUE;
1108	result->thread_count++;
1109
1110	return(LOAD_SUCCESS);
1111}
1112
1113
1114static
1115load_return_t
1116load_unixthread(
1117	struct thread_command	*tcp,
1118	thread_t		thread,
1119	int64_t				slide,
1120	load_result_t		*result
1121)
1122{
1123	load_return_t	ret;
1124	int customstack =0;
1125	mach_vm_offset_t addr;
1126
1127	if (tcp->cmdsize < sizeof(*tcp))
1128		return (LOAD_BADMACHO);
1129	if (result->thread_count != 0) {
1130		printf("load_unixthread: already have a thread!\n");
1131		return (LOAD_FAILURE);
1132	}
1133
1134	if (thread == THREAD_NULL)
1135		return (LOAD_SUCCESS);
1136
1137	ret = load_threadstack(thread,
1138		       (uint32_t *)(((vm_offset_t)tcp) +
1139		       		sizeof(struct thread_command)),
1140		       tcp->cmdsize - sizeof(struct thread_command),
1141		       &addr,
1142			   &customstack);
1143	if (ret != LOAD_SUCCESS)
1144		return(ret);
1145
1146	/* LC_UNIXTHREAD optionally specifies stack size and location */
1147
1148	if (customstack) {
1149		result->prog_stack_size = 0;	/* unknown */
1150		result->prog_allocated_stack = 1;
1151	} else {
1152		result->prog_allocated_stack = 0;
1153		result->prog_stack_size = 0;
1154		result->user_stack_size = MAXSSIZ;
1155	}
1156
1157	/* The stack slides down from the default location */
1158	result->user_stack = addr;
1159	result->user_stack -= slide;
1160
1161	ret = load_threadentry(thread,
1162		       (uint32_t *)(((vm_offset_t)tcp) +
1163		       		sizeof(struct thread_command)),
1164		       tcp->cmdsize - sizeof(struct thread_command),
1165		       &addr);
1166	if (ret != LOAD_SUCCESS)
1167		return(ret);
1168
1169	result->entry_point = addr;
1170	result->entry_point += slide;
1171
1172	ret = load_threadstate(thread,
1173		       (uint32_t *)(((vm_offset_t)tcp) +
1174		       		sizeof(struct thread_command)),
1175		       tcp->cmdsize - sizeof(struct thread_command));
1176	if (ret != LOAD_SUCCESS)
1177		return (ret);
1178
1179	result->unixproc = TRUE;
1180	result->thread_count++;
1181
1182	return(LOAD_SUCCESS);
1183}
1184
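/*
 * load_threadstate(), load_threadstack() and load_threadentry() all walk
 * the same payload layout inside a thread command: a sequence of
 *
 *	uint32_t	flavor;		e.g. x86_THREAD_STATE64
 *	uint32_t	count;		size of the state in 32-bit words
 *	uint32_t	state[count];	machine-dependent register state
 *
 * entries, repeated until total_size is exhausted.
 */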
1185static
1186load_return_t
1187load_threadstate(
1188	thread_t	thread,
1189	uint32_t	*ts,
1190	uint32_t	total_size
1191)
1192{
1193	kern_return_t	ret;
1194	uint32_t	size;
1195	int		flavor;
1196	uint32_t	thread_size;
1197
1198	ret = thread_state_initialize( thread );
1199	if (ret != KERN_SUCCESS) {
1200		return(LOAD_FAILURE);
1201	}
1202
1203	/*
1204	 *	Set the new thread state; iterate through the state flavors in
1205	 *	the mach-o file.
1206	 */
1207	while (total_size > 0) {
1208		flavor = *ts++;
1209		size = *ts++;
1210		if (UINT32_MAX-2 < size ||
1211		    UINT32_MAX/sizeof(uint32_t) < size+2)
1212			return (LOAD_BADMACHO);
1213		thread_size = (size+2)*sizeof(uint32_t);
1214		if (thread_size > total_size)
1215			return(LOAD_BADMACHO);
1216		total_size -= thread_size;
1217		/*
1218		 * Third argument is a kernel space pointer; it gets cast
1219		 * to the appropriate type in machine_thread_set_state()
1220		 * based on the value of flavor.
1221		 */
1222		ret = thread_setstatus(thread, flavor, (thread_state_t)ts, size);
1223		if (ret != KERN_SUCCESS) {
1224			return(LOAD_FAILURE);
1225		}
1226		ts += size;	/* ts is a (uint32_t *) */
1227	}
1228	return(LOAD_SUCCESS);
1229}
1230
1231static
1232load_return_t
1233load_threadstack(
1234	thread_t	thread,
1235	uint32_t	*ts,
1236	uint32_t	total_size,
1237	mach_vm_offset_t	*user_stack,
1238	int *customstack
1239)
1240{
1241	kern_return_t	ret;
1242	uint32_t	size;
1243	int		flavor;
1244	uint32_t	stack_size;
1245
1246	while (total_size > 0) {
1247		flavor = *ts++;
1248		size = *ts++;
1249		if (UINT32_MAX-2 < size ||
1250		    UINT32_MAX/sizeof(uint32_t) < size+2)
1251			return (LOAD_BADMACHO);
1252		stack_size = (size+2)*sizeof(uint32_t);
1253		if (stack_size > total_size)
1254			return(LOAD_BADMACHO);
1255		total_size -= stack_size;
1256
1257		/*
1258		 * Third argument is a kernel space pointer; it gets cast
1259		 * to the appropriate type in thread_userstack() based on
1260		 * the value of flavor.
1261		 */
1262		ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack);
1263		if (ret != KERN_SUCCESS) {
1264			return(LOAD_FAILURE);
1265		}
1266		ts += size;	/* ts is a (uint32_t *) */
1267	}
1268	return(LOAD_SUCCESS);
1269}
1270
1271static
1272load_return_t
1273load_threadentry(
1274	thread_t	thread,
1275	uint32_t	*ts,
1276	uint32_t	total_size,
1277	mach_vm_offset_t	*entry_point
1278)
1279{
1280	kern_return_t	ret;
1281	uint32_t	size;
1282	int		flavor;
1283	uint32_t	entry_size;
1284
1285	/*
1286	 *	Set the thread state.
1287	 */
1288	*entry_point = MACH_VM_MIN_ADDRESS;
1289	while (total_size > 0) {
1290		flavor = *ts++;
1291		size = *ts++;
1292		if (UINT32_MAX-2 < size ||
1293		    UINT32_MAX/sizeof(uint32_t) < size+2)
1294			return (LOAD_BADMACHO);
1295		entry_size = (size+2)*sizeof(uint32_t);
1296		if (entry_size > total_size)
1297			return(LOAD_BADMACHO);
1298		total_size -= entry_size;
1299		/*
1300		 * Third argument is a kernel space pointer; it gets cast
1301		 * to the appropriate type in thread_entrypoint() based on
1302		 * the value of flavor.
1303		 */
1304		ret = thread_entrypoint(thread, flavor, (thread_state_t)ts, size, entry_point);
1305		if (ret != KERN_SUCCESS) {
1306			return(LOAD_FAILURE);
1307		}
1308		ts += size;	/* ts is a (uint32_t *) */
1309	}
1310	return(LOAD_SUCCESS);
1311}
1312
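/*
 * Scratch space for get_macho_vnode(): the nameidata for the path lookup,
 * plus a header buffer padded to 512 bytes so that a single read picks up
 * either a thin Mach-O header or a fat header along with (presumably)
 * enough of its architecture table.
 */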
1313struct macho_data {
1314	struct nameidata	__nid;
1315	union macho_vnode_header {
1316		struct mach_header	mach_header;
1317		struct fat_header	fat_header;
1318		char	__pad[512];
1319	} __header;
1320};
1321
1322static load_return_t
1323load_dylinker(
1324	struct dylinker_command	*lcp,
1325	integer_t		archbits,
1326	vm_map_t		map,
1327	thread_t	thread,
1328	int			depth,
1329	int64_t			slide,
1330	load_result_t		*result
1331)
1332{
1333	char			*name;
1334	char			*p;
1335	struct vnode		*vp = NULLVP;	/* set by get_macho_vnode() */
1336	struct mach_header	*header;
1337	off_t			file_offset = 0; /* set by get_macho_vnode() */
1338	off_t			macho_size = 0;	/* set by get_macho_vnode() */
1339	load_result_t		*myresult;
1340	kern_return_t		ret;
1341	struct macho_data	*macho_data;
1342	struct {
1343		struct mach_header	__header;
1344		load_result_t		__myresult;
1345		struct macho_data	__macho_data;
1346	} *dyld_data;
1347
1348	if (lcp->cmdsize < sizeof(*lcp))
1349		return (LOAD_BADMACHO);
1350
1351	name = (char *)lcp + lcp->name.offset;
1352	/*
1353	 *	Check for a properly null-terminated string.
1354	 */
1355	p = name;
1356	do {
1357		if (p >= (char *)lcp + lcp->cmdsize)
1358			return(LOAD_BADMACHO);
1359	} while (*p++);
1360
1361	/* Allocate wad-of-data from heap to reduce excessively deep stacks */
1362
1363	MALLOC(dyld_data, void *, sizeof (*dyld_data), M_TEMP, M_WAITOK);
1364	header = &dyld_data->__header;
1365	myresult = &dyld_data->__myresult;
1366	macho_data = &dyld_data->__macho_data;
1367
1368	ret = get_macho_vnode(name, archbits, header,
1369	    &file_offset, &macho_size, macho_data, &vp);
1370	if (ret)
1371		goto novp_out;
1372
1373	*myresult = load_result_null;
1374
1375	/*
1376	 *	First try to map dyld in directly.  This should work most of
1377	 *	the time since there shouldn't normally be something already
1378	 *	mapped to its address.
1379	 */
1380
1381	ret = parse_machfile(vp, map, thread, header, file_offset,
1382	    macho_size, depth, slide, myresult);
1383
1384	/*
1385	 *	If it turned out something was in the way, then we'll take
1386	 *	this longer path to preflight dyld's vm ranges, then
1387	 *	map it at a free location in the address space.
1388	 */
1389
1390	if (ret == LOAD_NOSPACE) {
1391		mach_vm_offset_t	dyl_start, map_addr;
1392		mach_vm_size_t	dyl_length;
1393		int64_t			slide_amount;
1394
1395		*myresult = load_result_null;
1396
1397		/*
1398		 * Preflight parsing the Mach-O file with a NULL
1399		 * map, which will return the ranges needed for a
1400		 * subsequent map attempt (with a slide) in "myresult"
1401		 */
1402		ret = parse_machfile(vp, VM_MAP_NULL, THREAD_NULL, header,
1403		    file_offset, macho_size, depth, 0 /* slide */, myresult);
1404
1405		if (ret != LOAD_SUCCESS) {
1406			goto out;
1407		}
1408
1409		dyl_start = myresult->min_vm_addr;
1410		dyl_length = myresult->max_vm_addr - myresult->min_vm_addr;
1411
1412		dyl_length += slide;
1413
1414		/* To find an appropriate load address, do a quick allocation */
1415		map_addr = dyl_start;
1416		ret = mach_vm_allocate(map, &map_addr, dyl_length, VM_FLAGS_ANYWHERE);
1417		if (ret != KERN_SUCCESS) {
1418			ret = LOAD_NOSPACE;
1419			goto out;
1420		}
1421
1422		ret = mach_vm_deallocate(map, map_addr, dyl_length);
1423		if (ret != KERN_SUCCESS) {
1424			ret = LOAD_NOSPACE;
1425			goto out;
1426		}
1427
1428		if (map_addr < dyl_start)
1429			slide_amount = -(int64_t)(dyl_start - map_addr);
1430		else
1431			slide_amount = (int64_t)(map_addr - dyl_start);
1432
1433		slide_amount += slide;
1434
1435		*myresult = load_result_null;
1436
1437		ret = parse_machfile(vp, map, thread, header,
1438		    file_offset, macho_size, depth, slide_amount, myresult);
1439
1440		if (ret) {
1441			goto out;
1442		}
1443	}
1444
1445	if (ret == LOAD_SUCCESS) {
1446		result->dynlinker = TRUE;
1447		result->entry_point = myresult->entry_point;
1448		result->validentry = myresult->validentry;
1449		result->all_image_info_addr = myresult->all_image_info_addr;
1450		result->all_image_info_size = myresult->all_image_info_size;
1451	}
1452out:
1453	vnode_put(vp);
1454novp_out:
1455	FREE(dyld_data, M_TEMP);
1456	return (ret);
1457
1458}
1459
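/*
 * LC_CODE_SIGNATURE: read the code-signature blob the load command points
 * at (normally in __LINKEDIT) and register it with the vnode's ubc layer,
 * so that the VM system can validate executable pages as they are
 * brought in.
 */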
1460static load_return_t
1461load_code_signature(
1462	struct linkedit_data_command	*lcp,
1463	struct vnode			*vp,
1464	off_t				macho_offset,
1465	off_t				macho_size,
1466	cpu_type_t			cputype,
1467	load_result_t			*result)
1468{
1469	int		ret;
1470	kern_return_t	kr;
1471	vm_offset_t	addr;
1472	int		resid;
1473	struct cs_blob	*blob;
1474	int		error;
1475	vm_size_t	blob_size;
1476
1477	addr = 0;
1478	blob = NULL;
1479
1480	if (lcp->cmdsize != sizeof (struct linkedit_data_command) ||
1481	    lcp->dataoff + lcp->datasize > macho_size) {
1482		ret = LOAD_BADMACHO;
1483		goto out;
1484	}
1485
1486	blob = ubc_cs_blob_get(vp, cputype, -1);
1487	if (blob != NULL) {
1488		/* we already have a blob for this vnode and cputype */
1489		if (blob->csb_cpu_type == cputype &&
1490		    blob->csb_base_offset == macho_offset &&
1491		    blob->csb_mem_size == lcp->datasize) {
1492			/* it matches the blob we want here: we're done */
1493			ret = LOAD_SUCCESS;
1494		} else {
1495			/* the blob has changed for this vnode: fail ! */
1496			ret = LOAD_BADMACHO;
1497		}
1498		goto out;
1499	}
1500
1501	blob_size = lcp->datasize;
1502	kr = ubc_cs_blob_allocate(&addr, &blob_size);
1503	if (kr != KERN_SUCCESS) {
1504		ret = LOAD_NOSPACE;
1505		goto out;
1506	}
1507
1508	resid = 0;
1509	error = vn_rdwr(UIO_READ,
1510			vp,
1511			(caddr_t) addr,
1512			lcp->datasize,
1513			macho_offset + lcp->dataoff,
1514			UIO_SYSSPACE,
1515			0,
1516			kauth_cred_get(),
1517			&resid,
1518			current_proc());
1519	if (error || resid != 0) {
1520		ret = LOAD_IOERROR;
1521		goto out;
1522	}
1523
1524	if (ubc_cs_blob_add(vp,
1525			    cputype,
1526			    macho_offset,
1527			    addr,
1528			    lcp->datasize)) {
1529		ret = LOAD_FAILURE;
1530		goto out;
1531	} else {
1532		/* ubc_cs_blob_add() has consumed "addr" */
1533		addr = 0;
1534	}
1535
1536#if CHECK_CS_VALIDATION_BITMAP
1537	ubc_cs_validation_bitmap_allocate( vp );
1538#endif
1539
1540	blob = ubc_cs_blob_get(vp, cputype, -1);
1541
1542	ret = LOAD_SUCCESS;
1543out:
1544	if (result && ret == LOAD_SUCCESS) {
1545		result->csflags |= blob->csb_flags;
1546	}
1547	if (addr != 0) {
1548		ubc_cs_blob_deallocate(addr, blob_size);
1549		addr = 0;
1550	}
1551
1552	return ret;
1553}
1554
1555
1556#if CONFIG_CODE_DECRYPTION
1557
1558#ifndef __arm__
1559static load_return_t
1560set_code_unprotect(
1561		   struct encryption_info_command *eip,
1562		   caddr_t addr,
1563		   vm_map_t map,
1564		   int64_t slide,
1565		   struct vnode	*vp)
1566{
1567	int result, len;
1568	pager_crypt_info_t crypt_info;
1569	const char * cryptname = 0;
1570	char *vpath;
1571
1572	size_t offset;
1573	struct segment_command_64 *seg64;
1574	struct segment_command *seg32;
1575	vm_map_offset_t map_offset, map_size;
1576	kern_return_t kr;
1577
1578	if (eip->cmdsize < sizeof(*eip)) return LOAD_BADMACHO;
1579
1580	switch(eip->cryptid) {
1581		case 0:
1582			/* not encrypted, just an empty load command */
1583			return LOAD_SUCCESS;
1584		case 1:
1585			cryptname="com.apple.unfree";
1586			break;
1587		case 0x10:
1588			/* some random cryptid that you could manually put into
1589			 * your binary if you want NULL */
1590			cryptname="com.apple.null";
1591			break;
1592		default:
1593			return LOAD_BADMACHO;
1594	}
1595
1596	if (map == VM_MAP_NULL) return (LOAD_SUCCESS);
1597	if (NULL == text_crypter_create) return LOAD_FAILURE;
1598
1599	MALLOC_ZONE(vpath, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
1600	if(vpath == NULL) return LOAD_FAILURE;
1601
1602	len = MAXPATHLEN;
1603	result = vn_getpath(vp, vpath, &len);
1604	if(result) {
1605		FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
1606		return LOAD_FAILURE;
1607	}
1608
1609	/* set up decrypter first */
1610	kr=text_crypter_create(&crypt_info, cryptname, (void*)vpath);
1611	FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
1612
1613	if(kr) {
1614		printf("set_code_unprotect: unable to create decrypter %s, kr=%d\n",
1615		       cryptname, kr);
1616		return LOAD_RESOURCE;
1617	}
1618
1619	/* this is terrible, but we have to rescan the load commands to find the
1620	 * virtual address of this encrypted stuff. This code is gonna look like
1621	 * the dyld source one day... */
1622	struct mach_header *header = (struct mach_header *)addr;
1623	size_t mach_header_sz = sizeof(struct mach_header);
1624	if (header->magic == MH_MAGIC_64 ||
1625	    header->magic == MH_CIGAM_64) {
1626	    	mach_header_sz = sizeof(struct mach_header_64);
1627	}
1628	offset = mach_header_sz;
1629	uint32_t ncmds = header->ncmds;
1630	while (ncmds--) {
1631		/*
1632		 *	Get a pointer to the command.
1633		 */
1634		struct load_command *lcp = (struct load_command *)(addr + offset);
1635		offset += lcp->cmdsize;
1636
1637		switch(lcp->cmd) {
1638			case LC_SEGMENT_64:
1639				seg64 = (struct segment_command_64 *)lcp;
1640				if ((seg64->fileoff <= eip->cryptoff) &&
1641				    (seg64->fileoff+seg64->filesize >=
1642				     eip->cryptoff+eip->cryptsize)) {
1643					map_offset = seg64->vmaddr + eip->cryptoff - seg64->fileoff + slide;
1644					map_size = eip->cryptsize;
1645					goto remap_now;
1646				}
				break;
1647			case LC_SEGMENT:
1648				seg32 = (struct segment_command *)lcp;
1649				if ((seg32->fileoff <= eip->cryptoff) &&
1650				    (seg32->fileoff+seg32->filesize >=
1651				     eip->cryptoff+eip->cryptsize)) {
1652					map_offset = seg32->vmaddr + eip->cryptoff - seg32->fileoff + slide;
1653					map_size = eip->cryptsize;
1654					goto remap_now;
1655				}
1656		}
1657	}
1658
1659	/* if we get here, did not find anything */
1660	return LOAD_BADMACHO;
1661
1662remap_now:
1663	/* now remap using the decrypter */
1664	kr = vm_map_apple_protected(map, map_offset, map_offset+map_size, &crypt_info);
1665	if(kr) {
1666		printf("set_code_unprotect(): mapping failed with %x\n", kr);
1667		crypt_info.crypt_end(crypt_info.crypt_ops);
1668		return LOAD_PROTECT;
1669	}
1670
1671	return LOAD_SUCCESS;
1672}
1673#endif
1674
1675#endif
1676
1677/*
1678 * This routine exists to support load_dylinker().
1679 *
1680 * This routine has its own, separate, understanding of the FAT file format,
1681 * which is terrifically unfortunate.
1682 */
1683static
1684load_return_t
1685get_macho_vnode(
1686	char			*path,
1687	integer_t		archbits,
1688	struct mach_header	*mach_header,
1689	off_t			*file_offset,
1690	off_t			*macho_size,
1691	struct macho_data	*data,
1692	struct vnode		**vpp
1693)
1694{
1695	struct vnode		*vp;
1696	vfs_context_t		ctx = vfs_context_current();
1697	proc_t			p = vfs_context_proc(ctx);
1698	kauth_cred_t		kerncred;
1699	struct nameidata	*ndp = &data->__nid;
1700	boolean_t		is_fat;
1701	struct fat_arch		fat_arch;
1702	int			error;
1703	int resid;
1704	union macho_vnode_header *header = &data->__header;
1705	off_t fsize = (off_t)0;
1706
1707	/*
1708	 * Capture the kernel credential for use in the actual read of the
1709	 * file, since the user doing the execution may have execute rights
1710	 * but not read rights, but to exec something, we have to either map
1711	 * or read it into the new process address space, which requires
1712	 * read rights.  This is to deal with lack of common credential
1713	 * serialization code which would treat NOCRED as "serialize 'root'".
1714	 */
1715	kerncred = vfs_context_ucred(vfs_context_kernel());
1716
1717	/* init the namei data to point at the program name */
1718	NDINIT(ndp, LOOKUP, OP_OPEN, FOLLOW | LOCKLEAF, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
1719
1720	if ((error = namei(ndp)) != 0) {
1721		if (error == ENOENT) {
1722			error = LOAD_ENOENT;
1723		} else {
1724			error = LOAD_FAILURE;
1725		}
1726		return(error);
1727	}
1728	nameidone(ndp);
1729	vp = ndp->ni_vp;
1730
1731	/* check for regular file */
1732	if (vp->v_type != VREG) {
1733		error = LOAD_PROTECT;
1734		goto bad1;
1735	}
1736
1737	/* get size */
1738	if ((error = vnode_size(vp, &fsize, ctx)) != 0) {
1739		error = LOAD_FAILURE;
1740		goto bad1;
1741	}
1742
1743	/* Check mount point */
1744	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
1745		error = LOAD_PROTECT;
1746		goto bad1;
1747	}
1748
1749	/* check access */
1750	if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_EXECUTE, ctx)) != 0) {
1751		error = LOAD_PROTECT;
1752		goto bad1;
1753	}
1754
1755	/* try to open it */
1756	if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
1757		error = LOAD_PROTECT;
1758		goto bad1;
1759	}
1760
1761	if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)header, sizeof (*header), 0,
1762	    UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p)) != 0) {
1763		error = LOAD_IOERROR;
1764		goto bad2;
1765	}
1766
1767	if (header->mach_header.magic == MH_MAGIC ||
1768	    header->mach_header.magic == MH_MAGIC_64) {
1769		is_fat = FALSE;
1770	} else if (header->fat_header.magic == FAT_MAGIC ||
1771	    header->fat_header.magic == FAT_CIGAM) {
1772		is_fat = TRUE;
1773	} else {
1774		error = LOAD_BADMACHO;
1775		goto bad2;
1776	}
1777
1778	if (is_fat) {
1779		/* Look up our architecture in the fat file. */
1780		error = fatfile_getarch_with_bits(vp, archbits,
1781		    (vm_offset_t)(&header->fat_header), &fat_arch);
1782		if (error != LOAD_SUCCESS)
1783			goto bad2;
1784
1785		/* Read the Mach-O header out of it */
1786		error = vn_rdwr(UIO_READ, vp, (caddr_t)&header->mach_header,
1787		    sizeof (header->mach_header), fat_arch.offset,
1788		    UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p);
1789		if (error) {
1790			error = LOAD_IOERROR;
1791			goto bad2;
1792		}
1793
1794		/* Is this really a Mach-O? */
1795		if (header->mach_header.magic != MH_MAGIC &&
1796		    header->mach_header.magic != MH_MAGIC_64) {
1797			error = LOAD_BADMACHO;
1798			goto bad2;
1799		}
1800
1801		*file_offset = fat_arch.offset;
1802		*macho_size = fat_arch.size;
1803	} else {
1804		/*
1805		 * Force get_macho_vnode() to fail if the architecture bits
1806		 * do not match the expected architecture bits.  This in
1807		 * turn causes load_dylinker() to fail for the same reason,
1808		 * so it ensures the dynamic linker and the binary are in
1809		 * lock-step.  This is potentially bad, if we ever add to
1810		 * the CPU_ARCH_* bits any bits that are desirable but not
1811		 * required, since the dynamic linker might work, but we will
1812		 * refuse to load it because of this check.
1813		 */
1814		if ((cpu_type_t)(header->mach_header.cputype & CPU_ARCH_MASK) != archbits) {
1815			error = LOAD_BADARCH;
1816			goto bad2;
1817		}
1818
1819		*file_offset = 0;
1820		*macho_size = fsize;
1821	}
1822
1823	*mach_header = header->mach_header;
1824	*vpp = vp;
1825
1826	ubc_setsize(vp, fsize);
1827	return (error);
1828
1829bad2:
1830	(void) VNOP_CLOSE(vp, FREAD, ctx);
1831bad1:
1832	vnode_put(vp);
1833	return(error);
1834}
1835