/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/kext_alloc.h>
#include <kern/misc_protos.h>

#include <mach/host_priv_server.h>
#include <mach/kern_return.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <mach/vm_types.h>

#include <mach-o/loader.h>
#include <libkern/kernel_mach_header.h>

#define KASLR_IOREG_DEBUG 0


/*
 * g_kext_map is the VM map used for kext allocations: a dedicated submap of
 * kernel_map when CONFIG_KEXT_BASEMENT is set, otherwise kernel_map itself.
 * kext_alloc_base and kext_alloc_max bound the range kexts may occupy, and
 * kext_post_boot_base is where non-fixed (post-boot) allocations begin,
 * just past the prelinked kexts.
 */
vm_map_t g_kext_map = 0;
#if KASLR_IOREG_DEBUG
mach_vm_offset_t kext_alloc_base = 0;
mach_vm_offset_t kext_alloc_max = 0;
#else
static mach_vm_offset_t kext_alloc_base = 0;
static mach_vm_offset_t kext_alloc_max = 0;
#if CONFIG_KEXT_BASEMENT
static mach_vm_offset_t kext_post_boot_base = 0;
#endif
#endif

/*
 * On x86_64 systems, kernel extension text must remain within 2GB of the
 * kernel's text segment.  To ensure this happens, we snag 2GB of kernel VM
 * as early as possible for kext allocations.
 */
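/*
 * For illustration only: KEXT_ALLOC_BASE() and KEXT_ALLOC_SIZE() are not
 * defined in this file; they are expected to carve out the window roughly as
 *
 *     kext_alloc_base ~= text_end - 2GB
 *     kext_alloc_max  ~= kext_alloc_base + (2GB - text_size)
 *
 * so every address in [kext_alloc_base, kext_alloc_max) stays within signed
 * 32-bit (+/- 2GB) reach of kernel text, which is what RIP-relative
 * references and branches from kexts into the kernel require on x86_64.
 */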
void
kext_alloc_init(void)
{
#if CONFIG_KEXT_BASEMENT
    kern_return_t rval = 0;
    kernel_segment_command_t *text = NULL;
    kernel_segment_command_t *prelinkTextSegment = NULL;
    mach_vm_offset_t text_end, text_start;
    mach_vm_size_t text_size;
    mach_vm_size_t kext_alloc_size;

    /* Determine the bounds of the kernel's __TEXT segment and use them to
     * derive the lower bound of the submap used for kext allocations.
     */

    text = getsegbyname(SEG_TEXT);
    text_start = vm_map_trunc_page(text->vmaddr);
    /* Align the start of __TEXT down to a 512GB boundary. */
    text_start &= ~((512ULL * 1024 * 1024 * 1024) - 1);
    text_end = vm_map_round_page(text->vmaddr + text->vmsize);
    text_size = text_end - text_start;

    kext_alloc_base = KEXT_ALLOC_BASE(text_end);
    kext_alloc_size = KEXT_ALLOC_SIZE(text_size);
    kext_alloc_max = kext_alloc_base + kext_alloc_size;

    /* Post boot kext allocation will start after the prelinked kexts */
    prelinkTextSegment = getsegbyname("__PRELINK_TEXT");
    if (prelinkTextSegment) {
        /* use kext_post_boot_base to start allocations past all the prelinked
         * kexts
         */
        kext_post_boot_base =
            vm_map_round_page(kext_alloc_base + prelinkTextSegment->vmsize);
    }
    else {
        kext_post_boot_base = kext_alloc_base;
    }

    /* Allocate the sub block of the kernel map */
    rval = kmem_suballoc(kernel_map, (vm_offset_t *) &kext_alloc_base,
                         kext_alloc_size, /* pageable */ TRUE,
                         VM_FLAGS_FIXED|VM_FLAGS_OVERWRITE,
                         &g_kext_map);
    if (rval != KERN_SUCCESS) {
        panic("kext_alloc_init: kmem_suballoc failed 0x%x\n", rval);
    }

    if ((kext_alloc_base + kext_alloc_size) > kext_alloc_max) {
        panic("kext_alloc_init: failed to get first 2GB\n");
    }

    if (kernel_map->min_offset > kext_alloc_base) {
        kernel_map->min_offset = kext_alloc_base;
    }

    printf("kext submap [0x%lx - 0x%lx], kernel text [0x%lx - 0x%lx]\n",
           VM_KERNEL_UNSLIDE(kext_alloc_base),
           VM_KERNEL_UNSLIDE(kext_alloc_max),
           VM_KERNEL_UNSLIDE(text->vmaddr),
           VM_KERNEL_UNSLIDE(text->vmaddr + text->vmsize));

#else
    kernel_segment_command_t *text = NULL;
    kernel_segment_command_t *prelinkTextSegment = NULL;
    mach_vm_offset_t text_end, text_start;
    mach_vm_size_t text_size;
    mach_vm_size_t kext_alloc_size;

    /* Determine the bounds of the kernel's __TEXT segment and use them to
     * derive the lower bound of the submap used for kext allocations.
     */

    text = getsegbyname(SEG_TEXT);
    text_start = vm_map_trunc_page(text->vmaddr);
    text_start &= ~((512ULL * 1024 * 1024 * 1024) - 1);
    text_end = vm_map_round_page(text->vmaddr + text->vmsize);
    text_size = text_end - text_start;

    /* Without a kext basement, kexts may be allocated anywhere in the
     * kernel map.
     */
    g_kext_map = kernel_map;
    kext_alloc_base = VM_MIN_KERNEL_ADDRESS;
    kext_alloc_max = VM_MAX_KERNEL_ADDRESS;

    printf("kext submap [0x%lx - 0x%lx], kernel text [0x%lx - 0x%lx]\n",
           VM_KERNEL_UNSLIDE(kext_alloc_base),
           VM_KERNEL_UNSLIDE(kext_alloc_max),
           VM_KERNEL_UNSLIDE(text->vmaddr),
           VM_KERNEL_UNSLIDE(text->vmaddr + text->vmsize));
#endif /* CONFIG_KEXT_BASEMENT */
}

/*
 * Allocate 'size' bytes of VM for a kext.  If 'fixed' is TRUE the allocation
 * is made at *_addr; otherwise the map is searched and the chosen address is
 * returned through *_addr.
 */
kern_return_t
kext_alloc(vm_offset_t *_addr, vm_size_t size, boolean_t fixed)
{
    kern_return_t rval = 0;
#if CONFIG_KEXT_BASEMENT
    mach_vm_offset_t addr = (fixed) ? *_addr : kext_post_boot_base;
#else
    mach_vm_offset_t addr = (fixed) ? *_addr : kext_alloc_base;
#endif
    int flags = (fixed) ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE;

#if CONFIG_KEXT_BASEMENT
    /* Allocate the kext virtual memory.
     * 10608884 - use mach_vm_map since we want VM_FLAGS_ANYWHERE allocations
     * to land past kext_post_boot_base (when possible).  mach_vm_allocate
     * always starts its search at the beginning of the map regardless of the
     * addr hint passed in.  We want non-fixed (post-boot) kext allocations to
     * start looking for free space just past where the prelinked kexts have
     * loaded.
     */
    rval = mach_vm_map(g_kext_map,
                       &addr,
                       size,
                       0,                   /* mask */
                       flags,
                       MACH_PORT_NULL,      /* memory object */
                       0,                   /* offset */
                       TRUE,                /* copy */
                       VM_PROT_DEFAULT,
                       VM_PROT_ALL,
                       VM_INHERIT_DEFAULT);
    if (rval != KERN_SUCCESS) {
        printf("mach_vm_map failed - %d\n", rval);
        goto finish;
    }
#else
    rval = mach_vm_allocate(g_kext_map, &addr, size, flags);
    if (rval != KERN_SUCCESS) {
        printf("mach_vm_allocate failed - %d\n", rval);
        goto finish;
    }
#endif

    /* Check that the memory is reachable by kernel text */
    if ((addr + size) > kext_alloc_max) {
        kext_free((vm_offset_t)addr, size);
        rval = KERN_INVALID_ADDRESS;
        goto finish;
    }

    *_addr = (vm_offset_t)addr;
    rval = KERN_SUCCESS;

finish:
    return rval;
}

/*
 * Free a kext allocation previously obtained from kext_alloc().
 */
void
kext_free(vm_offset_t addr, vm_size_t size)
{
    kern_return_t rval;

    rval = mach_vm_deallocate(g_kext_map, addr, size);
    assert(rval == KERN_SUCCESS);
}
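
/*
 * Minimal usage sketch (hypothetical caller, not part of this file): a kext
 * loader might obtain and release kext VM roughly like this, assuming
 * 'load_size' holds the number of bytes it needs:
 *
 *     vm_offset_t addr = 0;
 *     vm_size_t   size = round_page(load_size);
 *
 *     if (kext_alloc(&addr, size, FALSE) == KERN_SUCCESS) {
 *         // copy and link the kext into [addr, addr + size), then later:
 *         kext_free(addr, size);
 *     }
 */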