1/*
2 * Copyright (c) 2000-2007 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
30 * support for mandatory and extensible security protections.  This notice
31 * is included in support of clause 2.2 (b) of the Apple Public License,
32 * Version 2.0.
33 */
34/*
35 * Copyright (c) 1999 Apple Computer, Inc.  All rights reserved.
36 *
37 * HISTORY
38 *
39 * 1999 Mar 29 rsulack created.
40 */
41
42#include <mach/mach_types.h>
43#include <mach/vm_types.h>
44#include <mach/kern_return.h>
45#include <mach/host_priv_server.h>
46#include <mach/vm_map.h>
47
48#include <kern/clock.h>
49#include <kern/kalloc.h>
50#include <kern/kern_types.h>
51#include <kern/thread.h>
52
53#include <vm/vm_kern.h>
54
55#include <mach-o/mach_header.h>
56#include <mach-o/loader.h>
57#include <mach-o/nlist.h>
58
59#include <mach/kext_panic_report.h>
60
61/*
62 * XXX headers for which prototypes should be in a common include file;
63 * XXX see libsa/kext.cpp for why.
64 */
65kern_return_t kmod_create_internal(kmod_info_t *info, kmod_t *id);
66kern_return_t kmod_destroy_internal(kmod_t id);
67kern_return_t kmod_start_or_stop(kmod_t id, int start, kmod_args_t *data,
68    mach_msg_type_number_t *dataCount);
69kern_return_t kmod_retain(kmod_t id);
70kern_return_t kmod_release(kmod_t id);
71kern_return_t kmod_queue_cmd(vm_address_t data, vm_size_t size);
72kern_return_t kmod_get_info(host_t host, kmod_info_array_t *kmods,
73    mach_msg_type_number_t *kmodCount);
74
75static kern_return_t kmod_get_symbol_data(kmod_args_t * data,
76    mach_msg_type_number_t * dataCount);
77static kern_return_t kmod_free_linkedit_data(void);
78static kern_return_t kmod_get_kext_uuid(
79    const char * kext_id,
80    kmod_args_t * data,
81    mach_msg_type_number_t * dataCount);
82
83extern int IODTGetLoaderInfo(const char * key, void ** infoAddr, vm_size_t * infoSize);
84extern void IODTFreeLoaderInfo(const char * key, void * infoAddr, vm_size_t infoSize);
85/* operates on 32 bit segments */
86extern void OSRuntimeUnloadCPPForSegment(struct segment_command * segment);
87
88#define WRITE_PROTECT_MODULE_TEXT   (0)
89
90kmod_info_t *kmod;
91static int kmod_index = 1;
92static int kmod_load_disabled = 0;
93
94mutex_t * kmod_lock = 0;
95static mutex_t * kmod_queue_lock = 0;
96
97typedef struct cmd_queue_entry {
98    queue_chain_t    links;
99    vm_address_t     data;
100    vm_size_t        size;
101} cmd_queue_entry_t;
102
103queue_head_t kmod_cmd_queue;
104
105/*******************************************************************************
106*******************************************************************************/
107#define KMOD_PANICLIST_SIZE  (2 * PAGE_SIZE)
108
109char     * unloaded_kext_paniclist        = NULL;
110uint32_t   unloaded_kext_paniclist_size   = 0;
111uint32_t   unloaded_kext_paniclist_length = 0;
112uint64_t   last_loaded_timestamp          = 0;
113
114char     * loaded_kext_paniclist          = NULL;
115uint32_t   loaded_kext_paniclist_size     = 0;
116uint32_t   loaded_kext_paniclist_length   = 0;
117uint64_t   last_unloaded_timestamp        = 0;
118
119int substitute(
120    const char * scan_string,
121    char       * string_out,
122    uint32_t   * to_index,
123    uint32_t   * from_index,
124    const char * substring,
125    char         marker,
126    char         substitution);
127
/* Used by compactIdentifier() below; its identifier_out buffer must be
 * at least KMOD_MAX_NAME bytes.
 */
130int substitute(
131    const char * scan_string,
132    char       * string_out,
133    uint32_t   * to_index,
134    uint32_t   * from_index,
135    const char * substring,
136    char         marker,
137    char         substitution)
138{
139    uint32_t substring_length = strnlen(substring, KMOD_MAX_NAME - 1);
140
141    if (!strncmp(scan_string, substring, substring_length)) {
142        if (marker) {
143            string_out[(*to_index)++] = marker;
144        }
145        string_out[(*to_index)++] = substitution;
146        (*from_index) += substring_length;
147        return 1;
148    }
149    return 0;
150}
151
152void compactIdentifier(
153    const char * identifier,
154    char       * identifier_out,
155    char      ** identifier_out_end);
156
157void compactIdentifier(
158    const char * identifier,
159    char       * identifier_out,
160    char      ** identifier_out_end)
161{
162    uint32_t       from_index, to_index;
163    uint32_t       scan_from_index = 0;
164    uint32_t       scan_to_index   = 0;
165    subs_entry_t * subs_entry    = NULL;
166    int            did_sub       = 0;
167
168    from_index = to_index = 0;
169    identifier_out[0] = '\0';
170
171   /* Replace certain identifier prefixes with shorter @+character sequences.
172    */
173    for (subs_entry = &kext_identifier_prefix_subs[0];
174         subs_entry->substring && !did_sub;
175         subs_entry++) {
176
177        did_sub = substitute(identifier, identifier_out,
178            &scan_to_index, &scan_from_index,
179            subs_entry->substring, /* marker */ '\0', subs_entry->substitute);
180    }
181    did_sub = 0;
182
183   /* Now scan through the identifier looking for the common substrings
184    * and replacing them with shorter !+character sequences.
185    */
186    for (/* see above */;
187         scan_from_index < KMOD_MAX_NAME - 1 && identifier[scan_from_index];
188         /* see loop */) {
189
190        const char   * scan_string = &identifier[scan_from_index];
191
192        did_sub = 0;
193
194        if (scan_from_index) {
195            for (subs_entry = &kext_identifier_substring_subs[0];
196                 subs_entry->substring && !did_sub;
197                 subs_entry++) {
198
199                did_sub = substitute(scan_string, identifier_out,
200                    &scan_to_index, &scan_from_index,
201                    subs_entry->substring, '!', subs_entry->substitute);
202            }
203        }
204
205        if (!did_sub) {
206            identifier_out[scan_to_index++] = identifier[scan_from_index++];
207        }
208    }
209
210    identifier_out[scan_to_index] = '\0';
211    if (identifier_out_end) {
212        *identifier_out_end = &identifier_out[scan_to_index];
213    }
214
215    return;
216}
217
218/* identPlusVers must be at least 2*KMOD_MAX_NAME in length.
219 */
220int assemble_identifier_and_version(
221    kmod_info_t * kmod_info,
222    char        * identPlusVers);
223int assemble_identifier_and_version(
224    kmod_info_t * kmod_info,
225    char        * identPlusVers)
226{
227    int result = 0;
228
229    compactIdentifier(kmod_info->name, identPlusVers, NULL);
230    result = strnlen(identPlusVers, KMOD_MAX_NAME - 1);
231    identPlusVers[result++] = '\t';  // increment for real char
232    identPlusVers[result] = '\0';    // don't increment for nul char
233    result = strlcat(identPlusVers, kmod_info->version, KMOD_MAX_NAME);
234
235    return result;
236}
237
238#define LAST_LOADED " - last loaded "
239#define LAST_LOADED_TS_WIDTH  (16)
240
/* Append one category of loaded kexts to the panic list buffer.
 *
 * prefix/invertFlag select by bundle-identifier prefix (invertFlag != 0
 * keeps non-matching kexts); libsFlag filters by reference_count
 * (0 = only kexts with no references, 1 = only kexts with references,
 * -1 = no filtering).  *list_length_ptr is updated as entries are
 * appended to paniclist (capacity list_size).
 *
 * Returns the number of bytes still free in the buffer (excluding the
 * NUL), or 0 on error / when the buffer is full.
 */
uint32_t save_loaded_kext_paniclist_typed(
    const char * prefix,
    int          invertFlag,
    int          libsFlag,
    char       * paniclist,
    uint32_t     list_size,
    uint32_t   * list_length_ptr,
    int         (*printf_func)(const char *fmt, ...));
uint32_t save_loaded_kext_paniclist_typed(
    const char * prefix,
    int          invertFlag,
    int          libsFlag,
    char       * paniclist,
    uint32_t     list_size,
    uint32_t   * list_length_ptr,
    int         (*printf_func)(const char *fmt, ...))
{
    uint32_t      result = 0;
    int           error  = 0;
    kmod_info_t * kmod_info;

    /* Walk the global kmod list while there is room for at least one
     * more byte plus the NUL terminator.
     */
    for (kmod_info = kmod;
         kmod_info && (*list_length_ptr + 1 < list_size);
         kmod_info = kmod_info->next) {

        int      match;
        char     identPlusVers[2*KMOD_MAX_NAME];
        uint32_t identPlusVersLength;
        char     timestampBuffer[17]; // enough for a uint64_t

        /* This runs at panic time: verify the kmod_info page is actually
         * mapped before touching it, rather than faulting in the panic
         * path.
         */
        if (!pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)kmod_info))) {
            (*printf_func)("kmod scan stopped due to missing kmod page: %p\n",
                kmod_info);
            error = 1;
            goto finish;
        }

       /* Skip all built-in/fake entries.
        */
        if (!kmod_info->address) {
            continue;
        }

       /* Filter for kmod name (bundle identifier).
        */
        match = !strncmp(kmod_info->name, prefix, strnlen(prefix, KMOD_MAX_NAME));
        if ((match && invertFlag) || (!match && !invertFlag)) {
            continue;
        }

       /* Filter for libraries. This isn't a strictly correct check,
        * but any kext that does have references to it has to be a library.
        * A kext w/o references may or may not be a library.
        */
        if ((libsFlag == 0 && kmod_info->reference_count) ||
            (libsFlag == 1 && !kmod_info->reference_count)) {

            continue;
        }

        identPlusVersLength = assemble_identifier_and_version(kmod_info,
            identPlusVers);
        if (!identPlusVersLength) {
            printf_func("error saving loaded kext info\n");
            goto finish;
        }

       /* We're going to note the last-loaded kext in the list.
        * (The head of the kmod list is the most recently loaded entry.)
        */
        if (kmod_info == kmod) {
            snprintf(timestampBuffer, sizeof(timestampBuffer), "%llu",
                last_loaded_timestamp);
            identPlusVersLength += sizeof(LAST_LOADED) - 1 +
                strnlen(timestampBuffer, sizeof(timestampBuffer));
        }

       /* Adding 1 for the newline.
        */
        if (*list_length_ptr + identPlusVersLength + 1 >= list_size) {
            goto finish;
        }

        *list_length_ptr = strlcat(paniclist, identPlusVers, list_size);
        if (kmod_info == kmod) {
            *list_length_ptr = strlcat(paniclist, LAST_LOADED, list_size);
            *list_length_ptr = strlcat(paniclist, timestampBuffer, list_size);
        }
        *list_length_ptr = strlcat(paniclist, "\n", list_size);
    }

finish:
    /* Report remaining capacity so callers can chain categories; an
     * error (unmapped kmod page) forces a 0 return.
     */
    if (!error) {
        if (*list_length_ptr + 1 <= list_size) {
            result = list_size - (*list_length_ptr + 1);
        }
    }

    return result;
}
340
341void save_loaded_kext_paniclist(
342    int         (*printf_func)(const char *fmt, ...));
343
344void save_loaded_kext_paniclist(
345    int         (*printf_func)(const char *fmt, ...))
346{
347    char     * newlist        = NULL;
348    uint32_t   newlist_size   = 0;
349    uint32_t   newlist_length = 0;
350
351    newlist_length = 0;
352    newlist_size = KMOD_PANICLIST_SIZE;
353    newlist = (char *)kalloc(newlist_size);
354
355    if (!newlist) {
356        printf_func("couldn't allocate kext panic log buffer\n");
357        goto finish;
358    }
359
360    newlist[0] = '\0';
361
362    // non-"com.apple." kexts
363    if (!save_loaded_kext_paniclist_typed("com.apple.", /* invert? */ 1,
364        /* libs? */ -1, newlist, newlist_size, &newlist_length,
365        printf_func)) {
366
367        goto finish;
368    }
369    // "com.apple." nonlibrary kexts
370    if (!save_loaded_kext_paniclist_typed("com.apple.", /* invert? */ 0,
371        /* libs? */ 0, newlist, newlist_size, &newlist_length,
372        printf_func)) {
373
374        goto finish;
375    }
376    // "com.apple." library kexts
377    if (!save_loaded_kext_paniclist_typed("com.apple.", /* invert? */ 0,
378        /* libs? */ 1, newlist, newlist_size, &newlist_length,
379        printf_func)) {
380
381        goto finish;
382    }
383
384    if (loaded_kext_paniclist) {
385        kfree(loaded_kext_paniclist, loaded_kext_paniclist_size);
386    }
387    loaded_kext_paniclist = newlist;
388    loaded_kext_paniclist_size = newlist_size;
389    loaded_kext_paniclist_length = newlist_length;
390
391finish:
392    return;
393}
394
395void save_unloaded_kext_paniclist(
396    kmod_info_t * kmod_info,
397    int         (*printf_func)(const char *fmt, ...));
398void save_unloaded_kext_paniclist(
399    kmod_info_t * kmod_info,
400    int         (*printf_func)(const char *fmt, ...))
401{
402    char     * newlist        = NULL;
403    uint32_t   newlist_size   = 0;
404    uint32_t   newlist_length = 0;
405    char       identPlusVers[2*KMOD_MAX_NAME];
406    uint32_t   identPlusVersLength;
407
408    identPlusVersLength = assemble_identifier_and_version(kmod_info,
409        identPlusVers);
410    if (!identPlusVersLength) {
411        printf_func("error saving unloaded kext info\n");
412        goto finish;
413    }
414
415    newlist_length = identPlusVersLength;
416    newlist_size = newlist_length + 1;
417    newlist = (char *)kalloc(newlist_size);
418
419    if (!newlist) {
420        printf_func("couldn't allocate kext panic log buffer\n");
421        goto finish;
422    }
423
424    newlist[0] = '\0';
425
426    strlcpy(newlist, identPlusVers, newlist_size);
427
428    if (unloaded_kext_paniclist) {
429        kfree(unloaded_kext_paniclist, unloaded_kext_paniclist_size);
430    }
431    unloaded_kext_paniclist = newlist;
432    unloaded_kext_paniclist_size = newlist_size;
433    unloaded_kext_paniclist_length = newlist_length;
434
435finish:
436    return;
437}
438
// proto is in header
/* Note a kext unload for panic reporting: stamp last_unloaded_timestamp
 * with the current uptime and save the kext's identifier+version.
 * Takes kmod_lock; a stale/unknown id is silently ignored.
 */
void record_kext_unload(kmod_t kmod_id)
{
    kmod_info_t * kmod_info = NULL;

    mutex_lock(kmod_lock);

    kmod_info = kmod_lookupbyid(kmod_id);
    if (kmod_info) {
        clock_get_uptime(&last_unloaded_timestamp);
        save_unloaded_kext_paniclist(kmod_info, &printf);
    }
    mutex_unlock(kmod_lock);
    return;
}
454
/* Print the saved unloaded- and loaded-kext panic lists via printf_func.
 * Each list pointer is checked with pmap_find_phys before dereferencing,
 * since this runs in the panic path where a stale pointer must not fault.
 */
void dump_kext_info(int (*printf_func)(const char *fmt, ...))
{
    printf_func("unloaded kexts:\n");
    if (unloaded_kext_paniclist && (pmap_find_phys(kernel_pmap, (addr64_t) (uintptr_t) unloaded_kext_paniclist))) {
        printf_func("%.*s - last unloaded %llu\n",
            unloaded_kext_paniclist_length, unloaded_kext_paniclist,
            last_unloaded_timestamp);
    } else {
        printf_func("(none)\n");
    }
    printf_func("loaded kexts:\n");
    /* Also require a non-empty string: the loaded list may exist but be
     * empty if no kexts matched any category.
     */
    if (loaded_kext_paniclist && (pmap_find_phys(kernel_pmap, (addr64_t) (uintptr_t) loaded_kext_paniclist)) && loaded_kext_paniclist[0]) {
        printf_func("%.*s", loaded_kext_paniclist_length, loaded_kext_paniclist);
    } else {
        printf_func("(none)\n");
    }
    return;
}
473
474/*******************************************************************************
475*******************************************************************************/
/* One-time initialization of the kmod subsystem: allocate the two module
 * locks and initialize the pending-command queue.
 */
void
kmod_init(void)
{
    kmod_lock = mutex_alloc(0);
    kmod_queue_lock = mutex_alloc(0);
    queue_init(&kmod_cmd_queue);
}
483
484kmod_info_t *
485kmod_lookupbyid(kmod_t id)
486{
487    kmod_info_t *k = NULL;
488
489    k = kmod;
490    while (k) {
491        if (k->id == id) break;
492        k = k->next;
493    }
494
495    return k;
496}
497
498kmod_info_t *
499kmod_lookupbyname(const char * name)
500{
501    kmod_info_t *k = NULL;
502
503    k = kmod;
504    while (k) {
505        if (!strncmp(k->name, name, sizeof(k->name)))
506        break;
507        k = k->next;
508    }
509
510    return k;
511}
512
513// get the id of a kext in a given range, if the address is not in a kext
514// -1 is returned
515int kmod_lookupidbyaddress_locked(vm_address_t addr)
516{
517    kmod_info_t *k = 0;
518
519    mutex_lock(kmod_queue_lock);
520    k = kmod;
521    if(NULL != k) {
522        while (k) {
523            if ((k->address <= addr) && ((k->address + k->size) > addr)) {
524                break;
525            }
526            k = k->next;
527        }
528        mutex_unlock(kmod_queue_lock);
529    } else {
530        mutex_unlock(kmod_queue_lock);
531        return -1;
532    }
533
534    if(NULL == k) {
535        return -1;
536    } else {
537        return k->id;
538    }
539}
540
541kmod_info_t *
542kmod_lookupbyaddress(vm_address_t addr)
543{
544    kmod_info_t *k = 0;
545
546    k = kmod;
547    while (k) {
548        if ((k->address <= addr) && ((k->address + k->size) > addr)) break;
549        k = k->next;
550    }
551
552    return k;
553}
554
555kmod_info_t *
556kmod_lookupbyid_locked(kmod_t id)
557{
558    kmod_info_t *k = NULL;
559    kmod_info_t *kc = NULL;
560
561    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
562    if (!kc) return kc;
563
564    mutex_lock(kmod_lock);
565    k = kmod_lookupbyid(id);
566    if (k) {
567        bcopy((char*)k, (char *)kc, sizeof(kmod_info_t));
568    }
569
570    mutex_unlock(kmod_lock);
571
572    if (k == 0) {
573        kfree(kc, sizeof(kmod_info_t));
574    kc = NULL;
575    }
576    return kc;
577}
578
579kmod_info_t *
580kmod_lookupbyname_locked(const char * name)
581{
582    kmod_info_t *k = NULL;
583    kmod_info_t *kc = NULL;
584
585    kc = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
586    if (!kc) return kc;
587
588    mutex_lock(kmod_lock);
589    k = kmod_lookupbyname(name);
590    if (k) {
591        bcopy((char *)k, (char *)kc, sizeof(kmod_info_t));
592    }
593
594    mutex_unlock(kmod_lock);
595
596    if (k == 0) {
597        kfree(kc, sizeof(kmod_info_t));
598    kc = NULL;
599    }
600    return kc;
601}
602
603// XXX add a nocopy flag??
604
/* Copy a command packet into kernel memory and enqueue it on
 * kmod_cmd_queue for the user-space loader, then wake one waiter.
 * Returns KERN_RESOURCE_SHORTAGE / kmem_alloc's error on allocation
 * failure; the queue entry and its buffer are freed by the consumer.
 */
kern_return_t
kmod_queue_cmd(vm_address_t data, vm_size_t size)
{
    kern_return_t rc;
    cmd_queue_entry_t *e = (cmd_queue_entry_t *)kalloc(sizeof(struct cmd_queue_entry));
    if (!e) return KERN_RESOURCE_SHORTAGE;

    rc = kmem_alloc(kernel_map, &e->data, size);
    if (rc != KERN_SUCCESS) {
        /* Undo the entry allocation if the payload buffer failed. */
        kfree(e, sizeof(struct cmd_queue_entry));
        return rc;
    }
    e->size = size;
    bcopy((void *)data, (void *)e->data, size);

    mutex_lock(kmod_queue_lock);
    enqueue_tail(&kmod_cmd_queue, (queue_entry_t)e);
    mutex_unlock(kmod_queue_lock);

    thread_wakeup_one((event_t)&kmod_cmd_queue);

    return KERN_SUCCESS;
}
628
629kern_return_t
630kmod_load_extension(char *name)
631{
632    kmod_load_extension_cmd_t data;
633
634    if (kmod_load_disabled) {
635        return KERN_NO_ACCESS;
636    }
637
638    data.type = KMOD_LOAD_EXTENSION_PACKET;
639    strncpy(data.name, name, sizeof(data.name));
640
641    return kmod_queue_cmd((vm_address_t)&data, sizeof(data));
642}
643
644kern_return_t
645kmod_load_extension_with_dependencies(char *name, char **dependencies)
646{
647    kern_return_t result;
648    kmod_load_with_dependencies_cmd_t * data;
649    vm_size_t    size;
650    char        **c;
651    int         i, count = 0;
652
653    if (kmod_load_disabled) {
654        return KERN_NO_ACCESS;
655    }
656
657    c = dependencies;
658    if (c) {
659        while (*c) {
660            count++; c++;
661        }
662    }
663    size = sizeof(int) + KMOD_MAX_NAME * (count + 1) + 1;
664    data = (kmod_load_with_dependencies_cmd_t *)kalloc(size);
665    if (!data) return KERN_RESOURCE_SHORTAGE;
666
667    data->type = KMOD_LOAD_WITH_DEPENDENCIES_PACKET;
668    strncpy(data->name, name, KMOD_MAX_NAME);
669
670    c = dependencies;
671    for (i=0; i < count; i++) {
672        strncpy(data->dependencies[i], *c, KMOD_MAX_NAME);
673        c++;
674    }
675    data->dependencies[count][0] = 0;
676
677    result = kmod_queue_cmd((vm_address_t)data, size);
678    kfree(data, size);
679    return result;
680}
681kern_return_t
682kmod_send_generic(int type, void *generic_data, int size)
683{
684    kern_return_t result;
685    kmod_generic_cmd_t * data;
686    vm_size_t cmd_size;
687
688    // add sizeof(int) for the type field
689    cmd_size = size + sizeof(int);
690    data = (kmod_generic_cmd_t *)kalloc(cmd_size);
691    if (!data) return KERN_RESOURCE_SHORTAGE;
692
693    data->type = type;
694    bcopy(data->data, generic_data, size);
695
696    result = kmod_queue_cmd((vm_address_t)data, cmd_size);
697    kfree(data, cmd_size);
698    return result;
699}
700
701extern vm_offset_t sectPRELINKB;
702extern int sectSizePRELINK;
703extern int kth_started;
704
705/*
706 * Operates only on 32 bit mach keaders on behalf of kernel module loader
707 * if WRITE_PROTECT_MODULE_TEXT is defined.
708 */
709kern_return_t
710kmod_create_internal(kmod_info_t *info, kmod_t *id)
711{
712    kern_return_t rc;
713    boolean_t     isPrelink;
714
715    if (!info) return KERN_INVALID_ADDRESS;
716
717    // double check for page alignment
718    if ((info->address | info->hdr_size) & (PAGE_SIZE - 1)) {
719        return KERN_INVALID_ADDRESS;
720    }
721
722    isPrelink = ((info->address >= sectPRELINKB) && (info->address < (sectPRELINKB + sectSizePRELINK)));
723    if (!isPrelink && kth_started) {
724        rc = vm_map_wire(kernel_map, info->address + info->hdr_size,
725            info->address + info->size, VM_PROT_DEFAULT, FALSE);
726        if (rc != KERN_SUCCESS) {
727            return rc;
728        }
729    }
730#if WRITE_PROTECT_MODULE_TEXT
731    {
732        struct section * sect = getsectbynamefromheader(
733            (struct mach_header*) info->address, "__TEXT", "__text");
734
735        if(sect) {
736            (void) vm_map_protect(kernel_map, round_page(sect->addr),
737                trunc_page(sect->addr + sect->size),
738                VM_PROT_READ|VM_PROT_EXECUTE, TRUE);
739        }
740    }
741#endif /* WRITE_PROTECT_MODULE_TEXT */
742
743    mutex_lock(kmod_lock);
744
745    // check to see if already loaded
746    if (kmod_lookupbyname(info->name)) {
747        mutex_unlock(kmod_lock);
748        if (!isPrelink) {
749            rc = vm_map_unwire(kernel_map, info->address + info->hdr_size,
750            info->address + info->size, FALSE);
751            assert(rc == KERN_SUCCESS);
752        }
753        return KERN_INVALID_ARGUMENT;
754    }
755
756    info->id = kmod_index++;
757    info->reference_count = 0;
758
759    info->next = kmod;
760    kmod = info;
761
762    *id = info->id;
763
764    clock_get_uptime(&last_loaded_timestamp);
765    save_loaded_kext_paniclist(&printf);
766
767    mutex_unlock(kmod_lock);
768
769#if DEBUG
770    printf("kmod_create: %s (id %d), %d pages loaded at 0x%x, header size 0x%x\n",
771        info->name, info->id, info->size / PAGE_SIZE, info->address, info->hdr_size);
772#endif /* DEBUG */
773
774    return KERN_SUCCESS;
775}
776
777
/* MIG entry point for loading a kmod.  The kmod_info_t is passed by
 * address in the kernel map.  Disabled entirely on SECURE_KERNEL builds;
 * otherwise requires kmod loading to be enabled and a valid host_priv
 * port before delegating to kmod_create_internal.
 */
kern_return_t
kmod_create(host_priv_t host_priv,
        vm_address_t addr,
        kmod_t *id)
{
#ifdef SECURE_KERNEL
    return KERN_NOT_SUPPORTED;
#else
    kmod_info_t *info;

    if (kmod_load_disabled) {
        return KERN_NO_ACCESS;
    }

    info = (kmod_info_t *)addr;

    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_create_internal(info, id);
#endif
}
798
/* Create a "fake" kmod entry — a list record for a built-in or pseudo
 * extension that was not actually loaded from a file.  The entry gets a
 * reference count of 1 so it can never be unloaded, started, or stopped.
 * address/size may be 0 (see kmod_create_fake).  Returns
 * KERN_INVALID_ARGUMENT for bad/duplicate names and
 * KERN_RESOURCE_SHORTAGE on allocation failure.
 */
kern_return_t
kmod_create_fake_with_address(const char *name, const char *version,
                                vm_address_t address, vm_size_t size,
                                int * return_id)
{
    kmod_info_t *info;

    /* Both strings must fit in KMOD_MAX_NAME including the NUL. */
    if (!name || ! version ||
        (1 + strlen(name) > KMOD_MAX_NAME) ||
        (1 + strlen(version) > KMOD_MAX_NAME)) {

        return KERN_INVALID_ARGUMENT;
    }

    info = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!info) {
        return KERN_RESOURCE_SHORTAGE;
    }

    // populate the fake entry
    info->info_version = KMOD_INFO_VERSION;
    bcopy(name, info->name, 1 + strlen(name));
    bcopy(version, info->version, 1 + strlen(version));  //NIK fixed this part
    info->reference_count = 1;    // keep it from unloading, starting, stopping
    info->reference_list = NULL;
    info->address = address;
    info->size = size;
    info->hdr_size = 0;
    info->start = info->stop = NULL;

    mutex_lock(kmod_lock);

    // check to see if already "loaded"
    if (kmod_lookupbyname(info->name)) {
        mutex_unlock(kmod_lock);
        kfree(info, sizeof(kmod_info_t));
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;
    if (return_id)
        *return_id = info->id;

    /* Link at the head of the global kmod list. */
    info->next = kmod;
    kmod = info;

    mutex_unlock(kmod_lock);

    return KERN_SUCCESS;
}
849
/* Convenience wrapper: create a fake kmod entry with no backing memory
 * (address/size 0) and no interest in the assigned id.
 */
kern_return_t
kmod_create_fake(const char *name, const char *version)
{
    return kmod_create_fake_with_address(name, version, 0, 0, NULL);
}
855
856
/* Unlink and tear down the kmod with the given id.  For real kmods
 * (fake == FALSE) the entry must have no outstanding references, and its
 * memory is released (ml_static_mfree for prelinked kexts, unwire +
 * deallocate otherwise).  For fake entries only the list links and
 * reference list are cleaned up.  Returns KERN_INVALID_ARGUMENT if the
 * id is unknown or still referenced.
 */
static kern_return_t
_kmod_destroy_internal(kmod_t id, boolean_t fake)
{
    kern_return_t rc;
    kmod_info_t *k;
    kmod_info_t *p;

    mutex_lock(kmod_lock);

    /* Single-pass search keeping p as the trailing predecessor of k. */
    k = p = kmod;
    while (k) {
        if (k->id == id) {
            kmod_reference_t *r, *t;

            if (!fake && (k->reference_count != 0)) {
                mutex_unlock(kmod_lock);
                return KERN_INVALID_ARGUMENT;
            }

            if (k == p) {    // first element
                kmod = k->next;
            } else {
                p->next = k->next;
            }
            /* The entry is now unlinked, so the remaining teardown can
             * safely run without the lock.
             */
            mutex_unlock(kmod_lock);

            /* Drop every reference this kmod held on other kmods. */
            r = k->reference_list;
            while (r) {
                r->info->reference_count--;
                t = r;
                r = r->next;
                kfree(t, sizeof(struct kmod_reference));
            }

            if (!fake)
            {
#if DEBUG
                printf("kmod_destroy: %s (id %d), deallocating %d pages starting at 0x%x\n",
                    k->name, k->id, k->size / PAGE_SIZE, k->address);
#endif /* DEBUG */

                /* Prelinked kexts live in statically mapped memory and
                 * are returned with ml_static_mfree; everything else is
                 * unwired and deallocated from the kernel map.
                 */
                if( (k->address >= sectPRELINKB) && (k->address < (sectPRELINKB + sectSizePRELINK)))
                {
                    vm_offset_t
                    virt = ml_static_ptovirt(k->address);
                    if( virt) {
                        ml_static_mfree( virt, k->size);
                    }
                }
                else
                {
                    rc = vm_map_unwire(kernel_map, k->address + k->hdr_size,
                            k->address + k->size, FALSE);
                    assert(rc == KERN_SUCCESS);

                    rc = vm_deallocate(kernel_map, k->address, k->size);
                    assert(rc == KERN_SUCCESS);
                }
            }
            return KERN_SUCCESS;
        }
        p = k;
        k = k->next;
    }

    /* NOTE(review): this refresh is only reached when the id was NOT
     * found; a successful destroy returns above without updating the
     * loaded-kext panic list.  Looks misplaced — confirm intent.
     */
    if (!fake) {
        save_loaded_kext_paniclist(&printf);
    }

    mutex_unlock(kmod_lock);

    return KERN_INVALID_ARGUMENT;
}
930
/* In-kernel entry point: destroy a real (non-fake) kmod by id. */
kern_return_t
kmod_destroy_internal(kmod_t id)
{
    return _kmod_destroy_internal(id, FALSE);
}
936
/* MIG entry point: destroy a real kmod by id.  Requires a valid
 * host_priv port.
 */
kern_return_t
kmod_destroy(host_priv_t host_priv,
         kmod_t id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return _kmod_destroy_internal(id, FALSE);
}
944
/* Destroy a fake kmod entry created by kmod_create_fake*; skips the
 * reference-count check and memory teardown.
 */
kern_return_t
kmod_destroy_fake(kmod_t id)
{
    return _kmod_destroy_internal(id, TRUE);
}
950
/* Invoke a kmod's start or stop entry point (start != 0 selects start).
 * The kmod must exist and have no outstanding references.  Any
 * MIG-supplied data is copied out of its vm_map_copy into the kernel map
 * and handed to the entry point, then deallocated; *data/*dataCount are
 * always cleared on return.
 */
kern_return_t
kmod_start_or_stop(
    kmod_t id,
    int start,
    kmod_args_t *data,
    mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;
    void * user_data = NULL;
    kern_return_t (*func)(kmod_info_t *, void *);
    kmod_info_t *k;

    /* Starting a kmod counts as loading; honor the global disable. */
    if (start && kmod_load_disabled) {
        return KERN_NO_ACCESS;
    }

    mutex_lock(kmod_lock);

    k = kmod_lookupbyid(id);
    if (!k || k->reference_count) {
        mutex_unlock(kmod_lock);
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    if (start) {
        func = (void *)k->start;
    } else {
        func = (void *)k->stop;
    }

    /* Drop the lock before running module code; the entry point may
     * block or call back into this subsystem.
     */
    mutex_unlock(kmod_lock);

    //
    // call kmod entry point
    //
    if (data && dataCount && *data && *dataCount) {
        vm_map_offset_t map_addr;
        /* NOTE(review): vm_map_copyout's return value is not checked;
         * on failure map_addr would be used uninitialized — confirm
         * whether failure is possible here.
         */
        vm_map_copyout(kernel_map, &map_addr, (vm_map_copy_t)*data);
    user_data = CAST_DOWN(void *, map_addr);
    }

    rc = (*func)(k, user_data);

finish:

    if (user_data) {
        (void) vm_deallocate(kernel_map, (vm_offset_t)user_data, *dataCount);
    }
    /* The copy object was consumed (or discarded); never return it. */
    if (data) *data = NULL;
    if (dataCount) *dataCount = 0;

    return rc;
}
1005
1006
1007/*
1008 * The retain and release calls take no user data, but the caller
1009 * may have sent some in error (the MIG definition allows it).
1010 * If this is the case, they will just return that same data
1011 * right back to the caller (since they never touch the *data and
1012 * *dataCount fields).
1013 */
1014kern_return_t
1015kmod_retain(kmod_t id)
1016{
1017    kern_return_t rc = KERN_SUCCESS;
1018
1019    kmod_info_t *t;    // reference to
1020    kmod_info_t *f;    // reference from
1021    kmod_reference_t *r = NULL;
1022
1023    r = (kmod_reference_t *)kalloc(sizeof(struct kmod_reference));
1024    if (!r) {
1025        rc = KERN_RESOURCE_SHORTAGE;
1026        goto finish;
1027    }
1028
1029    mutex_lock(kmod_lock);
1030
1031    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
1032    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
1033    if (!t || !f) {
1034        mutex_unlock(kmod_lock);
1035        if (r) kfree(r, sizeof(struct kmod_reference));
1036        rc = KERN_INVALID_ARGUMENT;
1037        goto finish;
1038    }
1039
1040    r->next = f->reference_list;
1041    r->info = t;
1042    f->reference_list = r;
1043    t->reference_count++;
1044
1045    mutex_unlock(kmod_lock);
1046
1047finish:
1048
1049    return rc;
1050}
1051
1052
1053kern_return_t
1054kmod_release(kmod_t id)
1055{
1056    kern_return_t rc = KERN_INVALID_ARGUMENT;
1057
1058    kmod_info_t *t;    // reference to
1059    kmod_info_t *f;    // reference from
1060    kmod_reference_t *r = NULL;
1061    kmod_reference_t * p;
1062
1063    mutex_lock(kmod_lock);
1064
1065    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
1066    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
1067    if (!t || !f) {
1068        rc = KERN_INVALID_ARGUMENT;
1069        goto finish;
1070    }
1071
1072    p = r = f->reference_list;
1073    while (r) {
1074        if (r->info == t) {
1075            if (p == r) {    // first element
1076                f->reference_list = r->next;
1077            } else {
1078                p->next = r->next;
1079            }
1080            r->info->reference_count--;
1081
1082        mutex_unlock(kmod_lock);
1083            kfree(r, sizeof(struct kmod_reference));
1084        rc = KERN_SUCCESS;
1085            goto finish;
1086        }
1087        p = r;
1088        r = r->next;
1089    }
1090
1091    mutex_unlock(kmod_lock);
1092
1093finish:
1094
1095    return rc;
1096}
1097
1098
1099kern_return_t
1100kmod_control(host_priv_t host_priv,
1101         kmod_t id,
1102         kmod_control_flavor_t flavor,
1103         kmod_args_t *data,
1104         mach_msg_type_number_t *dataCount)
1105{
1106    kern_return_t rc = KERN_SUCCESS;
1107
1108   /* Only allow non-root access to retrieve kernel symbols or UUID.
1109    */
1110    if (flavor != KMOD_CNTL_GET_KERNEL_SYMBOLS &&
1111        flavor != KMOD_CNTL_GET_UUID) {
1112
1113        if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
1114    }
1115
1116    switch (flavor) {
1117
1118      case KMOD_CNTL_START:
1119      case KMOD_CNTL_STOP:
1120        {
1121            rc = kmod_start_or_stop(id, (flavor == KMOD_CNTL_START),
1122                data, dataCount);
1123            break;
1124        }
1125
1126      case KMOD_CNTL_RETAIN:
1127        {
1128            rc = kmod_retain(id);
1129            break;
1130        }
1131
1132      case KMOD_CNTL_RELEASE:
1133        {
1134            rc = kmod_release(id);
1135            break;
1136        }
1137
1138      case KMOD_CNTL_GET_CMD:
1139        {
1140
1141            cmd_queue_entry_t *e;
1142
1143           /* Throw away any data the user may have sent in error.
1144            * We must do this, because we are likely to return to
1145            * some data for these commands (thus causing a leak of
1146            * whatever data the user sent us in error).
1147            */
1148            if (*data && *dataCount) {
1149                vm_map_copy_discard(*data);
1150                *data = NULL;
1151                *dataCount = 0;
1152            }
1153
1154            mutex_lock(kmod_queue_lock);
1155
1156            if (queue_empty(&kmod_cmd_queue)) {
1157                wait_result_t res;
1158
1159                res = thread_sleep_mutex((event_t)&kmod_cmd_queue,
1160                    kmod_queue_lock,
1161                    THREAD_ABORTSAFE);
1162                if (queue_empty(&kmod_cmd_queue)) {
1163                    // we must have been interrupted!
1164                    mutex_unlock(kmod_queue_lock);
1165                    assert(res == THREAD_INTERRUPTED);
1166                    return KERN_ABORTED;
1167                }
1168            }
1169            e = (cmd_queue_entry_t *)dequeue_head(&kmod_cmd_queue);
1170
1171            mutex_unlock(kmod_queue_lock);
1172
1173            rc = vm_map_copyin(kernel_map, (vm_map_address_t)e->data,
1174                   (vm_map_size_t)e->size, TRUE, (vm_map_copy_t *)data);
1175            if (rc) {
1176                mutex_lock(kmod_queue_lock);
1177                enqueue_head(&kmod_cmd_queue, (queue_entry_t)e);
1178                mutex_unlock(kmod_queue_lock);
1179                *data = NULL;
1180                *dataCount = 0;
1181                return rc;
1182            }
1183            *dataCount = e->size;
1184
1185            kfree(e, sizeof(struct cmd_queue_entry));
1186
1187            break;
1188        }
1189
1190      case KMOD_CNTL_GET_KERNEL_SYMBOLS:
1191        {
1192           /* Throw away any data the user may have sent in error.
1193            * We must do this, because we are likely to return to
1194            * some data for these commands (thus causing a leak of
1195            * whatever data the user sent us in error).
1196            */
1197            if (*data && *dataCount) {
1198                vm_map_copy_discard(*data);
1199                *data = NULL;
1200                *dataCount = 0;
1201            }
1202
1203            return kmod_get_symbol_data(data, dataCount);
1204            break;
1205        }
1206
1207      case KMOD_CNTL_FREE_LINKEDIT_DATA:
1208        {
1209            return kmod_free_linkedit_data();
1210            break;
1211        }
1212
1213      case KMOD_CNTL_GET_UUID:
1214        {
1215            uint32_t id_length = *dataCount;
1216            char * kext_id = NULL;
1217            vm_map_offset_t map_addr;
1218            void * user_data;
1219            kern_return_t result;
1220
1221           /* Get the bundle id, if provided, and discard the buffer sent down.
1222            */
1223            if (*data && *dataCount) {
1224                kmem_alloc(kernel_map, (vm_offset_t *)&kext_id, id_length);
1225                if (!kext_id) {
1226                    return KERN_FAILURE;
1227                }
1228
1229                vm_map_copyout(kernel_map, &map_addr, (vm_map_copy_t)*data);
1230                user_data = CAST_DOWN(void *, map_addr);
1231
1232                memcpy(kext_id, user_data, id_length);
1233                kext_id[id_length-1] = '\0';
1234                if (user_data) {
1235                    (void)vm_deallocate(kernel_map, (vm_offset_t)user_data, *dataCount);
1236                }
1237                *data = NULL;
1238                *dataCount = 0;
1239            }
1240
1241            result = kmod_get_kext_uuid(kext_id, data, dataCount);
1242            if (kext_id) {
1243                kmem_free(kernel_map, (vm_offset_t)kext_id, id_length);
1244            }
1245            return result;
1246            break;
1247        }
1248
1249      case KMOD_CNTL_DISABLE_LOAD:
1250        {
1251            kmod_load_disabled = 1;
1252            rc = KERN_SUCCESS;
1253            break;
1254        }
1255
1256      default:
1257        rc = KERN_INVALID_ARGUMENT;
1258    }
1259
1260    return rc;
1261};
1262
1263/*******************************************************************************
1264* This function creates a dummy symbol file for the running kernel based on data
1265* in the run-time image. This allows us to correctly link other executables
1266* (drivers, etc) against the kernel when the kernel image on the root filesystem
* does not match the live kernel, as can occur during net-booting where the
1268* actual kernel image is obtained from the network via tftp rather than the root
1269* device.
1270*
1271* If a symbol table is available, then a link-suitable Mach-O file image is
1272* created containing a Mach Header and an LC_SYMTAB load command followed by the
1273* the symbol table data for mach_kernel. A UUID load command is also present for
1274* identification, so we don't link against the wrong kernel.
1275*
1276* NOTE: This file supports only 32 bit kernels; adding support for 64 bit
1277* kernels is possible, but is not necessary yet.
1278*******************************************************************************/
1279extern struct mach_header _mh_execute_header;
1280static int                _linkedit_segment_freed = 0;
1281
/*
 * Build a minimal, link-suitable Mach-O image of the running kernel (see
 * the block comment above) and return it to user space via
 * *symbol_data (a vm_map_copy_t) / *data_size.
 *
 * Layout of the constructed image:
 *   mach_header | TEXT seg cmd | DATA seg cmd | LC_UUID | LC_SYMTAB
 *   ... (pad to page) | __TEXT,__const contents
 *   ... (pad to page) | nlist entries | string table
 *
 * NOTE(review): as a side effect, the FIRST call converts the live kernel
 * symbol table in place from section-relative to absolute symbols (see
 * 'syms_marked' below).
 */
static kern_return_t
kmod_get_symbol_data(
    kmod_args_t * symbol_data,
    mach_msg_type_number_t * data_size)
{
    kern_return_t            result = KERN_FAILURE;

    struct load_command    * load_cmd;
    struct mach_header     * orig_header = &_mh_execute_header;
    struct segment_command * orig_text = NULL;
    struct segment_command * orig_data = NULL;
    struct segment_command * orig_linkedit = NULL;
    struct uuid_command    * orig_uuid = NULL;
    struct symtab_command  * orig_symtab = NULL;
    struct section         * sect;
    struct section         * const_text = NULL;

    vm_size_t                header_size = 0;
    vm_offset_t              symtab_size;
    vm_offset_t              total_size;  // copied out to 'data_size'
    char                   * buffer = 0;  // copied out to 'symbol_data'

    struct mach_header     * header;
    struct segment_command * seg_cmd = NULL;
    struct symtab_command  * symtab;

    unsigned int             i;
    caddr_t                  addr;
    vm_offset_t              offset;

    // only want to do these 1st call (the live symbol table is munged in place)
    static int               syms_marked = 0;

    mutex_lock(kmod_lock);

   /*****
    * Check for empty out parameter pointers, and zero them if ok.
    */
    if (!symbol_data || !data_size) {
        result = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    *symbol_data = NULL;
    *data_size = 0;

    // Once the LINKEDIT segment has been freed there is nothing to copy.
    if (_linkedit_segment_freed) {
        result = KERN_MEMORY_FAILURE;
        goto finish;
    }

   /*****
    * Scan the in-memory kernel's mach header for the parts we need to copy:
    * TEXT (for basic file info + const section), DATA (for basic file info),
    * LINKEDIT (for the symbol table entries), SYMTAB (for the symbol table
    * overall).
    */
    load_cmd = (struct load_command *)&orig_header[1];
    for (i = 0; i < orig_header->ncmds; i++) {
        if (load_cmd->cmd == LC_SEGMENT) {
            struct segment_command * orig_seg_cmd =
                (struct segment_command *)load_cmd;

            if (!strncmp(SEG_TEXT, orig_seg_cmd->segname, strlen(SEG_TEXT))) {
                orig_text = orig_seg_cmd;
            } else if (!strncmp(SEG_DATA, orig_seg_cmd->segname,
                strlen(SEG_DATA))) {

                orig_data = orig_seg_cmd;
            } else if (!strncmp(SEG_LINKEDIT, orig_seg_cmd->segname,
                strlen(SEG_LINKEDIT))) {

                orig_linkedit = orig_seg_cmd;
            }
        } else if (load_cmd->cmd == LC_UUID) {
            orig_uuid = (struct uuid_command *)load_cmd;
        } else if (load_cmd->cmd == LC_SYMTAB) {
            orig_symtab = (struct symtab_command *)load_cmd;
        }

        load_cmd = (struct load_command *)((caddr_t)load_cmd + load_cmd->cmdsize);
    }

   /* Bail if any wasn't found.
    */
    if (!orig_text || !orig_data || !orig_linkedit || !orig_uuid || !orig_symtab) {
        goto finish;
    }

   /* Now seek out the const section of the TEXT segment, bailing if not found.
    */
    sect = (struct section *)&orig_text[1];
    for (i = 0; i < orig_text->nsects; i++, sect++) {
        if (!strncmp("__const", sect->sectname, sizeof("__const"))) {
            const_text = sect;
            break;
        }
    }
    if (!const_text) {
        goto finish;
    }

   /*****
    * Calculate the total size needed and allocate the buffer. In summing the
    * total size, every size before the last must be rounded to a
    * page-size increment.
    */
    header_size = sizeof(struct mach_header) +
        orig_text->cmdsize + orig_data->cmdsize +
        orig_uuid->cmdsize + orig_symtab->cmdsize;
    symtab_size = (orig_symtab->nsyms * sizeof(struct nlist)) +
        orig_symtab->strsize;
    total_size = round_page(header_size) + round_page(const_text->size) +
        symtab_size;

    // NOTE(review): the return code is discarded; this relies on 'buffer'
    // remaining NULL when the allocation fails — checking the kern_return_t
    // would be safer.  TODO confirm kmem_alloc leaves the out-pointer
    // untouched on failure.
    (void)kmem_alloc(kernel_map, (vm_offset_t *)&buffer, total_size);
    if (!buffer) {
        goto finish;
    }
    bzero((void *)buffer, total_size);

   /*****
    * Set up the Mach-O header in the buffer.
    */
    header = (struct mach_header *)buffer;
    header->magic      = orig_header->magic;
    header->cputype    = orig_header->cputype;
    header->cpusubtype = orig_header->cpusubtype;
    header->filetype   = orig_header->filetype;
    header->ncmds      = 4;  // TEXT, DATA, UUID, SYMTAB
    header->sizeofcmds = header_size - sizeof(struct mach_header);
    header->flags      = orig_header->flags;

   /*****
    * Initialize the current file offset and addr; updated as we go through,
    * but only for fields that need proper info.
    */
    offset = round_page(header_size);
    addr   = (caddr_t)const_text->addr;

   /*****
    * Construct a TEXT segment load command. The only content of the TEXT
    * segment that we actually copy is the __TEXT,__const, which contains the
    * kernel vtables.  The other sections are just filled with unincremented
    * addr/offset and zero size and number fields.
    */
    seg_cmd = (struct segment_command *)&header[1]; // just past mach header
    memcpy(seg_cmd, orig_text, orig_text->cmdsize);
    seg_cmd->vmaddr   = (unsigned long)addr;
    seg_cmd->vmsize   = const_text->size;
    seg_cmd->fileoff  = 0;
    seg_cmd->filesize = const_text->size + round_page(header_size);
    seg_cmd->maxprot  = 0;
    seg_cmd->initprot = 0;
    seg_cmd->flags    = 0;
    sect = (struct section *)(seg_cmd + 1);
    for (i = 0; i < seg_cmd->nsects; i++, sect++) {
        sect->addr  = (unsigned long)addr; // only valid for __TEXT,__const
        sect->size  = 0;
        sect->offset = offset;
        sect->nreloc = 0;
        if (0 == strncmp("__const", sect->sectname, sizeof("__const"))) {
            sect->size = const_text->size;
            addr      += const_text->size;
            offset    += const_text->size;
            const_text = sect;  // retarget to constructed section
        }
    }
    offset = round_page(offset);

   /*****
    * Now copy the __DATA segment load command, but none of its content.
    * NOTE: the (int) casts on seg_cmd below assume 32-bit pointers,
    * consistent with this file's documented 32-bit-only support.
    */
    seg_cmd = (struct segment_command *)((int)seg_cmd + seg_cmd->cmdsize);
    memcpy(seg_cmd, orig_data, orig_data->cmdsize);

    seg_cmd->vmaddr   = (unsigned long)addr;
    seg_cmd->vmsize   = 0x1000;    // Why not just zero? DATA seg is empty.
    seg_cmd->fileoff  = offset;
    seg_cmd->filesize = 0;
    seg_cmd->maxprot  = 0;
    seg_cmd->initprot = 0;
    seg_cmd->flags    = 0;
    sect = (struct section *)(seg_cmd+1);
    for (i = 0; i < seg_cmd->nsects; i++, sect++) {
        sect->addr   = (unsigned long)addr;
        sect->size   = 0;
        sect->offset = offset;
        sect->nreloc = 0;
    }
    offset = round_page(offset);

   /* Set up LC_UUID command
    */
    seg_cmd = (struct segment_command *)((int)seg_cmd + seg_cmd->cmdsize);
    memcpy(seg_cmd, orig_uuid, orig_uuid->cmdsize);

   /* Set up LC_SYMTAB command
    */
    symtab          = (struct symtab_command *)((int)seg_cmd + seg_cmd->cmdsize);
    symtab->cmd     = LC_SYMTAB;
    symtab->cmdsize = sizeof(struct symtab_command);
    symtab->symoff  = offset;
    symtab->nsyms   = orig_symtab->nsyms;
    symtab->strsize = orig_symtab->strsize;
    symtab->stroff  = offset + symtab->nsyms * sizeof(struct nlist);

   /* Convert the symbol table in place (yes, in the running kernel)
    * from section references to absolute references.
    */
    if (!syms_marked) {
        struct nlist * sym = (struct nlist *) orig_linkedit->vmaddr;
        for (i = 0; i < orig_symtab->nsyms; i++, sym++) {
            if ((sym->n_type & N_TYPE) == N_SECT) {
                sym->n_sect = NO_SECT;
                sym->n_type = (sym->n_type & ~N_TYPE) | N_ABS;
            }
        }
        syms_marked = 1;
    }

   /*****
    * Copy the contents of the __TEXT,__const section and the linkedit symbol
    * data into the constructed object file buffer. The header has already been
    * filled in.
    */
    memcpy(buffer + const_text->offset, (void *)const_text->addr, const_text->size);
    memcpy(buffer + symtab->symoff, (void *)orig_linkedit->vmaddr, symtab_size);

   /* src_destroy == TRUE: on success the buffer's pages are consumed by the
    * copy object, so no kmem_free is needed on the success path.
    */
    result = vm_map_copyin(kernel_map,
        (vm_offset_t)buffer,
        (vm_map_size_t)total_size,
        /* src_destroy */ TRUE,
        (vm_map_copy_t *)symbol_data);
    if (result != KERN_SUCCESS) {
        kmem_free(kernel_map, (vm_offset_t)buffer, total_size);
        *symbol_data = NULL;
        *data_size   = 0;
        goto finish;
    } else {
        *data_size = total_size;
    }

finish:
    mutex_unlock(kmod_lock);
    return result;
}
1529
1530/*******************************************************************************
1531* Drop the LINKEDIT segment from the running kernel to recover wired memory.
1532* This is invoked by kextd after it has successfully determined a file is
1533* available in the root filesystem to link against (either a symbol file it
1534* wrote, or /mach_kernel).
1535*******************************************************************************/
1536// in IOCatalogue.cpp
1537extern int kernelLinkerPresent;
1538
/*
 * Free the running kernel's __LINKEDIT segment (see the block comment
 * above) to recover wired memory.  Also releases the device-tree copies of
 * the kernel header and symbol table that the booter loaded.  Honors the
 * "keepsyms" boot-arg: when set, the symbol memory itself is kept and
 * _linkedit_segment_freed is not set (see the check at 'finish').
 * Refuses to run while the in-kernel linker is present, since the linker
 * requires the linkedit data.
 *
 * Idempotent: returns KERN_SUCCESS immediately if already freed.
 */
static kern_return_t
kmod_free_linkedit_data(void)
{
    kern_return_t result = KERN_FAILURE;

    const char * dt_kernel_header_name = "Kernel-__HEADER";
    const char * dt_kernel_symtab_name = "Kernel-__SYMTAB";
    struct mach_header_t * dt_mach_header = NULL;
    vm_size_t dt_mach_header_size = 0;
    struct symtab_command *dt_symtab = NULL;
    vm_size_t dt_symtab_size = 0;
    int dt_result;

    struct segment_command * segmentLE;
    boolean_t    keepsyms = FALSE;
    const char * segment_name = "__LINKEDIT";
#if __ppc__ || __arm__
    const char * devtree_segment_name = "Kernel-__LINKEDIT";
    void       * segment_paddress;
    vm_size_t    segment_size;
#endif

    mutex_lock(kmod_lock);

   /* The semantic is "make sure the linkedit segment is freed", so if we
    * previously did it, it's a success.
    */
    if (_linkedit_segment_freed) {
        result = KERN_SUCCESS;
        goto finish;
    } else if (kernelLinkerPresent) {
        // The in-kernel linker requires the linkedit segment to function.
        // Refuse to dump if it's still around.
        // XXX: We need a dedicated error return code for this.
        printf("can't remove kernel __LINKEDIT segment - in-kernel linker needs it\n");
        result = KERN_MEMORY_FAILURE;
        goto finish;
    }

   /* Dispose of unnecessary stuff that the booter didn't need to load.
    * Failures here are ignored; these are best-effort releases.
    */
    dt_result = IODTGetLoaderInfo(dt_kernel_header_name,
        (void **)&dt_mach_header, &dt_mach_header_size);
    if (dt_result == 0 && dt_mach_header) {
        IODTFreeLoaderInfo(dt_kernel_header_name, (void *)dt_mach_header,
            round_page_32(dt_mach_header_size));
    }
    dt_result = IODTGetLoaderInfo(dt_kernel_symtab_name,
        (void **)&dt_symtab, &dt_symtab_size);
    if (dt_result == 0 && dt_symtab) {
        IODTFreeLoaderInfo(dt_kernel_symtab_name, (void *)dt_symtab,
            round_page_32(dt_symtab_size));
    }

    PE_parse_boot_argn("keepsyms", &keepsyms, sizeof (keepsyms));

    segmentLE = getsegbyname(segment_name);
    if (!segmentLE) {
        printf("error removing kernel __LINKEDIT segment\n");
        goto finish;
    }
   /* Run C++ static destructors for the segment before its pages go away. */
    OSRuntimeUnloadCPPForSegment(segmentLE);
#if __ppc__ || __arm__
    if (!keepsyms && 0 == IODTGetLoaderInfo(devtree_segment_name,
        &segment_paddress, &segment_size)) {

        IODTFreeLoaderInfo(devtree_segment_name, (void *)segment_paddress,
            (int)segment_size);
    }
#elif __i386__
    if (!keepsyms && segmentLE->vmaddr && segmentLE->vmsize) {
        ml_static_mfree(segmentLE->vmaddr, segmentLE->vmsize);
    }
#else
#error arch
#endif
    result = KERN_SUCCESS;

finish:
   /* Only record the segment as freed if its memory was actually released. */
    if (!keepsyms && result == KERN_SUCCESS) {
        _linkedit_segment_freed = 1;
    }
    mutex_unlock(kmod_lock);
    return result;
}
1624
1625/*******************************************************************************
1626* Retrieve the UUID load command payload from the running kernel.
1627*******************************************************************************/
1628static kern_return_t
1629kmod_get_kext_uuid(
1630    const char * kext_id,
1631    kmod_args_t * data,
1632    mach_msg_type_number_t * dataCount)
1633{
1634    kern_return_t result = KERN_FAILURE;
1635    kmod_info_t * kmod_info = NULL;
1636    unsigned int i;
1637    char * uuid_data = 0;
1638    struct mach_header  * header = &_mh_execute_header;
1639    struct load_command * load_cmd = (struct load_command *)&header[1];
1640    struct uuid_command * uuid_cmd;
1641
1642   /* If given no kext ID, retrieve the kernel UUID.
1643    */
1644    if (!kext_id) {
1645        header = &_mh_execute_header;
1646    } else {
1647        kmod_info = kmod_lookupbyname_locked(kext_id);
1648        if (!kmod_info) {
1649            result = KERN_INVALID_ARGUMENT;
1650            goto finish;
1651        }
1652
1653       /* If the kmod is build-in, it's part of the kernel, so retrieve the
1654        * kernel UUID.
1655        */
1656        if (!kmod_info->address) {
1657            header = &_mh_execute_header;
1658        } else {
1659            header = (struct mach_header *)kmod_info->address;
1660        }
1661    }
1662
1663    load_cmd = (struct load_command *)&header[1];
1664
1665    for (i = 0; i < header->ncmds; i++) {
1666        if (load_cmd->cmd == LC_UUID) {
1667            uuid_cmd = (struct uuid_command *)load_cmd;
1668
1669           /* kmem_alloc() a local buffer that's on a boundary known to work
1670            * with vm_map_copyin().
1671            */
1672            result = kmem_alloc(kernel_map, (vm_offset_t *)&uuid_data,
1673                sizeof(uuid_cmd->uuid));
1674            if (result != KERN_SUCCESS) {
1675                result = KERN_RESOURCE_SHORTAGE;
1676                goto finish;
1677            }
1678
1679            memcpy(uuid_data, uuid_cmd->uuid, sizeof(uuid_cmd->uuid));
1680
1681            result = vm_map_copyin(kernel_map, (vm_offset_t)uuid_data,
1682                sizeof(uuid_cmd->uuid), /* src_destroy */ TRUE,
1683                (vm_map_copy_t *)data);
1684            if (result == KERN_SUCCESS) {
1685                *dataCount = sizeof(uuid_cmd->uuid);
1686            } else {
1687                result = KERN_RESOURCE_SHORTAGE;
1688                kmem_free(kernel_map, (vm_offset_t)uuid_data,
1689                    sizeof(uuid_cmd->uuid));
1690            }
1691            goto finish;
1692        }
1693
1694        load_cmd = (struct load_command *)((caddr_t)load_cmd + load_cmd->cmdsize);
1695    }
1696
1697finish:
1698    return result;
1699}
1700
/*
 * Copy the entire kmod list (and each kmod's reference list) out to user
 * space as one packed buffer: an array of kmod_info_t records followed by
 * all kmod_reference_t records in the order encountered.  The copied
 * records are adjusted for user-level consumption (see the comments in the
 * copy pass).  Returns KERN_SUCCESS with empty out-params when there are
 * no kmods.
 */
kern_return_t
kmod_get_info(__unused host_t host,
          kmod_info_array_t *kmods,
          mach_msg_type_number_t *kmodCount)
{
    vm_offset_t data;
    kmod_info_t *k, *p1;
    kmod_reference_t *r, *p2;
    int ref_count;
    unsigned size = 0;
    kern_return_t rc = KERN_SUCCESS;

    *kmods = (void *)0;
    *kmodCount = 0;

retry:
   /* Sizing pass: total the space needed, under the lock. */
    mutex_lock(kmod_lock);
    size = 0;
    k = kmod;
    while (k) {
        size += sizeof(kmod_info_t);
        r = k->reference_list;
        while (r) {
            size +=sizeof(kmod_reference_t);
            r = r->next;
        }
        k = k->next;
    }
    mutex_unlock(kmod_lock);
    if (!size) return KERN_SUCCESS;

   /* Allocate with the lock dropped; the list may grow meanwhile, in which
    * case the copy passes below detect overflow and retry from scratch.
    */
    rc = kmem_alloc(kernel_map, &data, size);
    if (rc) return rc;

    // copy kmod into data, retry if kmod's size has changed (grown)
    // the copied out data is tweaked to figure what's what at user level
    // change the copied out k->next pointers to point to themselves
    // change the k->reference into a count, tack the references on
    // the end of the data packet in the order they are found

    mutex_lock(kmod_lock);
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        if ((p1 + 1) > (kmod_info_t *)(data + size)) {
            // list grew since the sizing pass; start over
            mutex_unlock(kmod_lock);
            kmem_free(kernel_map, data, size);
            goto retry;
        }

        *p1 = *k;
        // per the note above: a non-NULL next marks "has a successor";
        // presumably user level only tests it against NULL — verify
        if (k->next) p1->next = k;
        p1++; k = k->next;
    }

   /* Second pass: append each kmod's references after the info records and
    * replace the copied reference_list pointer with the reference count.
    */
    p2 = (kmod_reference_t *)p1;
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        r = k->reference_list; ref_count = 0;
        while (r) {
            if ((p2 + 1) > (kmod_reference_t *)(data + size)) {
                mutex_unlock(kmod_lock);
                kmem_free(kernel_map, data, size);
                goto retry;
            }
            // note the last 'k' in the chain has its next == 0
            // since there can only be one like that,
            // this case is handled by the caller
            *p2 = *r;
            p2++; r = r->next; ref_count++;
        }
        p1->reference_list = (kmod_reference_t *)ref_count;
        p1++; k = k->next;
    }
    mutex_unlock(kmod_lock);

   /* Hand the buffer to user space; src_destroy == TRUE consumes it. */
    rc = vm_map_copyin(kernel_map, data, size, TRUE, (vm_map_copy_t *)kmods);
    if (rc) {
        kmem_free(kernel_map, data, size);
        *kmods = NULL;
        *kmodCount = 0;
        return rc;
    }
    *kmodCount = size;

    return KERN_SUCCESS;
}
1787
1788/*
 * Operates only on 32 bit mach headers on behalf of kernel module loader
1790 */
1791static kern_return_t
1792kmod_call_funcs_in_section(struct mach_header *header, const char *sectName)
1793{
1794    typedef void (*Routine)(void);
1795    Routine *     routines;
1796    int           size, i;
1797
1798    if (header->magic != MH_MAGIC) {
1799        return KERN_INVALID_ARGUMENT;
1800    }
1801
1802    routines = (Routine *) getsectdatafromheader(header, SEG_TEXT, /*(char *)*/ sectName, &size);
1803    if (!routines) return KERN_SUCCESS;
1804
1805    size /= sizeof(Routine);
1806    for (i = 0; i < size; i++) {
1807        (*routines[i])();
1808    }
1809
1810    return KERN_SUCCESS;
1811}
1812
1813/*
 * Operates only on 32 bit mach headers on behalf of kernel module loader
1815 */
1816kern_return_t
1817kmod_initialize_cpp(kmod_info_t *info)
1818{
1819    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__constructor");
1820}
1821
1822/*
 * Operates only on 32 bit mach headers on behalf of kernel module loader
1824 */
1825kern_return_t
1826kmod_finalize_cpp(kmod_info_t *info)
1827{
1828    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__destructor");
1829}
1830
/*
 * Default start routine for kmods that supply none: a no-op that
 * reports success.
 */
kern_return_t
kmod_default_start(__unused struct kmod_info *ki, __unused void *data)
{
    return KMOD_RETURN_SUCCESS;
}
1836
/*
 * Default stop routine for kmods that supply none: a no-op that
 * reports success.
 */
kern_return_t
kmod_default_stop(__unused struct kmod_info *ki, __unused void *data)
{
    return KMOD_RETURN_SUCCESS;
}
1842
1843static void
1844kmod_dump_to(vm_offset_t *addr, unsigned int cnt,
1845    int (*printf_func)(const char *fmt, ...))
1846{
1847    vm_offset_t * kscan_addr = NULL;
1848    kmod_info_t * k;
1849    kmod_reference_t * r;
1850    unsigned int i;
1851    int found_kmod = 0;
1852    kmod_info_t * stop_kmod = NULL;
1853
1854    for (k = kmod; k; k = k->next) {
1855        if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)k)) == 0) {
1856            (*printf_func)("         kmod scan stopped due to missing "
1857                "kmod page: %08x\n", stop_kmod);
1858            break;
1859        }
1860        if (!k->address) {
1861            continue; // skip fake entries for built-in kernel components
1862        }
1863        for (i = 0, kscan_addr = addr; i < cnt; i++, kscan_addr++) {
1864            if ((*kscan_addr >= k->address) &&
1865                (*kscan_addr < (k->address + k->size))) {
1866
1867                if (!found_kmod) {
1868                    (*printf_func)("      Kernel loadable modules in backtrace "
1869                        "(with dependencies):\n");
1870                }
1871                found_kmod = 1;
1872                (*printf_func)("         %s(%s)@0x%x->0x%x\n",
1873                    k->name, k->version, k->address, k->address + k->size - 1);
1874
1875                for (r = k->reference_list; r; r = r->next) {
1876                    kmod_info_t * rinfo;
1877
1878                    if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)r)) == 0) {
1879                        (*printf_func)("            kmod dependency scan stopped "
1880                            "due to missing dependency page: %08x\n", r);
1881                        break;
1882                    }
1883
1884                    rinfo = r->info;
1885
1886                    if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)rinfo)) == 0) {
1887                        (*printf_func)("            kmod dependency scan stopped "
1888                            "due to missing kmod page: %08x\n", rinfo);
1889                        break;
1890                    }
1891
1892                    if (!rinfo->address) {
1893                        continue; // skip fake entries for built-ins
1894                    }
1895
1896                    (*printf_func)("            dependency: %s(%s)@0x%x\n",
1897                        rinfo->name, rinfo->version, rinfo->address);
1898                }
1899
1900                break;  // only report this kmod for one backtrace address
1901            }
1902        }
1903    }
1904
1905    return;
1906}
1907
/*
 * Dump kmod backtrace info to the kernel debugger console (kdb_printf).
 */
void
kmod_dump(vm_offset_t *addr, unsigned int cnt)
{
    kmod_dump_to(addr, cnt, &kdb_printf);
}
1913
void kmod_dump_log(vm_offset_t *, unsigned); /* gcc 4 warn fix */

/*
 * Dump kmod backtrace info via the regular kernel printf (system log).
 */
void
kmod_dump_log(vm_offset_t *addr, unsigned int cnt)
{
    kmod_dump_to(addr, cnt, &printf);
}
1921