/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <zone_debug.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mig_errors.h>
#include <mach/port.h>
#include <mach/vm_param.h>
#include <mach/notify.h>
//#include <mach/mach_host_server.h>
#include <mach/mach_types.h>

#include <machine/machparam.h>		/* spl definitions */

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <kern/clock.h>
#include <kern/spl.h>
#include <kern/counters.h>
#include <kern/queue.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>

#include <kern/assert.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <device/device_types.h>
#include <device/device_port.h>
#include <device/device_server.h>

#include <machine/machparam.h>

#if defined(__i386__) || defined(__x86_64__)
#include <i386/pmap.h>
#endif
#include <IOKit/IOTypes.h>

#define EXTERN
#define MIGEXTERN
/*
 * Functions in iokit:IOUserClient.cpp
 */

extern void iokit_add_reference( io_object_t obj );

extern ipc_port_t iokit_port_for_object( io_object_t obj,
			ipc_kobject_type_t type );

extern kern_return_t iokit_client_died( io_object_t obj,
                        ipc_port_t port, ipc_kobject_type_t type, mach_port_mscount_t * mscount );

extern kern_return_t
iokit_client_memory_for_type(
	io_object_t	connect,
	unsigned int	type,
	unsigned int *	flags,
	vm_address_t *	address,
	vm_size_t    *	size );


extern ppnum_t IOGetLastPageNumber(void);

/*
 * Functions imported by iokit:IOUserClient.cpp
 */

extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
			ipc_kobject_type_t type );

extern kern_return_t iokit_destroy_object_port( ipc_port_t port );

extern mach_port_name_t iokit_make_send_right( task_t task,
				io_object_t obj, ipc_kobject_type_t type );

extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );

extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t space);

extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);

extern void iokit_retain_port( ipc_port_t port );
extern void iokit_release_port( ipc_port_t port );
extern void iokit_release_port_send( ipc_port_t port );

extern void iokit_lock_port(ipc_port_t port);
extern void iokit_unlock_port(ipc_port_t port);

extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );

/*
 * Functions imported by iokit:IOMemoryDescriptor.cpp
 */

extern kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
                                 mach_vm_size_t length, unsigned int mapFlags);

extern kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length);

extern kern_return_t IOProtectCacheMode(vm_map_t map, mach_vm_address_t va,
					mach_vm_size_t length, unsigned int options);

extern unsigned int IODefaultCacheBits(addr64_t pa);

/*
 * Lookup a device by its port.
 * Doesn't consume the naked send right; produces a device reference.
 */
MIGEXTERN io_object_t
iokit_lookup_object_port(
	ipc_port_t	port)
{
	register io_object_t	obj;

	if (!IP_VALID(port))
	    return (NULL);

	iokit_lock_port(port);
	if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_OBJECT)) {
	    obj = (io_object_t) port->ip_kobject;
	    iokit_add_reference( obj );
	}
	else
	    obj = NULL;

	iokit_unlock_port(port);

	return( obj );
}

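/*
 * Lookup a connection by its port.
 * Doesn't consume the naked send right; produces a connection reference.
 */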
MIGEXTERN io_object_t
iokit_lookup_connect_port(
	ipc_port_t	port)
{
	register io_object_t	obj;

	if (!IP_VALID(port))
	    return (NULL);

	iokit_lock_port(port);
	if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) {
	    obj = (io_object_t) port->ip_kobject;
	    iokit_add_reference( obj );
	}
	else
	    obj = NULL;

	iokit_unlock_port(port);

	return( obj );
}

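/*
 * Lookup a connection by a send right name in the given IPC space.
 * Doesn't consume the caller's right; produces a connection reference,
 * or NULL if the name doesn't denote an active IOKit connect port.
 */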
EXTERN io_object_t
iokit_lookup_connect_ref(io_object_t connectRef, ipc_space_t space)
{
	io_object_t obj = NULL;

	if (connectRef && MACH_PORT_VALID(CAST_MACH_PORT_TO_NAME(connectRef))) {
		ipc_port_t port;
		kern_return_t kr;

		kr = ipc_object_translate(space, CAST_MACH_PORT_TO_NAME(connectRef), MACH_PORT_RIGHT_SEND, (ipc_object_t *)&port);

		if (kr == KERN_SUCCESS) {
			assert(IP_VALID(port));

			ip_reference(port);
			ip_unlock(port);

			iokit_lock_port(port);
			if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) {
				obj = (io_object_t) port->ip_kobject;
				iokit_add_reference(obj);
			}
			iokit_unlock_port(port);

			ip_release(port);
		}
	}

	return obj;
}

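/*
 * As above, resolving the name in the current task's IPC space.
 */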
EXTERN io_object_t
iokit_lookup_connect_ref_current_task(io_object_t connectRef)
{
	return iokit_lookup_connect_ref(connectRef, current_space());
}

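/*
 * Port reference helpers: thin wrappers around the ipc_port
 * reference primitives.
 */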
EXTERN void
iokit_retain_port( ipc_port_t port )
{
    ipc_port_reference( port );
}

EXTERN void
iokit_release_port( ipc_port_t port )
{
    ipc_port_release( port );
}

EXTERN void
iokit_release_port_send( ipc_port_t port )
{
    ipc_port_release_send( port );
}

extern lck_mtx_t iokit_obj_to_port_binding_lock;

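/*
 * Object/port binding lock. The port argument is unused;
 * a single global mutex serializes all binding changes.
 */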
EXTERN void
iokit_lock_port( __unused ipc_port_t port )
{
    lck_mtx_lock(&iokit_obj_to_port_binding_lock);
}

EXTERN void
iokit_unlock_port( __unused ipc_port_t port )
{
    lck_mtx_unlock(&iokit_obj_to_port_binding_lock);
}

/*
 * Get the port for a device.
 * Consumes a device reference; produces a naked send right.
 */
MIGEXTERN ipc_port_t
iokit_make_object_port(
	io_object_t	obj )
{
    register ipc_port_t	port;
    register ipc_port_t	sendPort;

    if( obj == NULL)
        return IP_NULL;

    port = iokit_port_for_object( obj, IKOT_IOKIT_OBJECT );
    if( port) {
	sendPort = ipc_port_make_send( port);
	iokit_release_port( port );
    } else
	sendPort = IP_NULL;

    iokit_remove_reference( obj );

    return( sendPort);
}

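/*
 * Get the port for a connection.
 * Consumes a device reference; produces a naked send right.
 */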
MIGEXTERN ipc_port_t
iokit_make_connect_port(
	io_object_t	obj )
{
    register ipc_port_t	port;
    register ipc_port_t	sendPort;

    if( obj == NULL)
        return IP_NULL;

    port = iokit_port_for_object( obj, IKOT_IOKIT_CONNECT );
    if( port) {
	sendPort = ipc_port_make_send( port);
	iokit_release_port( port );
    } else
	sendPort = IP_NULL;

    iokit_remove_reference( obj );

    return( sendPort);
}

int gIOKitPortCount;

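/*
 * Allocate a kernel port for an IOKit object, bind the object to it as
 * its kobject, and request a no-senders notification on the port.
 * Returns the port (holding a port reference), or IP_NULL on failure.
 */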
EXTERN ipc_port_t
iokit_alloc_object_port( io_object_t obj, ipc_kobject_type_t type )
{
    ipc_port_t		notify;
    ipc_port_t		port;

    do {

	/* Allocate port, keeping a reference for it. */
        port = ipc_port_alloc_kernel();
        if( port == IP_NULL)
            continue;

        /* set kobject & type */
//	iokit_add_reference( obj );
	ipc_kobject_set( port, (ipc_kobject_t) obj, type);

        /* Request no-senders notifications on the port. */
        ip_lock( port);
        notify = ipc_port_make_sonce_locked( port);
        ipc_port_nsrequest( port, 1, notify, &notify);
	/* port unlocked */
        assert( notify == IP_NULL);
	gIOKitPortCount++;

    } while( FALSE);

    return( port );
}


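/*
 * Clear the kobject binding and destroy a port allocated by
 * iokit_alloc_object_port().
 */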
EXTERN kern_return_t
iokit_destroy_object_port( ipc_port_t port )
{
    ipc_kobject_set( port, IKO_NULL, IKOT_NONE);

//    iokit_remove_reference( obj );

    ipc_port_dealloc_kernel( port);
    gIOKitPortCount--;

    return( KERN_SUCCESS);
}

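/*
 * Rebind an existing IOKit port to a different object and kobject type,
 * under the object/port binding lock.
 */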
EXTERN kern_return_t
iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type )
{
    iokit_lock_port(port);
    ipc_kobject_set( port, (ipc_kobject_t) obj, type);
    iokit_unlock_port(port);

    return( KERN_SUCCESS);
}

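/*
 * Make a send right for an object's port and copy it out into the
 * given task's IPC space. Consumes a device reference; returns the
 * new name, MACH_PORT_NULL on failure, or MACH_PORT_DEAD if the
 * underlying port is dead.
 */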
EXTERN mach_port_name_t
iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type )
{
    ipc_port_t		port;
    ipc_port_t		sendPort;
    mach_port_name_t	name;

    if( obj == NULL)
        return MACH_PORT_NULL;

    port = iokit_port_for_object( obj, type );
    if( port) {
	sendPort = ipc_port_make_send( port);
	iokit_release_port( port );
    } else
	sendPort = IP_NULL;

    if (IP_VALID( sendPort )) {
    	kern_return_t	kr;
    	kr = ipc_object_copyout( task->itk_space, (ipc_object_t) sendPort,
				MACH_MSG_TYPE_PORT_SEND, TRUE, &name);
	if ( kr != KERN_SUCCESS)
		name = MACH_PORT_NULL;
    } else if ( sendPort == IP_NULL)
        name = MACH_PORT_NULL;
    else if ( sendPort == IP_DEAD)
    	name = MACH_PORT_DEAD;

    iokit_remove_reference( obj );

    return( name );
}

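/*
 * Adjust the user reference count for a send right in the task's IPC space.
 */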
EXTERN kern_return_t
iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta )
{
    return (mach_port_mod_refs( task->itk_space, name, MACH_PORT_RIGHT_SEND, delta ));
}

/*
 * Handle the no-more-senders notification generated when a device port is destroyed.
 * Since no task any longer holds a send right to this device port, a no-senders
 * notification has been generated.
 */

static void
iokit_no_senders( mach_no_senders_notification_t * notification )
{
    ipc_port_t		port;
    io_object_t		obj = NULL;
    ipc_kobject_type_t	type = IKOT_NONE;
    ipc_port_t		notify;

    port = (ipc_port_t) notification->not_header.msgh_remote_port;

    // convert a port to io_object_t.
    if( IP_VALID(port)) {
        iokit_lock_port(port);
        if( ip_active(port)) {
            obj = (io_object_t) port->ip_kobject;
	    type = ip_kotype( port );
            if( (IKOT_IOKIT_OBJECT  == type)
	     || (IKOT_IOKIT_CONNECT == type))
                iokit_add_reference( obj );
            else
                obj = NULL;
	}
        iokit_unlock_port(port);

        if( obj ) {

	    mach_port_mscount_t mscount = notification->not_count;

            if( KERN_SUCCESS != iokit_client_died( obj, port, type, &mscount ))
	    {
		/* Re-request no-senders notifications on the port (if still active) */
		ip_lock(port);
		if (ip_active(port)) {
			notify = ipc_port_make_sonce_locked(port);
			ipc_port_nsrequest( port, mscount + 1, notify, &notify);
			/* port unlocked */
			if ( notify != IP_NULL)
				ipc_port_release_sonce(notify);
		} else {
			ip_unlock(port);
		}
	    }
            iokit_remove_reference( obj );
        }
    }
}


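/*
 * Dispatch a kernel notification message delivered to an IOKit kobject port.
 * Only no-senders notifications are handled; anything else is logged and dropped.
 */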
EXTERN
boolean_t
iokit_notify( mach_msg_header_t * msg )
{
    switch (msg->msgh_id) {
        case MACH_NOTIFY_NO_SENDERS:
            iokit_no_senders((mach_no_senders_notification_t *) msg);
            return TRUE;

        case MACH_NOTIFY_PORT_DELETED:
        case MACH_NOTIFY_PORT_DESTROYED:
        case MACH_NOTIFY_SEND_ONCE:
        case MACH_NOTIFY_DEAD_NAME:
        default:
            printf("iokit_notify: strange notification %d\n", msg->msgh_id);
            return FALSE;
    }
}

/* need to create a pmap function to generalize */
unsigned int IODefaultCacheBits(addr64_t pa)
{
	return(pmap_cache_attributes((ppnum_t)(pa >> PAGE_SHIFT)));
}

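/*
 * Map a physically contiguous range into the map's pmap: choose cache
 * attributes from options (or the default for the physical address),
 * record them, then enter a block mapping covering the page-rounded
 * length with the requested protection.
 */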
kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
			mach_vm_size_t length, unsigned int options)
{
    vm_prot_t	 prot;
    unsigned int flags;
    ppnum_t	 pagenum;
    pmap_t 	 pmap = map->pmap;

    prot = (options & kIOMapReadOnly)
		? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);

    pagenum = (ppnum_t)atop_64(pa);

    switch(options & kIOMapCacheMask ) {			/* What cache mode do we need? */

	case kIOMapDefaultCache:
	default:
	    flags = IODefaultCacheBits(pa);
	    break;

	case kIOMapInhibitCache:
	    flags = VM_WIMG_IO;
	    break;

	case kIOMapWriteThruCache:
	    flags = VM_WIMG_WTHRU;
	    break;

	case kIOMapWriteCombineCache:
	    flags = VM_WIMG_WCOMB;
	    break;

	case kIOMapCopybackCache:
	    flags = VM_WIMG_COPYBACK;
	    break;

	case kIOMapCopybackInnerCache:
	    flags = VM_WIMG_INNERWBACK;
	    break;
    }

    pmap_set_cache_attributes(pagenum, flags);

    vm_map_set_cache_attr(map, (vm_map_offset_t)va);


    // Set up a block mapped area
    pmap_map_block(pmap, va, pagenum, (uint32_t) atop_64(round_page_64(length)), prot, 0, 0);

    return( KERN_SUCCESS );
}

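/*
 * Remove any existing translations for the virtual range from the map's pmap.
 */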
kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length)
{
    pmap_t	pmap = map->pmap;

    pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));

    return( KERN_SUCCESS );
}

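/*
 * Change the cache mode for an already-mapped virtual range by re-entering
 * each resident page with the new WIMG flags and the protection derived
 * from options. kIOMapDefaultCache is rejected.
 */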
kern_return_t IOProtectCacheMode(vm_map_t map, mach_vm_address_t va,
					mach_vm_size_t length, unsigned int options)
{
    mach_vm_size_t off;
    vm_prot_t	   prot;
    unsigned int   flags;
    pmap_t 	   pmap = map->pmap;

    prot = (options & kIOMapReadOnly)
		? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);

    switch (options & kIOMapCacheMask)
    {
	// what cache mode do we need?
	case kIOMapDefaultCache:
	default:
	    return (KERN_INVALID_ARGUMENT);

	case kIOMapInhibitCache:
	    flags = VM_WIMG_IO;
	    break;

	case kIOMapWriteThruCache:
	    flags = VM_WIMG_WTHRU;
	    break;

	case kIOMapWriteCombineCache:
	    flags = VM_WIMG_WCOMB;
	    break;

	case kIOMapCopybackCache:
	    flags = VM_WIMG_COPYBACK;
	    break;
    }

    //  enter each page's physical address in the target map
    for (off = 0; off < length; off += page_size)
    {
	ppnum_t ppnum = pmap_find_phys(pmap, va + off);
	if (ppnum)
	    pmap_enter(pmap, va + off, ppnum, prot, VM_PROT_NONE, flags, TRUE);
    }

    return (KERN_SUCCESS);
}

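/*
 * Return the highest physical page number across the pmap memory regions
 * on x86; returns zero on ARM, where it isn't tracked here.
 */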
ppnum_t IOGetLastPageNumber(void)
{
#if __i386__ || __x86_64__
	ppnum_t	 lastPage, highest = 0;
	unsigned int idx;

	for (idx = 0; idx < pmap_memory_region_count; idx++)
	{
		lastPage = pmap_memory_regions[idx].end - 1;
		if (lastPage > highest)
			highest = lastPage;
	}
	return (highest);
#elif defined(__arm__)
	return 0;
#else
#error unknown arch
#endif
}


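/*
 * Return the current system (uptime) clock as a mach_timespec_t.
 */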
void IOGetTime( mach_timespec_t * clock_time);
void IOGetTime( mach_timespec_t * clock_time)
{
	clock_sec_t sec;
	clock_nsec_t nsec;
	clock_get_system_nanotime(&sec, &nsec);
	clock_time->tv_sec = (typeof(clock_time->tv_sec))sec;
	clock_time->tv_nsec = nsec;
}