1/*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28#include <zone_debug.h>
29#include <mach/boolean.h>
30#include <mach/kern_return.h>
31#include <mach/mig_errors.h>
32#include <mach/port.h>
33#include <mach/vm_param.h>
34#include <mach/notify.h>
35//#include <mach/mach_host_server.h>
36#include <mach/mach_types.h>
37
38#include <machine/machparam.h>		/* spl definitions */
39
40#include <ipc/ipc_port.h>
41#include <ipc/ipc_space.h>
42
43#include <kern/clock.h>
44#include <kern/spl.h>
45#include <kern/counters.h>
46#include <kern/queue.h>
47#include <kern/zalloc.h>
48#include <kern/thread.h>
49#include <kern/task.h>
50#include <kern/sched_prim.h>
51#include <kern/misc_protos.h>
52
53#include <vm/pmap.h>
54#include <vm/vm_map.h>
55#include <vm/vm_kern.h>
56
57#include <device/device_types.h>
58#include <device/device_port.h>
59#include <device/device_server.h>
60
61#include <machine/machparam.h>
62
63#if defined(__i386__) || defined(__x86_64__)
64#include <i386/pmap.h>
65#endif
66#include <IOKit/IOTypes.h>
67
68#define EXTERN
69#define MIGEXTERN
70
71/*
72 * Functions in iokit:IOUserClient.cpp
73 */
74
75extern void iokit_add_reference( io_object_t obj );
76
77extern ipc_port_t iokit_port_for_object( io_object_t obj,
78			ipc_kobject_type_t type );
79
80extern kern_return_t iokit_client_died( io_object_t obj,
81                        ipc_port_t port, ipc_kobject_type_t type, mach_port_mscount_t * mscount );
82
83extern kern_return_t
84iokit_client_memory_for_type(
85	io_object_t	connect,
86	unsigned int	type,
87	unsigned int *	flags,
88	vm_address_t *	address,
89	vm_size_t    *	size );
90
91
92extern ppnum_t IOGetLastPageNumber(void);
93
94/*
95 * Functions imported by iokit:IOUserClient.cpp
96 */
97
98extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
99			ipc_kobject_type_t type );
100
101extern kern_return_t iokit_destroy_object_port( ipc_port_t port );
102
103extern mach_port_name_t iokit_make_send_right( task_t task,
104				io_object_t obj, ipc_kobject_type_t type );
105
106extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );
107
108extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);
109
110extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);
111
112extern void iokit_retain_port( ipc_port_t port );
113extern void iokit_release_port( ipc_port_t port );
114extern void iokit_release_port_send( ipc_port_t port );
115
116extern void iokit_lock_port(ipc_port_t port);
117extern void iokit_unlock_port(ipc_port_t port);
118
119extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );
120
121/*
122 * Functions imported by iokit:IOMemoryDescriptor.cpp
123 */
124
125extern kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
126                                 mach_vm_size_t length, unsigned int mapFlags);
127
128extern kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length);
129
130extern kern_return_t IOProtectCacheMode(vm_map_t map, mach_vm_address_t va,
131					mach_vm_size_t length, unsigned int options);
132
133extern unsigned int IODefaultCacheBits(addr64_t pa);
134
135/*
136 * Lookup a device by its port.
137 * Doesn't consume the naked send right; produces a device reference.
138 */
139MIGEXTERN io_object_t
140iokit_lookup_object_port(
141	ipc_port_t	port)
142{
143	register io_object_t	obj;
144
145	if (!IP_VALID(port))
146	    return (NULL);
147
148	iokit_lock_port(port);
149	if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_OBJECT)) {
150	    obj = (io_object_t) port->ip_kobject;
151	    iokit_add_reference( obj );
152	}
153	else
154	    obj = NULL;
155
156	iokit_unlock_port(port);
157
158	return( obj );
159}
160
161MIGEXTERN io_object_t
162iokit_lookup_connect_port(
163	ipc_port_t	port)
164{
165	register io_object_t	obj;
166
167	if (!IP_VALID(port))
168	    return (NULL);
169
170	iokit_lock_port(port);
171	if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) {
172	    obj = (io_object_t) port->ip_kobject;
173	    iokit_add_reference( obj );
174	}
175	else
176	    obj = NULL;
177
178	iokit_unlock_port(port);
179
180	return( obj );
181}
182
/*
 * Translate a userland port name (smuggled through an io_object_t) into
 * the connection object bound to that port.
 *
 * The name is looked up in 'space' as a send right; the resulting port
 * must be active and of type IKOT_IOKIT_CONNECT.  Produces a reference
 * on the returned object (caller releases); returns NULL on any failure.
 */
EXTERN io_object_t
iokit_lookup_connect_ref(io_object_t connectRef, ipc_space_t space)
{
	io_object_t obj = NULL;

	if (connectRef && MACH_PORT_VALID(CAST_MACH_PORT_TO_NAME(connectRef))) {
		ipc_port_t port;
		kern_return_t kr;

		/* On success the translated port is returned locked. */
		kr = ipc_object_translate(space, CAST_MACH_PORT_TO_NAME(connectRef), MACH_PORT_RIGHT_SEND, (ipc_object_t *)&port);

		if (kr == KERN_SUCCESS) {
			assert(IP_VALID(port));

			/* Pin the port across the unlock so it can't be freed
			 * before we take the object-to-port binding lock. */
			ip_reference(port);
			ip_unlock(port);

			iokit_lock_port(port);
			if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) {
				obj = (io_object_t) port->ip_kobject;
				iokit_add_reference(obj);
			}
			iokit_unlock_port(port);

			ip_release(port);
		}
	}

	return obj;
}
213
/*
 * Convenience wrapper: resolve a connect reference in the calling task's
 * IPC space.  See iokit_lookup_connect_ref() for reference semantics.
 */
EXTERN io_object_t
iokit_lookup_connect_ref_current_task(io_object_t connectRef)
{
	return iokit_lookup_connect_ref(connectRef, current_space());
}
219
/* Take an additional reference on 'port'; pairs with iokit_release_port(). */
EXTERN void
iokit_retain_port( ipc_port_t port )
{
    ipc_port_reference( port );
}
225
/* Drop one reference on 'port' taken via iokit_retain_port() or lookup. */
EXTERN void
iokit_release_port( ipc_port_t port )
{
    ipc_port_release( port );
}
231
/* Release a naked send right (not just a port reference) on 'port'. */
EXTERN void
iokit_release_port_send( ipc_port_t port )
{
    ipc_port_release_send( port );
}
237
238extern lck_mtx_t iokit_obj_to_port_binding_lock;
239
/*
 * All object<->port bindings are guarded by a single global mutex, so the
 * 'port' argument is unused; it documents which binding the caller is
 * protecting.
 */
EXTERN void
iokit_lock_port( __unused ipc_port_t port )
{
    lck_mtx_lock(&iokit_obj_to_port_binding_lock);
}
245
/* Release the global object-to-port binding lock; see iokit_lock_port(). */
EXTERN void
iokit_unlock_port( __unused ipc_port_t port )
{
    lck_mtx_unlock(&iokit_obj_to_port_binding_lock);
}
251
252/*
253 * Get the port for a device.
254 * Consumes a device reference; produces a naked send right.
255 */
256MIGEXTERN ipc_port_t
257iokit_make_object_port(
258	io_object_t	obj )
259{
260    register ipc_port_t	port;
261    register ipc_port_t	sendPort;
262
263    if( obj == NULL)
264        return IP_NULL;
265
266    port = iokit_port_for_object( obj, IKOT_IOKIT_OBJECT );
267    if( port) {
268	sendPort = ipc_port_make_send( port);
269	iokit_release_port( port );
270    } else
271	sendPort = IP_NULL;
272
273    iokit_remove_reference( obj );
274
275    return( sendPort);
276}
277
278MIGEXTERN ipc_port_t
279iokit_make_connect_port(
280	io_object_t	obj )
281{
282    register ipc_port_t	port;
283    register ipc_port_t	sendPort;
284
285    if( obj == NULL)
286        return IP_NULL;
287
288    port = iokit_port_for_object( obj, IKOT_IOKIT_CONNECT );
289    if( port) {
290	sendPort = ipc_port_make_send( port);
291	iokit_release_port( port );
292    } else
293	sendPort = IP_NULL;
294
295    iokit_remove_reference( obj );
296
297    return( sendPort);
298}
299
300int gIOKitPortCount;
301
/*
 * Allocate a kernel port representing 'obj', bind the object to it as a
 * kobject of the given type, and arm a no-senders notification so that
 * iokit_no_senders() runs when the last send right disappears.
 * Returns IP_NULL if the port could not be allocated.
 */
EXTERN ipc_port_t
iokit_alloc_object_port( io_object_t obj, ipc_kobject_type_t type )
{
    ipc_port_t		notify;
    ipc_port_t		port;

    do {

	/* Allocate port, keeping a reference for it. */
        port = ipc_port_alloc_kernel();
        if( port == IP_NULL)
            continue;	/* do {} while(FALSE): 'continue' exits the block */

        /* set kobject & type */
//	iokit_add_reference( obj );
	ipc_kobject_set( port, (ipc_kobject_t) obj, type);

        /* Request no-senders notifications on the port. */
        ip_lock( port);
        notify = ipc_port_make_sonce_locked( port);
        ipc_port_nsrequest( port, 1, notify, &notify);
	/* port unlocked */
        assert( notify == IP_NULL);
	gIOKitPortCount++;

    } while( FALSE);

    return( port );
}
331
332
/*
 * Tear down a kobject port created by iokit_alloc_object_port(): unbind
 * the object under the binding lock, then deallocate the kernel port.
 * The object reference itself is managed by the caller.
 */
EXTERN kern_return_t
iokit_destroy_object_port( ipc_port_t port )
{

    iokit_lock_port(port);
    ipc_kobject_set( port, IKO_NULL, IKOT_NONE);

//    iokit_remove_reference( obj );
    iokit_unlock_port(port);
    ipc_port_dealloc_kernel( port);
    gIOKitPortCount--;

    return( KERN_SUCCESS);
}
347
/*
 * Rebind an existing kobject port to a (possibly different) object and
 * kobject type, under the object-to-port binding lock.  Always succeeds.
 */
EXTERN kern_return_t
iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type )
{
    iokit_lock_port(port);
    ipc_kobject_set( port, (ipc_kobject_t) obj, type);
    iokit_unlock_port(port);

    return( KERN_SUCCESS);
}
357
/*
 * Make a send right for obj's kobject port of the given type and copy it
 * out into 'task''s IPC space.
 *
 * Consumes one reference on 'obj'.  Returns the name of the new right,
 * MACH_PORT_NULL if no port or right could be produced, or
 * MACH_PORT_DEAD if the port had died.
 */
EXTERN mach_port_name_t
iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type )
{
    ipc_port_t		port;
    ipc_port_t		sendPort;
    mach_port_name_t	name = 0;

    if( obj == NULL)
        return MACH_PORT_NULL;

    port = iokit_port_for_object( obj, type );
    if( port) {
	sendPort = ipc_port_make_send( port);
	iokit_release_port( port );
    } else
	sendPort = IP_NULL;

    if (IP_VALID( sendPort )) {
    	kern_return_t	kr;
	/* copyout consumes the send right on success */
    	kr = ipc_object_copyout( task->itk_space, (ipc_object_t) sendPort,
				MACH_MSG_TYPE_PORT_SEND, TRUE, &name);
	if ( kr != KERN_SUCCESS) {
	    ipc_port_release_send( sendPort );
	    name = MACH_PORT_NULL;
	}
    } else if ( sendPort == IP_NULL)
        name = MACH_PORT_NULL;
    else if ( sendPort == IP_DEAD)
    	name = MACH_PORT_DEAD;

    iokit_remove_reference( obj );

    return( name );
}
392
/*
 * Adjust the user-reference count of the send right 'name' in 'task''s
 * space by 'delta' (negative to release).  Thin wrapper around
 * mach_port_mod_refs().
 */
EXTERN kern_return_t
iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta )
{
    return (mach_port_mod_refs( task->itk_space, name, MACH_PORT_RIGHT_SEND, delta ));
}
398
399/*
400 * Handle the No-More_Senders notification generated from a device port destroy.
401 * Since there are no longer any tasks which hold a send right to this device
402 * port a NMS notification has been generated.
403 */
404
static void
iokit_no_senders( mach_no_senders_notification_t * notification )
{
    ipc_port_t		port;
    io_object_t		obj = NULL;
    ipc_kobject_type_t	type = IKOT_NONE;
    ipc_port_t		notify;

    port = (ipc_port_t) notification->not_header.msgh_remote_port;

    // convert a port to io_object_t.
    if( IP_VALID(port)) {
        iokit_lock_port(port);
        if( ip_active(port)) {
            obj = (io_object_t) port->ip_kobject;
	    type = ip_kotype( port );
	    /* Only IOKit object/connect ports carry an object we own. */
            if( (IKOT_IOKIT_OBJECT  == type)
	     || (IKOT_IOKIT_CONNECT == type))
                iokit_add_reference( obj );
            else
                obj = NULL;
	}
        iokit_unlock_port(port);

        if( obj ) {

	    mach_port_mscount_t mscount = notification->not_count;

	    /* If the death handler declines (e.g. new send rights were
	     * made since the notification fired), re-arm no-senders at
	     * the updated make-send count so we are told again. */
            if( KERN_SUCCESS != iokit_client_died( obj, port, type, &mscount ))
	    {
		/* Re-request no-senders notifications on the port (if still active) */
		ip_lock(port);
		if (ip_active(port)) {
			notify = ipc_port_make_sonce_locked(port);
			ipc_port_nsrequest( port, mscount + 1, notify, &notify);
			/* port unlocked */
			if ( notify != IP_NULL)
				ipc_port_release_sonce(notify);
		} else {
			ip_unlock(port);
		}
	    }
            iokit_remove_reference( obj );
        }
    }
}
451
452
453EXTERN
454boolean_t
455iokit_notify( mach_msg_header_t * msg )
456{
457    switch (msg->msgh_id) {
458        case MACH_NOTIFY_NO_SENDERS:
459            iokit_no_senders((mach_no_senders_notification_t *) msg);
460            return TRUE;
461
462        case MACH_NOTIFY_PORT_DELETED:
463        case MACH_NOTIFY_PORT_DESTROYED:
464        case MACH_NOTIFY_SEND_ONCE:
465        case MACH_NOTIFY_DEAD_NAME:
466        default:
467            printf("iokit_notify: strange notification %d\n", msg->msgh_id);
468            return FALSE;
469    }
470}
471
472/* need to create a pmap function to generalize */
/*
 * Return the pmap layer's default VM_WIMG_* cache attribute bits for the
 * physical page containing address 'pa'.
 */
unsigned int IODefaultCacheBits(addr64_t pa)
{
	return(pmap_cache_attributes((ppnum_t)(pa >> PAGE_SHIFT)));
}
477
/*
 * Enter a physically-contiguous range starting at 'pa' into 'map''s pmap
 * as a block mapping at virtual address 'va'.
 *
 * 'options' selects protection (kIOMapReadOnly => read-only, otherwise
 * read/write) and cache mode via the kIOMapCacheMask bits;
 * kIOMapDefaultCache defers to the pmap's recorded attributes for 'pa'.
 * 'length' is rounded up to whole pages.  Always returns KERN_SUCCESS.
 */
kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
			mach_vm_size_t length, unsigned int options)
{
    vm_prot_t	 prot;
    unsigned int flags;
    ppnum_t	 pagenum;
    pmap_t 	 pmap = map->pmap;

    prot = (options & kIOMapReadOnly)
		? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);

    pagenum = (ppnum_t)atop_64(pa);

    switch(options & kIOMapCacheMask ) {			/* What cache mode do we need? */

	case kIOMapDefaultCache:
	default:
	    flags = IODefaultCacheBits(pa);
	    break;

	case kIOMapInhibitCache:
	    flags = VM_WIMG_IO;
	    break;

	case kIOMapWriteThruCache:
	    flags = VM_WIMG_WTHRU;
	    break;

	case kIOMapWriteCombineCache:
	    flags = VM_WIMG_WCOMB;
	    break;

	case kIOMapCopybackCache:
	    flags = VM_WIMG_COPYBACK;
	    break;
	case kIOMapCopybackInnerCache:
	    flags = VM_WIMG_INNERWBACK;
	    break;
    }

    /* Record the attribute for the first page, then tag the map so later
     * faults in this range pick up the same mode. */
    pmap_set_cache_attributes(pagenum, flags);

    vm_map_set_cache_attr(map, (vm_map_offset_t)va);


    // Set up a block mapped area
    pmap_map_block(pmap, va, pagenum, (uint32_t) atop_64(round_page_64(length)), prot, 0, 0);

    return( KERN_SUCCESS );
}
528
529kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length)
530{
531    pmap_t	pmap = map->pmap;
532
533    pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));
534
535    return( KERN_SUCCESS );
536}
537
538kern_return_t IOProtectCacheMode(vm_map_t __unused map, mach_vm_address_t __unused va,
539					mach_vm_size_t __unused length, unsigned int __unused options)
540{
541    mach_vm_size_t off;
542    vm_prot_t	   prot;
543    unsigned int   flags;
544    pmap_t 	   pmap = map->pmap;
545    pmap_flush_context	pmap_flush_context_storage;
546    boolean_t		delayed_pmap_flush = FALSE;
547
548    prot = (options & kIOMapReadOnly)
549		? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);
550
551    switch (options & kIOMapCacheMask)
552    {
553	// what cache mode do we need?
554	case kIOMapDefaultCache:
555	default:
556	    return (KERN_INVALID_ARGUMENT);
557
558	case kIOMapInhibitCache:
559	    flags = VM_WIMG_IO;
560	    break;
561
562	case kIOMapWriteThruCache:
563	    flags = VM_WIMG_WTHRU;
564	    break;
565
566	case kIOMapWriteCombineCache:
567	    flags = VM_WIMG_WCOMB;
568	    break;
569
570	case kIOMapCopybackCache:
571	    flags = VM_WIMG_COPYBACK;
572	    break;
573    }
574
575    pmap_flush_context_init(&pmap_flush_context_storage);
576    delayed_pmap_flush = FALSE;
577
578    //  enter each page's physical address in the target map
579    for (off = 0; off < length; off += page_size)
580    {
581	ppnum_t ppnum = pmap_find_phys(pmap, va + off);
582	if (ppnum) {
583		pmap_enter_options(pmap, va + off, ppnum, prot, VM_PROT_NONE, flags, TRUE,
584				   PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
585		delayed_pmap_flush = TRUE;
586	}
587    }
588    if (delayed_pmap_flush == TRUE)
589	    pmap_flush(&pmap_flush_context_storage);
590
591    return (KERN_SUCCESS);
592}
593
/*
 * Return the highest physical page number present in any pmap memory
 * region.  Only implemented for x86; other architectures fail the build.
 */
ppnum_t IOGetLastPageNumber(void)
{
#if __i386__ || __x86_64__
	ppnum_t	 lastPage, highest = 0;
	unsigned int idx;

	/* Regions are not sorted; scan them all for the largest end page. */
	for (idx = 0; idx < pmap_memory_region_count; idx++)
	{
		lastPage = pmap_memory_regions[idx].end - 1;
		if (lastPage > highest)
			highest = lastPage;
	}
	return (highest);
#else
#error unknown arch
#endif
}
611
612
613void IOGetTime( mach_timespec_t * clock_time);
614void IOGetTime( mach_timespec_t * clock_time)
615{
616	clock_sec_t sec;
617	clock_nsec_t nsec;
618	clock_get_system_nanotime(&sec, &nsec);
619	clock_time->tv_sec = (typeof(clock_time->tv_sec))sec;
620	clock_time->tv_nsec = nsec;
621}
622
623