/*	$NetBSD: subr_ntoskrnl.c,v 1.21 2011/08/31 18:31:02 plunky Exp $	*/

/*-
 * Copyright (c) 2003
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#ifdef __FreeBSD__
__FBSDID("$FreeBSD: src/sys/compat/ndis/subr_ntoskrnl.c,v 1.43.2.5 2005/03/31 04:24:36 wpaul Exp $");
#endif
#ifdef __NetBSD__
__KERNEL_RCSID(0, "$NetBSD: subr_ntoskrnl.c,v 1.21 2011/08/31 18:31:02 plunky Exp $");
#endif

#ifdef __FreeBSD__
#include <sys/ctype.h>
#endif
#include <sys/unistd.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mutex.h>

#include <sys/callout.h>
#if __FreeBSD_version > 502113
#include <sys/kdb.h>
#endif
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/atomic.h>
#ifdef __FreeBSD__
#include <machine/clock.h>
#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#endif
#include <sys/bus.h>

#ifdef __FreeBSD__
#include <sys/bus.h>
#include <sys/rman.h>
#endif

#ifdef __NetBSD__
#include <uvm/uvm.h>
#include <uvm/uvm_param.h>
#include <uvm/uvm_pmap.h>
#include <sys/pool.h>
#include <sys/reboot.h> /* for AB_VERBOSE */
#else
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#endif

#include <compat/ndis/pe_var.h>
#include <compat/ndis/ntoskrnl_var.h>
#include <compat/ndis/hal_var.h>
#include <compat/ndis/resource_var.h>
#include <compat/ndis/ndis_var.h>
#ifdef __NetBSD__
#include <compat/ndis/nbcompat.h>
#endif

#define __regparm __attribute__((regparm(3)))

#ifdef __NetBSD__
/* Turn on DbgPrint() output from Windows drivers. */
#define boothowto AB_VERBOSE
#endif

__stdcall static uint8_t RtlEqualUnicodeString(ndis_unicode_string *,
	ndis_unicode_string *, uint8_t);
__stdcall static void RtlCopyUnicodeString(ndis_unicode_string *,
	ndis_unicode_string *);
__stdcall static ndis_status RtlUnicodeStringToAnsiString(ndis_ansi_string *,
	ndis_unicode_string *, uint8_t);
__stdcall static ndis_status RtlAnsiStringToUnicodeString(ndis_unicode_string *,
	ndis_ansi_string *, uint8_t);
__stdcall static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
	void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
__stdcall static irp *IoBuildAsynchronousFsdRequest(uint32_t,
	device_object *, void *, uint32_t, uint64_t *, io_status_block *);
__stdcall static irp *IoBuildDeviceIoControlRequest(uint32_t,
	device_object *, void *, uint32_t, void *, uint32_t,
	uint8_t, nt_kevent *, io_status_block *);
__stdcall static irp *IoAllocateIrp(uint8_t, uint8_t);
__stdcall static void IoReuseIrp(irp *, uint32_t);
__stdcall static void IoFreeIrp(irp *);
__stdcall static void IoInitializeIrp(irp *, uint16_t, uint8_t);
__stdcall static irp *IoMakeAssociatedIrp(irp *, uint8_t);
__stdcall static uint32_t KeWaitForMultipleObjects(uint32_t,
	nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
	int64_t *, wait_block *);
static void ntoskrnl_wakeup(void *);
static void ntoskrnl_timercall(void *);
static void ntoskrnl_run_dpc(void *);
__stdcall static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
__stdcall static uint16_t READ_REGISTER_USHORT(uint16_t *);
__stdcall static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
__stdcall static uint32_t READ_REGISTER_ULONG(uint32_t *);
__stdcall static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
__stdcall static uint8_t READ_REGISTER_UCHAR(uint8_t *);
__stdcall static int64_t _allmul(int64_t, int64_t);
__stdcall static int64_t _alldiv(int64_t, int64_t);
__stdcall static int64_t _allrem(int64_t, int64_t);
__regparm static int64_t _allshr(int64_t, uint8_t);
__regparm static int64_t _allshl(int64_t, uint8_t);
__stdcall static uint64_t _aullmul(uint64_t, uint64_t);
__stdcall static uint64_t _aulldiv(uint64_t, uint64_t);
__stdcall static uint64_t _aullrem(uint64_t, uint64_t);
__regparm static uint64_t _aullshr(uint64_t, uint8_t);
__regparm static uint64_t _aullshl(uint64_t, uint8_t);
static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
static slist_entry *ntoskrnl_popsl(slist_header *);
__stdcall static void ExInitializePagedLookasideList(paged_lookaside_list *,
	lookaside_alloc_func *, lookaside_free_func *,
	uint32_t, size_t, uint32_t, uint16_t);
__stdcall static void ExDeletePagedLookasideList(paged_lookaside_list *);
__stdcall static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
	lookaside_alloc_func *, lookaside_free_func *,
	uint32_t, size_t, uint32_t, uint16_t);
__stdcall static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
__fastcall static slist_entry
	*InterlockedPushEntrySList(REGARGS2(slist_header *head,
	slist_entry *entry));
__fastcall static slist_entry *InterlockedPopEntrySList(REGARGS1(slist_header
	*head));
__fastcall static slist_entry
	*ExInterlockedPushEntrySList(REGARGS2(slist_header *head,
	slist_entry *entry), kspin_lock *lock);
__fastcall static slist_entry
	*ExInterlockedPopEntrySList(REGARGS2(slist_header *head,
	kspin_lock *lock));
__stdcall static uint16_t
	ExQueryDepthSList(slist_header *);
__fastcall static uint32_t
	InterlockedIncrement(REGARGS1(volatile uint32_t *addend));
__fastcall static uint32_t
	InterlockedDecrement(REGARGS1(volatile uint32_t *addend));
__fastcall static void
	ExInterlockedAddLargeStatistic(REGARGS2(uint64_t *addend, uint32_t));
__stdcall static uint32_t MmSizeOfMdl(void *, size_t);
__stdcall static void MmBuildMdlForNonPagedPool(mdl *);
__stdcall static void *MmMapLockedPages(mdl *, uint8_t);
__stdcall static void *MmMapLockedPagesSpecifyCache(mdl *,
	uint8_t, uint32_t, void *, uint32_t, uint32_t);
__stdcall static void MmUnmapLockedPages(void *, mdl *);
__stdcall static size_t RtlCompareMemory(const void *, const void *, size_t);
__stdcall static void RtlInitAnsiString(ndis_ansi_string *, char *);
__stdcall static void RtlInitUnicodeString(ndis_unicode_string *,
	uint16_t *);
__stdcall static void RtlFreeUnicodeString(ndis_unicode_string *);
__stdcall static void RtlFreeAnsiString(ndis_ansi_string *);
__stdcall static ndis_status RtlUnicodeStringToInteger(ndis_unicode_string *,
	uint32_t, uint32_t *);
static int atoi(const char *);
static long atol(const char *);
static int rand(void);
static void srand(unsigned int);
static void ntoskrnl_time(uint64_t *);
__stdcall static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
static void ntoskrnl_thrfunc(void *);
__stdcall static ndis_status PsCreateSystemThread(ndis_handle *,
	uint32_t, void *, ndis_handle, void *, void *, void *);
__stdcall static ndis_status PsTerminateSystemThread(ndis_status);
__stdcall static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
	uint32_t, void *, uint32_t *);
__stdcall static void KeInitializeMutex(kmutant *, uint32_t);
__stdcall static uint32_t KeReleaseMutex(kmutant *, uint8_t);
__stdcall static uint32_t KeReadStateMutex(kmutant *);
__stdcall static ndis_status ObReferenceObjectByHandle(ndis_handle,
	uint32_t, void *, uint8_t, void **, void **);
__fastcall static void ObfDereferenceObject(REGARGS1(void *object));
__stdcall static uint32_t ZwClose(ndis_handle);
static void *ntoskrnl_memset(void *, int, size_t);
static funcptr ntoskrnl_findwrap(funcptr);
static uint32_t DbgPrint(char *, ...);
__stdcall static void DbgBreakPoint(void);
__stdcall static void dummy(void);

#ifdef __FreeBSD__
static struct mtx ntoskrnl_dispatchlock;
#else /* __NetBSD__ */
static kmutex_t ntoskrnl_dispatchlock;
#endif

static kspin_lock ntoskrnl_global;
static kspin_lock ntoskrnl_cancellock;
static int ntoskrnl_kth = 0;
static struct nt_objref_head ntoskrnl_reflist;
#ifdef __FreeBSD__
static uma_zone_t mdl_zone;
#else
static struct pool mdl_pool;
#endif

int
ntoskrnl_libinit(void)
{
	image_patch_table	*patch;
#ifdef __FreeBSD__
	mtx_init(&ntoskrnl_dispatchlock,
	    "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF);
#else /* __NetBSD__ */
	mutex_init(&ntoskrnl_dispatchlock, MUTEX_DEFAULT, IPL_NET);
#endif
	KeInitializeSpinLock(&ntoskrnl_global);
	KeInitializeSpinLock(&ntoskrnl_cancellock);
	TAILQ_INIT(&ntoskrnl_reflist);

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		windrv_wrap((funcptr)patch->ipt_func,
		    (funcptr *)&patch->ipt_wrap);
		patch++;
	}

	/*
	 * MDLs are supposed to be variable size (they describe
	 * buffers containing some number of pages, but we don't
	 * know ahead of time how many pages that will be). But
	 * always allocating them off the heap is very slow. As
	 * a compromise, we create an MDL UMA zone big enough to
	 * handle any buffer requiring up to 16 pages, and we
	 * use those for any MDLs for buffers of 16 pages or less
	 * in size. For buffers larger than that (which we assume
	 * will be few and far between), we allocate the MDLs off
	 * the heap.
	 */

#ifdef __FreeBSD__
	mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE,
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
#else
	pool_init(&mdl_pool, MDL_ZONE_SIZE, 0, 0, 0, "winmdl", NULL,
	    IPL_VM);
#endif
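
	/*
	 * Illustrative sizing sketch (hypothetical numbers): with
	 * 4KB pages, a 16-page MDL from this zone/pool needs about
	 *
	 *	sizeof(mdl) + 16 * sizeof(vm_offset_t *)
	 *
	 * bytes, which MDL_ZONE_SIZE is expected to cover; the
	 * general form of this formula is computed by MmSizeOfMdl()
	 * below.
	 */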

	return(0);
}

int
ntoskrnl_libfini(void)
{
	image_patch_table	*patch;

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		windrv_unwrap(patch->ipt_wrap);
		patch++;
	}

#ifdef __FreeBSD__
	uma_zdestroy(mdl_zone);
#else
	pool_destroy(&mdl_pool);
#endif
	mtx_destroy(&ntoskrnl_dispatchlock);

	return(0);
}

/*
 * We need to be able to reference this externally from the wrapper;
 * GCC only generates a local implementation of memset.
 */
static void *
ntoskrnl_memset(void *buf, int ch, size_t size)
{
	return(memset(buf, ch, size));
}

__stdcall static uint8_t
RtlEqualUnicodeString(ndis_unicode_string *str1, ndis_unicode_string *str2, uint8_t caseinsensitive)
{
	int			i;

	if (str1->us_len != str2->us_len)
		return(FALSE);

	for (i = 0; i < str1->us_len; i++) {
		if (caseinsensitive == TRUE) {
			if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
			    toupper((char)(str2->us_buf[i] & 0xFF)))
				return(FALSE);
		} else {
			if (str1->us_buf[i] != str2->us_buf[i])
				return(FALSE);
		}
	}

	return(TRUE);
}

__stdcall static void
RtlCopyUnicodeString(ndis_unicode_string *dest, ndis_unicode_string *src)
{

	if (dest->us_maxlen >= src->us_len)
		dest->us_len = src->us_len;
	else
		dest->us_len = dest->us_maxlen;
	memcpy(dest->us_buf, src->us_buf, dest->us_len);
	return;
}

__stdcall static ndis_status
RtlUnicodeStringToAnsiString(ndis_ansi_string *dest, ndis_unicode_string *src, uint8_t allocate)
{
	char			*astr = NULL;

	if (dest == NULL || src == NULL)
		return(NDIS_STATUS_FAILURE);

	if (allocate == TRUE) {
		if (ndis_unicode_to_ascii(src->us_buf, src->us_len, &astr))
			return(NDIS_STATUS_FAILURE);
		dest->nas_buf = astr;
		dest->nas_len = dest->nas_maxlen = strlen(astr);
	} else {
		dest->nas_len = src->us_len / 2; /* XXX */
		if (dest->nas_maxlen < dest->nas_len)
			dest->nas_len = dest->nas_maxlen;
		ndis_unicode_to_ascii(src->us_buf, dest->nas_len * 2,
		    &dest->nas_buf);
	}
	return (NDIS_STATUS_SUCCESS);
}

__stdcall static ndis_status
RtlAnsiStringToUnicodeString(ndis_unicode_string *dest, ndis_ansi_string *src, uint8_t allocate)
{
	uint16_t		*ustr = NULL;

	if (dest == NULL || src == NULL)
		return(NDIS_STATUS_FAILURE);

	if (allocate == TRUE) {
		if (ndis_ascii_to_unicode(src->nas_buf, &ustr))
			return(NDIS_STATUS_FAILURE);
		dest->us_buf = ustr;
		dest->us_len = dest->us_maxlen = strlen(src->nas_buf) * 2;
	} else {
		dest->us_len = src->nas_len * 2; /* XXX */
		if (dest->us_maxlen < dest->us_len)
			dest->us_len = dest->us_maxlen;
		ndis_ascii_to_unicode(src->nas_buf, &dest->us_buf);
	}
	return (NDIS_STATUS_SUCCESS);
}

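/*
 * Note: the pool type and tag arguments are accepted for Windows API
 * compatibility but otherwise ignored here; all allocations come from
 * the M_DEVBUF malloc type and are not allowed to sleep.
 */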
__stdcall void *
ExAllocatePoolWithTag(
	uint32_t		pooltype,
	size_t			len,
	uint32_t		tag)
{
	void			*buf;

	buf = malloc(len, M_DEVBUF, M_NOWAIT);
	if (buf == NULL)
		return(NULL);
	return(buf);
}

__stdcall void
ExFreePool(void *buf)
{
	free(buf, M_DEVBUF);
	return;
}

__stdcall uint32_t
IoAllocateDriverObjectExtension(driver_object *drv, void *clid, uint32_t extlen, void **ext)
{
	custom_extension	*ce;

	ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension)
	    + extlen, 0);

	if (ce == NULL)
		return(STATUS_INSUFFICIENT_RESOURCES);

	ce->ce_clid = clid;
	INSERT_LIST_TAIL((&drv->dro_driverext->dre_usrext), (&ce->ce_list));

	*ext = (void *)(ce + 1);

	return(STATUS_SUCCESS);
}

__stdcall void *
IoGetDriverObjectExtension(driver_object *drv, void *clid)
{
	list_entry		*e;
	custom_extension	*ce;

	printf("in IoGetDriverObjectExtension\n");

	e = drv->dro_driverext->dre_usrext.nle_flink;
	while (e != &drv->dro_driverext->dre_usrext) {
		ce = (custom_extension *)e;
		if (ce->ce_clid == clid) {
			printf("found\n");
			return((void *)(ce + 1));
		}
		e = e->nle_flink;
	}
	printf("not found\n");
	return(NULL);
}


__stdcall uint32_t
IoCreateDevice(
	driver_object		*drv,
	uint32_t		devextlen,
	unicode_string		*devname,
	uint32_t		devtype,
	uint32_t		devchars,
	uint8_t			exclusive,
	device_object		**newdev)
{
	device_object		*dev;

#ifdef NDIS_LKM
	printf("In IoCreateDevice: drv = %p, devextlen = %x\n", drv, devextlen);
#endif

	dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0);
#ifdef NDIS_LKM
	printf("dev = %p\n", dev);
#endif
	if (dev == NULL)
		return(STATUS_INSUFFICIENT_RESOURCES);

	dev->do_type = devtype;
	dev->do_drvobj = drv;
	dev->do_currirp = NULL;
	dev->do_flags = 0;

	if (devextlen) {
		dev->do_devext = ExAllocatePoolWithTag(NonPagedPool,
		    devextlen, 0);

		if (dev->do_devext == NULL) {
			ExFreePool(dev);
			return(STATUS_INSUFFICIENT_RESOURCES);
		}

		memset(dev->do_devext, 0, devextlen);
	} else
		dev->do_devext = NULL;

	dev->do_size = sizeof(device_object) + devextlen;
	dev->do_refcnt = 1;
	dev->do_attacheddev = NULL;
	dev->do_nextdev = NULL;
	dev->do_devtype = devtype;
	dev->do_stacksize = 1;
	dev->do_alignreq = 1;
	dev->do_characteristics = devchars;
	dev->do_iotimer = NULL;
	KeInitializeEvent(&dev->do_devlock, EVENT_TYPE_SYNC, TRUE);

	/*
	 * The Vpb (volume parameter block) is used for disk/tape
	 * devices, but we don't support those. (Yet.)
	 */
	dev->do_vpb = NULL;

	dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool,
	    sizeof(devobj_extension), 0);

	if (dev->do_devobj_ext == NULL) {
		if (dev->do_devext != NULL)
			ExFreePool(dev->do_devext);
		ExFreePool(dev);
		return(STATUS_INSUFFICIENT_RESOURCES);
	}

	dev->do_devobj_ext->dve_type = 0;
	dev->do_devobj_ext->dve_size = sizeof(devobj_extension);
	dev->do_devobj_ext->dve_devobj = dev;

	/*
	 * Attach this device to the driver object's list
	 * of devices. Note: this is not the same as attaching
	 * the device to the device stack. The driver's AddDevice
	 * routine must explicitly call IoAddDeviceToDeviceStack()
	 * to do that.
	 */

	if (drv->dro_devobj == NULL) {
		drv->dro_devobj = dev;
		dev->do_nextdev = NULL;
	} else {
		dev->do_nextdev = drv->dro_devobj;
		drv->dro_devobj = dev;
	}

	*newdev = dev;

	return(STATUS_SUCCESS);
}

__stdcall void
IoDeleteDevice(device_object *dev)
{
	device_object		*prev;

	if (dev == NULL)
		return;

	if (dev->do_devobj_ext != NULL)
		ExFreePool(dev->do_devobj_ext);

	if (dev->do_devext != NULL)
		ExFreePool(dev->do_devext);

	/* Unlink the device from the driver's device list. */

	prev = dev->do_drvobj->dro_devobj;
	if (prev == dev)
		dev->do_drvobj->dro_devobj = dev->do_nextdev;
	else {
		while (prev->do_nextdev != dev)
			prev = prev->do_nextdev;
		prev->do_nextdev = dev->do_nextdev;
	}

	ExFreePool(dev);

	return;
}

__stdcall device_object *
IoGetAttachedDevice(device_object *dev)
{
	device_object		*d;

	if (dev == NULL)
		return (NULL);

	d = dev;

	while (d->do_attacheddev != NULL)
		d = d->do_attacheddev;

	return (d);
}

__stdcall static irp *
IoBuildSynchronousFsdRequest(uint32_t func, device_object *dobj, void *buf, uint32_t len, uint64_t *off, nt_kevent *event, io_status_block *status)
{
	irp			*ip;

	ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status);
	if (ip == NULL)
		return(NULL);
	ip->irp_usrevent = event;

	return(ip);
}

__stdcall static irp *
IoBuildAsynchronousFsdRequest(uint32_t func, device_object *dobj, void *buf, uint32_t len, uint64_t *off, io_status_block *status)
{
	irp			*ip;
	io_stack_location	*sl;

	ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
	if (ip == NULL)
		return(NULL);

	ip->irp_usriostat = status;
	ip->irp_tail.irp_overlay.irp_thread = NULL;

	sl = IoGetNextIrpStackLocation(ip);
	sl->isl_major = func;
	sl->isl_minor = 0;
	sl->isl_flags = 0;
	sl->isl_ctl = 0;
	sl->isl_devobj = dobj;
	sl->isl_fileobj = NULL;
	sl->isl_completionfunc = NULL;

	ip->irp_userbuf = buf;

	if (dobj->do_flags & DO_BUFFERED_IO) {
		ip->irp_assoc.irp_sysbuf =
		    ExAllocatePoolWithTag(NonPagedPool, len, 0);
		if (ip->irp_assoc.irp_sysbuf == NULL) {
			IoFreeIrp(ip);
			return(NULL);
		}
		memcpy(ip->irp_assoc.irp_sysbuf, buf, len);
	}

	if (dobj->do_flags & DO_DIRECT_IO) {
		ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip);
		if (ip->irp_mdl == NULL) {
			if (ip->irp_assoc.irp_sysbuf != NULL)
				ExFreePool(ip->irp_assoc.irp_sysbuf);
			IoFreeIrp(ip);
			return(NULL);
		}
		ip->irp_userbuf = NULL;
		ip->irp_assoc.irp_sysbuf = NULL;
	}

	if (func == IRP_MJ_READ) {
		sl->isl_parameters.isl_read.isl_len = len;
		if (off != NULL)
			sl->isl_parameters.isl_read.isl_byteoff = *off;
		else
			sl->isl_parameters.isl_read.isl_byteoff = 0;
	}

	if (func == IRP_MJ_WRITE) {
		sl->isl_parameters.isl_write.isl_len = len;
		if (off != NULL)
			sl->isl_parameters.isl_write.isl_byteoff = *off;
		else
			sl->isl_parameters.isl_write.isl_byteoff = 0;
	}

	return(ip);
}

__stdcall static irp *
IoBuildDeviceIoControlRequest(
	uint32_t		iocode,
	device_object		*dobj,
	void			*ibuf,
	uint32_t		ilen,
	void			*obuf,
	uint32_t		olen,
	uint8_t			isinternal,
	nt_kevent		*event,
	io_status_block		*status
)
{
	irp			*ip;
	io_stack_location	*sl;
	uint32_t		buflen;

	ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
	if (ip == NULL)
		return(NULL);
	ip->irp_usrevent = event;
	ip->irp_usriostat = status;
	ip->irp_tail.irp_overlay.irp_thread = NULL;

	sl = IoGetNextIrpStackLocation(ip);
	sl->isl_major = isinternal == TRUE ?
	    IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL;
	sl->isl_minor = 0;
	sl->isl_flags = 0;
	sl->isl_ctl = 0;
	sl->isl_devobj = dobj;
	sl->isl_fileobj = NULL;
	sl->isl_completionfunc = NULL;
	sl->isl_parameters.isl_ioctl.isl_iocode = iocode;
	sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen;
	sl->isl_parameters.isl_ioctl.isl_obuflen = olen;

	switch(IO_METHOD(iocode)) {
	case METHOD_BUFFERED:
		if (ilen > olen)
			buflen = ilen;
		else
			buflen = olen;
		if (buflen) {
			ip->irp_assoc.irp_sysbuf =
			    ExAllocatePoolWithTag(NonPagedPool, buflen, 0);
			if (ip->irp_assoc.irp_sysbuf == NULL) {
				IoFreeIrp(ip);
				return(NULL);
			}
		}
		if (ilen && ibuf != NULL) {
			memcpy(ip->irp_assoc.irp_sysbuf, ibuf, ilen);
			memset((char *)ip->irp_assoc.irp_sysbuf + ilen, 0,
			    buflen - ilen);
		} else
			memset(ip->irp_assoc.irp_sysbuf, 0, buflen);
		ip->irp_userbuf = obuf;
		break;
	case METHOD_IN_DIRECT:
	case METHOD_OUT_DIRECT:
		if (ilen && ibuf != NULL) {
			ip->irp_assoc.irp_sysbuf =
			    ExAllocatePoolWithTag(NonPagedPool, ilen, 0);
			if (ip->irp_assoc.irp_sysbuf == NULL) {
				IoFreeIrp(ip);
				return(NULL);
			}
			memcpy(ip->irp_assoc.irp_sysbuf, ibuf, ilen);
		}
		if (olen && obuf != NULL) {
			ip->irp_mdl = IoAllocateMdl(obuf, olen,
			    FALSE, FALSE, ip);
			/*
			 * Normally we would MmProbeAndLockPages()
			 * here, but we don't have to in our
			 * implementation.
			 */
		}
		break;
	case METHOD_NEITHER:
		ip->irp_userbuf = obuf;
		sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf;
		break;
	default:
		break;
	}

	/*
	 * Ideally, we should associate this IRP with the calling
	 * thread here.
	 */

	return (ip);
}

__stdcall static irp *
IoAllocateIrp(
	uint8_t			stsize,
	uint8_t			chargequota)
{
	irp			*i;

	i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0);
	if (i == NULL)
		return (NULL);

	IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize);

	return (i);
}

__stdcall static irp *
IoMakeAssociatedIrp(irp *ip, uint8_t stsize)
{
	irp			*associrp;

	associrp = IoAllocateIrp(stsize, FALSE);
	if (associrp == NULL)
		return(NULL);

	mtx_lock(&ntoskrnl_dispatchlock);
	associrp->irp_flags |= IRP_ASSOCIATED_IRP;
	associrp->irp_tail.irp_overlay.irp_thread =
	    ip->irp_tail.irp_overlay.irp_thread;
	associrp->irp_assoc.irp_master = ip;
	mtx_unlock(&ntoskrnl_dispatchlock);

	return(associrp);
}

__stdcall static void
IoFreeIrp(irp *ip)
{
	ExFreePool(ip);
	return;
}

__stdcall static void
IoInitializeIrp(irp *io, uint16_t psize, uint8_t ssize)
{
	memset((char *)io, 0, IoSizeOfIrp(ssize));
	io->irp_size = psize;
	io->irp_stackcnt = ssize;
	io->irp_currentstackloc = ssize;
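	/*
	 * The "current" stack location initially points one past the
	 * end of the stack location array; IoSetNextIrpStackLocation()
	 * walks it backwards as the IRP travels down a device stack.
	 */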
	INIT_LIST_HEAD(&io->irp_thlist);
	io->irp_tail.irp_overlay.irp_csl =
	    (io_stack_location *)(io + 1) + ssize;

	return;
}

__stdcall static void
IoReuseIrp(irp *ip, uint32_t status)
{
	uint8_t			allocflags;

	allocflags = ip->irp_allocflags;
	IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt);
	ip->irp_iostat.isb_status = status;
	ip->irp_allocflags = allocflags;

	return;
}

__stdcall void
IoAcquireCancelSpinLock(uint8_t *irql)
{
	KeAcquireSpinLock(&ntoskrnl_cancellock, irql);
	return;
}

__stdcall void
IoReleaseCancelSpinLock(uint8_t irql)
{
	KeReleaseSpinLock(&ntoskrnl_cancellock, irql);
	return;
}

__stdcall uint8_t
IoCancelIrp(irp *ip)
{
	cancel_func		cfunc;

	IoAcquireCancelSpinLock(&ip->irp_cancelirql);
	cfunc = IoSetCancelRoutine(ip, NULL);
	ip->irp_cancel = TRUE;
	if (cfunc == NULL) {
		IoReleaseCancelSpinLock(ip->irp_cancelirql);
		return(FALSE);
	}
	MSCALL2(cfunc, IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip);
	return(TRUE);
}

__fastcall uint32_t
IofCallDriver(REGARGS2(device_object *dobj, irp *ip))
{
	driver_object		*drvobj;
	io_stack_location	*sl;
	uint32_t		status;
	driver_dispatch		disp;

	drvobj = dobj->do_drvobj;

	if (ip->irp_currentstackloc <= 0)
		panic("IoCallDriver(): out of stack locations");

	IoSetNextIrpStackLocation(ip);
	sl = IoGetCurrentIrpStackLocation(ip);

	sl->isl_devobj = dobj;

	disp = drvobj->dro_dispatch[sl->isl_major];
	status = MSCALL2(disp, dobj, ip);

	return(status);
}

__fastcall void
IofCompleteRequest(REGARGS2(irp *ip, uint8_t prioboost))
{
	uint32_t		i;
	uint32_t		status;
	device_object		*dobj;
	io_stack_location	*sl;
	completion_func		cf;

	ip->irp_pendingreturned =
	    IoGetCurrentIrpStackLocation(ip)->isl_ctl & SL_PENDING_RETURNED;
	sl = (io_stack_location *)(ip + 1);

	for (i = ip->irp_currentstackloc; i < (uint32_t)ip->irp_stackcnt; i++) {
		if (ip->irp_currentstackloc < ip->irp_stackcnt - 1) {
			IoSkipCurrentIrpStackLocation(ip);
			dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj;
		} else
			dobj = NULL;
		if (sl[i].isl_completionfunc != NULL &&
		    ((ip->irp_iostat.isb_status == STATUS_SUCCESS &&
		    sl[i].isl_ctl & SL_INVOKE_ON_SUCCESS) ||
		    (ip->irp_iostat.isb_status != STATUS_SUCCESS &&
		    sl[i].isl_ctl & SL_INVOKE_ON_ERROR) ||
		    (ip->irp_cancel == TRUE &&
		    sl[i].isl_ctl & SL_INVOKE_ON_CANCEL))) {
			cf = sl[i].isl_completionfunc;
			status = MSCALL3(cf, dobj, ip, sl[i].isl_completionctx);
			if (status == STATUS_MORE_PROCESSING_REQUIRED)
				return;
		}

		if (IoGetCurrentIrpStackLocation(ip)->isl_ctl &
		    SL_PENDING_RETURNED)
			ip->irp_pendingreturned = TRUE;
	}

	/* Handle any associated IRPs. */

	if (ip->irp_flags & IRP_ASSOCIATED_IRP) {
		uint32_t		masterirpcnt;
		irp			*masterirp;
		mdl			*m;

		masterirp = ip->irp_assoc.irp_master;
		masterirpcnt = FASTCALL1(InterlockedDecrement,
		    &masterirp->irp_assoc.irp_irpcnt);

		while ((m = ip->irp_mdl) != NULL) {
			ip->irp_mdl = m->mdl_next;
			IoFreeMdl(m);
		}
		IoFreeIrp(ip);
		if (masterirpcnt == 0)
			IoCompleteRequest(masterirp, IO_NO_INCREMENT);
		return;
	}

	/* With any luck, these conditions will never arise. */

	if (ip->irp_flags & (IRP_PAGING_IO|IRP_CLOSE_OPERATION)) {
		if (ip->irp_usriostat != NULL)
			*ip->irp_usriostat = ip->irp_iostat;
		if (ip->irp_usrevent != NULL)
			KeSetEvent(ip->irp_usrevent, prioboost, FALSE);
		if (ip->irp_flags & IRP_PAGING_IO) {
			if (ip->irp_mdl != NULL)
				IoFreeMdl(ip->irp_mdl);
			IoFreeIrp(ip);
		}
	}

	return;
}

__stdcall device_object *
IoAttachDeviceToDeviceStack(device_object *src, device_object *dst)
{
	device_object		*attached;

	mtx_lock(&ntoskrnl_dispatchlock);
	attached = IoGetAttachedDevice(dst);
	attached->do_attacheddev = src;
	src->do_attacheddev = NULL;
	src->do_stacksize = attached->do_stacksize + 1;
	mtx_unlock(&ntoskrnl_dispatchlock);

	return(attached);
}

__stdcall void
IoDetachDevice(device_object *topdev)
{
	device_object		*tail;

	mtx_lock(&ntoskrnl_dispatchlock);

	/* First, break the chain. */
	tail = topdev->do_attacheddev;
	if (tail == NULL) {
		mtx_unlock(&ntoskrnl_dispatchlock);
		return;
	}
	topdev->do_attacheddev = tail->do_attacheddev;
	topdev->do_refcnt--;

	/* Now reduce the stacksize count for the tail objects. */

	tail = topdev->do_attacheddev;
	while (tail != NULL) {
		tail->do_stacksize--;
		tail = tail->do_attacheddev;
	}
	mtx_unlock(&ntoskrnl_dispatchlock);
}

/* Always called with dispatcher lock held. */
static void
ntoskrnl_wakeup(void *arg)
{
	nt_dispatch_header	*obj;
	wait_block		*w;
	list_entry		*e;
#ifdef __FreeBSD__
	struct thread		*td;
#endif

	obj = arg;

	obj->dh_sigstate = TRUE;
	e = obj->dh_waitlisthead.nle_flink;
	while (e != &obj->dh_waitlisthead) {
		w = (wait_block *)e;
/* TODO: is this correct? */
#ifdef __FreeBSD__
		td = w->wb_kthread;
		ndis_thresume(td->td_proc);
#else
		ndis_thresume(curproc);
#endif
		/*
		 * For synchronization objects, only wake up
		 * the first waiter.
		 */
		if (obj->dh_type == EVENT_TYPE_SYNC)
			break;
		e = e->nle_flink;
	}

	return;
}

static void
ntoskrnl_time(uint64_t *tval)
{
	struct timespec		ts;
#ifdef __NetBSD__
	struct timeval		tv;

	microtime(&tv);
	TIMEVAL_TO_TIMESPEC(&tv, &ts);
#else
	nanotime(&ts);
#endif

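	/*
	 * Produce Windows time: 100-nanosecond intervals since
	 * January 1, 1601. The Unix epoch begins 11644473600 seconds
	 * after the Windows epoch, so the offset must also be scaled
	 * to 100ns units.
	 */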
	*tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
	    (uint64_t)11644473600ULL * 10000000;

	return;
}

/*
 * KeWaitForSingleObject() is a tricky beast, because it can be used
 * with several different object types: semaphores, timers, events,
 * mutexes and threads. Semaphores don't appear very often, but the
 * other object types are quite common. KeWaitForSingleObject() is
 * what's normally used to acquire a mutex, and it can be used to
 * wait for a thread termination.
 *
 * The Windows NDIS API is implemented in terms of Windows kernel
 * primitives, and some of the object manipulation is duplicated in
 * NDIS. For example, NDIS has timers and events, which are actually
 * Windows kevents and ktimers. Now, you're supposed to only use the
 * NDIS variants of these objects within the confines of the NDIS API,
 * but there are some naughty developers out there who will use
 * KeWaitForSingleObject() on NDIS timer and event objects, so we
 * have to support that as well. Consequently, our NDIS timer and event
 * code has to be closely tied into our ntoskrnl timer and event code,
 * just as it is in Windows.
 *
 * KeWaitForSingleObject() may do different things for different kinds
 * of objects:
 *
 * - For events, we check if the event has been signalled. If the
 *   event is already in the signalled state, we just return immediately,
 *   otherwise we wait for it to be set to the signalled state by someone
 *   else calling KeSetEvent(). Events can be either synchronization or
 *   notification events.
 *
 * - For timers, if the timer has already fired and the timer is in
 *   the signalled state, we just return, otherwise we wait on the
 *   timer. Unlike an event, timers get signalled automatically when
 *   they expire rather than someone having to trip them manually.
 *   Timers initialized with KeInitializeTimer() are always notification
 *   events: KeInitializeTimerEx() lets you initialize a timer as
 *   either a notification or synchronization event.
 *
 * - For mutexes, we try to acquire the mutex and if we can't, we wait
 *   on the mutex until it's available and then grab it. When a mutex is
 *   released, it enters the signalled state, which wakes up one of the
 *   threads waiting to acquire it. Mutexes are always synchronization
 *   events.
 *
 * - For threads, the only thing we do is wait until the thread object
 *   enters a signalled state, which occurs when the thread terminates.
 *   Threads are always notification events.
 *
 * A notification event wakes up all threads waiting on an object. A
 * synchronization event wakes up just one. Also, a synchronization event
 * is auto-clearing, which means we automatically set the event back to
 * the non-signalled state once the wakeup is done.
 */
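
/*
 * A minimal usage sketch, not taken from any particular driver: a
 * Windows driver typically initializes an event and then blocks on it
 * until an ISR or DPC signals it (EVENT_TYPE_NOTIFY is assumed to be
 * the notification event type from ntoskrnl_var.h):
 *
 *	nt_kevent		ev;
 *
 *	KeInitializeEvent(&ev, EVENT_TYPE_NOTIFY, FALSE);
 *	...
 *	KeWaitForSingleObject((nt_dispatch_header *)&ev, 0, 0,
 *	    FALSE, NULL);
 *
 * The wait returns once another thread calls KeSetEvent(&ev, ...).
 */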

__stdcall uint32_t
KeWaitForSingleObject(
	nt_dispatch_header	*obj,
	uint32_t		reason,
	uint32_t		mode,
	uint8_t			alertable,
	int64_t			*duetime)
{
#ifdef __FreeBSD__
	struct thread		*td = curthread;
#endif
	kmutant			*km;
	wait_block		w;
	struct timeval		tv;
	int			error = 0;
	uint64_t		curtime;

	if (obj == NULL)
		return(STATUS_INVALID_PARAMETER);

	mtx_lock(&ntoskrnl_dispatchlock);

	/*
	 * See if the object is a mutex. If so, and we already own
	 * it, then just increment the acquisition count and return.
	 *
	 * For any other kind of object, see if it's already in the
	 * signalled state, and if it is, just return. If the object
	 * is marked as a synchronization event, reset the state to
	 * unsignalled.
	 */

	if (obj->dh_size == OTYPE_MUTEX) {
		km = (kmutant *)obj;
		if (km->km_ownerthread == NULL ||
#ifdef __FreeBSD__
		    km->km_ownerthread == curthread->td_proc) {
#else
		    km->km_ownerthread == curproc) {
#endif
			obj->dh_sigstate = FALSE;
			km->km_acquirecnt++;
#ifdef __FreeBSD__
			km->km_ownerthread = curthread->td_proc;
#else
			km->km_ownerthread = curproc;
#endif
			mtx_unlock(&ntoskrnl_dispatchlock);
			return (STATUS_SUCCESS);
		}
	} else if (obj->dh_sigstate == TRUE) {
		if (obj->dh_type == EVENT_TYPE_SYNC)
			obj->dh_sigstate = FALSE;
		mtx_unlock(&ntoskrnl_dispatchlock);
		return (STATUS_SUCCESS);
	}

	w.wb_object = obj;
#ifdef __FreeBSD__
	w.wb_kthread = td;
#endif

	INSERT_LIST_TAIL((&obj->dh_waitlisthead), (&w.wb_waitlist));

	/*
	 * The timeout value is specified in 100 nanosecond units
	 * and can be a positive or negative number. If it's positive,
	 * then the duetime is absolute, and we need to convert it
	 * to an offset relative to now in order to use it.
	 * If it's negative, then the duetime is relative and we
	 * just have to convert the units.
	 */
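
	/*
	 * Worked example: a relative timeout of one second arrives
	 * here as *duetime == -10000000 (i.e. 10,000,000 100ns units),
	 * which the conversion below turns into tv = { 1, 0 }.
	 */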

	if (duetime != NULL) {
		if (*duetime < 0) {
			tv.tv_sec = - (*duetime) / 10000000;
			tv.tv_usec = (- (*duetime) / 10) -
			    (tv.tv_sec * 1000000);
		} else {
			ntoskrnl_time(&curtime);
			if (*duetime < curtime)
				tv.tv_sec = tv.tv_usec = 0;
			else {
				tv.tv_sec = ((*duetime) - curtime) / 10000000;
				tv.tv_usec = ((*duetime) - curtime) / 10 -
				    (tv.tv_sec * 1000000);
			}
		}
	}

	error = ndis_thsuspend(curproc, &ntoskrnl_dispatchlock,
	    duetime == NULL ? 0 : tvtohz(&tv));

	/* We timed out. Leave the object alone and return status. */

	if (error == EWOULDBLOCK) {
		REMOVE_LIST_ENTRY((&w.wb_waitlist));
		mtx_unlock(&ntoskrnl_dispatchlock);
		return(STATUS_TIMEOUT);
	}

	/*
	 * Mutexes are always synchronization objects, which means
	 * if several threads are waiting to acquire it, only one will
	 * be woken up. If that one is us, and the mutex is up for grabs,
	 * grab it.
	 */

	if (obj->dh_size == OTYPE_MUTEX) {
		km = (kmutant *)obj;
		if (km->km_ownerthread == NULL) {
#ifdef __FreeBSD__
			km->km_ownerthread = curthread->td_proc;
#else
			km->km_ownerthread = curproc;
#endif
			km->km_acquirecnt++;
		}
	}

	if (obj->dh_type == EVENT_TYPE_SYNC)
		obj->dh_sigstate = FALSE;
	REMOVE_LIST_ENTRY((&w.wb_waitlist));

	mtx_unlock(&ntoskrnl_dispatchlock);

	return(STATUS_SUCCESS);
}

__stdcall static uint32_t
KeWaitForMultipleObjects(
	uint32_t		cnt,
	nt_dispatch_header	*obj[],
	uint32_t		wtype,
	uint32_t		reason,
	uint32_t		mode,
	uint8_t			alertable,
	int64_t			*duetime,
	wait_block		*wb_array)
{
#ifdef __FreeBSD__
	struct thread		*td = curthread;
#endif
	kmutant			*km;
	wait_block		_wb_array[THREAD_WAIT_OBJECTS];
	wait_block		*w;
	struct timeval		tv;
	int			i, wcnt = 0, widx = 0, error = 0;
	uint64_t		curtime;
	struct timespec		t1, t2;
#ifdef __NetBSD__
	struct timeval		tv1, tv2;
#endif

	if (cnt > MAX_WAIT_OBJECTS)
		return(STATUS_INVALID_PARAMETER);
	if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
		return(STATUS_INVALID_PARAMETER);

	mtx_lock(&ntoskrnl_dispatchlock);
	if (wb_array == NULL)
		w = &_wb_array[0];
	else
		w = wb_array;

	/* First pass: see if we can satisfy any waits immediately. */

	for (i = 0; i < cnt; i++) {
		if (obj[i]->dh_size == OTYPE_MUTEX) {
			km = (kmutant *)obj[i];
			if (km->km_ownerthread == NULL ||
#ifdef __FreeBSD__
			    km->km_ownerthread == curthread->td_proc) {
#else
			    km->km_ownerthread == curproc) {
#endif
				obj[i]->dh_sigstate = FALSE;
				km->km_acquirecnt++;
#ifdef __FreeBSD__
				km->km_ownerthread = curthread->td_proc;
#else
				km->km_ownerthread = curproc;
#endif
				if (wtype == WAITTYPE_ANY) {
					mtx_unlock(&ntoskrnl_dispatchlock);
					return (STATUS_WAIT_0 + i);
				}
			}
		} else if (obj[i]->dh_sigstate == TRUE) {
			if (obj[i]->dh_type == EVENT_TYPE_SYNC)
				obj[i]->dh_sigstate = FALSE;
			if (wtype == WAITTYPE_ANY) {
				mtx_unlock(&ntoskrnl_dispatchlock);
				return (STATUS_WAIT_0 + i);
			}
		}
	}

	/*
	 * Second pass: set up wait for anything we can't
	 * satisfy immediately.
	 */

	for (i = 0; i < cnt; i++) {
		if (obj[i]->dh_sigstate == TRUE)
			continue;
		INSERT_LIST_TAIL((&obj[i]->dh_waitlisthead),
		    (&w[i].wb_waitlist));
#ifdef __FreeBSD__
		w[i].wb_kthread = td;
#endif
		w[i].wb_object = obj[i];
		wcnt++;
	}

	if (duetime != NULL) {
		if (*duetime < 0) {
			tv.tv_sec = - (*duetime) / 10000000;
			tv.tv_usec = (- (*duetime) / 10) -
			    (tv.tv_sec * 1000000);
		} else {
			ntoskrnl_time(&curtime);
			if (*duetime < curtime)
				tv.tv_sec = tv.tv_usec = 0;
			else {
				tv.tv_sec = ((*duetime) - curtime) / 10000000;
				tv.tv_usec = ((*duetime) - curtime) / 10 -
				    (tv.tv_sec * 1000000);
			}
		}
	}

	while (wcnt) {
#ifdef __FreeBSD__
		nanotime(&t1);
#else
		microtime(&tv1);
		TIMEVAL_TO_TIMESPEC(&tv1, &t1);
#endif

		error = ndis_thsuspend(curproc, &ntoskrnl_dispatchlock,
		    duetime == NULL ? 0 : tvtohz(&tv));

#ifdef __FreeBSD__
		nanotime(&t2);
#else
		microtime(&tv2);
		TIMEVAL_TO_TIMESPEC(&tv2, &t2);
#endif

		for (i = 0; i < cnt; i++) {
			if (obj[i]->dh_size == OTYPE_MUTEX) {
				km = (kmutant *)obj[i];
				if (km->km_ownerthread == NULL) {
					km->km_ownerthread =
#ifdef __FreeBSD__
					    curthread->td_proc;
#else
					    curproc;
#endif
					km->km_acquirecnt++;
				}
			}
			if (obj[i]->dh_sigstate == TRUE) {
				widx = i;
				if (obj[i]->dh_type == EVENT_TYPE_SYNC)
					obj[i]->dh_sigstate = FALSE;
				REMOVE_LIST_ENTRY((&w[i].wb_waitlist));
				wcnt--;
			}
		}

		if (error || wtype == WAITTYPE_ANY)
			break;

		if (duetime != NULL) {
			tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
			tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
		}
	}

	if (wcnt) {
		for (i = 0; i < cnt; i++)
			REMOVE_LIST_ENTRY((&w[i].wb_waitlist));
	}

	if (error == EWOULDBLOCK) {
		mtx_unlock(&ntoskrnl_dispatchlock);
		return(STATUS_TIMEOUT);
	}

	if (wtype == WAITTYPE_ANY && wcnt) {
		mtx_unlock(&ntoskrnl_dispatchlock);
		return(STATUS_WAIT_0 + widx);
	}
	mtx_unlock(&ntoskrnl_dispatchlock);

	return(STATUS_SUCCESS);
}

__stdcall static void
WRITE_REGISTER_USHORT(uint16_t *reg, uint16_t val)
{
	bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
	return;
}

__stdcall static uint16_t
READ_REGISTER_USHORT(uint16_t *reg)
{
	return(bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

__stdcall static void
WRITE_REGISTER_ULONG(uint32_t *reg, uint32_t val)
{
	bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
	return;
}

__stdcall static uint32_t
READ_REGISTER_ULONG(uint32_t *reg)
{
	return(bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

__stdcall static uint8_t
READ_REGISTER_UCHAR(uint8_t *reg)
{
	return(bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

__stdcall static void
WRITE_REGISTER_UCHAR(uint8_t *reg, uint8_t val)
{
	bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
	return;
}

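/*
 * 64-bit arithmetic helpers. The Microsoft compiler emits calls to
 * these runtime routines for 64-bit multiply/divide/shift operations
 * in 32-bit code, so the binary-only drivers we load expect to find
 * them exported here.
 */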
__stdcall static int64_t
_allmul(int64_t a, int64_t b)
{
	return (a * b);
}

__stdcall static int64_t
_alldiv(int64_t a, int64_t b)
{
	return (a / b);
}

__stdcall static int64_t
_allrem(int64_t a, int64_t b)
{
	return (a % b);
}

__stdcall static uint64_t
_aullmul(uint64_t a, uint64_t b)
{
	return (a * b);
}

__stdcall static uint64_t
_aulldiv(uint64_t a, uint64_t b)
{
	return (a / b);
}

__stdcall static uint64_t
_aullrem(uint64_t a, uint64_t b)
{
	return (a % b);
}

__regparm static int64_t
_allshl(int64_t a, uint8_t b)
{
	return (a << b);
}

__regparm static uint64_t
_aullshl(uint64_t a, uint8_t b)
{
	return (a << b);
}

__regparm static int64_t
_allshr(int64_t a, uint8_t b)
{
	return (a >> b);
}

__regparm static uint64_t
_aullshr(uint64_t a, uint8_t b)
{
	return (a >> b);
}

static slist_entry *
ntoskrnl_pushsl(slist_header *head, slist_entry *entry)
{
	slist_entry		*oldhead;

	oldhead = head->slh_list.slh_next;
	entry->sl_next = head->slh_list.slh_next;
	head->slh_list.slh_next = entry;
	head->slh_list.slh_depth++;
	head->slh_list.slh_seq++;

	return(oldhead);
}

static slist_entry *
ntoskrnl_popsl(slist_header *head)
{
	slist_entry		*first;

	first = head->slh_list.slh_next;
	if (first != NULL) {
		head->slh_list.slh_next = first->sl_next;
		head->slh_list.slh_depth--;
		head->slh_list.slh_seq++;
	}

	return(first);
}

/*
 * We need this to make lookaside lists work for amd64.
 * We store a pointer to ExAllocatePoolWithTag() in the lookaside
 * list structure. For amd64 to work right, this has to be a
 * pointer to the wrapped version of the routine, not the
 * original. Letting the Windows driver invoke the original
 * function directly will result in a calling convention
 * mismatch and a nasty crash. On x86, this effectively
 * becomes a no-op since ipt_func and ipt_wrap are the same.
 */

static funcptr
ntoskrnl_findwrap(funcptr func)
{
	image_patch_table	*patch;

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		if ((funcptr)patch->ipt_func == func)
			return((funcptr)patch->ipt_wrap);
		patch++;
	}

	return(NULL);
}

__stdcall static void
ExInitializePagedLookasideList(
	paged_lookaside_list	*lookaside,
	lookaside_alloc_func	*allocfunc,
	lookaside_free_func	*freefunc,
	uint32_t		flags,
	size_t			size,
	uint32_t		tag,
	uint16_t		depth)
{
	memset((char *)lookaside, 0, sizeof(paged_lookaside_list));

	if (size < sizeof(slist_entry))
		lookaside->nll_l.gl_size = sizeof(slist_entry);
	else
		lookaside->nll_l.gl_size = size;
	lookaside->nll_l.gl_tag = tag;
	if (allocfunc == NULL)
		lookaside->nll_l.gl_allocfunc =
		    ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
	else
		lookaside->nll_l.gl_allocfunc = allocfunc;

	if (freefunc == NULL)
		lookaside->nll_l.gl_freefunc =
		    ntoskrnl_findwrap((funcptr)ExFreePool);
	else
		lookaside->nll_l.gl_freefunc = freefunc;

#ifdef __i386__
	KeInitializeSpinLock(&lookaside->nll_obsoletelock);
#endif

	lookaside->nll_l.gl_type = NonPagedPool;
	lookaside->nll_l.gl_depth = depth;
	lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;

	return;
}

__stdcall static void
ExDeletePagedLookasideList(paged_lookaside_list *lookaside)
{
	void			*buf;
	__stdcall void		(*freefunc)(void *);

	freefunc = lookaside->nll_l.gl_freefunc;
	while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
		MSCALL1(freefunc, buf);

	return;
}

__stdcall static void
ExInitializeNPagedLookasideList(
	npaged_lookaside_list	*lookaside,
	lookaside_alloc_func	*allocfunc,
	lookaside_free_func	*freefunc,
	uint32_t		flags,
	size_t			size,
	uint32_t		tag,
	uint16_t		depth)
{
	memset((char *)lookaside, 0, sizeof(npaged_lookaside_list));

	if (size < sizeof(slist_entry))
		lookaside->nll_l.gl_size = sizeof(slist_entry);
	else
		lookaside->nll_l.gl_size = size;
	lookaside->nll_l.gl_tag = tag;
	if (allocfunc == NULL)
		lookaside->nll_l.gl_allocfunc =
		    ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
	else
		lookaside->nll_l.gl_allocfunc = allocfunc;

	if (freefunc == NULL)
		lookaside->nll_l.gl_freefunc =
		    ntoskrnl_findwrap((funcptr)ExFreePool);
	else
		lookaside->nll_l.gl_freefunc = freefunc;

#ifdef __i386__
	KeInitializeSpinLock(&lookaside->nll_obsoletelock);
#endif

	lookaside->nll_l.gl_type = NonPagedPool;
	lookaside->nll_l.gl_depth = depth;
	lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;

	return;
}

__stdcall static void
ExDeleteNPagedLookasideList(npaged_lookaside_list *lookaside)
{
	void			*buf;
	__stdcall void		(*freefunc)(void *);

	freefunc = lookaside->nll_l.gl_freefunc;
	while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
		MSCALL1(freefunc, buf);

	return;
}

/*
 * Note: the interlocked slist push and pop routines are
 * declared to be _fastcall in Windows. gcc 3.4 is supposed
 * to have support for this calling convention, however we
 * don't have that version available yet, so we kludge things
 * up using __regparm__(3) and some argument shuffling.
 */

__fastcall static slist_entry *
InterlockedPushEntrySList(REGARGS2(slist_header *head, slist_entry *entry))
{
	slist_entry		*oldhead;

	oldhead = (slist_entry *)FASTCALL3(ExInterlockedPushEntrySList,
	    head, entry, &ntoskrnl_global);

	return(oldhead);
}

__fastcall static slist_entry *
InterlockedPopEntrySList(REGARGS1(slist_header *head))
{
	slist_entry		*first;

	first = (slist_entry *)FASTCALL2(ExInterlockedPopEntrySList,
	    head, &ntoskrnl_global);

	return(first);
}

__fastcall static slist_entry *
ExInterlockedPushEntrySList(REGARGS2(slist_header *head,
	slist_entry *entry), kspin_lock *lock)
{
	slist_entry		*oldhead;
	uint8_t			irql;

	KeAcquireSpinLock(lock, &irql);
	oldhead = ntoskrnl_pushsl(head, entry);
	KeReleaseSpinLock(lock, irql);

	return(oldhead);
}

__fastcall static slist_entry *
ExInterlockedPopEntrySList(REGARGS2(slist_header *head, kspin_lock *lock))
{
	slist_entry		*first;
	uint8_t			irql;

	KeAcquireSpinLock(lock, &irql);
	first = ntoskrnl_popsl(head);
	KeReleaseSpinLock(lock, irql);

	return(first);
}

__stdcall static uint16_t
ExQueryDepthSList(slist_header *head)
{
	uint16_t		depth;
	uint8_t			irql;

	KeAcquireSpinLock(&ntoskrnl_global, &irql);
	depth = head->slh_list.slh_depth;
	KeReleaseSpinLock(&ntoskrnl_global, irql);

	return(depth);
}

/*
 * The KeInitializeSpinLock(), KefAcquireSpinLockAtDpcLevel()
 * and KefReleaseSpinLockFromDpcLevel() routines appear to be
 * analogous to splnet()/splx() in their use. We can't create a
 * new mutex lock here because there is no complementary
 * KeFreeSpinLock() function. Instead, we map the spinlock onto
 * a __cpu_simple_lock.
 */
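/*
 * A minimal usage sketch of the lock API this implements:
 *
 *	kspin_lock	lock;
 *	uint8_t		irql;
 *
 *	KeInitializeSpinLock(&lock);
 *	KeAcquireSpinLock(&lock, &irql);
 *	... critical section ...
 *	KeReleaseSpinLock(&lock, irql);
 *
 * KeAcquireSpinLock()/KeReleaseSpinLock(), as used throughout this
 * file, are presumed to be macros over the HAL's IRQL-raising
 * spinlock routines (see hal_var.h), which in turn land on the
 * DpcLevel routines below.
 */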
__stdcall void
KeInitializeSpinLock(kspin_lock *lock)
{

	__cpu_simple_lock_init((__cpu_simple_lock_t *)lock);
}

#ifdef __i386__
__fastcall void
KefAcquireSpinLockAtDpcLevel(REGARGS1(kspin_lock *lock))
{

	__cpu_simple_lock((__cpu_simple_lock_t *)lock);
}

__fastcall void
KefReleaseSpinLockFromDpcLevel(REGARGS1(kspin_lock *lock))
{

	__cpu_simple_unlock((__cpu_simple_lock_t *)lock);
}

__stdcall uint8_t
KeAcquireSpinLockRaiseToDpc(kspin_lock *lock)
{
	uint8_t			oldirql;

	if (KeGetCurrentIrql() > DISPATCH_LEVEL)
		panic("IRQL_NOT_LESS_THAN_OR_EQUAL");

	oldirql = KeRaiseIrql(DISPATCH_LEVEL);
	KeAcquireSpinLockAtDpcLevel(lock);

	return(oldirql);
}
#else
__stdcall void
KeAcquireSpinLockAtDpcLevel(kspin_lock *lock)
{
	while (atomic_swap_uint((volatile u_int *)lock, 1) == 1)
		/* sit and spin */;

	return;
}

__stdcall void
KeReleaseSpinLockFromDpcLevel(kspin_lock *lock)
{
	*(volatile u_int *)lock = 0;

	return;
}
#endif /* __i386__ */

__fastcall uintptr_t
InterlockedExchange(REGARGS2(volatile uint32_t *dst, uintptr_t val))
{
	uint8_t			irql;
	uintptr_t		r;

	KeAcquireSpinLock(&ntoskrnl_global, &irql);
	r = *dst;
	*dst = val;
	KeReleaseSpinLock(&ntoskrnl_global, irql);

	return(r);
}

__fastcall static uint32_t
InterlockedIncrement(REGARGS1(volatile uint32_t *addend))
{
	/* Return the new value atomically; a separate re-read would race. */
	return(atomic_inc_32_nv(addend));
}

__fastcall static uint32_t
InterlockedDecrement(REGARGS1(volatile uint32_t *addend))
{
	return(atomic_dec_32_nv(addend));
}

__fastcall static void
ExInterlockedAddLargeStatistic(REGARGS2(uint64_t *addend, uint32_t inc))
{
	uint8_t			irql;

	KeAcquireSpinLock(&ntoskrnl_global, &irql);
	*addend += inc;
	KeReleaseSpinLock(&ntoskrnl_global, irql);

	return;
}

__stdcall mdl *
IoAllocateMdl(
	void			*vaddr,
	uint32_t		len,
	uint8_t			secondarybuf,
	uint8_t			chargequota,
	irp			*iopkt)
{
	mdl			*m;
	int			zone = 0;

	if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
		m = ExAllocatePoolWithTag(NonPagedPool,
		    MmSizeOfMdl(vaddr, len), 0);
	else {
#ifdef __FreeBSD__
		m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
#else
		m = pool_get(&mdl_pool, PR_WAITOK);
#endif
		zone++;
	}

	if (m == NULL)
		return (NULL);

	MmInitializeMdl(m, vaddr, len);

	/*
	 * MmInitializeMdl() clears the flags field, so we
	 * have to set this here. If the MDL came from the
	 * MDL UMA zone, tag it so we can release it to
	 * the right place later.
	 */
	if (zone)
		m->mdl_flags = MDL_ZONE_ALLOCED;

	if (iopkt != NULL) {
		if (secondarybuf == TRUE) {
			mdl			*last;
			last = iopkt->irp_mdl;
			while (last->mdl_next != NULL)
				last = last->mdl_next;
			last->mdl_next = m;
		} else {
			if (iopkt->irp_mdl != NULL)
				panic("leaking an MDL in IoAllocateMdl()");
			iopkt->irp_mdl = m;
		}
	}

	return (m);
}

__stdcall void
IoFreeMdl(mdl *m)
{
	if (m == NULL)
		return;

	if (m->mdl_flags & MDL_ZONE_ALLOCED)
#ifdef __FreeBSD__
		uma_zfree(mdl_zone, m);
#else
		pool_put(&mdl_pool, m);
#endif
	else
		ExFreePool(m);

	return;
}

__stdcall static uint32_t
MmSizeOfMdl(void *vaddr, size_t len)
{
	uint32_t		l;

	l = sizeof(struct mdl) +
	    (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len));

	return(l);
}

/*
 * The Microsoft documentation says this routine fills in the
 * page array of an MDL with the _physical_ page addresses that
 * comprise the buffer, but we don't really want to do that here.
 * Instead, we just fill in the page array with the kernel virtual
 * addresses of the buffers.
 */
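/*
 * Example of the result (hypothetical values): for an MDL describing
 * three pages starting at kernel VA "va", the array filled in below
 * becomes { va, va + PAGE_SIZE, va + 2 * PAGE_SIZE } rather than a
 * list of physical frame numbers.
 */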
__stdcall static void
MmBuildMdlForNonPagedPool(mdl *m)
{
	vm_offset_t		*mdl_pages;
	int			pagecnt, i;

	pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount);

	if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *))
		panic("not enough pages in MDL to describe buffer");

	mdl_pages = MmGetMdlPfnArray(m);

	for (i = 0; i < pagecnt; i++)
		mdl_pages[i] = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE);

	m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL;
	m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m);

	return;
}

__stdcall static void *
MmMapLockedPages(
	mdl			*buf,
	uint8_t			accessmode)
{
	buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA;
	return(MmGetMdlVirtualAddress(buf));
}

__stdcall static void *
MmMapLockedPagesSpecifyCache(
	mdl			*buf,
	uint8_t			accessmode,
	uint32_t		cachetype,
	void			*vaddr,
	uint32_t		bugcheck,
	uint32_t		prio)
{
	return(MmMapLockedPages(buf, accessmode));
}

__stdcall static void
MmUnmapLockedPages(
	void			*vaddr,
	mdl			*buf)
{
	buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
	return;
}

__stdcall static size_t
RtlCompareMemory(const void *s1, const void *s2, size_t len)
{
	size_t			i, total = 0;
	uint8_t			*m1, *m2;

	m1 = __DECONST(uint8_t *, s1);
	m2 = __DECONST(uint8_t *, s2);
1986
1987	for (i = 0; i < len; i++) {
1988		if (m1[i] == m2[i])
1989			total++;
1990	}
1991	return(total);
1992}
1993
__stdcall static void
RtlInitAnsiString(ndis_ansi_string *dst, char *src)
{
	ndis_ansi_string	*a;

	a = dst;
	if (a == NULL)
		return;
	if (src == NULL) {
		a->nas_len = a->nas_maxlen = 0;
		a->nas_buf = NULL;
	} else {
		a->nas_buf = src;
		a->nas_len = a->nas_maxlen = strlen(src);
	}

	return;
}

__stdcall static void
RtlInitUnicodeString(ndis_unicode_string *dst, uint16_t *src)
{
	ndis_unicode_string	*u;
	int			i;

	u = dst;
	if (u == NULL)
		return;
	if (src == NULL) {
		u->us_len = u->us_maxlen = 0;
		u->us_buf = NULL;
	} else {
		i = 0;
		while (src[i] != 0)
			i++;
		u->us_buf = src;
		u->us_len = u->us_maxlen = i * 2;
	}

	return;
}

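/*
 * Convert a unicode string to an integer. An optional leading
 * '+' or '-' sign may be followed, when base is 0, by a 'b', 'o'
 * or 'x' prefix selecting base 2, 8 or 16 (default 10). The
 * string is narrowed to ASCII and handed to strtoul().
 */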
__stdcall ndis_status
RtlUnicodeStringToInteger(ndis_unicode_string *ustr, uint32_t base,
	uint32_t *val)
{
	uint16_t		*uchr;
	int			len, neg = 0;
	char			abuf[64];
	char			*astr;

	uchr = ustr->us_buf;
	len = ustr->us_len;
	memset(abuf, 0, sizeof(abuf));

	if ((char)((*uchr) & 0xFF) == '-') {
		neg = 1;
		uchr++;
		len -= 2;
	} else if ((char)((*uchr) & 0xFF) == '+') {
		neg = 0;
		uchr++;
		len -= 2;
	}

	if (base == 0) {
		if ((char)((*uchr) & 0xFF) == 'b') {
			base = 2;
			uchr++;
			len -= 2;
		} else if ((char)((*uchr) & 0xFF) == 'o') {
			base = 8;
			uchr++;
			len -= 2;
		} else if ((char)((*uchr) & 0xFF) == 'x') {
			base = 16;
			uchr++;
			len -= 2;
		} else
			base = 10;
	}

	/* Clamp the input so the ASCII conversion can't overrun abuf. */
	if (len > (int)(sizeof(abuf) - 2) * 2)
		len = (int)(sizeof(abuf) - 2) * 2;

	astr = abuf;
	if (neg) {
		strcpy(astr, "-");
		astr++;
	}

	ndis_unicode_to_ascii(uchr, len, &astr);
	*val = strtoul(abuf, NULL, base);

	return(NDIS_STATUS_SUCCESS);
}

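/*
 * Free the backing store of a counted string. The buffer must
 * have come from malloc(9) with M_DEVBUF, which is how our
 * string conversion routines allocate them.
 */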
__stdcall static void
RtlFreeUnicodeString(ndis_unicode_string *ustr)
{
	if (ustr->us_buf == NULL)
		return;
	free(ustr->us_buf, M_DEVBUF);
	ustr->us_buf = NULL;
	return;
}

__stdcall static void
RtlFreeAnsiString(ndis_ansi_string *astr)
{
	if (astr->nas_buf == NULL)
		return;
	free(astr->nas_buf, M_DEVBUF);
	astr->nas_buf = NULL;
	return;
}

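/*
 * Local atoi()/atol() for the import table. The hand-rolled
 * NetBSD versions accept only a run of leading decimal digits:
 * no sign, no whitespace.
 */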
static int
atoi(const char *str)
{
#ifdef __FreeBSD__
	return (int)strtol(str, NULL, 10);
#else
	int			n;

	for (n = 0; *str && *str >= '0' && *str <= '9'; str++)
		n = n * 10 + *str - '0';
	return n;
#endif
}

static long
atol(const char *str)
{
#ifdef __FreeBSD__
	return strtol(str, NULL, 10);
#else
	long			n;

	for (n = 0; *str && *str >= '0' && *str <= '9'; str++)
		n = n * 10 + *str - '0';
	return n;
#endif
}

/*
 * stolen from ./netipsec/key.c
 */

#ifdef __NetBSD__
/* No srandom() in the NetBSD kernel; provide a do-nothing stub. */
void srandom(int);
void srandom(int arg) {return;}
#endif

static int
rand(void)
{
	struct timeval		tv;

	microtime(&tv);
	srandom(tv.tv_usec);
	return((int)random());
}

static void
srand(unsigned int seed)
{
	srandom(seed);
	return;
}

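/*
 * We only admit to being the WDM version that shipped with
 * Windows XP; anything else gets a negative answer.
 */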
__stdcall static uint8_t
IoIsWdmVersionAvailable(uint8_t major, uint8_t minor)
{
	if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
		return(TRUE);
	return(FALSE);
}

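/*
 * Of the many registry properties a driver can query, we only
 * support DEVPROP_DRIVER_KEYNAME, which we satisfy with the
 * driver name from the driver object.
 */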
__stdcall static ndis_status
IoGetDeviceProperty(
	device_object		*devobj,
	uint32_t		regprop,
	uint32_t		buflen,
	void			*prop,
	uint32_t		*reslen)
{
	driver_object		*drv;
	uint16_t		**name;

	drv = devobj->do_drvobj;

	switch (regprop) {
	case DEVPROP_DRIVER_KEYNAME:
		name = prop;
		*name = drv->dro_drivername.us_buf;
		*reslen = drv->dro_drivername.us_len;
		break;
	default:
		return(STATUS_INVALID_PARAMETER_2);
	}

	return(STATUS_SUCCESS);
}

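/*
 * Kernel mutants (mutexes) are recursive: the owning thread may
 * acquire one repeatedly, and it is only signalled again once the
 * acquisition count drops back to zero in KeReleaseMutex().
 */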
__stdcall static void
KeInitializeMutex(
	kmutant			*kmutex,
	uint32_t		level)
{
	INIT_LIST_HEAD((&kmutex->km_header.dh_waitlisthead));
	kmutex->km_abandoned = FALSE;
	kmutex->km_apcdisable = 1;
	kmutex->km_header.dh_sigstate = TRUE;
	kmutex->km_header.dh_type = EVENT_TYPE_SYNC;
	kmutex->km_header.dh_size = OTYPE_MUTEX;
	kmutex->km_acquirecnt = 0;
	kmutex->km_ownerthread = NULL;
	return;
}

__stdcall static uint32_t
KeReleaseMutex(
	kmutant			*kmutex,
	uint8_t			kwait)
{

	mtx_lock(&ntoskrnl_dispatchlock);

#ifdef __FreeBSD__
	if (kmutex->km_ownerthread != curthread->td_proc) {
#else
	if (kmutex->km_ownerthread != curproc) {
#endif
		mtx_unlock(&ntoskrnl_dispatchlock);
		return(STATUS_MUTANT_NOT_OWNED);
	}
	kmutex->km_acquirecnt--;
	if (kmutex->km_acquirecnt == 0) {
		kmutex->km_ownerthread = NULL;
		ntoskrnl_wakeup(&kmutex->km_header);
	}
	mtx_unlock(&ntoskrnl_dispatchlock);

	return(kmutex->km_acquirecnt);
}

__stdcall static uint32_t
KeReadStateMutex(kmutant *kmutex)
{
	return(kmutex->km_header.dh_sigstate);
}

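/*
 * NT event objects: notification events stay signalled until
 * explicitly reset, while synchronization events are cleared as
 * a waiter is released; the wakeup side lives in
 * ntoskrnl_wakeup().
 */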
__stdcall void
KeInitializeEvent(nt_kevent *kevent, uint32_t type, uint8_t state)
{
	INIT_LIST_HEAD((&kevent->k_header.dh_waitlisthead));
	kevent->k_header.dh_sigstate = state;
	kevent->k_header.dh_type = type;
	kevent->k_header.dh_size = OTYPE_EVENT;
	return;
}

__stdcall uint32_t
KeResetEvent(nt_kevent *kevent)
{
	uint32_t		prevstate;

	mtx_lock(&ntoskrnl_dispatchlock);
	prevstate = kevent->k_header.dh_sigstate;
	kevent->k_header.dh_sigstate = FALSE;
	mtx_unlock(&ntoskrnl_dispatchlock);

	return(prevstate);
}

__stdcall uint32_t
KeSetEvent(
	nt_kevent		*kevent,
	uint32_t		increment,
	uint8_t			kwait)
{
	uint32_t		prevstate;

	mtx_lock(&ntoskrnl_dispatchlock);
	prevstate = kevent->k_header.dh_sigstate;
	ntoskrnl_wakeup(&kevent->k_header);
	mtx_unlock(&ntoskrnl_dispatchlock);

	return(prevstate);
}

__stdcall void
KeClearEvent(nt_kevent *kevent)
{
	kevent->k_header.dh_sigstate = FALSE;
	return;
}

__stdcall uint32_t
KeReadStateEvent(nt_kevent *kevent)
{
	return(kevent->k_header.dh_sigstate);
}

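/*
 * We don't implement a real object manager. The only handles
 * anyone hands us are thread handles from PsCreateSystemThread(),
 * so "referencing" one just means recording it on a list that
 * PsTerminateSystemThread() can signal on exit.
 */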
__stdcall static ndis_status
ObReferenceObjectByHandle(
	ndis_handle		handle,
	uint32_t		reqaccess,
	void			*otype,
	uint8_t			accessmode,
	void			**object,
	void			**handleinfo)
{
	nt_objref		*nr;

	nr = malloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (nr == NULL)
		return(NDIS_STATUS_FAILURE);

	INIT_LIST_HEAD((&nr->no_dh.dh_waitlisthead));
	nr->no_obj = handle;
	nr->no_dh.dh_size = OTYPE_THREAD;
	TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
	*object = nr;

	return(NDIS_STATUS_SUCCESS);
}

__fastcall static void
ObfDereferenceObject(REGARGS1(void *object))
{
	nt_objref		*nr;

	nr = object;
	TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
	free(nr, M_DEVBUF);

	return;
}

__stdcall static uint32_t
ZwClose(ndis_handle handle)
{
	return(STATUS_SUCCESS);
}

/*
 * This is here just in case the thread returns without calling
 * PsTerminateSystemThread().
 */
static void
ntoskrnl_thrfunc(void *arg)
{
	thread_context		*thrctx;
	__stdcall uint32_t (*tfunc)(void *);
	void			*tctx;
	uint32_t		rval;

	thrctx = arg;
	tfunc = thrctx->tc_thrfunc;
	tctx = thrctx->tc_thrctx;
	free(thrctx, M_TEMP);

	rval = MSCALL1(tfunc, tctx);

	PsTerminateSystemThread(rval);
	return; /* notreached */
}

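/*
 * Spawn a kernel thread on behalf of a Windows driver. The
 * driver's entry point and context are stashed in a
 * thread_context so ntoskrnl_thrfunc() can invoke the function
 * with the Windows calling convention.
 */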
__stdcall static ndis_status
PsCreateSystemThread(
	ndis_handle		*handle,
	uint32_t		reqaccess,
	void			*objattrs,
	ndis_handle		phandle,
	void			*clientid,
	void			*thrfunc,
	void			*thrctx)
{
	int			error;
	char			tname[128];
	thread_context		*tc;
	struct proc		*p;

	tc = malloc(sizeof(thread_context), M_TEMP, M_NOWAIT);
	if (tc == NULL)
		return(NDIS_STATUS_FAILURE);

	tc->tc_thrctx = thrctx;
	tc->tc_thrfunc = thrfunc;

	snprintf(tname, sizeof(tname), "windows kthread %d", ntoskrnl_kth);
#ifdef __FreeBSD__
	error = kthread_create(ntoskrnl_thrfunc, tc, &p,
	    RFHIGHPID, NDIS_KSTACK_PAGES, tname);
#else
	/* TODO: Provide a larger stack for these threads (NDIS_KSTACK_PAGES) */
	error = ndis_kthread_create(ntoskrnl_thrfunc, tc, &p, NULL, 0, tname);
#endif
	*handle = p;

	ntoskrnl_kth++;

	return(error);
}

/*
 * In Windows, the exit of a thread is an event that you're allowed
 * to wait on, assuming you've obtained a reference to the thread using
 * ObReferenceObjectByHandle(). Unfortunately, the only way we can
 * simulate this behavior is to register each thread we create in a
 * reference list, and if someone holds a reference to us, we poke
 * them.
 */
__stdcall static ndis_status
PsTerminateSystemThread(ndis_status status)
{
	struct nt_objref	*nr;

	mtx_lock(&ntoskrnl_dispatchlock);
	TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
#ifdef __FreeBSD__
		if (nr->no_obj != curthread->td_proc)
#else
		if (nr->no_obj != curproc)
#endif
			continue;
		ntoskrnl_wakeup(&nr->no_dh);
		break;
	}
	ntoskrnl_kth--;
	mtx_unlock(&ntoskrnl_dispatchlock);

	kthread_exit(0);
	/* NOTREACHED */
}

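/*
 * DbgPrint() output is gated on bootverbose, so chatty Windows
 * drivers stay quiet unless the kernel was booted verbosely.
 */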
static uint32_t
DbgPrint(char *fmt, ...)
{
	va_list			ap;

	if (bootverbose) {
		va_start(ap, fmt);
		vprintf(fmt, ap);
		va_end(ap);
	}

	return(STATUS_SUCCESS);
}

__stdcall static void
DbgBreakPoint(void)
{
#if defined(__FreeBSD__) && __FreeBSD_version < 502113
	Debugger("DbgBreakPoint(): breakpoint");
#elif defined(__FreeBSD__) && __FreeBSD_version >= 502113
	kdb_enter("DbgBreakPoint(): breakpoint");
#else /* NetBSD case */
	; /* TODO: find a way to enter the debugger without panicking */
#endif
}

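/*
 * Callout handler for NT timers: fires the timer, re-arms it if
 * it's periodic, queues the associated DPC (if any) and wakes
 * any threads waiting on the timer object.
 */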
static void
ntoskrnl_timercall(void *arg)
{
	ktimer			*timer;
	struct timeval		tv;

	mtx_lock(&ntoskrnl_dispatchlock);

	timer = arg;

	timer->k_header.dh_inserted = FALSE;

	/*
	 * If this is a periodic timer, re-arm it
	 * so it will fire again. We do this before
	 * queueing the deferred procedure call because
	 * it's possible the DPC might cancel the timer,
	 * in which case it would be wrong for us to
	 * re-arm it afterwards.
	 */

	if (timer->k_period) {
		tv.tv_sec = 0;
		tv.tv_usec = timer->k_period * 1000;
		timer->k_header.dh_inserted = TRUE;
#ifdef __FreeBSD__
		timer->k_handle = timeout(ntoskrnl_timercall,
		    timer, tvtohz(&tv));
#else /* __NetBSD__ */
		callout_reset(timer->k_handle, tvtohz(&tv),
		    ntoskrnl_timercall, timer);
#endif /* __NetBSD__ */
	}

	if (timer->k_dpc != NULL)
		KeInsertQueueDpc(timer->k_dpc, NULL, NULL);

	ntoskrnl_wakeup(&timer->k_header);
	mtx_unlock(&ntoskrnl_dispatchlock);
}

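/*
 * KeInitializeTimer() always creates a notification timer; the
 * Ex variant lets the caller pick the dispatcher type.
 */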
__stdcall void
KeInitializeTimer(ktimer *timer)
{
	if (timer == NULL)
		return;

	KeInitializeTimerEx(timer, EVENT_TYPE_NOTIFY);

	return;
}

__stdcall void
KeInitializeTimerEx(ktimer *timer, uint32_t type)
{
	if (timer == NULL)
		return;

	INIT_LIST_HEAD((&timer->k_header.dh_waitlisthead));
	timer->k_header.dh_sigstate = FALSE;
	timer->k_header.dh_inserted = FALSE;
	timer->k_header.dh_type = type;
	timer->k_header.dh_size = OTYPE_TIMER;
#ifdef __FreeBSD__
	callout_handle_init(&timer->k_handle);
#else
	callout_init(timer->k_handle, 0);
#endif

	return;
}

/*
 * This is a wrapper for Windows deferred procedure calls that
 * have been placed on an NDIS thread work queue. We need it
 * since the DPC could be a _stdcall function. Also, as far as
 * I can tell, deferred procedure calls must run at DISPATCH_LEVEL.
 */
static void
ntoskrnl_run_dpc(void *arg)
{
	__stdcall kdpc_func	dpcfunc;
	kdpc			*dpc;
	uint8_t			irql;

	dpc = arg;
	dpcfunc = dpc->k_deferedfunc;
	irql = KeRaiseIrql(DISPATCH_LEVEL);
	MSCALL4(dpcfunc, dpc, dpc->k_deferredctx,
	    dpc->k_sysarg1, dpc->k_sysarg2);
	KeLowerIrql(irql);

	return;
}

__stdcall void
KeInitializeDpc(kdpc *dpc, void *dpcfunc, void *dpcctx)
{

	if (dpc == NULL)
		return;

	dpc->k_deferedfunc = dpcfunc;
	dpc->k_deferredctx = dpcctx;

	return;
}

__stdcall uint8_t
KeInsertQueueDpc(kdpc *dpc, void *sysarg1, void *sysarg2)
{
	dpc->k_sysarg1 = sysarg1;
	dpc->k_sysarg2 = sysarg2;

	if (ndis_sched(ntoskrnl_run_dpc, dpc, NDIS_SWI))
		return(FALSE);

	return(TRUE);
}

__stdcall uint8_t
KeRemoveQueueDpc(kdpc *dpc)
{
	if (ndis_unsched(ntoskrnl_run_dpc, dpc, NDIS_SWI))
		return(FALSE);

	return(TRUE);
}

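/*
 * Arm a timer. Windows duetimes are in 100-nanosecond units: a
 * negative value is an interval relative to now, a positive one
 * an absolute system time. A nonzero 'period' (in milliseconds)
 * makes the timer periodic. Returns TRUE if the timer was
 * already pending and got rescheduled.
 */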
__stdcall uint8_t
KeSetTimerEx(ktimer *timer, int64_t duetime, uint32_t period, kdpc *dpc)
{
	struct timeval		tv;
	uint64_t		curtime;
	uint8_t			pending;

	if (timer == NULL)
		return(FALSE);

	mtx_lock(&ntoskrnl_dispatchlock);
	if (timer->k_header.dh_inserted == TRUE) {
#ifdef __FreeBSD__
		untimeout(ntoskrnl_timercall, timer, timer->k_handle);
#else /* __NetBSD__ */
		callout_stop(timer->k_handle);
#endif
		timer->k_header.dh_inserted = FALSE;
		pending = TRUE;
	} else
		pending = FALSE;

	timer->k_duetime = duetime;
	timer->k_period = period;
	timer->k_header.dh_sigstate = FALSE;
	timer->k_dpc = dpc;

	if (duetime < 0) {
		tv.tv_sec = -duetime / 10000000;
		tv.tv_usec = (-duetime / 10) -
		    (tv.tv_sec * 1000000);
	} else {
		ntoskrnl_time(&curtime);
		if (duetime < curtime)
			tv.tv_sec = tv.tv_usec = 0;
		else {
			tv.tv_sec = (duetime - curtime) / 10000000;
			tv.tv_usec = (duetime - curtime) / 10 -
			    (tv.tv_sec * 1000000);
		}
	}

	timer->k_header.dh_inserted = TRUE;
#ifdef __FreeBSD__
	timer->k_handle = timeout(ntoskrnl_timercall, timer, tvtohz(&tv));
#else
	callout_reset(timer->k_handle, tvtohz(&tv), ntoskrnl_timercall, timer);
#endif
	mtx_unlock(&ntoskrnl_dispatchlock);

	return(pending);
}

__stdcall uint8_t
KeSetTimer(ktimer *timer, int64_t duetime, kdpc *dpc)
{
	return (KeSetTimerEx(timer, duetime, 0, dpc));
}

__stdcall uint8_t
KeCancelTimer(ktimer *timer)
{
	uint8_t			pending;

	if (timer == NULL)
		return(FALSE);

	mtx_lock(&ntoskrnl_dispatchlock);

	if (timer->k_header.dh_inserted == TRUE) {
#ifdef __FreeBSD__
		untimeout(ntoskrnl_timercall, timer, timer->k_handle);
#else /* __NetBSD__ */
		callout_stop(timer->k_handle);
#endif
		pending = TRUE;
	} else
		pending = KeRemoveQueueDpc(timer->k_dpc);

	mtx_unlock(&ntoskrnl_dispatchlock);

	return(pending);
}

__stdcall uint8_t
KeReadStateTimer(ktimer *timer)
{
	return(timer->k_header.dh_sigstate);
}

/*
 * Catch-all stub for unimplemented imports; see the note at the
 * end of ntoskrnl_functbl below.
 */
__stdcall static void
dummy(void)
{
	printf("ntoskrnl dummy called...\n");
	return;
}

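/*
 * memmove() must handle overlapping buffers, which neither
 * memcpy() nor ntoskrnl_memset() does, so give the import table
 * an overlap-safe copy routine of its own. This is a minimal
 * byte-at-a-time sketch; ntoskrnl_memmove is a local helper
 * name, not a Windows export.
 */
static void *
ntoskrnl_memmove(void *dst, const void *src, size_t len)
{
	char			*d = dst;
	const char		*s = src;

	if (d > s && d < s + len) {
		/* Destination overlaps the source tail: copy backwards. */
		d += len;
		s += len;
		while (len--)
			*--d = *--s;
	} else {
		while (len--)
			*d++ = *s++;
	}

	return(dst);
}

/*
 * Table mapping ntoskrnl.exe imports to the implementations
 * above; the PE loader uses it to patch a Windows driver's
 * import list.
 */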
image_patch_table ntoskrnl_functbl[] = {
	IMPORT_FUNC(RtlCompareMemory),
	IMPORT_FUNC(RtlEqualUnicodeString),
	IMPORT_FUNC(RtlCopyUnicodeString),
	IMPORT_FUNC(RtlUnicodeStringToAnsiString),
	IMPORT_FUNC(RtlAnsiStringToUnicodeString),
	IMPORT_FUNC(RtlInitAnsiString),
	IMPORT_FUNC_MAP(RtlInitString, RtlInitAnsiString),
	IMPORT_FUNC(RtlInitUnicodeString),
	IMPORT_FUNC(RtlFreeAnsiString),
	IMPORT_FUNC(RtlFreeUnicodeString),
	IMPORT_FUNC(RtlUnicodeStringToInteger),
	IMPORT_FUNC(sprintf),
	IMPORT_FUNC(vsprintf),
	IMPORT_FUNC_MAP(_snprintf, snprintf),
	IMPORT_FUNC_MAP(_vsnprintf, vsnprintf),
	IMPORT_FUNC(DbgPrint),
	IMPORT_FUNC(DbgBreakPoint),
	IMPORT_FUNC(strncmp),
	IMPORT_FUNC(strcmp),
	IMPORT_FUNC(strncpy),
	IMPORT_FUNC(strcpy),
	IMPORT_FUNC(strlen),
	IMPORT_FUNC(memcpy),
	IMPORT_FUNC_MAP(memmove, ntoskrnl_memmove),
	IMPORT_FUNC_MAP(memset, ntoskrnl_memset),
	IMPORT_FUNC(IoAllocateDriverObjectExtension),
	IMPORT_FUNC(IoGetDriverObjectExtension),
	IMPORT_FUNC(IofCallDriver),
	IMPORT_FUNC(IofCompleteRequest),
	IMPORT_FUNC(IoAcquireCancelSpinLock),
	IMPORT_FUNC(IoReleaseCancelSpinLock),
	IMPORT_FUNC(IoCancelIrp),
	IMPORT_FUNC(IoCreateDevice),
	IMPORT_FUNC(IoDeleteDevice),
	IMPORT_FUNC(IoGetAttachedDevice),
	IMPORT_FUNC(IoAttachDeviceToDeviceStack),
	IMPORT_FUNC(IoDetachDevice),
	IMPORT_FUNC(IoBuildSynchronousFsdRequest),
	IMPORT_FUNC(IoBuildAsynchronousFsdRequest),
	IMPORT_FUNC(IoBuildDeviceIoControlRequest),
	IMPORT_FUNC(IoAllocateIrp),
	IMPORT_FUNC(IoReuseIrp),
	IMPORT_FUNC(IoMakeAssociatedIrp),
	IMPORT_FUNC(IoFreeIrp),
	IMPORT_FUNC(IoInitializeIrp),
	IMPORT_FUNC(KeWaitForSingleObject),
	IMPORT_FUNC(KeWaitForMultipleObjects),
	IMPORT_FUNC(_allmul),
	IMPORT_FUNC(_alldiv),
	IMPORT_FUNC(_allrem),
	IMPORT_FUNC(_allshr),
	IMPORT_FUNC(_allshl),
	IMPORT_FUNC(_aullmul),
	IMPORT_FUNC(_aulldiv),
	IMPORT_FUNC(_aullrem),
	IMPORT_FUNC(_aullshr),
	IMPORT_FUNC(_aullshl),
	IMPORT_FUNC(atoi),
	IMPORT_FUNC(atol),
	IMPORT_FUNC(rand),
	IMPORT_FUNC(srand),
	IMPORT_FUNC(WRITE_REGISTER_USHORT),
	IMPORT_FUNC(READ_REGISTER_USHORT),
	IMPORT_FUNC(WRITE_REGISTER_ULONG),
	IMPORT_FUNC(READ_REGISTER_ULONG),
	IMPORT_FUNC(READ_REGISTER_UCHAR),
	IMPORT_FUNC(WRITE_REGISTER_UCHAR),
	IMPORT_FUNC(ExInitializePagedLookasideList),
	IMPORT_FUNC(ExDeletePagedLookasideList),
	IMPORT_FUNC(ExInitializeNPagedLookasideList),
	IMPORT_FUNC(ExDeleteNPagedLookasideList),
	IMPORT_FUNC(InterlockedPopEntrySList),
	IMPORT_FUNC(InterlockedPushEntrySList),
	IMPORT_FUNC(ExQueryDepthSList),
	IMPORT_FUNC_MAP(ExpInterlockedPopEntrySList, InterlockedPopEntrySList),
	IMPORT_FUNC_MAP(ExpInterlockedPushEntrySList,
		InterlockedPushEntrySList),
	IMPORT_FUNC(ExInterlockedPopEntrySList),
	IMPORT_FUNC(ExInterlockedPushEntrySList),
	IMPORT_FUNC(ExAllocatePoolWithTag),
	IMPORT_FUNC(ExFreePool),
#ifdef __i386__
	IMPORT_FUNC(KefAcquireSpinLockAtDpcLevel),
	IMPORT_FUNC(KefReleaseSpinLockFromDpcLevel),
	IMPORT_FUNC(KeAcquireSpinLockRaiseToDpc),
#else
	/*
	 * For AMD64, we can get away with just mapping
	 * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock()
	 * because the calling conventions end up being the same.
	 * On i386, we have to be careful because KfAcquireSpinLock()
	 * is _fastcall but KeAcquireSpinLockRaiseToDpc() isn't.
	 */
	IMPORT_FUNC(KeAcquireSpinLockAtDpcLevel),
	IMPORT_FUNC(KeReleaseSpinLockFromDpcLevel),
	IMPORT_FUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock),
#endif
	IMPORT_FUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock),
	IMPORT_FUNC(InterlockedIncrement),
	IMPORT_FUNC(InterlockedDecrement),
	IMPORT_FUNC(ExInterlockedAddLargeStatistic),
	IMPORT_FUNC(IoAllocateMdl),
	IMPORT_FUNC(IoFreeMdl),
	IMPORT_FUNC(MmSizeOfMdl),
	IMPORT_FUNC(MmMapLockedPages),
	IMPORT_FUNC(MmMapLockedPagesSpecifyCache),
	IMPORT_FUNC(MmUnmapLockedPages),
	IMPORT_FUNC(MmBuildMdlForNonPagedPool),
	IMPORT_FUNC(KeInitializeSpinLock),
	IMPORT_FUNC(IoIsWdmVersionAvailable),
	IMPORT_FUNC(IoGetDeviceProperty),
	IMPORT_FUNC(KeInitializeMutex),
	IMPORT_FUNC(KeReleaseMutex),
	IMPORT_FUNC(KeReadStateMutex),
	IMPORT_FUNC(KeInitializeEvent),
	IMPORT_FUNC(KeSetEvent),
	IMPORT_FUNC(KeResetEvent),
	IMPORT_FUNC(KeClearEvent),
	IMPORT_FUNC(KeReadStateEvent),
	IMPORT_FUNC(KeInitializeTimer),
	IMPORT_FUNC(KeInitializeTimerEx),
	IMPORT_FUNC(KeSetTimer),
	IMPORT_FUNC(KeSetTimerEx),
	IMPORT_FUNC(KeCancelTimer),
	IMPORT_FUNC(KeReadStateTimer),
	IMPORT_FUNC(KeInitializeDpc),
	IMPORT_FUNC(KeInsertQueueDpc),
	IMPORT_FUNC(KeRemoveQueueDpc),
	IMPORT_FUNC(ObReferenceObjectByHandle),
	IMPORT_FUNC(ObfDereferenceObject),
	IMPORT_FUNC(ZwClose),
	IMPORT_FUNC(PsCreateSystemThread),
	IMPORT_FUNC(PsTerminateSystemThread),

	/*
	 * This last entry is a catch-all for any function we haven't
	 * implemented yet. The PE import list patching routine will
	 * use it for any function that doesn't have an explicit match
	 * in this table.
	 */

	{ NULL, (FUNC)dummy, NULL },

	/* End of list. */

	{ NULL, NULL, NULL }
};
