#include <sys/ctype.h>
#include <sys/unistd.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <sys/callout.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/condvar.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/stdarg.h>
#include <machine/resource.h>

#include <sys/bus.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <compat/ndis/pe_var.h>
#include <compat/ndis/cfg_var.h>
#include <compat/ndis/resource_var.h>
#include <compat/ndis/ntoskrnl_var.h>
#include <compat/ndis/hal_var.h>
#include <compat/ndis/ndis_var.h>

#ifdef NTOSKRNL_DEBUG_TIMERS
static int sysctl_show_timers(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_debug, OID_AUTO, ntoskrnl_timers, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, sysctl_show_timers, "I",
    "Show ntoskrnl timer stats");
#endif

struct kdpc_queue {
	list_entry kq_disp;
	struct thread *kq_td;
	int kq_cpu;
	int kq_exit;
	int kq_running;
	kspin_lock kq_lock;
	nt_kevent kq_proc;
	nt_kevent kq_done;
};

typedef struct kdpc_queue kdpc_queue;

struct wb_ext {
	struct cv we_cv;
	struct thread *we_td;
};

typedef struct wb_ext wb_ext;

#define	NTOSKRNL_TIMEOUTS	256
#ifdef NTOSKRNL_DEBUG_TIMERS
static uint64_t ntoskrnl_timer_fires;
static uint64_t ntoskrnl_timer_sets;
static uint64_t ntoskrnl_timer_reloads;
static uint64_t ntoskrnl_timer_cancels;
#endif

struct callout_entry {
	struct callout ce_callout;
	list_entry ce_list;
};

typedef struct callout_entry callout_entry;

static struct list_entry ntoskrnl_calllist;
static struct mtx ntoskrnl_calllock;
struct kuser_shared_data kuser_shared_data;

static struct list_entry ntoskrnl_intlist;
static kspin_lock ntoskrnl_intlock;

static uint8_t RtlEqualUnicodeString(unicode_string *,
	unicode_string *, uint8_t);
static void RtlCopyString(ansi_string *, const ansi_string *);
static void RtlCopyUnicodeString(unicode_string *,
	unicode_string *);
static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
	void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
static irp *IoBuildAsynchronousFsdRequest(uint32_t,
	device_object *, void *, uint32_t, uint64_t *, io_status_block *);
static irp *IoBuildDeviceIoControlRequest(uint32_t,
	device_object *, void *, uint32_t, void *, uint32_t,
	uint8_t, nt_kevent *, io_status_block *);
static irp *IoAllocateIrp(uint8_t, uint8_t);
static void IoReuseIrp(irp *, uint32_t);
static void IoFreeIrp(irp *);
static void IoInitializeIrp(irp *, uint16_t, uint8_t);
static irp *IoMakeAssociatedIrp(irp *, uint8_t);
static uint32_t KeWaitForMultipleObjects(uint32_t,
	nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
	int64_t *, wait_block *);
static void ntoskrnl_waittest(nt_dispatch_header *, uint32_t);
static void ntoskrnl_satisfy_wait(nt_dispatch_header *, struct thread *);
static void ntoskrnl_satisfy_multiple_waits(wait_block *);
static int ntoskrnl_is_signalled(nt_dispatch_header *, struct thread *);
static void ntoskrnl_insert_timer(ktimer *, int);
static void ntoskrnl_remove_timer(ktimer *);
#ifdef NTOSKRNL_DEBUG_TIMERS
static void ntoskrnl_show_timers(void);
#endif
static void ntoskrnl_timercall(void *);
static void ntoskrnl_dpc_thread(void *);
static void ntoskrnl_destroy_dpc_threads(void);
static void ntoskrnl_destroy_workitem_threads(void);
static void ntoskrnl_workitem_thread(void *);
static void ntoskrnl_workitem(device_object *, void *);
static void ntoskrnl_unicode_to_ascii(uint16_t *, char *, int);
static void ntoskrnl_ascii_to_unicode(char *, uint16_t *, int);
static uint8_t ntoskrnl_insert_dpc(list_entry *, kdpc *);
static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
static uint16_t READ_REGISTER_USHORT(uint16_t *);
static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
static uint32_t READ_REGISTER_ULONG(uint32_t *);
static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
static uint8_t READ_REGISTER_UCHAR(uint8_t *);
static int64_t _allmul(int64_t, int64_t);
static int64_t _alldiv(int64_t, int64_t);
static int64_t _allrem(int64_t, int64_t);
static int64_t _allshr(int64_t, uint8_t);
static int64_t _allshl(int64_t, uint8_t);
static uint64_t _aullmul(uint64_t, uint64_t);
static uint64_t _aulldiv(uint64_t, uint64_t);
static uint64_t _aullrem(uint64_t, uint64_t);
static uint64_t _aullshr(uint64_t, uint8_t);
static uint64_t _aullshl(uint64_t, uint8_t);
static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
static void InitializeSListHead(slist_header *);
static slist_entry *ntoskrnl_popsl(slist_header *);
static void ExFreePoolWithTag(void *, uint32_t);
static void ExInitializePagedLookasideList(paged_lookaside_list *,
	lookaside_alloc_func *, lookaside_free_func *,
	uint32_t, size_t, uint32_t, uint16_t);
static void ExDeletePagedLookasideList(paged_lookaside_list *);
static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
	lookaside_alloc_func *, lookaside_free_func *,
	uint32_t, size_t, uint32_t, uint16_t);
static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
static slist_entry
	*ExInterlockedPushEntrySList(slist_header *,
	slist_entry *, kspin_lock *);
static slist_entry
	*ExInterlockedPopEntrySList(slist_header *, kspin_lock *);
static uint32_t InterlockedIncrement(volatile uint32_t *);
static uint32_t InterlockedDecrement(volatile uint32_t *);
static void ExInterlockedAddLargeStatistic(uint64_t *, uint32_t);
static void *MmAllocateContiguousMemory(uint32_t, uint64_t);
static void *MmAllocateContiguousMemorySpecifyCache(uint32_t,
	uint64_t, uint64_t, uint64_t, enum nt_caching_type);
static void MmFreeContiguousMemory(void *);
static void MmFreeContiguousMemorySpecifyCache(void *, uint32_t,
	enum nt_caching_type);
static uint32_t MmSizeOfMdl(void *, size_t);
static void *MmMapLockedPages(mdl *, uint8_t);
static void *MmMapLockedPagesSpecifyCache(mdl *,
	uint8_t, uint32_t, void *, uint32_t, uint32_t);
static void MmUnmapLockedPages(void *, mdl *);
static device_t ntoskrnl_finddev(device_t, uint64_t, struct resource **);
static void RtlZeroMemory(void *, size_t);
static void RtlSecureZeroMemory(void *, size_t);
static void RtlFillMemory(void *, size_t, uint8_t);
static void RtlMoveMemory(void *, const void *, size_t);
static ndis_status RtlCharToInteger(const char *, uint32_t, uint32_t *);
static void RtlCopyMemory(void *, const void *, size_t);
static size_t RtlCompareMemory(const void *, const void *, size_t);
static ndis_status RtlUnicodeStringToInteger(unicode_string *,
	uint32_t, uint32_t *);
static int atoi(const char *);
static long atol(const char *);
static int rand(void);
static void srand(unsigned int);
static void KeQuerySystemTime(uint64_t *);
static uint32_t KeTickCount(void);
static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
static int32_t IoOpenDeviceRegistryKey(struct device_object *, uint32_t,
	uint32_t, void **);
static void ntoskrnl_thrfunc(void *);
static ndis_status PsCreateSystemThread(ndis_handle *,
	uint32_t, void *, ndis_handle, void *, void *, void *);
static ndis_status PsTerminateSystemThread(ndis_status);
static ndis_status IoGetDeviceObjectPointer(unicode_string *,
	uint32_t, void *, device_object *);
static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
	uint32_t, void *, uint32_t *);
static void KeInitializeMutex(kmutant *, uint32_t);
static uint32_t KeReleaseMutex(kmutant *, uint8_t);
static uint32_t KeReadStateMutex(kmutant *);
static ndis_status ObReferenceObjectByHandle(ndis_handle,
	uint32_t, void *, uint8_t, void **, void **);
static void ObfDereferenceObject(void *);
static uint32_t ZwClose(ndis_handle);
static uint32_t WmiQueryTraceInformation(uint32_t, void *, uint32_t,
	uint32_t, void *);
static uint32_t WmiTraceMessage(uint64_t, uint32_t, void *, uint16_t, ...);
static uint32_t IoWMIRegistrationControl(device_object *, uint32_t);
static void *ntoskrnl_memset(void *, int, size_t);
static void *ntoskrnl_memmove(void *, void *, size_t);
static void *ntoskrnl_memchr(void *, unsigned char, size_t);
static char *ntoskrnl_strstr(char *, char *);
static char *ntoskrnl_strncat(char *, char *, size_t);
static int ntoskrnl_toupper(int);
static int ntoskrnl_tolower(int);
static funcptr ntoskrnl_findwrap(funcptr);
static uint32_t DbgPrint(char *, ...);
static void DbgBreakPoint(void);
static void KeBugCheckEx(uint32_t, u_long, u_long, u_long, u_long);
static int32_t KeDelayExecutionThread(uint8_t, uint8_t, int64_t *);
static int32_t KeSetPriorityThread(struct thread *, int32_t);
static void dummy(void);

static struct mtx ntoskrnl_dispatchlock;
static struct mtx ntoskrnl_interlock;
static kspin_lock ntoskrnl_cancellock;
static int ntoskrnl_kth = 0;
static struct nt_objref_head ntoskrnl_reflist;
static uma_zone_t mdl_zone;
static uma_zone_t iw_zone;
static struct kdpc_queue *kq_queues;
static struct kdpc_queue *wq_queues;
static int wq_idx = 0;

int
ntoskrnl_libinit()
{
	image_patch_table *patch;
	int error;
	struct proc *p;
	kdpc_queue *kq;
	callout_entry *e;
	int i;

	mtx_init(&ntoskrnl_dispatchlock,
	    "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF|MTX_RECURSE);
	mtx_init(&ntoskrnl_interlock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);
	KeInitializeSpinLock(&ntoskrnl_cancellock);
	KeInitializeSpinLock(&ntoskrnl_intlock);
	TAILQ_INIT(&ntoskrnl_reflist);

	InitializeListHead(&ntoskrnl_calllist);
	InitializeListHead(&ntoskrnl_intlist);
	mtx_init(&ntoskrnl_calllock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);

	kq_queues = ExAllocatePoolWithTag(NonPagedPool,
#ifdef NTOSKRNL_MULTIPLE_DPCS
	    sizeof(kdpc_queue) * mp_ncpus, 0);
#else
	    sizeof(kdpc_queue), 0);
#endif

	if (kq_queues == NULL)
		return (ENOMEM);

	wq_queues = ExAllocatePoolWithTag(NonPagedPool,
	    sizeof(kdpc_queue) * WORKITEM_THREADS, 0);

	if (wq_queues == NULL) {
		/* Don't leak the DPC queues we just allocated. */
		ExFreePool(kq_queues);
		return (ENOMEM);
	}

#ifdef NTOSKRNL_MULTIPLE_DPCS
	bzero((char *)kq_queues, sizeof(kdpc_queue) * mp_ncpus);
#else
	bzero((char *)kq_queues, sizeof(kdpc_queue));
#endif
	bzero((char *)wq_queues, sizeof(kdpc_queue) * WORKITEM_THREADS);

	/*
	 * Launch the DPC threads.
	 */

#ifdef NTOSKRNL_MULTIPLE_DPCS
	for (i = 0; i < mp_ncpus; i++) {
#else
	for (i = 0; i < 1; i++) {
#endif
		kq = kq_queues + i;
		kq->kq_cpu = i;
		error = kproc_create(ntoskrnl_dpc_thread, kq, &p,
		    RFHIGHPID, NDIS_KSTACK_PAGES, "Windows DPC %d", i);
		if (error)
			panic("failed to launch DPC thread");
	}

	/*
	 * Launch the workitem threads.
	 */

	for (i = 0; i < WORKITEM_THREADS; i++) {
		kq = wq_queues + i;
		error = kproc_create(ntoskrnl_workitem_thread, kq, &p,
		    RFHIGHPID, NDIS_KSTACK_PAGES, "Windows Workitem %d", i);
		if (error)
			panic("failed to launch workitem thread");
	}

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		windrv_wrap((funcptr)patch->ipt_func,
		    (funcptr *)&patch->ipt_wrap,
		    patch->ipt_argcnt, patch->ipt_ftype);
		patch++;
	}

	for (i = 0; i < NTOSKRNL_TIMEOUTS; i++) {
		e = ExAllocatePoolWithTag(NonPagedPool,
		    sizeof(callout_entry), 0);
		if (e == NULL)
			panic("failed to allocate timeouts");
		mtx_lock_spin(&ntoskrnl_calllock);
		InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
		mtx_unlock_spin(&ntoskrnl_calllock);
	}

	/*
	 * MDLs are supposed to be variable size (they describe
	 * buffers containing some number of pages, but we don't
	 * know ahead of time how many pages that will be). But
	 * always allocating them off the heap is very slow. As
	 * a compromise, we create an MDL UMA zone big enough to
	 * handle any buffer requiring up to 16 pages, and we
	 * use those for any MDLs for buffers of 16 pages or less
	 * in size. For buffers larger than that (which we assume
	 * will be few and far between), we allocate the MDLs off
	 * the heap.
	 */

	mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE,
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	iw_zone = uma_zcreate("Windows WorkItem", sizeof(io_workitem),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	return (0);
}
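/*
 * To make the compromise above concrete (a sketch, not driver-visible
 * code): MmSizeOfMdl(), prototyped earlier and defined later in this
 * file, is assumed to work out to roughly sizeof(mdl) plus one pointer
 * per page spanned, so the decision IoAllocateMdl() makes further down
 * amounts to:
 *
 *	if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
 *		m = ExAllocatePoolWithTag(NonPagedPool,
 *		    MmSizeOfMdl(vaddr, len), 0);	(large: heap)
 *	else
 *		m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);	(zone)
 */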
int
ntoskrnl_libfini()
{
	image_patch_table *patch;
	callout_entry *e;
	list_entry *l;

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		windrv_unwrap(patch->ipt_wrap);
		patch++;
	}

	/* Stop the workitem queues. */
	ntoskrnl_destroy_workitem_threads();
	/* Stop the DPC queues. */
	ntoskrnl_destroy_dpc_threads();

	ExFreePool(kq_queues);
	ExFreePool(wq_queues);

	uma_zdestroy(mdl_zone);
	uma_zdestroy(iw_zone);

	mtx_lock_spin(&ntoskrnl_calllock);
	while (!IsListEmpty(&ntoskrnl_calllist)) {
		l = RemoveHeadList(&ntoskrnl_calllist);
		e = CONTAINING_RECORD(l, callout_entry, ce_list);
		mtx_unlock_spin(&ntoskrnl_calllock);
		ExFreePool(e);
		mtx_lock_spin(&ntoskrnl_calllock);
	}
	mtx_unlock_spin(&ntoskrnl_calllock);

	mtx_destroy(&ntoskrnl_dispatchlock);
	mtx_destroy(&ntoskrnl_interlock);
	mtx_destroy(&ntoskrnl_calllock);

	return (0);
}

/*
 * We need to be able to reference this externally from the wrapper;
 * GCC only generates a local implementation of memset.
 */
static void *
ntoskrnl_memset(buf, ch, size)
	void *buf;
	int ch;
	size_t size;
{
	return (memset(buf, ch, size));
}

static void *
ntoskrnl_memmove(dst, src, size)
	void *dst;
	void *src;
	size_t size;
{
	bcopy(src, dst, size);
	return (dst);
}

static void *
ntoskrnl_memchr(void *buf, unsigned char ch, size_t len)
{
	if (len != 0) {
		unsigned char *p = buf;

		do {
			if (*p++ == ch)
				return (p - 1);
		} while (--len != 0);
	}
	return (NULL);
}

static char *
ntoskrnl_strstr(s, find)
	char *s, *find;
{
	char c, sc;
	size_t len;

	if ((c = *find++) != 0) {
		len = strlen(find);
		do {
			do {
				if ((sc = *s++) == 0)
					return (NULL);
			} while (sc != c);
		} while (strncmp(s, find, len) != 0);
		s--;
	}
	return ((char *)s);
}

/* Taken from libc */
static char *
ntoskrnl_strncat(dst, src, n)
	char *dst;
	char *src;
	size_t n;
{
	if (n != 0) {
		char *d = dst;
		const char *s = src;

		while (*d != 0)
			d++;
		do {
			if ((*d = *s++) == 0)
				break;
			d++;
		} while (--n != 0);
		*d = 0;
	}
	return (dst);
}

static int
ntoskrnl_toupper(c)
	int c;
{
	return (toupper(c));
}

static int
ntoskrnl_tolower(c)
	int c;
{
	return (tolower(c));
}

static uint8_t
RtlEqualUnicodeString(unicode_string *str1, unicode_string *str2,
	uint8_t caseinsensitive)
{
	int i;

	if (str1->us_len != str2->us_len)
		return (FALSE);

	for (i = 0; i < str1->us_len; i++) {
		if (caseinsensitive == TRUE) {
			if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
			    toupper((char)(str2->us_buf[i] & 0xFF)))
				return (FALSE);
		} else {
			if (str1->us_buf[i] != str2->us_buf[i])
				return (FALSE);
		}
	}

	return (TRUE);
}

static void
RtlCopyString(dst, src)
	ansi_string *dst;
	const ansi_string *src;
{
	if (src != NULL && src->as_buf != NULL && dst->as_buf != NULL) {
		dst->as_len = min(src->as_len, dst->as_maxlen);
		memcpy(dst->as_buf, src->as_buf, dst->as_len);
		if (dst->as_len < dst->as_maxlen)
			dst->as_buf[dst->as_len] = 0;
	} else
		dst->as_len = 0;
}

static void
RtlCopyUnicodeString(dest, src)
	unicode_string *dest;
	unicode_string *src;
{

	if (dest->us_maxlen >= src->us_len)
		dest->us_len = src->us_len;
	else
		dest->us_len = dest->us_maxlen;
	memcpy(dest->us_buf, src->us_buf, dest->us_len);
}
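/*
 * A note on the conversion helpers below: a unicode_string counts its
 * length (us_len) in bytes rather than characters, while an
 * ansi_string counts bytes one-for-one. That is why the routines
 * below and the Rtl*String converters that call them keep multiplying
 * and dividing lengths by 2.
 */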
static void
ntoskrnl_ascii_to_unicode(ascii, unicode, len)
	char *ascii;
	uint16_t *unicode;
	int len;
{
	int i;
	uint16_t *ustr;

	ustr = unicode;
	for (i = 0; i < len; i++) {
		*ustr = (uint16_t)ascii[i];
		ustr++;
	}
}

static void
ntoskrnl_unicode_to_ascii(unicode, ascii, len)
	uint16_t *unicode;
	char *ascii;
	int len;
{
	int i;
	uint8_t *astr;

	astr = (uint8_t *)ascii;
	for (i = 0; i < len / 2; i++) {
		*astr = (uint8_t)unicode[i];
		astr++;
	}
}

uint32_t
RtlUnicodeStringToAnsiString(ansi_string *dest, unicode_string *src,
	uint8_t allocate)
{
	if (dest == NULL || src == NULL)
		return (STATUS_INVALID_PARAMETER);

	dest->as_len = src->us_len / 2;
	if (dest->as_maxlen < dest->as_len)
		dest->as_len = dest->as_maxlen;

	if (allocate == TRUE) {
		dest->as_buf = ExAllocatePoolWithTag(NonPagedPool,
		    (src->us_len / 2) + 1, 0);
		if (dest->as_buf == NULL)
			return (STATUS_INSUFFICIENT_RESOURCES);
		dest->as_len = dest->as_maxlen = src->us_len / 2;
	} else {
		dest->as_len = src->us_len / 2; /* XXX */
		if (dest->as_maxlen < dest->as_len)
			dest->as_len = dest->as_maxlen;
	}

	ntoskrnl_unicode_to_ascii(src->us_buf, dest->as_buf,
	    dest->as_len * 2);

	return (STATUS_SUCCESS);
}

uint32_t
RtlAnsiStringToUnicodeString(unicode_string *dest, ansi_string *src,
	uint8_t allocate)
{
	if (dest == NULL || src == NULL)
		return (STATUS_INVALID_PARAMETER);

	if (allocate == TRUE) {
		dest->us_buf = ExAllocatePoolWithTag(NonPagedPool,
		    src->as_len * 2, 0);
		if (dest->us_buf == NULL)
			return (STATUS_INSUFFICIENT_RESOURCES);
		dest->us_len = dest->us_maxlen = strlen(src->as_buf) * 2;
	} else {
		dest->us_len = src->as_len * 2; /* XXX */
		if (dest->us_maxlen < dest->us_len)
			dest->us_len = dest->us_maxlen;
	}

	ntoskrnl_ascii_to_unicode(src->as_buf, dest->us_buf,
	    dest->us_len / 2);

	return (STATUS_SUCCESS);
}

void *
ExAllocatePoolWithTag(pooltype, len, tag)
	uint32_t pooltype;
	size_t len;
	uint32_t tag;
{
	void *buf;

	buf = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buf == NULL)
		return (NULL);

	return (buf);
}

static void
ExFreePoolWithTag(buf, tag)
	void *buf;
	uint32_t tag;
{
	ExFreePool(buf);
}

void
ExFreePool(buf)
	void *buf;
{
	free(buf, M_DEVBUF);
}

uint32_t
IoAllocateDriverObjectExtension(drv, clid, extlen, ext)
	driver_object *drv;
	void *clid;
	uint32_t extlen;
	void **ext;
{
	custom_extension *ce;

	ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension)
	    + extlen, 0);

	if (ce == NULL)
		return (STATUS_INSUFFICIENT_RESOURCES);

	ce->ce_clid = clid;
	InsertTailList((&drv->dro_driverext->dre_usrext), (&ce->ce_list));

	*ext = (void *)(ce + 1);

	return (STATUS_SUCCESS);
}
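/*
 * A hypothetical driver-side sequence for the extension routines above
 * and below (sketch only; "clid" is whatever GUID-like cookie the
 * driver chooses, and struct mydata stands in for its private state):
 *
 *	void *ext;
 *
 *	if (IoAllocateDriverObjectExtension(drv, clid,
 *	    sizeof(struct mydata), &ext) == STATUS_SUCCESS)
 *		... initialize *ext ...
 *	...
 *	ext = IoGetDriverObjectExtension(drv, clid);
 */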
void *
IoGetDriverObjectExtension(drv, clid)
	driver_object *drv;
	void *clid;
{
	list_entry *e;
	custom_extension *ce;

	/*
	 * Sanity check. Our dummy bus drivers don't have
	 * any driver extensions.
	 */

	if (drv->dro_driverext == NULL)
		return (NULL);

	e = drv->dro_driverext->dre_usrext.nle_flink;
	while (e != &drv->dro_driverext->dre_usrext) {
		ce = (custom_extension *)e;
		if (ce->ce_clid == clid)
			return ((void *)(ce + 1));
		e = e->nle_flink;
	}

	return (NULL);
}

uint32_t
IoCreateDevice(driver_object *drv, uint32_t devextlen, unicode_string *devname,
	uint32_t devtype, uint32_t devchars, uint8_t exclusive,
	device_object **newdev)
{
	device_object *dev;

	dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0);
	if (dev == NULL)
		return (STATUS_INSUFFICIENT_RESOURCES);

	dev->do_type = devtype;
	dev->do_drvobj = drv;
	dev->do_currirp = NULL;
	dev->do_flags = 0;

	if (devextlen) {
		dev->do_devext = ExAllocatePoolWithTag(NonPagedPool,
		    devextlen, 0);

		if (dev->do_devext == NULL) {
			ExFreePool(dev);
			return (STATUS_INSUFFICIENT_RESOURCES);
		}

		bzero(dev->do_devext, devextlen);
	} else
		dev->do_devext = NULL;

	dev->do_size = sizeof(device_object) + devextlen;
	dev->do_refcnt = 1;
	dev->do_attacheddev = NULL;
	dev->do_nextdev = NULL;
	dev->do_devtype = devtype;
	dev->do_stacksize = 1;
	dev->do_alignreq = 1;
	dev->do_characteristics = devchars;
	dev->do_iotimer = NULL;
	KeInitializeEvent(&dev->do_devlock, EVENT_TYPE_SYNC, TRUE);

	/*
	 * Vpd is used for disk/tape devices,
	 * but we don't support those. (Yet.)
	 */
	dev->do_vpb = NULL;

	dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool,
	    sizeof(devobj_extension), 0);

	if (dev->do_devobj_ext == NULL) {
		if (dev->do_devext != NULL)
			ExFreePool(dev->do_devext);
		ExFreePool(dev);
		return (STATUS_INSUFFICIENT_RESOURCES);
	}

	dev->do_devobj_ext->dve_type = 0;
	dev->do_devobj_ext->dve_size = sizeof(devobj_extension);
	dev->do_devobj_ext->dve_devobj = dev;

	/*
	 * Attach this device to the driver object's list
	 * of devices. Note: this is not the same as attaching
	 * the device to the device stack. The driver's AddDevice
	 * routine must explicitly call IoAttachDeviceToDeviceStack()
	 * to do that.
	 */

	if (drv->dro_devobj == NULL) {
		drv->dro_devobj = dev;
		dev->do_nextdev = NULL;
	} else {
		dev->do_nextdev = drv->dro_devobj;
		drv->dro_devobj = dev;
	}

	*newdev = dev;

	return (STATUS_SUCCESS);
}
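/*
 * For reference, a typical Windows AddDevice routine combines the two
 * steps mentioned in the comment above (hypothetical sketch; the
 * FILE_DEVICE_UNKNOWN constant and the pdo variable are DDK-style
 * placeholders, not anything defined in this file):
 *
 *	device_object *fdo;
 *
 *	IoCreateDevice(drv, sizeof(struct devstate), NULL,
 *	    FILE_DEVICE_UNKNOWN, 0, FALSE, &fdo);
 *	IoAttachDeviceToDeviceStack(fdo, pdo);
 *
 * Nothing in this routine invokes AddDevice itself; that is left to
 * the code that binds a driver to a device.
 */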
void
IoDeleteDevice(dev)
	device_object *dev;
{
	device_object *prev;

	if (dev == NULL)
		return;

	if (dev->do_devobj_ext != NULL)
		ExFreePool(dev->do_devobj_ext);

	if (dev->do_devext != NULL)
		ExFreePool(dev->do_devext);

	/* Unlink the device from the driver's device list. */

	prev = dev->do_drvobj->dro_devobj;
	if (prev == dev)
		dev->do_drvobj->dro_devobj = dev->do_nextdev;
	else {
		while (prev->do_nextdev != dev)
			prev = prev->do_nextdev;
		prev->do_nextdev = dev->do_nextdev;
	}

	ExFreePool(dev);
}

device_object *
IoGetAttachedDevice(dev)
	device_object *dev;
{
	device_object *d;

	if (dev == NULL)
		return (NULL);

	d = dev;

	while (d->do_attacheddev != NULL)
		d = d->do_attacheddev;

	return (d);
}

static irp *
IoBuildSynchronousFsdRequest(func, dobj, buf, len, off, event, status)
	uint32_t func;
	device_object *dobj;
	void *buf;
	uint32_t len;
	uint64_t *off;
	nt_kevent *event;
	io_status_block *status;
{
	irp *ip;

	ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status);
	if (ip == NULL)
		return (NULL);
	ip->irp_usrevent = event;

	return (ip);
}

static irp *
IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status)
	uint32_t func;
	device_object *dobj;
	void *buf;
	uint32_t len;
	uint64_t *off;
	io_status_block *status;
{
	irp *ip;
	io_stack_location *sl;

	ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
	if (ip == NULL)
		return (NULL);

	ip->irp_usriostat = status;
	ip->irp_tail.irp_overlay.irp_thread = NULL;

	sl = IoGetNextIrpStackLocation(ip);
	sl->isl_major = func;
	sl->isl_minor = 0;
	sl->isl_flags = 0;
	sl->isl_ctl = 0;
	sl->isl_devobj = dobj;
	sl->isl_fileobj = NULL;
	sl->isl_completionfunc = NULL;

	ip->irp_userbuf = buf;

	if (dobj->do_flags & DO_BUFFERED_IO) {
		ip->irp_assoc.irp_sysbuf =
		    ExAllocatePoolWithTag(NonPagedPool, len, 0);
		if (ip->irp_assoc.irp_sysbuf == NULL) {
			IoFreeIrp(ip);
			return (NULL);
		}
		bcopy(buf, ip->irp_assoc.irp_sysbuf, len);
	}

	if (dobj->do_flags & DO_DIRECT_IO) {
		ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip);
		if (ip->irp_mdl == NULL) {
			if (ip->irp_assoc.irp_sysbuf != NULL)
				ExFreePool(ip->irp_assoc.irp_sysbuf);
			IoFreeIrp(ip);
			return (NULL);
		}
		ip->irp_userbuf = NULL;
		ip->irp_assoc.irp_sysbuf = NULL;
	}

	if (func == IRP_MJ_READ) {
		sl->isl_parameters.isl_read.isl_len = len;
		if (off != NULL)
			sl->isl_parameters.isl_read.isl_byteoff = *off;
		else
			sl->isl_parameters.isl_read.isl_byteoff = 0;
	}

	if (func == IRP_MJ_WRITE) {
		sl->isl_parameters.isl_write.isl_len = len;
		if (off != NULL)
			sl->isl_parameters.isl_write.isl_byteoff = *off;
		else
			sl->isl_parameters.isl_write.isl_byteoff = 0;
	}

	return (ip);
}
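/*
 * A sketch of how the request builders above are meant to be used by a
 * hypothetical caller (none of this file's consumers are shown here):
 *
 *	nt_kevent ev;
 *	io_status_block iosb;
 *	uint64_t off = 0;
 *	irp *ip;
 *
 *	KeInitializeEvent(&ev, EVENT_TYPE_NOTIFY, FALSE);
 *	ip = IoBuildSynchronousFsdRequest(IRP_MJ_READ, dobj,
 *	    buf, len, &off, &ev, &iosb);
 *	if (IofCallDriver(dobj, ip) == STATUS_PENDING)
 *		KeWaitForSingleObject(&ev, 0, 0, FALSE, NULL);
 *
 * The event is signalled by IofCompleteRequest() once the driver
 * completes the IRP.
 */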
static irp *
IoBuildDeviceIoControlRequest(uint32_t iocode, device_object *dobj, void *ibuf,
	uint32_t ilen, void *obuf, uint32_t olen, uint8_t isinternal,
	nt_kevent *event, io_status_block *status)
{
	irp *ip;
	io_stack_location *sl;
	uint32_t buflen;

	ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
	if (ip == NULL)
		return (NULL);
	ip->irp_usrevent = event;
	ip->irp_usriostat = status;
	ip->irp_tail.irp_overlay.irp_thread = NULL;

	sl = IoGetNextIrpStackLocation(ip);
	sl->isl_major = isinternal == TRUE ?
	    IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL;
	sl->isl_minor = 0;
	sl->isl_flags = 0;
	sl->isl_ctl = 0;
	sl->isl_devobj = dobj;
	sl->isl_fileobj = NULL;
	sl->isl_completionfunc = NULL;
	sl->isl_parameters.isl_ioctl.isl_iocode = iocode;
	sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen;
	sl->isl_parameters.isl_ioctl.isl_obuflen = olen;

	switch (IO_METHOD(iocode)) {
	case METHOD_BUFFERED:
		if (ilen > olen)
			buflen = ilen;
		else
			buflen = olen;
		if (buflen) {
			ip->irp_assoc.irp_sysbuf =
			    ExAllocatePoolWithTag(NonPagedPool, buflen, 0);
			if (ip->irp_assoc.irp_sysbuf == NULL) {
				IoFreeIrp(ip);
				return (NULL);
			}
		}
		if (ilen && ibuf != NULL) {
			bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
			bzero((char *)ip->irp_assoc.irp_sysbuf + ilen,
			    buflen - ilen);
		} else
			bzero(ip->irp_assoc.irp_sysbuf, ilen);
		ip->irp_userbuf = obuf;
		break;
	case METHOD_IN_DIRECT:
	case METHOD_OUT_DIRECT:
		if (ilen && ibuf != NULL) {
			ip->irp_assoc.irp_sysbuf =
			    ExAllocatePoolWithTag(NonPagedPool, ilen, 0);
			if (ip->irp_assoc.irp_sysbuf == NULL) {
				IoFreeIrp(ip);
				return (NULL);
			}
			bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
		}
		if (olen && obuf != NULL) {
			ip->irp_mdl = IoAllocateMdl(obuf, olen,
			    FALSE, FALSE, ip);
			/*
			 * Normally we would MmProbeAndLockPages()
			 * here, but we don't have to in our
			 * implementation.
			 */
		}
		break;
	case METHOD_NEITHER:
		ip->irp_userbuf = obuf;
		sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf;
		break;
	default:
		break;
	}

	/*
	 * Ideally, we should associate this IRP with the calling
	 * thread here.
	 */

	return (ip);
}
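/*
 * Summary of the buffering strategies implemented in the switch above,
 * following the Windows DDK conventions (IO_METHOD(), from the ndis
 * headers, extracts the transfer type encoded in the IOCTL code):
 *
 *	METHOD_BUFFERED:	both directions are double-buffered
 *				through a single system buffer
 *				(irp_sysbuf), sized to the larger of
 *				ilen/olen.
 *	METHOD_IN_DIRECT,
 *	METHOD_OUT_DIRECT:	input is double-buffered; the output
 *				buffer is described by an MDL for
 *				direct access.
 *	METHOD_NEITHER:		raw caller pointers are passed through
 *				unmodified.
 */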
static irp *
IoAllocateIrp(uint8_t stsize, uint8_t chargequota)
{
	irp *i;

	i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0);
	if (i == NULL)
		return (NULL);

	IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize);

	return (i);
}

static irp *
IoMakeAssociatedIrp(irp *ip, uint8_t stsize)
{
	irp *associrp;

	associrp = IoAllocateIrp(stsize, FALSE);
	if (associrp == NULL)
		return (NULL);

	mtx_lock(&ntoskrnl_dispatchlock);
	associrp->irp_flags |= IRP_ASSOCIATED_IRP;
	associrp->irp_tail.irp_overlay.irp_thread =
	    ip->irp_tail.irp_overlay.irp_thread;
	associrp->irp_assoc.irp_master = ip;
	mtx_unlock(&ntoskrnl_dispatchlock);

	return (associrp);
}

static void
IoFreeIrp(ip)
	irp *ip;
{
	ExFreePool(ip);
}

static void
IoInitializeIrp(irp *io, uint16_t psize, uint8_t ssize)
{
	bzero((char *)io, IoSizeOfIrp(ssize));
	io->irp_size = psize;
	io->irp_stackcnt = ssize;
	io->irp_currentstackloc = ssize;
	InitializeListHead(&io->irp_thlist);
	io->irp_tail.irp_overlay.irp_csl =
	    (io_stack_location *)(io + 1) + ssize;
}

static void
IoReuseIrp(ip, status)
	irp *ip;
	uint32_t status;
{
	uint8_t allocflags;

	allocflags = ip->irp_allocflags;
	IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt);
	ip->irp_iostat.isb_status = status;
	ip->irp_allocflags = allocflags;
}

void
IoAcquireCancelSpinLock(uint8_t *irql)
{
	KeAcquireSpinLock(&ntoskrnl_cancellock, irql);
}

void
IoReleaseCancelSpinLock(uint8_t irql)
{
	KeReleaseSpinLock(&ntoskrnl_cancellock, irql);
}

uint8_t
IoCancelIrp(irp *ip)
{
	cancel_func cfunc;
	uint8_t cancelirql;

	IoAcquireCancelSpinLock(&cancelirql);
	cfunc = IoSetCancelRoutine(ip, NULL);
	ip->irp_cancel = TRUE;
	if (cfunc == NULL) {
		IoReleaseCancelSpinLock(cancelirql);
		return (FALSE);
	}
	ip->irp_cancelirql = cancelirql;
	MSCALL2(cfunc, IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip);
	return ((uint8_t)IoSetCancelValue(ip, TRUE));
}

uint32_t
IofCallDriver(dobj, ip)
	device_object *dobj;
	irp *ip;
{
	driver_object *drvobj;
	io_stack_location *sl;
	uint32_t status;
	driver_dispatch disp;

	drvobj = dobj->do_drvobj;

	if (ip->irp_currentstackloc <= 0)
		panic("IoCallDriver(): out of stack locations");

	IoSetNextIrpStackLocation(ip);
	sl = IoGetCurrentIrpStackLocation(ip);

	sl->isl_devobj = dobj;

	disp = drvobj->dro_dispatch[sl->isl_major];
	status = MSCALL2(disp, dobj, ip);

	return (status);
}

void
IofCompleteRequest(irp *ip, uint8_t prioboost)
{
	uint32_t status;
	device_object *dobj;
	io_stack_location *sl;
	completion_func cf;

	KASSERT(ip->irp_iostat.isb_status != STATUS_PENDING,
	    ("incorrect IRP(%p) status (STATUS_PENDING)", ip));

	sl = IoGetCurrentIrpStackLocation(ip);
	IoSkipCurrentIrpStackLocation(ip);

	do {
		if (sl->isl_ctl & SL_PENDING_RETURNED)
			ip->irp_pendingreturned = TRUE;

		if (ip->irp_currentstackloc != (ip->irp_stackcnt + 1))
			dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj;
		else
			dobj = NULL;
		if (sl->isl_completionfunc != NULL &&
		    ((ip->irp_iostat.isb_status == STATUS_SUCCESS &&
		    sl->isl_ctl & SL_INVOKE_ON_SUCCESS) ||
		    (ip->irp_iostat.isb_status != STATUS_SUCCESS &&
		    sl->isl_ctl & SL_INVOKE_ON_ERROR) ||
		    (ip->irp_cancel == TRUE &&
		    sl->isl_ctl & SL_INVOKE_ON_CANCEL))) {
			cf = sl->isl_completionfunc;
			status = MSCALL3(cf, dobj, ip, sl->isl_completionctx);
			if (status == STATUS_MORE_PROCESSING_REQUIRED)
				return;
		} else {
			if ((ip->irp_currentstackloc <= ip->irp_stackcnt) &&
			    (ip->irp_pendingreturned == TRUE))
				IoMarkIrpPending(ip);
		}

		/* Move to the next stack location. */
		IoSkipCurrentIrpStackLocation(ip);
		sl++;
	} while (ip->irp_currentstackloc <= (ip->irp_stackcnt + 1));

	if (ip->irp_usriostat != NULL)
		*ip->irp_usriostat = ip->irp_iostat;
	if (ip->irp_usrevent != NULL)
		KeSetEvent(ip->irp_usrevent, prioboost, FALSE);

	/* Handle any associated IRPs. */

	if (ip->irp_flags & IRP_ASSOCIATED_IRP) {
		uint32_t masterirpcnt;
		irp *masterirp;
		mdl *m;

		masterirp = ip->irp_assoc.irp_master;
		masterirpcnt =
		    InterlockedDecrement(&masterirp->irp_assoc.irp_irpcnt);

		while ((m = ip->irp_mdl) != NULL) {
			ip->irp_mdl = m->mdl_next;
			IoFreeMdl(m);
		}
		IoFreeIrp(ip);
		if (masterirpcnt == 0)
			IoCompleteRequest(masterirp, IO_NO_INCREMENT);
		return;
	}

	/* With any luck, these conditions will never arise. */

	if (ip->irp_flags & IRP_PAGING_IO) {
		if (ip->irp_mdl != NULL)
			IoFreeMdl(ip->irp_mdl);
		IoFreeIrp(ip);
	}
}

void
ntoskrnl_intr(arg)
	void *arg;
{
	kinterrupt *iobj;
	uint8_t irql;
	uint8_t claimed;
	list_entry *l;

	KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
	l = ntoskrnl_intlist.nle_flink;
	while (l != &ntoskrnl_intlist) {
		iobj = CONTAINING_RECORD(l, kinterrupt, ki_list);
		claimed = MSCALL2(iobj->ki_svcfunc, iobj, iobj->ki_svcctx);
		if (claimed == TRUE)
			break;
		l = l->nle_flink;
	}
	KeReleaseSpinLock(&ntoskrnl_intlock, irql);
}

uint8_t
KeAcquireInterruptSpinLock(iobj)
	kinterrupt *iobj;
{
	uint8_t irql;

	KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
	return (irql);
}

void
KeReleaseInterruptSpinLock(kinterrupt *iobj, uint8_t irql)
{
	KeReleaseSpinLock(&ntoskrnl_intlock, irql);
}

uint8_t
KeSynchronizeExecution(iobj, syncfunc, syncctx)
	kinterrupt *iobj;
	void *syncfunc;
	void *syncctx;
{
	uint8_t irql;

	KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
	MSCALL1(syncfunc, syncctx);
	KeReleaseSpinLock(&ntoskrnl_intlock, irql);

	return (TRUE);
}
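/*
 * Note that the interrupt spinlock routines above all serialize on the
 * single global ntoskrnl_intlock rather than on a per-interrupt lock;
 * combined with the dispatch scheme described below, that is enough to
 * give a driver KeSynchronizeExecution()-style mutual exclusion
 * against its own ISR. The actual hookup to the hardware interrupt is
 * done outside this file, roughly like this (sketch; the device and
 * resource variables are hypothetical):
 *
 *	bus_setup_intr(dev, irq_res, INTR_TYPE_NET | INTR_MPSAFE,
 *	    NULL, ntoskrnl_intr, NULL, &cookie);
 */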
/*
 * IoConnectInterrupt() is passed only the interrupt vector and
 * irql that a device wants to use, but no device-specific tag
 * of any kind. This conflicts rather badly with FreeBSD's
 * bus_setup_intr(), which needs the device_t for the device
 * requesting interrupt delivery. In order to bypass this
 * inconsistency, we implement a second level of interrupt
 * dispatching on top of bus_setup_intr(). All devices use
 * ntoskrnl_intr() as their ISR, and any device requesting
 * interrupts will be registered with ntoskrnl_intr()'s interrupt
 * dispatch list. When an interrupt arrives, we walk the list
 * and invoke the registered ISRs until one of them claims the
 * interrupt. This effectively makes all interrupts shared, but
 * it's the only way to duplicate the semantics of
 * IoConnectInterrupt() and IoDisconnectInterrupt() properly.
 */

uint32_t
IoConnectInterrupt(kinterrupt **iobj, void *svcfunc, void *svcctx,
	kspin_lock *lock, uint32_t vector, uint8_t irql, uint8_t syncirql,
	uint8_t imode, uint8_t shared, uint32_t affinity, uint8_t savefloat)
{
	uint8_t curirql;

	*iobj = ExAllocatePoolWithTag(NonPagedPool, sizeof(kinterrupt), 0);
	if (*iobj == NULL)
		return (STATUS_INSUFFICIENT_RESOURCES);

	(*iobj)->ki_svcfunc = svcfunc;
	(*iobj)->ki_svcctx = svcctx;

	if (lock == NULL) {
		KeInitializeSpinLock(&(*iobj)->ki_lock_priv);
		(*iobj)->ki_lock = &(*iobj)->ki_lock_priv;
	} else
		(*iobj)->ki_lock = lock;

	KeAcquireSpinLock(&ntoskrnl_intlock, &curirql);
	InsertHeadList((&ntoskrnl_intlist), (&(*iobj)->ki_list));
	KeReleaseSpinLock(&ntoskrnl_intlock, curirql);

	return (STATUS_SUCCESS);
}

void
IoDisconnectInterrupt(iobj)
	kinterrupt *iobj;
{
	uint8_t irql;

	if (iobj == NULL)
		return;

	KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
	RemoveEntryList((&iobj->ki_list));
	KeReleaseSpinLock(&ntoskrnl_intlock, irql);

	ExFreePool(iobj);
}

device_object *
IoAttachDeviceToDeviceStack(src, dst)
	device_object *src;
	device_object *dst;
{
	device_object *attached;

	mtx_lock(&ntoskrnl_dispatchlock);
	attached = IoGetAttachedDevice(dst);
	attached->do_attacheddev = src;
	src->do_attacheddev = NULL;
	src->do_stacksize = attached->do_stacksize + 1;
	mtx_unlock(&ntoskrnl_dispatchlock);

	return (attached);
}

void
IoDetachDevice(topdev)
	device_object *topdev;
{
	device_object *tail;

	mtx_lock(&ntoskrnl_dispatchlock);

	/* First, break the chain. */
	tail = topdev->do_attacheddev;
	if (tail == NULL) {
		mtx_unlock(&ntoskrnl_dispatchlock);
		return;
	}
	topdev->do_attacheddev = tail->do_attacheddev;
	topdev->do_refcnt--;

	/* Now reduce the stacksize count for the tail objects. */

	tail = topdev->do_attacheddev;
	while (tail != NULL) {
		tail->do_stacksize--;
		tail = tail->do_attacheddev;
	}

	mtx_unlock(&ntoskrnl_dispatchlock);
}

/*
 * For the most part, an object is considered signalled if
 * dh_sigstate == TRUE. The exception is for mutant objects
 * (mutexes), where the logic works like this:
 *
 * - If the thread already owns the object and sigstate is
 *   less than or equal to 0, then the object is considered
 *   signalled (recursive acquisition).
 * - If dh_sigstate == 1, the object is also considered
 *   signalled.
 */
static int
ntoskrnl_is_signalled(obj, td)
	nt_dispatch_header *obj;
	struct thread *td;
{
	kmutant *km;

	if (obj->dh_type == DISP_TYPE_MUTANT) {
		km = (kmutant *)obj;
		if ((obj->dh_sigstate <= 0 && km->km_ownerthread == td) ||
		    obj->dh_sigstate == 1)
			return (TRUE);
		return (FALSE);
	}

	if (obj->dh_sigstate > 0)
		return (TRUE);
	return (FALSE);
}

static void
ntoskrnl_satisfy_wait(obj, td)
	nt_dispatch_header *obj;
	struct thread *td;
{
	kmutant *km;

	switch (obj->dh_type) {
	case DISP_TYPE_MUTANT:
		km = (struct kmutant *)obj;
		obj->dh_sigstate--;
		/*
		 * If sigstate reaches 0, the mutex is now
		 * non-signalled (the new thread owns it).
		 */
		if (obj->dh_sigstate == 0) {
			km->km_ownerthread = td;
			if (km->km_abandoned == TRUE)
				km->km_abandoned = FALSE;
		}
		break;
	/* Synchronization objects get reset to unsignalled. */
	case DISP_TYPE_SYNCHRONIZATION_EVENT:
	case DISP_TYPE_SYNCHRONIZATION_TIMER:
		obj->dh_sigstate = 0;
		break;
	case DISP_TYPE_SEMAPHORE:
		obj->dh_sigstate--;
		break;
	default:
		break;
	}
}

static void
ntoskrnl_satisfy_multiple_waits(wb)
	wait_block *wb;
{
	wait_block *cur;
	struct thread *td;

	cur = wb;
	td = wb->wb_kthread;

	do {
		/* Satisfy each wait in the circular list in turn. */
		ntoskrnl_satisfy_wait(cur->wb_object, td);
		cur->wb_awakened = TRUE;
		cur = cur->wb_next;
	} while (cur != wb);
}

/* Always called with dispatcher lock held. */
static void
ntoskrnl_waittest(obj, increment)
	nt_dispatch_header *obj;
	uint32_t increment;
{
	wait_block *w, *next;
	list_entry *e;
	struct thread *td;
	wb_ext *we;
	int satisfied;

	/*
	 * Once an object has been signalled, we walk its list of
	 * wait blocks. If a wait block can be awakened, then satisfy
	 * waits as necessary and wake the thread.
	 *
	 * The rules work like this:
	 *
	 * If a wait block is marked as WAITTYPE_ANY, then
	 * we can satisfy the wait conditions on the current
	 * object and wake the thread right away. Satisfying
	 * the wait also has the effect of breaking us out
	 * of the search loop.
	 *
	 * If a wait block is marked as WAITTYPE_ALL, then it
	 * will be part of a circularly linked list of wait
	 * blocks belonging to a waiting thread that's sleeping
	 * in KeWaitForMultipleObjects(). In order to wake the
	 * thread, all the objects in the wait list must be in
	 * the signalled state. If they are, we then satisfy
	 * all of them and wake the thread.
	 */

	e = obj->dh_waitlisthead.nle_flink;

	while (e != &obj->dh_waitlisthead && obj->dh_sigstate > 0) {
		w = CONTAINING_RECORD(e, wait_block, wb_waitlist);
		we = w->wb_ext;
		td = we->we_td;
		satisfied = FALSE;
		if (w->wb_waittype == WAITTYPE_ANY) {
			/*
			 * Thread can be awakened if
			 * any wait is satisfied.
			 */
			ntoskrnl_satisfy_wait(obj, td);
			satisfied = TRUE;
			w->wb_awakened = TRUE;
		} else {
			/*
			 * Thread can only be woken up
			 * if all waits are satisfied.
			 * If the thread is waiting on multiple
			 * objects, they should all be linked
			 * through the wb_next pointers in the
			 * wait blocks.
			 */
			satisfied = TRUE;
			next = w->wb_next;
			while (next != w) {
				/*
				 * Check every object in the thread's
				 * wait ring, not just the one that
				 * was signalled.
				 */
				if (ntoskrnl_is_signalled(next->wb_object,
				    td) == FALSE) {
					satisfied = FALSE;
					break;
				}
				next = next->wb_next;
			}
			if (satisfied == TRUE)
				ntoskrnl_satisfy_multiple_waits(w);
		}

		if (satisfied == TRUE)
			cv_broadcastpri(&we->we_cv,
			    (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
			    w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);

		e = e->nle_flink;
	}
}

/*
 * Return the number of 100 nanosecond intervals since
 * January 1, 1601. (?!?!)
 */
void
ntoskrnl_time(tval)
	uint64_t *tval;
{
	struct timespec ts;

	nanotime(&ts);
	*tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
	    11644473600 * 10000000; /* 100ns ticks from 1601 to 1970 */
}

static void
KeQuerySystemTime(current_time)
	uint64_t *current_time;
{
	ntoskrnl_time(current_time);
}

static uint32_t
KeTickCount(void)
{
	struct timeval tv;

	getmicrouptime(&tv);
	return (tvtohz(&tv));
}

/*
 * KeWaitForSingleObject() is a tricky beast, because it can be used
 * with several different object types: semaphores, timers, events,
 * mutexes and threads. Semaphores don't appear very often, but the
 * other object types are quite common. KeWaitForSingleObject() is
 * what's normally used to acquire a mutex, and it can be used to
 * wait for a thread termination.
 *
 * The Windows NDIS API is implemented in terms of Windows kernel
 * primitives, and some of the object manipulation is duplicated in
 * NDIS. For example, NDIS has timers and events, which are actually
 * Windows kevents and ktimers. Now, you're supposed to only use the
 * NDIS variants of these objects within the confines of the NDIS API,
 * but there are some naughty developers out there who will use
 * KeWaitForSingleObject() on NDIS timer and event objects, so we
 * have to support that as well. Consequently, our NDIS timer and event
 * code has to be closely tied into our ntoskrnl timer and event code,
 * just as it is in Windows.
 *
 * KeWaitForSingleObject() may do different things for different kinds
 * of objects:
 *
 * - For events, we check if the event has been signalled. If the
 *   event is already in the signalled state, we just return immediately,
 *   otherwise we wait for it to be set to the signalled state by someone
 *   else calling KeSetEvent(). Events can be either synchronization or
 *   notification events.
 *
 * - For timers, if the timer has already fired and the timer is in
 *   the signalled state, we just return, otherwise we wait on the
 *   timer. Unlike an event, timers get signalled automatically when
 *   they expire rather than someone having to trip them manually.
 *   Timers initialized with KeInitializeTimer() are always notification
 *   events: KeInitializeTimerEx() lets you initialize a timer as
 *   either a notification or synchronization event.
 *
 * - For mutexes, we try to acquire the mutex and if we can't, we wait
 *   on the mutex until it's available and then grab it. When a mutex is
 *   released, it enters the signalled state, which wakes up one of the
 *   threads waiting to acquire it. Mutexes are always synchronization
 *   events.
 *
 * - For threads, the only thing we do is wait until the thread object
 *   enters a signalled state, which occurs when the thread terminates.
 *   Threads are always notification events.
 *
 * A notification event wakes up all threads waiting on an object. A
 * synchronization event wakes up just one. Also, a synchronization event
 * is auto-clearing, which means we automatically set the event back to
 * the non-signalled state once the wakeup is done.
 */

uint32_t
KeWaitForSingleObject(void *arg, uint32_t reason, uint32_t mode,
	uint8_t alertable, int64_t *duetime)
{
	wait_block w;
	struct thread *td = curthread;
	struct timeval tv;
	int error = 0;
	uint64_t curtime;
	wb_ext we;
	nt_dispatch_header *obj;

	obj = arg;

	if (obj == NULL)
		return (STATUS_INVALID_PARAMETER);

	mtx_lock(&ntoskrnl_dispatchlock);

	cv_init(&we.we_cv, "KeWFS");
	we.we_td = td;

	/*
	 * Check to see if this object is already signalled,
	 * and just return without waiting if it is.
	 */
	if (ntoskrnl_is_signalled(obj, td) == TRUE) {
		/* Sanity check the signal state value. */
		if (obj->dh_sigstate != INT32_MIN) {
			ntoskrnl_satisfy_wait(obj, curthread);
			mtx_unlock(&ntoskrnl_dispatchlock);
			return (STATUS_SUCCESS);
		} else {
			/*
			 * There's a limit to how many times we can
			 * recursively acquire a mutant. If we hit
			 * the limit, something is very wrong.
			 */
			if (obj->dh_type == DISP_TYPE_MUTANT) {
				mtx_unlock(&ntoskrnl_dispatchlock);
				panic("mutant limit exceeded");
			}
		}
	}

	bzero((char *)&w, sizeof(wait_block));
	w.wb_object = obj;
	w.wb_ext = &we;
	w.wb_waittype = WAITTYPE_ANY;
	w.wb_next = &w;
	w.wb_waitkey = 0;
	w.wb_awakened = FALSE;
	w.wb_oldpri = td->td_priority;

	InsertTailList((&obj->dh_waitlisthead), (&w.wb_waitlist));

	/*
	 * The timeout value is specified in 100 nanosecond units
	 * and can be a positive or negative number. If it's positive,
	 * then the duetime is absolute, and we need to convert it
	 * to a relative offset from now in order to use it. If it's
	 * negative, then the duetime is already relative and we
	 * just have to convert the units.
	 */

	if (duetime != NULL) {
		if (*duetime < 0) {
			tv.tv_sec = - (*duetime) / 10000000;
			tv.tv_usec = (- (*duetime) / 10) -
			    (tv.tv_sec * 1000000);
		} else {
			ntoskrnl_time(&curtime);
			if (*duetime < curtime)
				tv.tv_sec = tv.tv_usec = 0;
			else {
				tv.tv_sec = ((*duetime) - curtime) / 10000000;
				tv.tv_usec = ((*duetime) - curtime) / 10 -
				    (tv.tv_sec * 1000000);
			}
		}
	}

	if (duetime == NULL)
		cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
	else
		error = cv_timedwait(&we.we_cv,
		    &ntoskrnl_dispatchlock, tvtohz(&tv));

	RemoveEntryList(&w.wb_waitlist);

	cv_destroy(&we.we_cv);

	/* We timed out. Leave the object alone and return status. */

	if (error == EWOULDBLOCK) {
		mtx_unlock(&ntoskrnl_dispatchlock);
		return (STATUS_TIMEOUT);
	}

	mtx_unlock(&ntoskrnl_dispatchlock);

	return (STATUS_SUCCESS);
/*
	return (KeWaitForMultipleObjects(1, &obj, WAITTYPE_ALL, reason,
	    mode, alertable, duetime, &w));
*/
}
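/*
 * Usage sketch for the duetime convention handled above (hypothetical
 * caller): a negative value is a relative timeout in 100ns units, so
 * a 500ms bounded wait on an event looks like
 *
 *	int64_t duetime = -5000000;	(500ms x 10000 ticks/ms)
 *
 *	if (KeWaitForSingleObject(&ev, 0, 0, FALSE,
 *	    &duetime) == STATUS_TIMEOUT)
 *		... handle the timeout ...
 *
 * while passing duetime == NULL waits forever.
 */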
static uint32_t
KeWaitForMultipleObjects(uint32_t cnt, nt_dispatch_header *obj[], uint32_t wtype,
	uint32_t reason, uint32_t mode, uint8_t alertable, int64_t *duetime,
	wait_block *wb_array)
{
	struct thread *td = curthread;
	wait_block *whead, *w;
	wait_block _wb_array[MAX_WAIT_OBJECTS];
	nt_dispatch_header *cur;
	struct timeval tv;
	int i, wcnt = 0, error = 0;
	uint64_t curtime;
	struct timespec t1, t2;
	uint32_t status = STATUS_SUCCESS;
	wb_ext we;

	if (cnt > MAX_WAIT_OBJECTS)
		return (STATUS_INVALID_PARAMETER);
	if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
		return (STATUS_INVALID_PARAMETER);

	mtx_lock(&ntoskrnl_dispatchlock);

	cv_init(&we.we_cv, "KeWFM");
	we.we_td = td;

	if (wb_array == NULL)
		whead = _wb_array;
	else
		whead = wb_array;

	bzero((char *)whead, sizeof(wait_block) * cnt);

	/* First pass: see if we can satisfy any waits immediately. */

	wcnt = 0;
	w = whead;

	for (i = 0; i < cnt; i++) {
		InsertTailList((&obj[i]->dh_waitlisthead),
		    (&w->wb_waitlist));
		w->wb_ext = &we;
		w->wb_object = obj[i];
		w->wb_waittype = wtype;
		w->wb_waitkey = i;
		w->wb_awakened = FALSE;
		w->wb_oldpri = td->td_priority;
		w->wb_next = w + 1;
		w++;
		wcnt++;
		if (ntoskrnl_is_signalled(obj[i], td)) {
			/*
			 * There's a limit to how many times
			 * we can recursively acquire a mutant.
			 * If we hit the limit, something
			 * is very wrong.
			 */
			if (obj[i]->dh_sigstate == INT32_MIN &&
			    obj[i]->dh_type == DISP_TYPE_MUTANT) {
				mtx_unlock(&ntoskrnl_dispatchlock);
				panic("mutant limit exceeded");
			}

			/*
			 * If this is a WAITTYPE_ANY wait, then
			 * satisfy the waited object and exit
			 * right now.
			 */

			if (wtype == WAITTYPE_ANY) {
				ntoskrnl_satisfy_wait(obj[i], td);
				status = STATUS_WAIT_0 + i;
				goto wait_done;
			} else {
				w--;
				wcnt--;
				w->wb_object = NULL;
				RemoveEntryList(&w->wb_waitlist);
			}
		}
	}

	/*
	 * If this is a WAITTYPE_ALL wait and all objects are
	 * already signalled, satisfy the waits and exit now.
	 */

	if (wtype == WAITTYPE_ALL && wcnt == 0) {
		for (i = 0; i < cnt; i++)
			ntoskrnl_satisfy_wait(obj[i], td);
		status = STATUS_SUCCESS;
		goto wait_done;
	}

	/*
	 * Create a circular waitblock list. The waitcount
	 * must always be non-zero when we get here.
	 */

	(w - 1)->wb_next = whead;

	/* Wait on any objects that aren't yet signalled. */

	/* Calculate timeout, if any. */
	if (duetime != NULL) {
		if (*duetime < 0) {
			tv.tv_sec = - (*duetime) / 10000000;
			tv.tv_usec = (- (*duetime) / 10) -
			    (tv.tv_sec * 1000000);
		} else {
			ntoskrnl_time(&curtime);
			if (*duetime < curtime)
				tv.tv_sec = tv.tv_usec = 0;
			else {
				tv.tv_sec = ((*duetime) - curtime) / 10000000;
				tv.tv_usec = ((*duetime) - curtime) / 10 -
				    (tv.tv_sec * 1000000);
			}
		}
	}

	while (wcnt) {
		nanotime(&t1);

		if (duetime == NULL)
			cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
		else
			error = cv_timedwait(&we.we_cv,
			    &ntoskrnl_dispatchlock, tvtohz(&tv));

		/* The wait timed out. */

		if (error) {
			status = STATUS_TIMEOUT;
			goto wait_done;
		}

		nanotime(&t2);

		/* See what's been signalled. */

		w = whead;
		do {
			cur = w->wb_object;
			if (ntoskrnl_is_signalled(cur, td) == TRUE ||
			    w->wb_awakened == TRUE) {
				/* Sanity check the signal state value. */
				if (cur->dh_sigstate == INT32_MIN &&
				    cur->dh_type == DISP_TYPE_MUTANT) {
					mtx_unlock(&ntoskrnl_dispatchlock);
					panic("mutant limit exceeded");
				}
				wcnt--;
				if (wtype == WAITTYPE_ANY) {
					status = STATUS_WAIT_0 +
					    w->wb_waitkey;
					goto wait_done;
				}
			}
			w = w->wb_next;
		} while (w != whead);

		/*
		 * If all objects have been signalled, or if this
		 * is a WAITTYPE_ANY wait and we were woken up by
		 * someone, we can bail.
		 */

		if (wcnt == 0) {
			status = STATUS_SUCCESS;
			goto wait_done;
		}

		/*
		 * If this is a WAITTYPE_ALL wait, and there are still
		 * objects that haven't been signalled, deduct the
		 * time that's elapsed so far from the timeout and
		 * wait again (or continue waiting indefinitely if
		 * there's no timeout).
		 */
		if (duetime != NULL) {
			tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
			tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
		}
	}

wait_done:

	cv_destroy(&we.we_cv);

	for (i = 0; i < cnt; i++) {
		if (whead[i].wb_object != NULL)
			RemoveEntryList(&whead[i].wb_waitlist);
	}
	mtx_unlock(&ntoskrnl_dispatchlock);

	return (status);
}

static void
WRITE_REGISTER_USHORT(uint16_t *reg, uint16_t val)
{
	bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
}

static uint16_t
READ_REGISTER_USHORT(reg)
	uint16_t *reg;
{
	return (bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

static void
WRITE_REGISTER_ULONG(reg, val)
	uint32_t *reg;
	uint32_t val;
{
	bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
}

static uint32_t
READ_REGISTER_ULONG(reg)
	uint32_t *reg;
{
	return (bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

static uint8_t
READ_REGISTER_UCHAR(uint8_t *reg)
{
	return (bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

static void
WRITE_REGISTER_UCHAR(uint8_t *reg, uint8_t val)
{
	bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
}

static int64_t
_allmul(a, b)
	int64_t a;
	int64_t b;
{
	return (a * b);
}

static int64_t
_alldiv(a, b)
	int64_t a;
	int64_t b;
{
	return (a / b);
}

static int64_t
_allrem(a, b)
	int64_t a;
	int64_t b;
{
	return (a % b);
}

static uint64_t
_aullmul(a, b)
	uint64_t a;
	uint64_t b;
{
	return (a * b);
}

static uint64_t
_aulldiv(a, b)
	uint64_t a;
	uint64_t b;
{
	return (a / b);
}

static uint64_t
_aullrem(a, b)
	uint64_t a;
	uint64_t b;
{
	return (a % b);
}

static int64_t
_allshl(int64_t a, uint8_t b)
{
	return (a << b);
}

static uint64_t
_aullshl(uint64_t a, uint8_t b)
{
	return (a << b);
}

static int64_t
_allshr(int64_t a, uint8_t b)
{
	return (a >> b);
}

static uint64_t
_aullshr(uint64_t a, uint8_t b)
{
	return (a >> b);
}

static slist_entry *
ntoskrnl_pushsl(head, entry)
	slist_header *head;
	slist_entry *entry;
{
	slist_entry *oldhead;

	oldhead = head->slh_list.slh_next;
	entry->sl_next = head->slh_list.slh_next;
	head->slh_list.slh_next = entry;
	head->slh_list.slh_depth++;
	head->slh_list.slh_seq++;

	return (oldhead);
}

static void
InitializeSListHead(head)
	slist_header *head;
{
	memset(head, 0, sizeof(*head));
}

static slist_entry *
ntoskrnl_popsl(head)
	slist_header *head;
{
	slist_entry *first;

	first = head->slh_list.slh_next;
	if (first != NULL) {
		head->slh_list.slh_next = first->sl_next;
		head->slh_list.slh_depth--;
		head->slh_list.slh_seq++;
	}

	return (first);
}
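/*
 * The slh_seq counter maintained by the push/pop helpers above exists
 * because, in the native Windows SLIST implementation, it serves as an
 * ABA sequence number for the lock-free compare-exchange loop. Our
 * emulation serializes with ntoskrnl_interlock instead (see
 * InterlockedPushEntrySList() further down), so the counter is kept up
 * to date only for the benefit of any driver that inspects the header.
 */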
/*
 * We need this to make lookaside lists work for amd64.
 * We pass a pointer to ExAllocatePoolWithTag() in the lookaside
 * list structure. For amd64 to work right, this has to be a
 * pointer to the wrapped version of the routine, not the
 * original. Letting the Windows driver invoke the original
 * function directly will result in a calling convention
 * mismatch and a pretty crash. On x86, this effectively
 * becomes a no-op since ipt_func and ipt_wrap are the same.
 */

static funcptr
ntoskrnl_findwrap(func)
	funcptr func;
{
	image_patch_table *patch;

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		if ((funcptr)patch->ipt_func == func)
			return ((funcptr)patch->ipt_wrap);
		patch++;
	}

	return (NULL);
}

static void
ExInitializePagedLookasideList(paged_lookaside_list *lookaside,
	lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
	uint32_t flags, size_t size, uint32_t tag, uint16_t depth)
{
	bzero((char *)lookaside, sizeof(paged_lookaside_list));

	if (size < sizeof(slist_entry))
		lookaside->nll_l.gl_size = sizeof(slist_entry);
	else
		lookaside->nll_l.gl_size = size;
	lookaside->nll_l.gl_tag = tag;
	if (allocfunc == NULL)
		lookaside->nll_l.gl_allocfunc =
		    ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
	else
		lookaside->nll_l.gl_allocfunc = allocfunc;

	if (freefunc == NULL)
		lookaside->nll_l.gl_freefunc =
		    ntoskrnl_findwrap((funcptr)ExFreePool);
	else
		lookaside->nll_l.gl_freefunc = freefunc;

#ifdef __i386__
	KeInitializeSpinLock(&lookaside->nll_obsoletelock);
#endif

	lookaside->nll_l.gl_type = NonPagedPool;
	lookaside->nll_l.gl_depth = depth;
	lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
}

static void
ExDeletePagedLookasideList(lookaside)
	paged_lookaside_list *lookaside;
{
	void *buf;
	void (*freefunc)(void *);

	freefunc = lookaside->nll_l.gl_freefunc;
	while ((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
		MSCALL1(freefunc, buf);
}

static void
ExInitializeNPagedLookasideList(npaged_lookaside_list *lookaside,
	lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
	uint32_t flags, size_t size, uint32_t tag, uint16_t depth)
{
	bzero((char *)lookaside, sizeof(npaged_lookaside_list));

	if (size < sizeof(slist_entry))
		lookaside->nll_l.gl_size = sizeof(slist_entry);
	else
		lookaside->nll_l.gl_size = size;
	lookaside->nll_l.gl_tag = tag;
	if (allocfunc == NULL)
		lookaside->nll_l.gl_allocfunc =
		    ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
	else
		lookaside->nll_l.gl_allocfunc = allocfunc;

	if (freefunc == NULL)
		lookaside->nll_l.gl_freefunc =
		    ntoskrnl_findwrap((funcptr)ExFreePool);
	else
		lookaside->nll_l.gl_freefunc = freefunc;

#ifdef __i386__
	KeInitializeSpinLock(&lookaside->nll_obsoletelock);
#endif

	lookaside->nll_l.gl_type = NonPagedPool;
	lookaside->nll_l.gl_depth = depth;
	lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
}

static void
ExDeleteNPagedLookasideList(lookaside)
	npaged_lookaside_list *lookaside;
{
	void *buf;
	void (*freefunc)(void *);

	freefunc = lookaside->nll_l.gl_freefunc;
	while ((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
		MSCALL1(freefunc, buf);
}
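/*
 * A hypothetical driver-side use of the lookaside routines above
 * (sketch only; struct pkt and the 0x5453494c ('LIST') tag are made
 * up). With NULL alloc/free functions the list falls back to the
 * wrapped ExAllocatePoolWithTag()/ExFreePool(), as arranged above:
 *
 *	npaged_lookaside_list ll;
 *
 *	ExInitializeNPagedLookasideList(&ll, NULL, NULL, 0,
 *	    sizeof(struct pkt), 0x5453494c, 0);
 *	...
 *	ExDeleteNPagedLookasideList(&ll);
 */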
slist_entry *
InterlockedPushEntrySList(head, entry)
	slist_header		*head;
	slist_entry		*entry;
{
	slist_entry		*oldhead;

	mtx_lock_spin(&ntoskrnl_interlock);
	oldhead = ntoskrnl_pushsl(head, entry);
	mtx_unlock_spin(&ntoskrnl_interlock);

	return (oldhead);
}

slist_entry *
InterlockedPopEntrySList(head)
	slist_header		*head;
{
	slist_entry		*first;

	mtx_lock_spin(&ntoskrnl_interlock);
	first = ntoskrnl_popsl(head);
	mtx_unlock_spin(&ntoskrnl_interlock);

	return (first);
}

static slist_entry *
ExInterlockedPushEntrySList(head, entry, lock)
	slist_header		*head;
	slist_entry		*entry;
	kspin_lock		*lock;
{
	return (InterlockedPushEntrySList(head, entry));
}

static slist_entry *
ExInterlockedPopEntrySList(head, lock)
	slist_header		*head;
	kspin_lock		*lock;
{
	return (InterlockedPopEntrySList(head));
}

uint16_t
ExQueryDepthSList(head)
	slist_header		*head;
{
	uint16_t		depth;

	mtx_lock_spin(&ntoskrnl_interlock);
	depth = head->slh_list.slh_depth;
	mtx_unlock_spin(&ntoskrnl_interlock);

	return (depth);
}

void
KeInitializeSpinLock(lock)
	kspin_lock		*lock;
{
	*lock = 0;
}

#ifdef __i386__
void
KefAcquireSpinLockAtDpcLevel(lock)
	kspin_lock		*lock;
{
#ifdef NTOSKRNL_DEBUG_SPINLOCKS
	int			i = 0;
#endif

	while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0) {
		/* sit and spin */;
#ifdef NTOSKRNL_DEBUG_SPINLOCKS
		i++;
		if (i > 200000000)
			panic("DEADLOCK!");
#endif
	}
}

void
KefReleaseSpinLockFromDpcLevel(lock)
	kspin_lock		*lock;
{
	atomic_store_rel_int((volatile u_int *)lock, 0);
}

uint8_t
KeAcquireSpinLockRaiseToDpc(kspin_lock *lock)
{
	uint8_t			oldirql;

	if (KeGetCurrentIrql() > DISPATCH_LEVEL)
		panic("IRQL_NOT_LESS_THAN_OR_EQUAL");

	KeRaiseIrql(DISPATCH_LEVEL, &oldirql);
	KeAcquireSpinLockAtDpcLevel(lock);

	return (oldirql);
}
#else
void
KeAcquireSpinLockAtDpcLevel(kspin_lock *lock)
{
	while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0)
		/* sit and spin */;
}

void
KeReleaseSpinLockFromDpcLevel(kspin_lock *lock)
{
	atomic_store_rel_int((volatile u_int *)lock, 0);
}
#endif /* __i386__ */

uintptr_t
InterlockedExchange(dst, val)
	volatile uint32_t	*dst;
	uintptr_t		val;
{
	uintptr_t		r;

	mtx_lock_spin(&ntoskrnl_interlock);
	r = *dst;
	*dst = val;
	mtx_unlock_spin(&ntoskrnl_interlock);

	return (r);
}

static uint32_t
InterlockedIncrement(addend)
	volatile uint32_t	*addend;
{
	/*
	 * Operate on a 32-bit quantity; atomic_add_long() on the
	 * 32-bit addend would touch 8 bytes on LP64 platforms, and
	 * re-reading *addend for the return value would be racy.
	 */
	return (atomic_fetchadd_int((volatile u_int *)addend, 1) + 1);
}

static uint32_t
InterlockedDecrement(addend)
	volatile uint32_t	*addend;
{
	return (atomic_fetchadd_int((volatile u_int *)addend, -1) - 1);
}

static void
ExInterlockedAddLargeStatistic(addend, inc)
	uint64_t		*addend;
	uint32_t		inc;
{
	mtx_lock_spin(&ntoskrnl_interlock);
	*addend += inc;
	mtx_unlock_spin(&ntoskrnl_interlock);
}
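/*
 * Illustrative sketch (compiled out): the IRQL discipline these
 * spinlocks implement. Acquiring raises the thread to DISPATCH_LEVEL
 * and hands back the previous IRQL; releasing restores it. This is
 * the same pattern the DPC and workitem threads below follow with
 * kq_lock. The lock variable here is hypothetical.
 */
#if 0
static void
example_spinlock_discipline(kspin_lock *lock)
{
	uint8_t			irql;

	KeAcquireSpinLock(lock, &irql);	/* now at DISPATCH_LEVEL */
	/* ... touch data shared with a DPC here ... */
	KeReleaseSpinLock(lock, irql);	/* back to the previous IRQL */
}
#endif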
mdl *
IoAllocateMdl(void *vaddr, uint32_t len, uint8_t secondarybuf,
    uint8_t chargequota, irp *iopkt)
{
	mdl			*m;
	int			zone = 0;

	if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
		m = ExAllocatePoolWithTag(NonPagedPool,
		    MmSizeOfMdl(vaddr, len), 0);
	else {
		m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
		zone++;
	}

	if (m == NULL)
		return (NULL);

	MmInitializeMdl(m, vaddr, len);

	/*
	 * MmInitializeMdl() clears the flags field, so we
	 * have to set this here. If the MDL came from the
	 * MDL UMA zone, tag it so we can release it to
	 * the right place later.
	 */
	if (zone)
		m->mdl_flags = MDL_ZONE_ALLOCED;

	if (iopkt != NULL) {
		if (secondarybuf == TRUE) {
			mdl		*last;
			last = iopkt->irp_mdl;
			while (last->mdl_next != NULL)
				last = last->mdl_next;
			last->mdl_next = m;
		} else {
			if (iopkt->irp_mdl != NULL)
				panic("leaking an MDL in IoAllocateMdl()");
			iopkt->irp_mdl = m;
		}
	}

	return (m);
}

void
IoFreeMdl(m)
	mdl			*m;
{
	if (m == NULL)
		return;

	if (m->mdl_flags & MDL_ZONE_ALLOCED)
		uma_zfree(mdl_zone, m);
	else
		ExFreePool(m);
}

static void *
MmAllocateContiguousMemory(size, highest)
	uint32_t		size;
	uint64_t		highest;
{
	void			*addr;
	size_t			pagelength = roundup(size, PAGE_SIZE);

	addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);

	return (addr);
}
static void *
MmAllocateContiguousMemorySpecifyCache(size, lowest, highest,
    boundary, cachetype)
	uint32_t		size;
	uint64_t		lowest;
	uint64_t		highest;
	uint64_t		boundary;
	enum nt_caching_type	cachetype;
{
	vm_memattr_t		memattr;
	void			*ret;

	switch (cachetype) {
	case MmNonCached:
		memattr = VM_MEMATTR_UNCACHEABLE;
		break;
	case MmWriteCombined:
		memattr = VM_MEMATTR_WRITE_COMBINING;
		break;
	case MmNonCachedUnordered:
		memattr = VM_MEMATTR_UNCACHEABLE;
		break;
	case MmCached:
	case MmHardwareCoherentCached:
	case MmUSWCCached:
	default:
		memattr = VM_MEMATTR_DEFAULT;
		break;
	}

	ret = (void *)kmem_alloc_contig(kernel_map, size, M_ZERO | M_NOWAIT,
	    lowest, highest, PAGE_SIZE, boundary, memattr);
	if (ret != NULL)
		malloc_type_allocated(M_DEVBUF, round_page(size));
	return (ret);
}

static void
MmFreeContiguousMemory(base)
	void			*base;
{
	ExFreePool(base);
}

static void
MmFreeContiguousMemorySpecifyCache(base, size, cachetype)
	void			*base;
	uint32_t		size;
	enum nt_caching_type	cachetype;
{
	contigfree(base, size, M_DEVBUF);
}

static uint32_t
MmSizeOfMdl(vaddr, len)
	void			*vaddr;
	size_t			len;
{
	uint32_t		l;

	l = sizeof(struct mdl) +
	    (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len));

	return (l);
}

/*
 * The Microsoft documentation says this routine fills in the
 * page array of an MDL with the _physical_ page addresses that
 * comprise the buffer, but we don't really want to do that here.
 * Instead, we just fill in the page array with the kernel virtual
 * addresses of the buffers.
 */
void
MmBuildMdlForNonPagedPool(m)
	mdl			*m;
{
	vm_offset_t		*mdl_pages;
	int			pagecnt, i;

	pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount);

	if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *))
		panic("not enough pages in MDL to describe buffer");

	mdl_pages = MmGetMdlPfnArray(m);

	/* Fill in each slot of the page array, not just the first one. */
	for (i = 0; i < pagecnt; i++)
		mdl_pages[i] = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE);

	m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL;
	m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m);
}

static void *
MmMapLockedPages(mdl *buf, uint8_t accessmode)
{
	buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA;
	return (MmGetMdlVirtualAddress(buf));
}

static void *
MmMapLockedPagesSpecifyCache(mdl *buf, uint8_t accessmode, uint32_t cachetype,
    void *vaddr, uint32_t bugcheck, uint32_t prio)
{
	return (MmMapLockedPages(buf, accessmode));
}

static void
MmUnmapLockedPages(vaddr, buf)
	void			*vaddr;
	mdl			*buf;
{
	buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
}
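/*
 * Illustrative sketch (compiled out): the usual MDL round trip a
 * driver performs against the routines above for a non-paged buffer.
 * Since this emulation stores kernel virtual addresses in the page
 * array, the "mapping" is effectively a no-op that returns the
 * buffer's KVA. The buffer arguments are hypothetical; 0 stands in
 * for Windows' KernelMode access mode.
 */
#if 0
static void
example_mdl_roundtrip(void *buf, uint32_t len)
{
	mdl			*m;
	void			*va;

	m = IoAllocateMdl(buf, len, FALSE, FALSE, NULL);
	if (m == NULL)
		return;
	MmBuildMdlForNonPagedPool(m);	/* fills page array with KVAs */
	va = MmMapLockedPages(m, 0);
	/* ... use va ... */
	MmUnmapLockedPages(va, m);
	IoFreeMdl(m);
}
#endif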
/*
 * This function has a problem in that it will break if you
 * compile this module without PAE and try to use it on a PAE
 * kernel. Unfortunately, there's no way around this at the
 * moment. It's slightly less broken than using pmap_kextract().
 * You'd think the virtual memory subsystem would help us out
 * here, but it doesn't.
 */

static uint64_t
MmGetPhysicalAddress(void *base)
{
	return (pmap_extract(kernel_map->pmap, (vm_offset_t)base));
}

void *
MmGetSystemRoutineAddress(ustr)
	unicode_string		*ustr;
{
	ansi_string		astr;

	if (RtlUnicodeStringToAnsiString(&astr, ustr, TRUE))
		return (NULL);
	return (ndis_get_routine_address(ntoskrnl_functbl, astr.as_buf));
}

uint8_t
MmIsAddressValid(vaddr)
	void			*vaddr;
{
	if (pmap_extract(kernel_map->pmap, (vm_offset_t)vaddr))
		return (TRUE);

	return (FALSE);
}

void *
MmMapIoSpace(paddr, len, cachetype)
	uint64_t		paddr;
	uint32_t		len;
	uint32_t		cachetype;
{
	devclass_t		nexus_class;
	device_t		*nexus_devs, devp;
	int			nexus_count = 0;
	device_t		matching_dev = NULL;
	struct resource		*res;
	int			i;
	vm_offset_t		v;

	/* There will always be at least one nexus. */

	nexus_class = devclass_find("nexus");
	devclass_get_devices(nexus_class, &nexus_devs, &nexus_count);

	for (i = 0; i < nexus_count; i++) {
		devp = nexus_devs[i];
		matching_dev = ntoskrnl_finddev(devp, paddr, &res);
		if (matching_dev)
			break;
	}

	free(nexus_devs, M_TEMP);

	if (matching_dev == NULL)
		return (NULL);

	v = (vm_offset_t)rman_get_virtual(res);
	if (paddr > rman_get_start(res))
		v += paddr - rman_get_start(res);

	return ((void *)v);
}

void
MmUnmapIoSpace(vaddr, len)
	void			*vaddr;
	size_t			len;
{
}

static device_t
ntoskrnl_finddev(dev, paddr, res)
	device_t		dev;
	uint64_t		paddr;
	struct resource		**res;
{
	device_t		*children = NULL;
	device_t		matching_dev;
	int			childcnt;
	struct resource		*r;
	struct resource_list	*rl;
	struct resource_list_entry	*rle;
	uint32_t		flags;
	int			i;

	/* We only want devices that have been successfully probed. */

	if (device_is_alive(dev) == FALSE)
		return (NULL);

	rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
	if (rl != NULL) {
		STAILQ_FOREACH(rle, rl, link) {
			r = rle->res;

			if (r == NULL)
				continue;

			flags = rman_get_flags(r);

			if (rle->type == SYS_RES_MEMORY &&
			    paddr >= rman_get_start(r) &&
			    paddr <= rman_get_end(r)) {
				if (!(flags & RF_ACTIVE))
					bus_activate_resource(dev,
					    SYS_RES_MEMORY, 0, r);
				*res = r;
				return (dev);
			}
		}
	}

	/*
	 * If this device has children, do another
	 * level of recursion to inspect them.
	 */

	device_get_children(dev, &children, &childcnt);

	for (i = 0; i < childcnt; i++) {
		matching_dev = ntoskrnl_finddev(children[i], paddr, res);
		if (matching_dev != NULL) {
			free(children, M_TEMP);
			return (matching_dev);
		}
	}

	/* Won't somebody please think of the children! */

	if (children != NULL)
		free(children, M_TEMP);

	return (NULL);
}
/*
 * Workitems are unlike DPCs, in that they run in a user-mode thread
 * context rather than at DISPATCH_LEVEL in kernel context. In our
 * case we run them in kernel context anyway.
 */
static void
ntoskrnl_workitem_thread(arg)
	void			*arg;
{
	kdpc_queue		*kq;
	list_entry		*l;
	io_workitem		*iw;
	uint8_t			irql;

	kq = arg;

	InitializeListHead(&kq->kq_disp);
	kq->kq_td = curthread;
	kq->kq_exit = 0;
	KeInitializeSpinLock(&kq->kq_lock);
	KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);

	while (1) {
		KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);

		KeAcquireSpinLock(&kq->kq_lock, &irql);

		if (kq->kq_exit) {
			kq->kq_exit = 0;
			KeReleaseSpinLock(&kq->kq_lock, irql);
			break;
		}

		while (!IsListEmpty(&kq->kq_disp)) {
			l = RemoveHeadList(&kq->kq_disp);
			iw = CONTAINING_RECORD(l,
			    io_workitem, iw_listentry);
			InitializeListHead((&iw->iw_listentry));
			if (iw->iw_func == NULL)
				continue;
			KeReleaseSpinLock(&kq->kq_lock, irql);
			MSCALL2(iw->iw_func, iw->iw_dobj, iw->iw_ctx);
			KeAcquireSpinLock(&kq->kq_lock, &irql);
		}

		KeReleaseSpinLock(&kq->kq_lock, irql);
	}

	kproc_exit(0);
	return; /* notreached */
}

static ndis_status
RtlCharToInteger(src, base, val)
	const char		*src;
	uint32_t		base;
	uint32_t		*val;
{
	int			negative = 0;
	uint32_t		res;

	if (!src || !val)
		return (STATUS_ACCESS_VIOLATION);
	while (*src != '\0' && *src <= ' ')
		src++;
	if (*src == '+')
		src++;
	else if (*src == '-') {
		src++;
		negative = 1;
	}
	if (base == 0) {
		base = 10;
		if (*src == '0') {
			src++;
			if (*src == 'b') {
				base = 2;
				src++;
			} else if (*src == 'o') {
				base = 8;
				src++;
			} else if (*src == 'x') {
				base = 16;
				src++;
			}
		}
	} else if (!(base == 2 || base == 8 || base == 10 || base == 16))
		return (STATUS_INVALID_PARAMETER);

	for (res = 0; *src; src++) {
		int v;
		if (isdigit(*src))
			v = *src - '0';
		else if (isxdigit(*src))
			v = tolower(*src) - 'a' + 10;
		else
			v = base;
		if (v >= base)
			return (STATUS_INVALID_PARAMETER);
		res = res * base + v;
	}
	*val = negative ? -res : res;
	return (STATUS_SUCCESS);
}
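/*
 * A minimal usage sketch (compiled out): RtlCharToInteger() skips
 * leading whitespace, accepts an optional sign, recognizes 0b/0o/0x
 * prefixes when base is 0, and rejects digits outside the base. The
 * literals below are only for illustration.
 */
#if 0
static void
example_char_to_integer(void)
{
	uint32_t		val;

	RtlCharToInteger("0x1F", 0, &val);	/* val = 31 */
	RtlCharToInteger("-10", 10, &val);	/* val = (uint32_t)-10 */
}
#endif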
static void
ntoskrnl_destroy_workitem_threads(void)
{
	kdpc_queue		*kq;
	int			i;

	for (i = 0; i < WORKITEM_THREADS; i++) {
		kq = wq_queues + i;
		kq->kq_exit = 1;
		KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
		while (kq->kq_exit)
			tsleep(kq->kq_td->td_proc, PWAIT, "waitiw", hz/10);
	}
}

io_workitem *
IoAllocateWorkItem(dobj)
	device_object		*dobj;
{
	io_workitem		*iw;

	iw = uma_zalloc(iw_zone, M_NOWAIT);
	if (iw == NULL)
		return (NULL);

	InitializeListHead(&iw->iw_listentry);
	iw->iw_dobj = dobj;

	mtx_lock(&ntoskrnl_dispatchlock);
	iw->iw_idx = wq_idx;
	WORKIDX_INC(wq_idx);
	mtx_unlock(&ntoskrnl_dispatchlock);

	return (iw);
}

void
IoFreeWorkItem(iw)
	io_workitem		*iw;
{
	uma_zfree(iw_zone, iw);
}

void
IoQueueWorkItem(iw, iw_func, qtype, ctx)
	io_workitem		*iw;
	io_workitem_func	iw_func;
	uint32_t		qtype;
	void			*ctx;
{
	kdpc_queue		*kq;
	list_entry		*l;
	io_workitem		*cur;
	uint8_t			irql;

	kq = wq_queues + iw->iw_idx;

	KeAcquireSpinLock(&kq->kq_lock, &irql);

	/*
	 * Traverse the list and make sure this workitem hasn't
	 * already been inserted. Queuing the same workitem
	 * twice will hose the list but good.
	 */

	l = kq->kq_disp.nle_flink;
	while (l != &kq->kq_disp) {
		cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
		if (cur == iw) {
			/* Already queued -- do nothing. */
			KeReleaseSpinLock(&kq->kq_lock, irql);
			return;
		}
		l = l->nle_flink;
	}

	iw->iw_func = iw_func;
	iw->iw_ctx = ctx;

	InsertTailList((&kq->kq_disp), (&iw->iw_listentry));
	KeReleaseSpinLock(&kq->kq_lock, irql);

	KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
}

static void
ntoskrnl_workitem(dobj, arg)
	device_object		*dobj;
	void			*arg;
{
	io_workitem		*iw;
	work_queue_item		*w;
	work_item_func		f;

	iw = arg;
	w = (work_queue_item *)dobj;
	f = (work_item_func)w->wqi_func;
	uma_zfree(iw_zone, iw);
	MSCALL2(f, w, w->wqi_ctx);
}
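/*
 * Illustrative sketch (compiled out): the IoAllocateWorkItem() /
 * IoQueueWorkItem() pattern a driver follows. The callback must use
 * the Microsoft calling convention, since ntoskrnl_workitem_thread()
 * invokes it via MSCALL2(), so a real driver passes its own wrapped
 * function pointer; the names here are hypothetical. The queue-type
 * argument is ignored by this implementation, so 0 is passed.
 */
#if 0
static void
example_queue_work(device_object *dobj, io_workitem_func fn, void *ctx)
{
	io_workitem		*iw;

	iw = IoAllocateWorkItem(dobj);
	if (iw == NULL)
		return;
	/* fn is later called as MSCALL2(fn, dobj, ctx). */
	IoQueueWorkItem(iw, fn, 0, ctx);
}
#endif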
/*
 * The ExQueueWorkItem() API is deprecated in Windows XP. Microsoft
 * warns that it's unsafe and to use IoQueueWorkItem() instead. The
 * problem with ExQueueWorkItem() is that it can't guard against
 * the condition where a driver submits a job to the work queue and
 * is then unloaded before the job is able to run. IoQueueWorkItem()
 * acquires a reference to the device's device_object via the
 * object manager and retains it until after the job has completed,
 * which prevents the driver from being unloaded before the job
 * runs. (We don't currently support this behavior, though hopefully
 * that will change once the object manager API is fleshed out a bit.)
 *
 * Having said all that, the ExQueueWorkItem() API remains, because
 * there are still other parts of Windows that use it, including
 * NDIS itself: NdisScheduleWorkItem() calls ExQueueWorkItem().
 * We fake up the ExQueueWorkItem() API on top of our implementation
 * of IoQueueWorkItem(). Workitem thread #3 is reserved exclusively
 * for ExQueueWorkItem() jobs, and we pass a pointer to the work
 * queue item (provided by the caller) in to IoAllocateWorkItem()
 * instead of the device_object. We need to save this pointer so
 * we can apply a sanity check: as with the DPC queue and other
 * workitem queues, we can't allow the same work queue item to
 * be queued twice. If it's already pending, we silently return.
 */

void
ExQueueWorkItem(w, qtype)
	work_queue_item		*w;
	uint32_t		qtype;
{
	io_workitem		*iw;
	io_workitem_func	iwf;
	kdpc_queue		*kq;
	list_entry		*l;
	io_workitem		*cur;
	uint8_t			irql;

	/*
	 * We need to do a special sanity test to make sure
	 * the ExQueueWorkItem() API isn't used to queue
	 * the same workitem twice. Rather than checking the
	 * io_workitem pointer itself, we test the attached
	 * device object, which is really a pointer to the
	 * legacy work queue item structure.
	 */

	kq = wq_queues + WORKITEM_LEGACY_THREAD;
	KeAcquireSpinLock(&kq->kq_lock, &irql);
	l = kq->kq_disp.nle_flink;
	while (l != &kq->kq_disp) {
		cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
		if (cur->iw_dobj == (device_object *)w) {
			/* Already queued -- do nothing. */
			KeReleaseSpinLock(&kq->kq_lock, irql);
			return;
		}
		l = l->nle_flink;
	}
	KeReleaseSpinLock(&kq->kq_lock, irql);

	iw = IoAllocateWorkItem((device_object *)w);
	if (iw == NULL)
		return;

	iw->iw_idx = WORKITEM_LEGACY_THREAD;
	iwf = (io_workitem_func)ntoskrnl_findwrap((funcptr)ntoskrnl_workitem);
	IoQueueWorkItem(iw, iwf, qtype, iw);
}

static void
RtlZeroMemory(dst, len)
	void			*dst;
	size_t			len;
{
	bzero(dst, len);
}

static void
RtlSecureZeroMemory(dst, len)
	void			*dst;
	size_t			len;
{
	memset(dst, 0, len);
}

static void
RtlFillMemory(void *dst, size_t len, uint8_t c)
{
	memset(dst, c, len);
}

static void
RtlMoveMemory(dst, src, len)
	void			*dst;
	const void		*src;
	size_t			len;
{
	memmove(dst, src, len);
}

static void
RtlCopyMemory(dst, src, len)
	void			*dst;
	const void		*src;
	size_t			len;
{
	bcopy(src, dst, len);
}

static size_t
RtlCompareMemory(s1, s2, len)
	const void		*s1;
	const void		*s2;
	size_t			len;
{
	size_t			i;
	uint8_t			*m1, *m2;

	m1 = __DECONST(uint8_t *, s1);
	m2 = __DECONST(uint8_t *, s2);

	for (i = 0; i < len && m1[i] == m2[i]; i++)
		;
	return (i);
}

void
RtlInitAnsiString(dst, src)
	ansi_string		*dst;
	char			*src;
{
	ansi_string		*a;

	a = dst;
	if (a == NULL)
		return;
	if (src == NULL) {
		a->as_len = a->as_maxlen = 0;
		a->as_buf = NULL;
	} else {
		a->as_buf = src;
		a->as_len = a->as_maxlen = strlen(src);
	}
}

void
RtlInitUnicodeString(dst, src)
	unicode_string		*dst;
	uint16_t		*src;
{
	unicode_string		*u;
	int			i;

	u = dst;
	if (u == NULL)
		return;
	if (src == NULL) {
		u->us_len = u->us_maxlen = 0;
		u->us_buf = NULL;
	} else {
		i = 0;
		while (src[i] != 0)
			i++;
		u->us_buf = src;
		u->us_len = u->us_maxlen = i * 2;
	}
}

ndis_status
RtlUnicodeStringToInteger(ustr, base, val)
	unicode_string		*ustr;
	uint32_t		base;
	uint32_t		*val;
{
	uint16_t		*uchr;
	int			len, neg = 0;
	char			abuf[64];
	char			*astr;

	uchr = ustr->us_buf;
	len = ustr->us_len;
	bzero(abuf, sizeof(abuf));

	if ((char)((*uchr) & 0xFF) == '-') {
		neg = 1;
		uchr++;
		len -= 2;
	} else if ((char)((*uchr) & 0xFF) == '+') {
		neg = 0;
		uchr++;
		len -= 2;
	}

	if (base == 0) {
		if ((char)((*uchr) & 0xFF) == 'b') {
			base = 2;
			uchr++;
			len -= 2;
		} else if ((char)((*uchr) & 0xFF) == 'o') {
			base = 8;
			uchr++;
			len -= 2;
		} else if ((char)((*uchr) & 0xFF) == 'x') {
			base = 16;
			uchr++;
			len -= 2;
		} else
			base = 10;
	}

	astr = abuf;
	if (neg) {
		strcpy(astr, "-");
		astr++;
	}

	ntoskrnl_unicode_to_ascii(uchr, astr, len);
	*val = strtoul(abuf, NULL, base);

	return (STATUS_SUCCESS);
}
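/*
 * A minimal usage sketch (compiled out): us_len counts *bytes*, not
 * characters, which is why the parser above steps len by 2 for every
 * UTF-16 code unit it consumes. The buffer below is hypothetical.
 */
#if 0
static void
example_unicode_to_integer(void)
{
	/* UTF-16LE "42", NUL-terminated. */
	static uint16_t		buf[] = { '4', '2', 0 };
	unicode_string		us;
	uint32_t		val;

	RtlInitUnicodeString(&us, buf);		/* us_len = 4 bytes */
	RtlUnicodeStringToInteger(&us, 10, &val);	/* val = 42 */
}
#endif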
void
RtlFreeUnicodeString(ustr)
	unicode_string		*ustr;
{
	if (ustr->us_buf == NULL)
		return;
	ExFreePool(ustr->us_buf);
	ustr->us_buf = NULL;
}

void
RtlFreeAnsiString(astr)
	ansi_string		*astr;
{
	if (astr->as_buf == NULL)
		return;
	ExFreePool(astr->as_buf);
	astr->as_buf = NULL;
}

static int
atoi(str)
	const char		*str;
{
	return ((int)strtol(str, (char **)NULL, 10));
}

static long
atol(str)
	const char		*str;
{
	return (strtol(str, (char **)NULL, 10));
}

static int
rand(void)
{
	struct timeval		tv;

	microtime(&tv);
	srandom(tv.tv_usec);
	return ((int)random());
}

static void
srand(seed)
	unsigned int		seed;
{
	srandom(seed);
}

static uint8_t
IoIsWdmVersionAvailable(uint8_t major, uint8_t minor)
{
	if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
		return (TRUE);
	return (FALSE);
}

static int32_t
IoOpenDeviceRegistryKey(struct device_object *devobj, uint32_t type,
    uint32_t mask, void **key)
{
	return (NDIS_STATUS_INVALID_DEVICE_REQUEST);
}

static ndis_status
IoGetDeviceObjectPointer(name, reqaccess, fileobj, devobj)
	unicode_string		*name;
	uint32_t		reqaccess;
	void			*fileobj;
	device_object		*devobj;
{
	return (STATUS_SUCCESS);
}

static ndis_status
IoGetDeviceProperty(devobj, regprop, buflen, prop, reslen)
	device_object		*devobj;
	uint32_t		regprop;
	uint32_t		buflen;
	void			*prop;
	uint32_t		*reslen;
{
	driver_object		*drv;
	uint16_t		**name;

	drv = devobj->do_drvobj;

	switch (regprop) {
	case DEVPROP_DRIVER_KEYNAME:
		name = prop;
		*name = drv->dro_drivername.us_buf;
		*reslen = drv->dro_drivername.us_len;
		break;
	default:
		return (STATUS_INVALID_PARAMETER_2);
		break;
	}

	return (STATUS_SUCCESS);
}

static void
KeInitializeMutex(kmutex, level)
	kmutant			*kmutex;
	uint32_t		level;
{
	InitializeListHead((&kmutex->km_header.dh_waitlisthead));
	kmutex->km_abandoned = FALSE;
	kmutex->km_apcdisable = 1;
	kmutex->km_header.dh_sigstate = 1;
	kmutex->km_header.dh_type = DISP_TYPE_MUTANT;
	kmutex->km_header.dh_size = sizeof(kmutant) / sizeof(uint32_t);
	kmutex->km_ownerthread = NULL;
}

static uint32_t
KeReleaseMutex(kmutant *kmutex, uint8_t kwait)
{
	uint32_t		prevstate;

	mtx_lock(&ntoskrnl_dispatchlock);
	prevstate = kmutex->km_header.dh_sigstate;
	if (kmutex->km_ownerthread != curthread) {
		mtx_unlock(&ntoskrnl_dispatchlock);
		return (STATUS_MUTANT_NOT_OWNED);
	}

	kmutex->km_header.dh_sigstate++;
	kmutex->km_abandoned = FALSE;

	if (kmutex->km_header.dh_sigstate == 1) {
		kmutex->km_ownerthread = NULL;
		ntoskrnl_waittest(&kmutex->km_header, IO_NO_INCREMENT);
	}

	mtx_unlock(&ntoskrnl_dispatchlock);

	return (prevstate);
}
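/*
 * Illustrative sketch (compiled out): for a mutant, dh_sigstate == 1
 * means "free". A successful wait decrements it, and KeReleaseMutex()
 * increments it back, waking waiters once it returns to 1 (which is
 * how recursive acquisition nests). The mutex variable is
 * hypothetical.
 */
#if 0
static void
example_mutex(kmutant *m)
{
	KeInitializeMutex(m, 0);		/* sigstate = 1: free */
	KeWaitForSingleObject(m, 0, 0, FALSE, NULL); /* sigstate -> 0 */
	/* ... critical section ... */
	KeReleaseMutex(m, FALSE);		/* sigstate -> 1, wake */
}
#endif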
static uint32_t
KeReadStateMutex(kmutex)
	kmutant			*kmutex;
{
	return (kmutex->km_header.dh_sigstate);
}

void
KeInitializeEvent(nt_kevent *kevent, uint32_t type, uint8_t state)
{
	InitializeListHead((&kevent->k_header.dh_waitlisthead));
	kevent->k_header.dh_sigstate = state;
	if (type == EVENT_TYPE_NOTIFY)
		kevent->k_header.dh_type = DISP_TYPE_NOTIFICATION_EVENT;
	else
		kevent->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_EVENT;
	kevent->k_header.dh_size = sizeof(nt_kevent) / sizeof(uint32_t);
}

uint32_t
KeResetEvent(kevent)
	nt_kevent		*kevent;
{
	uint32_t		prevstate;

	mtx_lock(&ntoskrnl_dispatchlock);
	prevstate = kevent->k_header.dh_sigstate;
	kevent->k_header.dh_sigstate = FALSE;
	mtx_unlock(&ntoskrnl_dispatchlock);

	return (prevstate);
}

uint32_t
KeSetEvent(nt_kevent *kevent, uint32_t increment, uint8_t kwait)
{
	uint32_t		prevstate;
	wait_block		*w;
	nt_dispatch_header	*dh;
	struct thread		*td;
	wb_ext			*we;

	mtx_lock(&ntoskrnl_dispatchlock);
	prevstate = kevent->k_header.dh_sigstate;
	dh = &kevent->k_header;

	if (IsListEmpty(&dh->dh_waitlisthead))
		/*
		 * If there's nobody in the waitlist, just set
		 * the state to signalled.
		 */
		dh->dh_sigstate = 1;
	else {
		/*
		 * Get the first waiter. If this is a synchronization
		 * event, just wake up that one thread (don't bother
		 * setting the state to signalled since we're supposed
		 * to automatically clear synchronization events anyway).
		 *
		 * If it's a notification event, or the first
		 * waiter is doing a WAITTYPE_ALL wait, go through
		 * the full wait satisfaction process.
		 */
		w = CONTAINING_RECORD(dh->dh_waitlisthead.nle_flink,
		    wait_block, wb_waitlist);
		we = w->wb_ext;
		td = we->we_td;
		if (kevent->k_header.dh_type == DISP_TYPE_NOTIFICATION_EVENT ||
		    w->wb_waittype == WAITTYPE_ALL) {
			if (prevstate == 0) {
				dh->dh_sigstate = 1;
				ntoskrnl_waittest(dh, increment);
			}
		} else {
			w->wb_awakened |= TRUE;
			cv_broadcastpri(&we->we_cv,
			    (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
			    w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);
		}
	}

	mtx_unlock(&ntoskrnl_dispatchlock);

	return (prevstate);
}

void
KeClearEvent(kevent)
	nt_kevent		*kevent;
{
	kevent->k_header.dh_sigstate = FALSE;
}

uint32_t
KeReadStateEvent(kevent)
	nt_kevent		*kevent;
{
	return (kevent->k_header.dh_sigstate);
}
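/*
 * Illustrative sketch (compiled out): the synchronization-event
 * handshake implemented above. One thread waits, another signals;
 * SYNC events auto-clear, so only one waiter wakes per KeSetEvent().
 * The event variable is hypothetical and the two halves would run in
 * different threads.
 */
#if 0
static void
example_event_handshake(nt_kevent *ev)
{
	KeInitializeEvent(ev, EVENT_TYPE_SYNC, FALSE);
	/* Consumer side: block until signalled. */
	KeWaitForSingleObject(ev, 0, 0, TRUE, NULL);
	/* Producer side (in another thread): wake one waiter. */
	KeSetEvent(ev, IO_NO_INCREMENT, FALSE);
}
#endif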
/*
 * The object manager in Windows is responsible for managing
 * references and access to various types of objects, including
 * device_objects, events, threads, timers and so on. However,
 * there's a difference in the way objects are handled in user
 * mode versus kernel mode.
 *
 * In user mode (i.e. Win32 applications), all objects are
 * managed by the object manager. For example, when you create
 * a timer or event object, you actually end up with an
 * object_header (for the object manager's bookkeeping
 * purposes) and an object body (which contains the actual object
 * structure, e.g. ktimer, kevent, etc...). This allows Windows
 * to manage resource quotas and to enforce access restrictions
 * on basically every kind of system object handled by the kernel.
 *
 * However, in kernel mode, you only end up using the object
 * manager some of the time. For example, in a driver, you create
 * a timer object by simply allocating the memory for a ktimer
 * structure and initializing it with KeInitializeTimer(). Hence,
 * the timer has no object_header and no reference counting or
 * security/resource checks are done on it. The assumption in
 * this case is that if you're running in kernel mode, you know
 * what you're doing, and you're already at an elevated privilege
 * level anyway.
 *
 * There are some exceptions to this. The two most important ones
 * for our purposes are device_objects and threads. We need to use
 * the object manager to do reference counting on device_objects,
 * and for threads, you can only get a pointer to a thread's
 * dispatch header by using ObReferenceObjectByHandle() on the
 * handle returned by PsCreateSystemThread().
 */

static ndis_status
ObReferenceObjectByHandle(ndis_handle handle, uint32_t reqaccess, void *otype,
    uint8_t accessmode, void **object, void **handleinfo)
{
	nt_objref		*nr;

	nr = malloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (nr == NULL)
		return (STATUS_INSUFFICIENT_RESOURCES);

	InitializeListHead((&nr->no_dh.dh_waitlisthead));
	nr->no_obj = handle;
	nr->no_dh.dh_type = DISP_TYPE_THREAD;
	nr->no_dh.dh_sigstate = 0;
	nr->no_dh.dh_size = (uint8_t)(sizeof(struct thread) /
	    sizeof(uint32_t));
	TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
	*object = nr;

	return (STATUS_SUCCESS);
}

static void
ObfDereferenceObject(object)
	void			*object;
{
	nt_objref		*nr;

	nr = object;
	TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
	free(nr, M_DEVBUF);
}

static uint32_t
ZwClose(handle)
	ndis_handle		handle;
{
	return (STATUS_SUCCESS);
}

static uint32_t
WmiQueryTraceInformation(traceclass, traceinfo, infolen, reqlen, buf)
	uint32_t		traceclass;
	void			*traceinfo;
	uint32_t		infolen;
	uint32_t		reqlen;
	void			*buf;
{
	return (STATUS_NOT_FOUND);
}

static uint32_t
WmiTraceMessage(uint64_t loghandle, uint32_t messageflags,
    void *guid, uint16_t messagenum, ...)
{
	return (STATUS_SUCCESS);
}

static uint32_t
IoWMIRegistrationControl(dobj, action)
	device_object		*dobj;
	uint32_t		action;
{
	return (STATUS_SUCCESS);
}
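/*
 * Illustrative sketch (compiled out): the only supported way to wait
 * for a system thread to exit, per the comment above -- reference the
 * handle returned by PsCreateSystemThread(), wait on the resulting
 * object (its dispatch header is signalled by
 * PsTerminateSystemThread() below), then drop the reference. The
 * arguments shown as 0/NULL are ignored by this implementation.
 */
#if 0
static void
example_join_thread(ndis_handle handle)
{
	void			*ref;

	if (ObReferenceObjectByHandle(handle, 0, NULL, 0, &ref, NULL))
		return;
	KeWaitForSingleObject(ref, 0, 0, FALSE, NULL);
	ObfDereferenceObject(ref);
}
#endif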
/*
 * This is here just in case the thread returns without calling
 * PsTerminateSystemThread().
 */
static void
ntoskrnl_thrfunc(arg)
	void			*arg;
{
	thread_context		*thrctx;
	uint32_t		(*tfunc)(void *);
	void			*tctx;
	uint32_t		rval;

	thrctx = arg;
	tfunc = thrctx->tc_thrfunc;
	tctx = thrctx->tc_thrctx;
	free(thrctx, M_TEMP);

	rval = MSCALL1(tfunc, tctx);

	PsTerminateSystemThread(rval);
	return; /* notreached */
}

static ndis_status
PsCreateSystemThread(handle, reqaccess, objattrs, phandle,
    clientid, thrfunc, thrctx)
	ndis_handle		*handle;
	uint32_t		reqaccess;
	void			*objattrs;
	ndis_handle		phandle;
	void			*clientid;
	void			*thrfunc;
	void			*thrctx;
{
	int			error;
	thread_context		*tc;
	struct proc		*p;

	tc = malloc(sizeof(thread_context), M_TEMP, M_NOWAIT);
	if (tc == NULL)
		return (STATUS_INSUFFICIENT_RESOURCES);

	tc->tc_thrctx = thrctx;
	tc->tc_thrfunc = thrfunc;

	error = kproc_create(ntoskrnl_thrfunc, tc, &p,
	    RFHIGHPID, NDIS_KSTACK_PAGES, "Windows Kthread %d", ntoskrnl_kth);

	if (error) {
		free(tc, M_TEMP);
		return (STATUS_INSUFFICIENT_RESOURCES);
	}

	*handle = p;
	ntoskrnl_kth++;

	return (STATUS_SUCCESS);
}

/*
 * In Windows, the exit of a thread is an event that you're allowed
 * to wait on, assuming you've obtained a reference to the thread using
 * ObReferenceObjectByHandle(). Unfortunately, the only way we can
 * simulate this behavior is to register each thread we create in a
 * reference list, and if someone holds a reference to us, we poke
 * them.
 */
static ndis_status
PsTerminateSystemThread(status)
	ndis_status		status;
{
	struct nt_objref	*nr;

	mtx_lock(&ntoskrnl_dispatchlock);
	TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
		if (nr->no_obj != curthread->td_proc)
			continue;
		nr->no_dh.dh_sigstate = 1;
		ntoskrnl_waittest(&nr->no_dh, IO_NO_INCREMENT);
		break;
	}
	mtx_unlock(&ntoskrnl_dispatchlock);

	ntoskrnl_kth--;

	kproc_exit(0);
	return (0); /* notreached */
}

static uint32_t
DbgPrint(char *fmt, ...)
{
	va_list			ap;

	if (bootverbose) {
		va_start(ap, fmt);
		vprintf(fmt, ap);
		va_end(ap);
	}

	return (STATUS_SUCCESS);
}

static void
DbgBreakPoint(void)
{

	kdb_enter(KDB_WHY_NDIS, "DbgBreakPoint(): breakpoint");
}

static void
KeBugCheckEx(code, param1, param2, param3, param4)
	uint32_t		code;
	u_long			param1;
	u_long			param2;
	u_long			param3;
	u_long			param4;
{
	panic("KeBugCheckEx: STOP 0x%X", code);
}
static void
ntoskrnl_timercall(arg)
	void			*arg;
{
	ktimer			*timer;
	struct timeval		tv;
	kdpc			*dpc;

	mtx_lock(&ntoskrnl_dispatchlock);

	timer = arg;

#ifdef NTOSKRNL_DEBUG_TIMERS
	ntoskrnl_timer_fires++;
#endif
	ntoskrnl_remove_timer(timer);

	/*
	 * This should never happen, but complain
	 * if it does.
	 */

	if (timer->k_header.dh_inserted == FALSE) {
		mtx_unlock(&ntoskrnl_dispatchlock);
		printf("NTOS: timer %p fired even though "
		    "it was canceled\n", timer);
		return;
	}

	/* Mark the timer as no longer being on the timer queue. */

	timer->k_header.dh_inserted = FALSE;

	/* Now signal the object and satisfy any waits on it. */

	timer->k_header.dh_sigstate = 1;
	ntoskrnl_waittest(&timer->k_header, IO_NO_INCREMENT);

	/*
	 * If this is a periodic timer, re-arm it
	 * so it will fire again. We do this before
	 * calling any deferred procedure calls because
	 * it's possible the DPC might cancel the timer,
	 * in which case it would be wrong for us to
	 * re-arm it again afterwards.
	 */

	if (timer->k_period) {
		tv.tv_sec = 0;
		tv.tv_usec = timer->k_period * 1000;
		timer->k_header.dh_inserted = TRUE;
		ntoskrnl_insert_timer(timer, tvtohz(&tv));
#ifdef NTOSKRNL_DEBUG_TIMERS
		ntoskrnl_timer_reloads++;
#endif
	}

	dpc = timer->k_dpc;

	mtx_unlock(&ntoskrnl_dispatchlock);

	/* If there's a DPC associated with the timer, queue it up. */

	if (dpc != NULL)
		KeInsertQueueDpc(dpc, NULL, NULL);
}

#ifdef NTOSKRNL_DEBUG_TIMERS
static int
sysctl_show_timers(SYSCTL_HANDLER_ARGS)
{
	int			ret;

	ret = 0;
	ntoskrnl_show_timers();
	return (sysctl_handle_int(oidp, &ret, 0, req));
}

static void
ntoskrnl_show_timers()
{
	int			i = 0;
	list_entry		*l;

	mtx_lock_spin(&ntoskrnl_calllock);
	l = ntoskrnl_calllist.nle_flink;
	while (l != &ntoskrnl_calllist) {
		i++;
		l = l->nle_flink;
	}
	mtx_unlock_spin(&ntoskrnl_calllock);

	printf("\n");
	printf("%d timers available (out of %d)\n", i, NTOSKRNL_TIMEOUTS);
	printf("timer sets: %qu\n", ntoskrnl_timer_sets);
	printf("timer reloads: %qu\n", ntoskrnl_timer_reloads);
	printf("timer cancels: %qu\n", ntoskrnl_timer_cancels);
	printf("timer fires: %qu\n", ntoskrnl_timer_fires);
	printf("\n");
}
#endif

/*
 * Must be called with dispatcher lock held.
 */

static void
ntoskrnl_insert_timer(timer, ticks)
	ktimer			*timer;
	int			ticks;
{
	callout_entry		*e;
	list_entry		*l;
	struct callout		*c;

	/*
	 * Try to allocate a timer.
	 */
	mtx_lock_spin(&ntoskrnl_calllock);
	if (IsListEmpty(&ntoskrnl_calllist)) {
		mtx_unlock_spin(&ntoskrnl_calllock);
#ifdef NTOSKRNL_DEBUG_TIMERS
		ntoskrnl_show_timers();
#endif
		panic("out of timers!");
	}
	l = RemoveHeadList(&ntoskrnl_calllist);
	mtx_unlock_spin(&ntoskrnl_calllock);

	e = CONTAINING_RECORD(l, callout_entry, ce_list);
	c = &e->ce_callout;

	timer->k_callout = c;

	callout_init(c, CALLOUT_MPSAFE);
	callout_reset(c, ticks, ntoskrnl_timercall, timer);
}
static void
ntoskrnl_remove_timer(timer)
	ktimer			*timer;
{
	callout_entry		*e;

	e = (callout_entry *)timer->k_callout;
	callout_stop(timer->k_callout);

	mtx_lock_spin(&ntoskrnl_calllock);
	InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
	mtx_unlock_spin(&ntoskrnl_calllock);
}

void
KeInitializeTimer(timer)
	ktimer			*timer;
{
	if (timer == NULL)
		return;

	KeInitializeTimerEx(timer, EVENT_TYPE_NOTIFY);
}

void
KeInitializeTimerEx(timer, type)
	ktimer			*timer;
	uint32_t		type;
{
	if (timer == NULL)
		return;

	bzero((char *)timer, sizeof(ktimer));
	InitializeListHead((&timer->k_header.dh_waitlisthead));
	timer->k_header.dh_sigstate = FALSE;
	timer->k_header.dh_inserted = FALSE;
	if (type == EVENT_TYPE_NOTIFY)
		timer->k_header.dh_type = DISP_TYPE_NOTIFICATION_TIMER;
	else
		timer->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_TIMER;
	timer->k_header.dh_size = sizeof(ktimer) / sizeof(uint32_t);
}

/*
 * DPC subsystem. A Windows Deferred Procedure Call has the following
 * properties:
 * - It runs at DISPATCH_LEVEL.
 * - It can have one of 3 importance values that control when it
 *   runs relative to other DPCs in the queue.
 * - On SMP systems, it can be set to run on a specific processor.
 * In order to satisfy the last property, we create a DPC thread for
 * each CPU in the system and bind it to that CPU. Each thread
 * maintains a dispatch queue; DPCs of low importance are appended
 * to the tail of the queue, while the others are inserted at the
 * head so that they run first.
 *
 * In Windows, interrupt handlers run as DPCs. (Not to be confused
 * with ISRs, which run in interrupt context and can preempt DPCs.)
 * ISRs are given the highest importance so that they'll take
 * precedence over timers and other things.
 */

static void
ntoskrnl_dpc_thread(arg)
	void			*arg;
{
	kdpc_queue		*kq;
	kdpc			*d;
	list_entry		*l;
	uint8_t			irql;

	kq = arg;

	InitializeListHead(&kq->kq_disp);
	kq->kq_td = curthread;
	kq->kq_exit = 0;
	kq->kq_running = FALSE;
	KeInitializeSpinLock(&kq->kq_lock);
	KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
	KeInitializeEvent(&kq->kq_done, EVENT_TYPE_SYNC, FALSE);

	/*
	 * Elevate our priority. DPCs are used to run interrupt
	 * handlers, and they should trigger as soon as possible
	 * once scheduled by an ISR.
	 */

	thread_lock(curthread);
#ifdef NTOSKRNL_MULTIPLE_DPCS
	sched_bind(curthread, kq->kq_cpu);
#endif
	sched_prio(curthread, PRI_MIN_KERN);
	thread_unlock(curthread);

	while (1) {
		KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);

		KeAcquireSpinLock(&kq->kq_lock, &irql);

		if (kq->kq_exit) {
			kq->kq_exit = 0;
			KeReleaseSpinLock(&kq->kq_lock, irql);
			break;
		}

		kq->kq_running = TRUE;

		while (!IsListEmpty(&kq->kq_disp)) {
			l = RemoveHeadList((&kq->kq_disp));
			d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
			InitializeListHead((&d->k_dpclistentry));
			KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
			MSCALL4(d->k_deferedfunc, d, d->k_deferredctx,
			    d->k_sysarg1, d->k_sysarg2);
			KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
		}

		kq->kq_running = FALSE;

		KeReleaseSpinLock(&kq->kq_lock, irql);

		KeSetEvent(&kq->kq_done, IO_NO_INCREMENT, FALSE);
	}

	kproc_exit(0);
	return; /* notreached */
}
static void
ntoskrnl_destroy_dpc_threads(void)
{
	kdpc_queue		*kq;
	kdpc			dpc;
	int			i;

#ifdef NTOSKRNL_MULTIPLE_DPCS
	for (i = 0; i < mp_ncpus; i++) {
#else
	for (i = 0; i < 1; i++) {
#endif
		/*
		 * Index each queue directly; a cumulative "kq += i"
		 * would skip queues once i exceeds 1.
		 */
		kq = kq_queues + i;

		kq->kq_exit = 1;
		KeInitializeDpc(&dpc, NULL, NULL);
		KeSetTargetProcessorDpc(&dpc, i);
		KeInsertQueueDpc(&dpc, NULL, NULL);
		while (kq->kq_exit)
			tsleep(kq->kq_td->td_proc, PWAIT, "dpcw", hz/10);
	}
}

static uint8_t
ntoskrnl_insert_dpc(head, dpc)
	list_entry		*head;
	kdpc			*dpc;
{
	list_entry		*l;
	kdpc			*d;

	l = head->nle_flink;
	while (l != head) {
		d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
		if (d == dpc)
			return (FALSE);
		l = l->nle_flink;
	}

	if (dpc->k_importance == KDPC_IMPORTANCE_LOW)
		InsertTailList((head), (&dpc->k_dpclistentry));
	else
		InsertHeadList((head), (&dpc->k_dpclistentry));

	return (TRUE);
}

void
KeInitializeDpc(dpc, dpcfunc, dpcctx)
	kdpc			*dpc;
	void			*dpcfunc;
	void			*dpcctx;
{

	if (dpc == NULL)
		return;

	dpc->k_deferedfunc = dpcfunc;
	dpc->k_deferredctx = dpcctx;
	dpc->k_num = KDPC_CPU_DEFAULT;
	dpc->k_importance = KDPC_IMPORTANCE_MEDIUM;
	InitializeListHead((&dpc->k_dpclistentry));
}

uint8_t
KeInsertQueueDpc(dpc, sysarg1, sysarg2)
	kdpc			*dpc;
	void			*sysarg1;
	void			*sysarg2;
{
	kdpc_queue		*kq;
	uint8_t			r;
	uint8_t			irql;

	if (dpc == NULL)
		return (FALSE);

	kq = kq_queues;

#ifdef NTOSKRNL_MULTIPLE_DPCS
	KeRaiseIrql(DISPATCH_LEVEL, &irql);

	/*
	 * By default, the DPC is queued to run on the same CPU
	 * that scheduled it.
	 */

	if (dpc->k_num == KDPC_CPU_DEFAULT)
		kq += curthread->td_oncpu;
	else
		kq += dpc->k_num;
	KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
#else
	KeAcquireSpinLock(&kq->kq_lock, &irql);
#endif

	r = ntoskrnl_insert_dpc(&kq->kq_disp, dpc);
	if (r == TRUE) {
		dpc->k_sysarg1 = sysarg1;
		dpc->k_sysarg2 = sysarg2;
	}
	KeReleaseSpinLock(&kq->kq_lock, irql);

	if (r == FALSE)
		return (r);

	KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);

	return (r);
}
uint8_t
KeRemoveQueueDpc(dpc)
	kdpc			*dpc;
{
	kdpc_queue		*kq;
	uint8_t			irql;

	if (dpc == NULL)
		return (FALSE);

#ifdef NTOSKRNL_MULTIPLE_DPCS
	KeRaiseIrql(DISPATCH_LEVEL, &irql);

	kq = kq_queues + dpc->k_num;

	KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
#else
	kq = kq_queues;
	KeAcquireSpinLock(&kq->kq_lock, &irql);
#endif

	if (dpc->k_dpclistentry.nle_flink == &dpc->k_dpclistentry) {
		KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
		KeLowerIrql(irql);
		return (FALSE);
	}

	RemoveEntryList((&dpc->k_dpclistentry));
	InitializeListHead((&dpc->k_dpclistentry));

	KeReleaseSpinLock(&kq->kq_lock, irql);

	return (TRUE);
}

void
KeSetImportanceDpc(dpc, imp)
	kdpc			*dpc;
	uint32_t		imp;
{
	if (imp != KDPC_IMPORTANCE_LOW &&
	    imp != KDPC_IMPORTANCE_MEDIUM &&
	    imp != KDPC_IMPORTANCE_HIGH)
		return;

	dpc->k_importance = (uint8_t)imp;
}

void
KeSetTargetProcessorDpc(kdpc *dpc, uint8_t cpu)
{
	/* Valid CPU numbers run from 0 to mp_ncpus - 1. */
	if (cpu >= mp_ncpus)
		return;

	dpc->k_num = cpu;
}

void
KeFlushQueuedDpcs(void)
{
	kdpc_queue		*kq;
	int			i;

	/*
	 * Poke each DPC queue and wait
	 * for them to drain.
	 */

#ifdef NTOSKRNL_MULTIPLE_DPCS
	for (i = 0; i < mp_ncpus; i++) {
#else
	for (i = 0; i < 1; i++) {
#endif
		kq = kq_queues + i;
		KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
		KeWaitForSingleObject(&kq->kq_done, 0, 0, TRUE, NULL);
	}
}
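/*
 * Illustrative sketch (compiled out): queueing a DPC with the
 * routines above. The deferred function is invoked via MSCALL4() by
 * ntoskrnl_dpc_thread(), so it must use the Microsoft calling
 * convention; a real driver passes a wrapped pointer. The names here
 * are hypothetical.
 */
#if 0
static void
example_queue_dpc(kdpc *dpc, void *msfunc, void *ctx)
{
	KeInitializeDpc(dpc, msfunc, ctx);
	KeSetImportanceDpc(dpc, KDPC_IMPORTANCE_HIGH);
	KeSetTargetProcessorDpc(dpc, 0);	/* pin to CPU 0 */
	KeInsertQueueDpc(dpc, NULL, NULL);	/* runs in a DPC thread */
}
#endif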
uint32_t
KeGetCurrentProcessorNumber(void)
{
	return ((uint32_t)curthread->td_oncpu);
}

uint8_t
KeSetTimerEx(timer, duetime, period, dpc)
	ktimer			*timer;
	int64_t			duetime;
	uint32_t		period;
	kdpc			*dpc;
{
	struct timeval		tv;
	uint64_t		curtime;
	uint8_t			pending;

	if (timer == NULL)
		return (FALSE);

	mtx_lock(&ntoskrnl_dispatchlock);

	if (timer->k_header.dh_inserted == TRUE) {
		ntoskrnl_remove_timer(timer);
#ifdef NTOSKRNL_DEBUG_TIMERS
		ntoskrnl_timer_cancels++;
#endif
		timer->k_header.dh_inserted = FALSE;
		pending = TRUE;
	} else
		pending = FALSE;

	timer->k_duetime = duetime;
	timer->k_period = period;
	timer->k_header.dh_sigstate = FALSE;
	timer->k_dpc = dpc;

	if (duetime < 0) {
		tv.tv_sec = - (duetime) / 10000000;
		tv.tv_usec = (- (duetime) / 10) -
		    (tv.tv_sec * 1000000);
	} else {
		ntoskrnl_time(&curtime);
		if (duetime < curtime)
			tv.tv_sec = tv.tv_usec = 0;
		else {
			tv.tv_sec = ((duetime) - curtime) / 10000000;
			tv.tv_usec = ((duetime) - curtime) / 10 -
			    (tv.tv_sec * 1000000);
		}
	}

	timer->k_header.dh_inserted = TRUE;
	ntoskrnl_insert_timer(timer, tvtohz(&tv));
#ifdef NTOSKRNL_DEBUG_TIMERS
	ntoskrnl_timer_sets++;
#endif

	mtx_unlock(&ntoskrnl_dispatchlock);

	return (pending);
}

uint8_t
KeSetTimer(timer, duetime, dpc)
	ktimer			*timer;
	int64_t			duetime;
	kdpc			*dpc;
{
	return (KeSetTimerEx(timer, duetime, 0, dpc));
}
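/*
 * Illustrative sketch (compiled out): duetime is expressed in 100ns
 * units; a negative value is relative to now, a positive one is an
 * absolute system time (hence the ntoskrnl_time() comparison above).
 * This arms a timer 500ms out that then refires every 100ms, queueing
 * the (hypothetical, already-initialized) DPC on each expiration.
 */
#if 0
static void
example_periodic_timer(ktimer *t, kdpc *dpc)
{
	KeInitializeTimerEx(t, EVENT_TYPE_NOTIFY);
	KeSetTimerEx(t, -5000000, 100, dpc); /* -500ms due, 100ms period */
}
#endif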
/*
 * The Windows DDK documentation seems to say that cancelling
 * a timer that has a DPC will result in the DPC also being
 * cancelled, but this isn't really the case.
 */

uint8_t
KeCancelTimer(timer)
	ktimer			*timer;
{
	uint8_t			pending;

	if (timer == NULL)
		return (FALSE);

	mtx_lock(&ntoskrnl_dispatchlock);

	pending = timer->k_header.dh_inserted;

	if (timer->k_header.dh_inserted == TRUE) {
		timer->k_header.dh_inserted = FALSE;
		ntoskrnl_remove_timer(timer);
#ifdef NTOSKRNL_DEBUG_TIMERS
		ntoskrnl_timer_cancels++;
#endif
	}

	mtx_unlock(&ntoskrnl_dispatchlock);

	return (pending);
}

uint8_t
KeReadStateTimer(timer)
	ktimer			*timer;
{
	return (timer->k_header.dh_sigstate);
}

static int32_t
KeDelayExecutionThread(uint8_t wait_mode, uint8_t alertable, int64_t *interval)
{
	ktimer			timer;

	if (wait_mode != 0)
		panic("invalid wait_mode %d", wait_mode);

	KeInitializeTimer(&timer);
	KeSetTimer(&timer, *interval, NULL);
	KeWaitForSingleObject(&timer, 0, 0, alertable, NULL);

	return (STATUS_SUCCESS);
}

static uint64_t
KeQueryInterruptTime(void)
{
	int			ticks;
	struct timeval		tv;

	getmicrouptime(&tv);

	ticks = tvtohz(&tv);

	return (ticks * ((10000000 + hz - 1) / hz));
}

static struct thread *
KeGetCurrentThread(void)
{

	return (curthread);
}

static int32_t
KeSetPriorityThread(td, pri)
	struct thread		*td;
	int32_t			pri;
{
	int32_t			old;

	if (td == NULL)
		return (LOW_REALTIME_PRIORITY);

	if (td->td_priority <= PRI_MIN_KERN)
		old = HIGH_PRIORITY;
	else if (td->td_priority >= PRI_MAX_KERN)
		old = LOW_PRIORITY;
	else
		old = LOW_REALTIME_PRIORITY;

	thread_lock(td);
	if (pri == HIGH_PRIORITY)
		sched_prio(td, PRI_MIN_KERN);
	if (pri == LOW_REALTIME_PRIORITY)
		sched_prio(td, PRI_MIN_KERN +
		    (PRI_MAX_KERN - PRI_MIN_KERN) / 2);
	if (pri == LOW_PRIORITY)
		sched_prio(td, PRI_MAX_KERN);
	thread_unlock(td);

	return (old);
}

static void
dummy()
{
	printf("ntoskrnl dummy called...\n");
}
image_patch_table ntoskrnl_functbl[] = {
	IMPORT_SFUNC(RtlZeroMemory, 2),
	IMPORT_SFUNC(RtlSecureZeroMemory, 2),
	IMPORT_SFUNC(RtlFillMemory, 3),
	IMPORT_SFUNC(RtlMoveMemory, 3),
	IMPORT_SFUNC(RtlCharToInteger, 3),
	IMPORT_SFUNC(RtlCopyMemory, 3),
	IMPORT_SFUNC(RtlCopyString, 2),
	IMPORT_SFUNC(RtlCompareMemory, 3),
	IMPORT_SFUNC(RtlEqualUnicodeString, 3),
	IMPORT_SFUNC(RtlCopyUnicodeString, 2),
	IMPORT_SFUNC(RtlUnicodeStringToAnsiString, 3),
	IMPORT_SFUNC(RtlAnsiStringToUnicodeString, 3),
	IMPORT_SFUNC(RtlInitAnsiString, 2),
	IMPORT_SFUNC_MAP(RtlInitString, RtlInitAnsiString, 2),
	IMPORT_SFUNC(RtlInitUnicodeString, 2),
	IMPORT_SFUNC(RtlFreeAnsiString, 1),
	IMPORT_SFUNC(RtlFreeUnicodeString, 1),
	IMPORT_SFUNC(RtlUnicodeStringToInteger, 3),
	IMPORT_CFUNC(sprintf, 0),
	IMPORT_CFUNC(vsprintf, 0),
	IMPORT_CFUNC_MAP(_snprintf, snprintf, 0),
	IMPORT_CFUNC_MAP(_vsnprintf, vsnprintf, 0),
	IMPORT_CFUNC(DbgPrint, 0),
	IMPORT_SFUNC(DbgBreakPoint, 0),
	IMPORT_SFUNC(KeBugCheckEx, 5),
	IMPORT_CFUNC(strncmp, 0),
	IMPORT_CFUNC(strcmp, 0),
	IMPORT_CFUNC_MAP(stricmp, strcasecmp, 0),
	IMPORT_CFUNC(strncpy, 0),
	IMPORT_CFUNC(strcpy, 0),
	IMPORT_CFUNC(strlen, 0),
	IMPORT_CFUNC_MAP(toupper, ntoskrnl_toupper, 0),
	IMPORT_CFUNC_MAP(tolower, ntoskrnl_tolower, 0),
	IMPORT_CFUNC_MAP(strstr, ntoskrnl_strstr, 0),
	IMPORT_CFUNC_MAP(strncat, ntoskrnl_strncat, 0),
	IMPORT_CFUNC_MAP(strchr, index, 0),
	IMPORT_CFUNC_MAP(strrchr, rindex, 0),
	IMPORT_CFUNC(memcpy, 0),
	IMPORT_CFUNC_MAP(memmove, ntoskrnl_memmove, 0),
	IMPORT_CFUNC_MAP(memset, ntoskrnl_memset, 0),
	IMPORT_CFUNC_MAP(memchr, ntoskrnl_memchr, 0),
	IMPORT_SFUNC(IoAllocateDriverObjectExtension, 4),
	IMPORT_SFUNC(IoGetDriverObjectExtension, 2),
	IMPORT_FFUNC(IofCallDriver, 2),
	IMPORT_FFUNC(IofCompleteRequest, 2),
	IMPORT_SFUNC(IoAcquireCancelSpinLock, 1),
	IMPORT_SFUNC(IoReleaseCancelSpinLock, 1),
	IMPORT_SFUNC(IoCancelIrp, 1),
	IMPORT_SFUNC(IoConnectInterrupt, 11),
	IMPORT_SFUNC(IoDisconnectInterrupt, 1),
	IMPORT_SFUNC(IoCreateDevice, 7),
	IMPORT_SFUNC(IoDeleteDevice, 1),
	IMPORT_SFUNC(IoGetAttachedDevice, 1),
	IMPORT_SFUNC(IoAttachDeviceToDeviceStack, 2),
	IMPORT_SFUNC(IoDetachDevice, 1),
	IMPORT_SFUNC(IoBuildSynchronousFsdRequest, 7),
	IMPORT_SFUNC(IoBuildAsynchronousFsdRequest, 6),
	IMPORT_SFUNC(IoBuildDeviceIoControlRequest, 9),
	IMPORT_SFUNC(IoAllocateIrp, 2),
	IMPORT_SFUNC(IoReuseIrp, 2),
	IMPORT_SFUNC(IoMakeAssociatedIrp, 2),
	IMPORT_SFUNC(IoFreeIrp, 1),
	IMPORT_SFUNC(IoInitializeIrp, 3),
	IMPORT_SFUNC(KeAcquireInterruptSpinLock, 1),
	IMPORT_SFUNC(KeReleaseInterruptSpinLock, 2),
	IMPORT_SFUNC(KeSynchronizeExecution, 3),
	IMPORT_SFUNC(KeWaitForSingleObject, 5),
	IMPORT_SFUNC(KeWaitForMultipleObjects, 8),
	IMPORT_SFUNC(_allmul, 4),
	IMPORT_SFUNC(_alldiv, 4),
	IMPORT_SFUNC(_allrem, 4),
	IMPORT_RFUNC(_allshr, 0),
	IMPORT_RFUNC(_allshl, 0),
	IMPORT_SFUNC(_aullmul, 4),
	IMPORT_SFUNC(_aulldiv, 4),
	IMPORT_SFUNC(_aullrem, 4),
	IMPORT_RFUNC(_aullshr, 0),
	IMPORT_RFUNC(_aullshl, 0),
	IMPORT_CFUNC(atoi, 0),
	IMPORT_CFUNC(atol, 0),
	IMPORT_CFUNC(rand, 0),
	IMPORT_CFUNC(srand, 0),
	IMPORT_SFUNC(WRITE_REGISTER_USHORT, 2),
	IMPORT_SFUNC(READ_REGISTER_USHORT, 1),
	IMPORT_SFUNC(WRITE_REGISTER_ULONG, 2),
	IMPORT_SFUNC(READ_REGISTER_ULONG, 1),
	IMPORT_SFUNC(READ_REGISTER_UCHAR, 1),
	IMPORT_SFUNC(WRITE_REGISTER_UCHAR, 2),
	IMPORT_SFUNC(ExInitializePagedLookasideList, 7),
	IMPORT_SFUNC(ExDeletePagedLookasideList, 1),
	IMPORT_SFUNC(ExInitializeNPagedLookasideList, 7),
	IMPORT_SFUNC(ExDeleteNPagedLookasideList, 1),
	IMPORT_FFUNC(InterlockedPopEntrySList, 1),
	IMPORT_FFUNC(InitializeSListHead, 1),
	IMPORT_FFUNC(InterlockedPushEntrySList, 2),
	IMPORT_SFUNC(ExQueryDepthSList, 1),
	IMPORT_FFUNC_MAP(ExpInterlockedPopEntrySList,
	    InterlockedPopEntrySList, 1),
	IMPORT_FFUNC_MAP(ExpInterlockedPushEntrySList,
	    InterlockedPushEntrySList, 2),
	IMPORT_FFUNC(ExInterlockedPopEntrySList, 2),
	IMPORT_FFUNC(ExInterlockedPushEntrySList, 3),
	IMPORT_SFUNC(ExAllocatePoolWithTag, 3),
	IMPORT_SFUNC(ExFreePoolWithTag, 2),
	IMPORT_SFUNC(ExFreePool, 1),
#ifdef __i386__
	IMPORT_FFUNC(KefAcquireSpinLockAtDpcLevel, 1),
	IMPORT_FFUNC(KefReleaseSpinLockFromDpcLevel, 1),
	IMPORT_FFUNC(KeAcquireSpinLockRaiseToDpc, 1),
#else
	/*
	 * For AMD64, we can get away with just mapping
	 * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock()
	 * because the calling conventions end up being the same.
	 * On i386, we have to be careful because KfAcquireSpinLock()
	 * is _fastcall but KeAcquireSpinLockRaiseToDpc() isn't.
	 */
	IMPORT_SFUNC(KeAcquireSpinLockAtDpcLevel, 1),
	IMPORT_SFUNC(KeReleaseSpinLockFromDpcLevel, 1),
	IMPORT_SFUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock, 1),
#endif
	IMPORT_SFUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock, 1),
	IMPORT_FFUNC(InterlockedIncrement, 1),
	IMPORT_FFUNC(InterlockedDecrement, 1),
	IMPORT_FFUNC(InterlockedExchange, 2),
	IMPORT_FFUNC(ExInterlockedAddLargeStatistic, 2),
	IMPORT_SFUNC(IoAllocateMdl, 5),
	IMPORT_SFUNC(IoFreeMdl, 1),
	IMPORT_SFUNC(MmAllocateContiguousMemory, 2 + 1),
	IMPORT_SFUNC(MmAllocateContiguousMemorySpecifyCache, 5 + 3),
	IMPORT_SFUNC(MmFreeContiguousMemory, 1),
	IMPORT_SFUNC(MmFreeContiguousMemorySpecifyCache, 3),
	IMPORT_SFUNC(MmSizeOfMdl, 1),
	IMPORT_SFUNC(MmMapLockedPages, 2),
	IMPORT_SFUNC(MmMapLockedPagesSpecifyCache, 6),
	IMPORT_SFUNC(MmUnmapLockedPages, 2),
	IMPORT_SFUNC(MmBuildMdlForNonPagedPool, 1),
	IMPORT_SFUNC(MmGetPhysicalAddress, 1),
	IMPORT_SFUNC(MmGetSystemRoutineAddress, 1),
	IMPORT_SFUNC(MmIsAddressValid, 1),
	IMPORT_SFUNC(MmMapIoSpace, 3 + 1),
	IMPORT_SFUNC(MmUnmapIoSpace, 2),
	IMPORT_SFUNC(KeInitializeSpinLock, 1),
	IMPORT_SFUNC(IoIsWdmVersionAvailable, 2),
	IMPORT_SFUNC(IoOpenDeviceRegistryKey, 4),
	IMPORT_SFUNC(IoGetDeviceObjectPointer, 4),
	IMPORT_SFUNC(IoGetDeviceProperty, 5),
	IMPORT_SFUNC(IoAllocateWorkItem, 1),
	IMPORT_SFUNC(IoFreeWorkItem, 1),
	IMPORT_SFUNC(IoQueueWorkItem, 4),
	IMPORT_SFUNC(ExQueueWorkItem, 2),
	IMPORT_SFUNC(ntoskrnl_workitem, 2),
	IMPORT_SFUNC(KeInitializeMutex, 2),
	IMPORT_SFUNC(KeReleaseMutex, 2),
	IMPORT_SFUNC(KeReadStateMutex, 1),
	IMPORT_SFUNC(KeInitializeEvent, 3),
	IMPORT_SFUNC(KeSetEvent, 3),
	IMPORT_SFUNC(KeResetEvent, 1),
	IMPORT_SFUNC(KeClearEvent, 1),
	IMPORT_SFUNC(KeReadStateEvent, 1),
	IMPORT_SFUNC(KeInitializeTimer, 1),
	IMPORT_SFUNC(KeInitializeTimerEx, 2),
	IMPORT_SFUNC(KeSetTimer, 3),
	IMPORT_SFUNC(KeSetTimerEx, 4),
	IMPORT_SFUNC(KeCancelTimer, 1),
	IMPORT_SFUNC(KeReadStateTimer, 1),
	IMPORT_SFUNC(KeInitializeDpc, 3),
	IMPORT_SFUNC(KeInsertQueueDpc, 3),
	IMPORT_SFUNC(KeRemoveQueueDpc, 1),
	IMPORT_SFUNC(KeSetImportanceDpc, 2),
	IMPORT_SFUNC(KeSetTargetProcessorDpc, 2),
	IMPORT_SFUNC(KeFlushQueuedDpcs, 0),
	IMPORT_SFUNC(KeGetCurrentProcessorNumber, 1),
	IMPORT_SFUNC(ObReferenceObjectByHandle, 6),
	IMPORT_FFUNC(ObfDereferenceObject, 1),
	IMPORT_SFUNC(ZwClose, 1),
	IMPORT_SFUNC(PsCreateSystemThread, 7),
	IMPORT_SFUNC(PsTerminateSystemThread, 1),
	IMPORT_SFUNC(IoWMIRegistrationControl, 2),
	IMPORT_SFUNC(WmiQueryTraceInformation, 5),
	IMPORT_CFUNC(WmiTraceMessage, 0),
	IMPORT_SFUNC(KeQuerySystemTime, 1),
	IMPORT_CFUNC(KeTickCount, 0),
	IMPORT_SFUNC(KeDelayExecutionThread, 3),
	IMPORT_SFUNC(KeQueryInterruptTime, 0),
	IMPORT_SFUNC(KeGetCurrentThread, 0),
	IMPORT_SFUNC(KeSetPriorityThread, 2),

	/*
	 * This last entry is a catch-all for any function we haven't
	 * implemented yet. The PE import list patching routine will
	 * use it for any function that doesn't have an explicit match
	 * in this table.
	 */

	{ NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL },

	/* End of list. */

	{ NULL, NULL, NULL }
};