/* support.s revision 1690 */
/*-
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
32 * 33 * $Id: support.s,v 1.10 1994/06/06 14:23:49 davidg Exp $ 34 */ 35 36#include "assym.s" /* system definitions */ 37#include "errno.h" /* error return codes */ 38#include "machine/asmacros.h" /* miscellaneous asm macros */ 39#include "machine/cputypes.h" /* types of CPUs */ 40 41#define KDSEL 0x10 /* kernel data selector */ 42#define IDXSHIFT 10 43 44/* 45 * Support routines for GCC, general C-callable functions 46 */ 47ENTRY(__udivsi3) 48 movl 4(%esp),%eax 49 xorl %edx,%edx 50 divl 8(%esp) 51 ret 52 53ENTRY(__divsi3) 54 movl 4(%esp),%eax 55 cltd 56 idivl 8(%esp) 57 ret 58 59 /* 60 * I/O bus instructions via C 61 */ 62ENTRY(inb) /* val = inb(port) */ 63 movl 4(%esp),%edx 64 subl %eax,%eax 65 inb %dx,%al 66 NOP 67 ret 68 69ENTRY(inw) /* val = inw(port) */ 70 movl 4(%esp),%edx 71 subl %eax,%eax 72 inw %dx,%ax 73 NOP 74 ret 75 76ENTRY(insb) /* insb(port, addr, cnt) */ 77 pushl %edi 78 movw 8(%esp),%dx 79 movl 12(%esp),%edi 80 movl 16(%esp),%ecx 81 cld 82 rep 83 insb 84 NOP 85 movl %edi,%eax 86 popl %edi 87 ret 88 89ENTRY(insw) /* insw(port, addr, cnt) */ 90 pushl %edi 91 movw 8(%esp),%dx 92 movl 12(%esp),%edi 93 movl 16(%esp),%ecx 94 cld 95 rep 96 insw 97 NOP 98 movl %edi,%eax 99 popl %edi 100 ret 101 102ENTRY(insl) /* insl(port, addr, cnt) */ 103 pushl %edi 104 movw 8(%esp),%dx 105 movl 12(%esp),%edi 106 movl 16(%esp),%ecx 107 cld 108 rep 109 insl 110 NOP 111 movl %edi,%eax 112 popl %edi 113 ret 114 115ENTRY(rtcin) /* rtcin(val) */ 116 movl 4(%esp),%eax 117 outb %al,$0x70 118 NOP 119 xorl %eax,%eax 120 inb $0x71,%al 121 NOP 122 ret 123 124ENTRY(outb) /* outb(port, val) */ 125 movl 4(%esp),%edx 126 movl 8(%esp),%eax 127 outb %al,%dx 128 NOP 129 ret 130 131ENTRY(outw) /* outw(port, val) */ 132 movl 4(%esp),%edx 133 movl 8(%esp),%eax 134 outw %ax,%dx 135 NOP 136 ret 137 138ENTRY(outsb) /* outsb(port, addr, cnt) */ 139 pushl %esi 140 movw 8(%esp),%dx 141 movl 12(%esp),%esi 142 movl 16(%esp),%ecx 143 cld 144 rep 145 outsb 146 NOP 147 movl %esi,%eax 148 popl %esi 149 
ret 150 151ENTRY(outsw) /* outsw(port, addr, cnt) */ 152 pushl %esi 153 movw 8(%esp),%dx 154 movl 12(%esp),%esi 155 movl 16(%esp),%ecx 156 cld 157 rep 158 outsw 159 NOP 160 movl %esi,%eax 161 popl %esi 162 ret 163 164ENTRY(outsl) /* outsl(port, addr, cnt) */ 165 pushl %esi 166 movw 8(%esp),%dx 167 movl 12(%esp),%esi 168 movl 16(%esp),%ecx 169 cld 170 rep 171 outsl 172 NOP 173 movl %esi,%eax 174 popl %esi 175 ret 176 177/* 178 * bcopy family 179 */ 180/* 181 * void bzero(void *base, u_int cnt) 182 * Special code for I486 because stosl uses lots 183 * of clocks. Makes little or no difference on DX2 type 184 * machines, but about stosl is about 1/2 as fast as 185 * memory moves on standard DX !!!!! 186 */ 187 188ALTENTRY(blkclr) 189ENTRY(bzero) 190#if defined(I486_CPU) && (defined(I386_CPU) || defined(I586_CPU)) 191 cmpl $CPUCLASS_486,_cpu_class 192 jz 1f 193#endif 194#if defined(I386_CPU) || defined(I586_CPU) 195 pushl %edi 196 movl 8(%esp),%edi 197 movl 12(%esp),%ecx 198 xorl %eax,%eax 199 shrl $2,%ecx 200 cld 201 rep 202 stosl 203 movl 12(%esp),%ecx 204 andl $3,%ecx 205 rep 206 stosb 207 popl %edi 208 ret 209 .align 4 210#endif 211#if defined(I486_CPU) 2121: 213 movl 4(%esp),%edx 214 movl 8(%esp),%ecx 215 xorl %eax,%eax 216/ 217/ do 64 byte chunks first 218/ 2192: 220 cmpl $64,%ecx 221 jb 3f 222 movl %eax,(%edx) 223 movl %eax,4(%edx) 224 movl %eax,8(%edx) 225 movl %eax,12(%edx) 226 movl %eax,16(%edx) 227 movl %eax,20(%edx) 228 movl %eax,24(%edx) 229 movl %eax,28(%edx) 230 movl %eax,32(%edx) 231 movl %eax,36(%edx) 232 movl %eax,40(%edx) 233 movl %eax,44(%edx) 234 movl %eax,48(%edx) 235 movl %eax,52(%edx) 236 movl %eax,56(%edx) 237 movl %eax,60(%edx) 238 addl $64,%edx 239 subl $64,%ecx 240 jnz 2b 241 ret 242 .align 4 243/ 244/ do 16 byte chunks 245/ 2463: 247 cmpl $16,%ecx 248 jb 4f 249 movl %eax,(%edx) 250 movl %eax,4(%edx) 251 movl %eax,8(%edx) 252 movl %eax,12(%edx) 253 addl $16,%edx 254 subl $16,%ecx 255 jnz 3b 256 ret 257 .align 4 258/ 259/ do 4 byte chunks 260/ 
2614: cmpl $4,%ecx 262 jb 5f 263 movl %eax,(%edx) 264 addl $4,%edx 265 subl $4,%ecx 266 jnz 4b 267 ret 268/ 269/ do 1 byte chunks -- this appears to be faster than a loop 270/ 271 .align 4 272jtab: .long do0 273 .long do1 274 .long do2 275 .long do3 276 277 .align 4 2785: jmp jtab(,%ecx,4) 279 280 .align 2 281do3: movb $0,(%edx) 282 incl %edx 283 movw $0,(%edx) 284 ret 285 .align 2 286do2: movw $0,(%edx) 287 ret 288 .align 2 289do1: movb $0,(%edx) 290do0: ret 291 292#endif 293 294/* fillw(pat, base, cnt) */ 295ENTRY(fillw) 296 pushl %edi 297 movl 8(%esp),%eax 298 movl 12(%esp),%edi 299 movl 16(%esp),%ecx 300 cld 301 rep 302 stosw 303 popl %edi 304 ret 305 306/* filli(pat, base, cnt) */ 307ENTRY(filli) 308 pushl %edi 309 movl 8(%esp),%eax 310 movl 12(%esp),%edi 311 movl 16(%esp),%ecx 312 cld 313 rep 314 stosl 315 popl %edi 316 ret 317 318ENTRY(bcopyb) 319bcopyb: 320 pushl %esi 321 pushl %edi 322 movl 12(%esp),%esi 323 movl 16(%esp),%edi 324 movl 20(%esp),%ecx 325 cmpl %esi,%edi /* potentially overlapping? */ 326 jnb 1f 327 cld /* nope, copy forwards */ 328 rep 329 movsb 330 popl %edi 331 popl %esi 332 ret 333 334 ALIGN_TEXT 3351: 336 addl %ecx,%edi /* copy backwards. */ 337 addl %ecx,%esi 338 std 339 decl %edi 340 decl %esi 341 rep 342 movsb 343 popl %edi 344 popl %esi 345 cld 346 ret 347 348ENTRY(bcopyw) 349bcopyw: 350 pushl %esi 351 pushl %edi 352 movl 12(%esp),%esi 353 movl 16(%esp),%edi 354 movl 20(%esp),%ecx 355 cmpl %esi,%edi /* potentially overlapping? */ 356 jnb 1f 357 cld /* nope, copy forwards */ 358 shrl $1,%ecx /* copy by 16-bit words */ 359 rep 360 movsw 361 adc %ecx,%ecx /* any bytes left? */ 362 rep 363 movsb 364 popl %edi 365 popl %esi 366 ret 367 368 ALIGN_TEXT 3691: 370 addl %ecx,%edi /* copy backwards */ 371 addl %ecx,%esi 372 std 373 andl $1,%ecx /* any fractional bytes? 
*/ 374 decl %edi 375 decl %esi 376 rep 377 movsb 378 movl 20(%esp),%ecx /* copy remainder by 16-bit words */ 379 shrl $1,%ecx 380 decl %esi 381 decl %edi 382 rep 383 movsw 384 popl %edi 385 popl %esi 386 cld 387 ret 388 389ENTRY(bcopyx) 390 movl 16(%esp),%eax 391 cmpl $2,%eax 392 je bcopyw /* not _bcopyw, to avoid multiple mcounts */ 393 cmpl $4,%eax 394 je bcopy /* XXX the shared ret's break mexitcount */ 395 jmp bcopyb 396 397/* 398 * (ov)bcopy(src, dst, cnt) 399 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800 400 */ 401ALTENTRY(ovbcopy) 402ENTRY(bcopy) 403bcopy: 404 pushl %esi 405 pushl %edi 406 movl 12(%esp),%esi 407 movl 16(%esp),%edi 408 movl 20(%esp),%ecx 409 cmpl %esi,%edi /* potentially overlapping? */ 410 jnb 1f 411 cld /* nope, copy forwards */ 412 shrl $2,%ecx /* copy by 32-bit words */ 413 rep 414 movsl 415 movl 20(%esp),%ecx 416 andl $3,%ecx /* any bytes left? */ 417 rep 418 movsb 419 popl %edi 420 popl %esi 421 ret 422 423 ALIGN_TEXT 4241: 425 addl %ecx,%edi /* copy backwards */ 426 addl %ecx,%esi 427 std 428 andl $3,%ecx /* any fractional bytes? */ 429 decl %edi 430 decl %esi 431 rep 432 movsb 433 movl 20(%esp),%ecx /* copy remainder by 32-bit words */ 434 shrl $2,%ecx 435 subl $3,%esi 436 subl $3,%edi 437 rep 438 movsl 439 popl %edi 440 popl %esi 441 cld 442 ret 443 444ALTENTRY(ntohl) 445ENTRY(htonl) 446 movl 4(%esp),%eax 447#ifdef i486 448/* XXX */ 449/* Since Gas 1.38 does not grok bswap this has been coded as the 450 * equivalent bytes. 
This can be changed back to bswap when we 451 * upgrade to a newer version of Gas 452 */ 453 /* bswap %eax */ 454 .byte 0x0f 455 .byte 0xc8 456#else 457 xchgb %al,%ah 458 roll $16,%eax 459 xchgb %al,%ah 460#endif 461 ret 462 463ALTENTRY(ntohs) 464ENTRY(htons) 465 movzwl 4(%esp),%eax 466 xchgb %al,%ah 467 ret 468 469/*****************************************************************************/ 470/* copyout and fubyte family */ 471/*****************************************************************************/ 472/* 473 * Access user memory from inside the kernel. These routines and possibly 474 * the math- and DOS emulators should be the only places that do this. 475 * 476 * We have to access the memory with user's permissions, so use a segment 477 * selector with RPL 3. For writes to user space we have to additionally 478 * check the PTE for write permission, because the 386 does not check 479 * write permissions when we are executing with EPL 0. The 486 does check 480 * this if the WP bit is set in CR0, so we can use a simpler version here. 481 * 482 * These routines set curpcb->onfault for the time they execute. When a 483 * protection violation occurs inside the functions, the trap handler 484 * returns to *curpcb->onfault instead of the function. 485 */ 486 487 488ENTRY(copyout) /* copyout(from_kernel, to_user, len) */ 489 movl _curpcb,%eax 490 movl $copyout_fault,PCB_ONFAULT(%eax) 491 pushl %esi 492 pushl %edi 493 pushl %ebx 494 movl 16(%esp),%esi 495 movl 20(%esp),%edi 496 movl 24(%esp),%ebx 497 orl %ebx,%ebx /* anything to do? */ 498 jz done_copyout 499 500 /* 501 * Check explicitly for non-user addresses. If 486 write protection 502 * is being used, this check is essential because we are in kernel 503 * mode so the h/w does not provide any protection against writing 504 * kernel addresses. 505 * 506 * Otherwise, it saves having to load and restore %es to get the 507 * usual segment-based protection (the destination segment for movs 508 * is always %es). 
The other explicit checks for user-writablility 509 * are not quite sufficient. They fail for the user area because 510 * we mapped the user area read/write to avoid having an #ifdef in 511 * vm_machdep.c. They fail for user PTEs and/or PTDs! (107 512 * addresses including 0xff800000 and 0xfc000000). I'm not sure if 513 * this can be fixed. Marking the PTEs supervisor mode and the 514 * PDE's user mode would almost work, but there may be a problem 515 * with the self-referential PDE. 516 */ 517 movl %edi,%eax 518 addl %ebx,%eax 519 jc copyout_fault 520/* 521 * XXX STOP USING VM_MAXUSER_ADDRESS. 522 * It is an end address, not a max, so every time it is used correctly it 523 * looks like there is an off by one error, and of course it caused an off 524 * by one error in several places. 525 */ 526 cmpl $VM_MAXUSER_ADDRESS,%eax 527 ja copyout_fault 528 529#if defined(I386_CPU) 530 531#if defined(I486_CPU) || defined(I586_CPU) 532 cmpl $CPUCLASS_386,_cpu_class 533 jne 3f 534#endif 535/* 536 * We have to check each PTE for user write permission. 537 * The checking may cause a page fault, so it is important to set 538 * up everything for return via copyout_fault before here. 
539 */ 540 /* compute number of pages */ 541 movl %edi,%ecx 542 andl $NBPG-1,%ecx 543 addl %ebx,%ecx 544 decl %ecx 545 shrl $IDXSHIFT+2,%ecx 546 incl %ecx 547 548 /* compute PTE offset for start address */ 549 movl %edi,%edx 550 shrl $IDXSHIFT,%edx 551 andb $0xfc,%dl 552 5531: /* check PTE for each page */ 554 movb _PTmap(%edx),%al 555 andb $0x07,%al /* Pages must be VALID + USERACC + WRITABLE */ 556 cmpb $0x07,%al 557 je 2f 558 559 /* simulate a trap */ 560 pushl %edx 561 pushl %ecx 562 shll $IDXSHIFT,%edx 563 pushl %edx 564 call _trapwrite /* trapwrite(addr) */ 565 popl %edx 566 popl %ecx 567 popl %edx 568 569 orl %eax,%eax /* if not ok, return EFAULT */ 570 jnz copyout_fault 571 5722: 573 addl $4,%edx 574 decl %ecx 575 jnz 1b /* check next page */ 576#endif /* I386_CPU */ 577 578 /* bcopy(%esi, %edi, %ebx) */ 5793: 580 cld 581 movl %ebx,%ecx 582 shrl $2,%ecx 583 rep 584 movsl 585 movb %bl,%cl 586 andb $3,%cl 587 rep 588 movsb 589 590done_copyout: 591 popl %ebx 592 popl %edi 593 popl %esi 594 xorl %eax,%eax 595 movl _curpcb,%edx 596 movl %eax,PCB_ONFAULT(%edx) 597 ret 598 599 ALIGN_TEXT 600copyout_fault: 601 popl %ebx 602 popl %edi 603 popl %esi 604 movl _curpcb,%edx 605 movl $0,PCB_ONFAULT(%edx) 606 movl $EFAULT,%eax 607 ret 608 609/* copyin(from_user, to_kernel, len) */ 610ENTRY(copyin) 611 movl _curpcb,%eax 612 movl $copyin_fault,PCB_ONFAULT(%eax) 613 pushl %esi 614 pushl %edi 615 movl 12(%esp),%esi /* caddr_t from */ 616 movl 16(%esp),%edi /* caddr_t to */ 617 movl 20(%esp),%ecx /* size_t len */ 618 619 /* 620 * make sure address is valid 621 */ 622 movl %esi,%edx 623 addl %ecx,%edx 624 jc copyin_fault 625 cmpl $VM_MAXUSER_ADDRESS,%edx 626 ja copyin_fault 627 628 movb %cl,%al 629 shrl $2,%ecx /* copy longword-wise */ 630 cld 631 rep 632 movsl 633 movb %al,%cl 634 andb $3,%cl /* copy remaining bytes */ 635 rep 636 movsb 637 638 popl %edi 639 popl %esi 640 xorl %eax,%eax 641 movl _curpcb,%edx 642 movl %eax,PCB_ONFAULT(%edx) 643 ret 644 645 ALIGN_TEXT 
646copyin_fault: 647 popl %edi 648 popl %esi 649 movl _curpcb,%edx 650 movl $0,PCB_ONFAULT(%edx) 651 movl $EFAULT,%eax 652 ret 653 654/* 655 * fu{byte,sword,word} : fetch a byte (sword, word) from user memory 656 */ 657ALTENTRY(fuiword) 658ENTRY(fuword) 659 movl _curpcb,%ecx 660 movl $fusufault,PCB_ONFAULT(%ecx) 661 movl 4(%esp),%edx /* from */ 662 663 cmpl $VM_MAXUSER_ADDRESS-4,%edx /* verify address is valid */ 664 ja fusufault 665 666 movl (%edx),%eax 667 movl $0,PCB_ONFAULT(%ecx) 668 ret 669 670/* 671 * These two routines are called from the profiling code, potentially 672 * at interrupt time. If they fail, that's okay, good things will 673 * happen later. Fail all the time for now - until the trap code is 674 * able to deal with this. 675 */ 676ALTENTRY(suswintr) 677ENTRY(fuswintr) 678 movl $-1,%eax 679 ret 680 681ENTRY(fusword) 682 movl _curpcb,%ecx 683 movl $fusufault,PCB_ONFAULT(%ecx) 684 movl 4(%esp),%edx 685 686 cmpl $VM_MAXUSER_ADDRESS-2,%edx 687 ja fusufault 688 689 movzwl (%edx),%eax 690 movl $0,PCB_ONFAULT(%ecx) 691 ret 692 693ALTENTRY(fuibyte) 694ENTRY(fubyte) 695 movl _curpcb,%ecx 696 movl $fusufault,PCB_ONFAULT(%ecx) 697 movl 4(%esp),%edx 698 699 cmpl $VM_MAXUSER_ADDRESS-1,%eax 700 ja fusufault 701 702 movzbl (%edx),%eax 703 movl $0,PCB_ONFAULT(%ecx) 704 ret 705 706 ALIGN_TEXT 707fusufault: 708 movl _curpcb,%ecx 709 xorl %eax,%eax 710 movl %eax,PCB_ONFAULT(%ecx) 711 decl %eax 712 ret 713 714/* 715 * su{byte,sword,word}: write a byte (word, longword) to user memory 716 */ 717ALTENTRY(suiword) 718ENTRY(suword) 719 movl _curpcb,%ecx 720 movl $fusufault,PCB_ONFAULT(%ecx) 721 movl 4(%esp),%edx 722 723#if defined(I386_CPU) 724 725#if defined(I486_CPU) || defined(I586_CPU) 726 cmpl $CPUCLASS_386,_cpu_class 727 jne 2f /* we only have to set the right segment selector */ 728#endif /* I486_CPU || I586_CPU */ 729 730 /* XXX - page boundary crossing is still not handled */ 731 movl %edx,%eax 732 shrl $IDXSHIFT,%edx 733 andb $0xfc,%dl 734 movb _PTmap(%edx),%dl 
735 andb $0x7,%dl /* must be VALID + USERACC + WRITE */ 736 cmpb $0x7,%dl 737 je 1f 738 739 /* simulate a trap */ 740 pushl %eax 741 call _trapwrite 742 popl %edx /* remove junk parameter from stack */ 743 movl _curpcb,%ecx /* restore trashed register */ 744 orl %eax,%eax 745 jnz fusufault 7461: 747 movl 4(%esp),%edx 748#endif 749 7502: 751 cmpl $VM_MAXUSER_ADDRESS-4,%edx /* verify address validity */ 752 ja fusufault 753 754 movl 8(%esp),%eax 755 movl %eax,(%edx) 756 xorl %eax,%eax 757 movl %eax,PCB_ONFAULT(%ecx) 758 ret 759 760ENTRY(susword) 761 movl _curpcb,%ecx 762 movl $fusufault,PCB_ONFAULT(%ecx) 763 movl 4(%esp),%edx 764 765#if defined(I386_CPU) 766 767#if defined(I486_CPU) || defined(I586_CPU) 768 cmpl $CPUCLASS_386,_cpu_class 769 jne 2f 770#endif /* I486_CPU || I586_CPU */ 771 772 /* XXX - page boundary crossing is still not handled */ 773 movl %edx,%eax 774 shrl $IDXSHIFT,%edx 775 andb $0xfc,%dl 776 movb _PTmap(%edx),%dl 777 andb $0x7,%dl /* must be VALID + USERACC + WRITE */ 778 cmpb $0x7,%dl 779 je 1f 780 781 /* simulate a trap */ 782 pushl %eax 783 call _trapwrite 784 popl %edx /* remove junk parameter from stack */ 785 movl _curpcb,%ecx /* restore trashed register */ 786 orl %eax,%eax 787 jnz fusufault 7881: 789 movl 4(%esp),%edx 790#endif 791 7922: 793 cmpl $VM_MAXUSER_ADDRESS-2,%edx /* verify address validity */ 794 ja fusufault 795 796 movw 8(%esp),%ax 797 movw %ax,(%edx) 798 xorl %eax,%eax 799 movl %eax,PCB_ONFAULT(%ecx) 800 ret 801 802ALTENTRY(suibyte) 803ENTRY(subyte) 804 movl _curpcb,%ecx 805 movl $fusufault,PCB_ONFAULT(%ecx) 806 movl 4(%esp),%edx 807 808#if defined(I386_CPU) 809 810#if defined(I486_CPU) || defined(I586_CPU) 811 cmpl $CPUCLASS_386,_cpu_class 812 jne 2f 813#endif /* I486_CPU || I586_CPU */ 814 815 movl %edx,%eax 816 shrl $IDXSHIFT,%edx 817 andb $0xfc,%dl 818 movb _PTmap(%edx),%dl 819 andb $0x7,%dl /* must be VALID + USERACC + WRITE */ 820 cmpb $0x7,%dl 821 je 1f 822 823 /* simulate a trap */ 824 pushl %eax 825 call _trapwrite 
826 popl %edx /* remove junk parameter from stack */ 827 movl _curpcb,%ecx /* restore trashed register */ 828 orl %eax,%eax 829 jnz fusufault 8301: 831 movl 4(%esp),%edx 832#endif 833 8342: 835 cmpl $VM_MAXUSER_ADDRESS-1,%edx /* verify address validity */ 836 ja fusufault 837 838 movb 8(%esp),%al 839 movb %al,(%edx) 840 xorl %eax,%eax 841 movl %eax,PCB_ONFAULT(%ecx) 842 ret 843 844/* 845 * copyoutstr(from, to, maxlen, int *lencopied) 846 * copy a string from from to to, stop when a 0 character is reached. 847 * return ENAMETOOLONG if string is longer than maxlen, and 848 * EFAULT on protection violations. If lencopied is non-zero, 849 * return the actual length in *lencopied. 850 */ 851ENTRY(copyoutstr) 852 pushl %esi 853 pushl %edi 854 movl _curpcb,%ecx 855 movl $cpystrflt,PCB_ONFAULT(%ecx) 856 857 movl 12(%esp),%esi /* %esi = from */ 858 movl 16(%esp),%edi /* %edi = to */ 859 movl 20(%esp),%edx /* %edx = maxlen */ 860 861#if defined(I386_CPU) 862 863#if defined(I486_CPU) || defined(I586_CPU) 864 cmpl $CPUCLASS_386,_cpu_class 865 jne 5f 866#endif /* I486_CPU || I586_CPU */ 867 8681: 869 /* 870 * It suffices to check that the first byte is in user space, because 871 * we look at a page at a time and the end address is on a page 872 * boundary. 
873 */ 874 cmpl $VM_MAXUSER_ADDRESS-1,%edi 875 ja cpystrflt 876 877 movl %edi,%eax 878 shrl $IDXSHIFT,%eax 879 andb $0xfc,%al 880 movb _PTmap(%eax),%al 881 andb $7,%al 882 cmpb $7,%al 883 je 2f 884 885 /* simulate trap */ 886 pushl %edx 887 pushl %edi 888 call _trapwrite 889 popl %edi 890 popl %edx 891 orl %eax,%eax 892 jnz cpystrflt 893 8942: /* copy up to end of this page */ 895 movl %edi,%eax 896 andl $NBPG-1,%eax 897 movl $NBPG,%ecx 898 subl %eax,%ecx /* ecx = NBPG - (src % NBPG) */ 899 cmpl %ecx,%edx 900 jae 3f 901 movl %edx,%ecx /* ecx = min(ecx, edx) */ 9023: 903 orl %ecx,%ecx 904 jz 4f 905 decl %ecx 906 decl %edx 907 lodsb 908 stosb 909 orb %al,%al 910 jnz 3b 911 912 /* Success -- 0 byte reached */ 913 decl %edx 914 xorl %eax,%eax 915 jmp 6f 916 9174: /* next page */ 918 orl %edx,%edx 919 jnz 1b 920 921 /* edx is zero -- return ENAMETOOLONG */ 922 movl $ENAMETOOLONG,%eax 923 jmp cpystrflt_x 924#endif /* I386_CPU */ 925 926#if defined(I486_CPU) || defined(I586_CPU) 9275: 928 incl %edx 9291: 930 decl %edx 931 jz 2f 932 /* 933 * XXX - would be faster to rewrite this function to use 934 * strlen() and copyout(). 935 */ 936 cmpl $VM_MAXUSER_ADDRESS-1,%edi 937 ja cpystrflt 938 939 lodsb 940 stosb 941 orb %al,%al 942 jnz 1b 943 944 /* Success -- 0 byte reached */ 945 decl %edx 946 xorl %eax,%eax 947 jmp cpystrflt_x 9482: 949 /* edx is zero -- return ENAMETOOLONG */ 950 movl $ENAMETOOLONG,%eax 951 jmp cpystrflt_x 952 953#endif /* I486_CPU || I586_CPU */ 954 955cpystrflt: 956 movl $EFAULT,%eax 957cpystrflt_x: 958 /* set *lencopied and return %eax */ 959 movl _curpcb,%ecx 960 movl $0,PCB_ONFAULT(%ecx) 961 movl 20(%esp),%ecx 962 subl %edx,%ecx 963 movl 24(%esp),%edx 964 orl %edx,%edx 965 jz 1f 966 movl %ecx,(%edx) 9671: 968 popl %edi 969 popl %esi 970 ret 971 972 973/* 974 * copyinstr(from, to, maxlen, int *lencopied) 975 * copy a string from from to to, stop when a 0 character is reached. 
976 * return ENAMETOOLONG if string is longer than maxlen, and 977 * EFAULT on protection violations. If lencopied is non-zero, 978 * return the actual length in *lencopied. 979 */ 980ENTRY(copyinstr) 981 pushl %esi 982 pushl %edi 983 movl _curpcb,%ecx 984 movl $copyinstr_fault,PCB_ONFAULT(%ecx) 985 986 movl 12(%esp),%esi /* %esi = from */ 987 movl 16(%esp),%edi /* %edi = to */ 988 movl 20(%esp),%edx /* %edx = maxlen */ 989 pushl %gs 990 movl __udatasel,%eax 991 movl %ax,%gs 992 incl %edx 9931: 994 decl %edx 995 jz 2f 996 gs 997 lodsb 998 stosb 999 orb %al,%al 1000 jnz 1b 1001 1002 /* Success -- 0 byte reached */ 1003 decl %edx 1004 xorl %eax,%eax 1005 jmp 3f 10062: 1007 /* edx is zero -- return ENAMETOOLONG */ 1008 movl $ENAMETOOLONG,%eax 1009 jmp 3f 1010 1011copyinstr_fault: 1012 movl $EFAULT,%eax 10133: 1014 /* set *lencopied and return %eax */ 1015 movl _curpcb,%ecx 1016 movl $0,PCB_ONFAULT(%ecx) 1017 movl 24(%esp),%ecx 1018 subl %edx,%ecx 1019 movl 28(%esp),%edx 1020 orl %edx,%edx 1021 jz 4f 1022 movl %ecx,(%edx) 10234: 1024 popl %gs 1025 popl %edi 1026 popl %esi 1027 ret 1028 1029 1030/* 1031 * copystr(from, to, maxlen, int *lencopied) 1032 */ 1033ENTRY(copystr) 1034 pushl %esi 1035 pushl %edi 1036 1037 movl 12(%esp),%esi /* %esi = from */ 1038 movl 16(%esp),%edi /* %edi = to */ 1039 movl 20(%esp),%edx /* %edx = maxlen */ 1040 incl %edx 1041 10421: 1043 decl %edx 1044 jz 4f 1045 lodsb 1046 stosb 1047 orb %al,%al 1048 jnz 1b 1049 1050 /* Success -- 0 byte reached */ 1051 decl %edx 1052 xorl %eax,%eax 1053 jmp 6f 10544: 1055 /* edx is zero -- return ENAMETOOLONG */ 1056 movl $ENAMETOOLONG,%eax 1057 10586: 1059 /* set *lencopied and return %eax */ 1060 movl 20(%esp),%ecx 1061 subl %edx,%ecx 1062 movl 24(%esp),%edx 1063 orl %edx,%edx 1064 jz 7f 1065 movl %ecx,(%edx) 10667: 1067 popl %edi 1068 popl %esi 1069 ret 1070 1071/* 1072 * Handling of special 386 registers and descriptor tables etc 1073 */ 1074/* void lgdt(struct region_descriptor *rdp); */ 1075ENTRY(lgdt) 
1076 /* reload the descriptor table */ 1077 movl 4(%esp),%eax 1078 lgdt (%eax) 1079 1080 /* flush the prefetch q */ 1081 jmp 1f 1082 nop 10831: 1084 /* reload "stale" selectors */ 1085 movl $KDSEL,%eax 1086 movl %ax,%ds 1087 movl %ax,%es 1088 movl %ax,%ss 1089 1090 /* reload code selector by turning return into intersegmental return */ 1091 movl (%esp),%eax 1092 pushl %eax 1093# movl $KCSEL,4(%esp) 1094 movl $8,4(%esp) 1095 lret 1096 1097/* 1098 * void lidt(struct region_descriptor *rdp); 1099 */ 1100ENTRY(lidt) 1101 movl 4(%esp),%eax 1102 lidt (%eax) 1103 ret 1104 1105/* 1106 * void lldt(u_short sel) 1107 */ 1108ENTRY(lldt) 1109 lldt 4(%esp) 1110 ret 1111 1112/* 1113 * void ltr(u_short sel) 1114 */ 1115ENTRY(ltr) 1116 ltr 4(%esp) 1117 ret 1118 1119/* ssdtosd(*ssdp,*sdp) */ 1120ENTRY(ssdtosd) 1121 pushl %ebx 1122 movl 8(%esp),%ecx 1123 movl 8(%ecx),%ebx 1124 shll $16,%ebx 1125 movl (%ecx),%edx 1126 roll $16,%edx 1127 movb %dh,%bl 1128 movb %dl,%bh 1129 rorl $8,%ebx 1130 movl 4(%ecx),%eax 1131 movw %ax,%dx 1132 andl $0xf0000,%eax 1133 orl %eax,%ebx 1134 movl 12(%esp),%ecx 1135 movl %edx,(%ecx) 1136 movl %ebx,4(%ecx) 1137 popl %ebx 1138 ret 1139 1140/* load_cr0(cr0) */ 1141ENTRY(load_cr0) 1142 movl 4(%esp),%eax 1143 movl %eax,%cr0 1144 ret 1145 1146/* rcr0() */ 1147ENTRY(rcr0) 1148 movl %cr0,%eax 1149 ret 1150 1151/* rcr3() */ 1152ENTRY(rcr3) 1153 movl %cr3,%eax 1154 ret 1155 1156/* void load_cr3(caddr_t cr3) */ 1157ENTRY(load_cr3) 1158 movl 4(%esp),%eax 1159 orl $I386_CR3PAT,%eax 1160 movl %eax,%cr3 1161 ret 1162 1163 1164/*****************************************************************************/ 1165/* setjump, longjump */ 1166/*****************************************************************************/ 1167 1168ENTRY(setjmp) 1169 movl 4(%esp),%eax 1170 movl %ebx,(%eax) /* save ebx */ 1171 movl %esp,4(%eax) /* save esp */ 1172 movl %ebp,8(%eax) /* save ebp */ 1173 movl %esi,12(%eax) /* save esi */ 1174 movl %edi,16(%eax) /* save edi */ 1175 movl (%esp),%edx 
/* get rta */ 1176 movl %edx,20(%eax) /* save eip */ 1177 xorl %eax,%eax /* return(0); */ 1178 ret 1179 1180ENTRY(longjmp) 1181 movl 4(%esp),%eax 1182 movl (%eax),%ebx /* restore ebx */ 1183 movl 4(%eax),%esp /* restore esp */ 1184 movl 8(%eax),%ebp /* restore ebp */ 1185 movl 12(%eax),%esi /* restore esi */ 1186 movl 16(%eax),%edi /* restore edi */ 1187 movl 20(%eax),%edx /* get rta */ 1188 movl %edx,(%esp) /* put in return frame */ 1189 xorl %eax,%eax /* return(1); */ 1190 incl %eax 1191 ret 1192