/macosx-10.5.8/xnu-1228.15.4/osfmk/i386/commpage/

Lines Matching refs:edx

57 movl %edi,%edx
58 subl %esi,%edx // (dest - source)
59 cmpl %ecx,%edx // must move in reverse if (dest - source) < length
82 movl %edi,%edx
83 subl %esi,%edx // (dest - source)
84 cmpl %ecx,%edx // must move in reverse if (dest - source) < length
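Lines 57-59 (repeated at 82-84 for a second entry point) are the classic memmove direction test: compute (dest - source) as an unsigned value and compare it against the length; if the difference is smaller than the length, a forward copy would overwrite source bytes before they are read, so the move must run in reverse. A minimal C sketch of that test, illustrative only and not the commpage code:

    #include <stddef.h>
    #include <stdint.h>

    /* The unsigned difference (dest - source) wraps around when dest < source,
     * so a single compare against len covers both non-overlapping cases. */
    static int must_copy_in_reverse(const void *src, void *dst, size_t len)
    {
        uintptr_t diff = (uintptr_t)dst - (uintptr_t)src;  /* (dest - source) */
        return diff < len;  /* reverse only if dest lands inside [src, src+len) */
    }
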
95 movl %ecx,%edx // copy length
106 andl $3,%edx // any leftover bytes?
113 dec %edx
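Lines 95, 106 and 113 belong to the short-operand forward path: the length is copied into %edx, `andl $3` isolates the bytes left over after the word-sized moves, and `dec %edx` drives the trailing byte loop. A hedged C equivalent (memcpy stands in for the word moves; this is a sketch, not the original code):

    #include <stddef.h>
    #include <string.h>

    /* Copy len bytes forward as 4-byte words, then the (len & 3) leftovers. */
    static void copy_short(unsigned char *dst, const unsigned char *src, size_t len)
    {
        size_t words    = len >> 2;     /* number of 4-byte moves */
        size_t leftover = len & 3;      /* andl $3,%edx : any leftover bytes? */

        while (words--) {
            memcpy(dst, src, 4);        /* stand-in for the word move */
            src += 4;
            dst += 4;
        }
        while (leftover--)              /* dec %edx driven byte loop */
            *dst++ = *src++;
    }
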
135 movl %edi,%edx // copy destination
137 negl %edx
138 andl $15,%edx // get #bytes to align destination
140 subl %edx,%ecx // decrement length
146 dec %edx
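Lines 135-146 peel off just enough leading bytes to 16-byte align the destination before the vector loop: copy the destination address, negate it, mask with 15 to get the byte count to the next boundary, subtract that from the length, and copy those bytes singly. A small C sketch of the same computation, assuming the length is already known to be large enough that the peel never exceeds it:

    #include <stddef.h>
    #include <stdint.h>

    /* (-dst) & 15 is the number of bytes to the next 16-byte boundary. */
    static void align_dest_16(unsigned char **dstp, const unsigned char **srcp, size_t *lenp)
    {
        size_t head = (size_t)(0 - (uintptr_t)*dstp) & 15;  /* negl %edx ; andl $15,%edx */
        *lenp -= head;                                       /* subl %edx,%ecx : decrement length */
        while (head--)                                       /* dec %edx byte loop */
            *(*dstp)++ = *(*srcp)++;
    }
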
156 // edx = -(length to move), a multiple of 64
161 movl %ecx,%edx // copy length
164 andl $-64,%edx // get number of bytes we will copy in inner loop
166 addl %edx,%esi // point to 1st byte not copied
167 addl %edx,%edi
168 negl %edx // now generate offset to 1st byte to be copied
216 addl %edx,%esi // restore ptrs to 1st byte of source and dest
217 addl %edx,%edi
218 negl %edx // make length positive
219 orl %edx,%ecx // restore total #bytes remaining to move
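Lines 156-168 set up the negative-index idiom used by every forward inner loop here: %edx is rounded down to a multiple of 64 (`andl $-64`), both pointers are advanced past that block, and %edx is negated so the loop can address `(%esi,%edx)` / `(%edi,%edx)` while counting up toward zero. Lines 216-219 are the inverse transform, restoring the original pointers and the total remaining length when the code leaves this path. A hedged C sketch of the idiom, with memcpy standing in for the unrolled SSE body:

    #include <stddef.h>
    #include <string.h>

    static void copy_64B_blocks(unsigned char *dst, const unsigned char *src, size_t len)
    {
        size_t block = len & ~(size_t)63;   /* andl $-64,%edx : bytes moved by the inner loop */
        src += block;                       /* addl %edx,%esi : point to 1st byte not copied */
        dst += block;                       /* addl %edx,%edi */
        ptrdiff_t idx = -(ptrdiff_t)block;  /* negl %edx : offset to 1st byte to be copied */

        while (idx != 0) {
            memcpy(dst + idx, src + idx, 64);  /* stand-in for the unrolled vector body */
            idx += 64;                         /* addl $64,%edx ; loop while nonzero */
        }
    }
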
221 movl %ecx,%edx // copy total length to move
231 cmpl $(-kFastUCode),%edx // %edx == -length, where (length < kVeryLong)
236 movdqa (%esi,%edx),%xmm0
237 movdqa 16(%esi,%edx),%xmm1
238 movdqa 32(%esi,%edx),%xmm2
239 movdqa 48(%esi,%edx),%xmm3
241 movdqa %xmm0,(%edi,%edx)
242 movdqa %xmm1,16(%edi,%edx)
243 movdqa %xmm2,32(%edi,%edx)
244 movdqa %xmm3,48(%edi,%edx)
246 addl $64,%edx
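Lines 236-246 are the fully aligned inner loop: four movdqa loads, four movdqa stores, then `addl $64,%edx`, whose flags presumably also serve as the loop-termination test. A rough SSE2-intrinsics rendering, assuming both pointers are 16-byte aligned and the count is a multiple of 64 (an illustration, not the commpage code):

    #include <emmintrin.h>   /* SSE2: _mm_load_si128 / _mm_store_si128 */
    #include <stddef.h>

    static void copy_aligned_64(unsigned char *dst, const unsigned char *src, size_t len)
    {
        ptrdiff_t idx = -(ptrdiff_t)len;    /* negative index, as in the assembly */
        src += len;
        dst += len;

        while (idx != 0) {
            __m128i x0 = _mm_load_si128((const __m128i *)(src + idx));
            __m128i x1 = _mm_load_si128((const __m128i *)(src + idx + 16));
            __m128i x2 = _mm_load_si128((const __m128i *)(src + idx + 32));
            __m128i x3 = _mm_load_si128((const __m128i *)(src + idx + 48));
            _mm_store_si128((__m128i *)(dst + idx),      x0);
            _mm_store_si128((__m128i *)(dst + idx + 16), x1);
            _mm_store_si128((__m128i *)(dst + idx + 32), x2);
            _mm_store_si128((__m128i *)(dst + idx + 48), x3);
            idx += 64;                      /* addl $64,%edx */
        }
    }
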
255 movdqa -1(%esi,%edx),%xmm0 // prime the loop by loading 1st quadword
257 movdqa 15(%esi,%edx),%xmm1
258 movdqa 31(%esi,%edx),%xmm2
259 movdqa 47(%esi,%edx),%xmm3
260 movdqa 63(%esi,%edx),%xmm4
270 movdqa %xmm1,(%edi,%edx)
271 movdqa %xmm2,16(%edi,%edx)
272 movdqa %xmm3,32(%edi,%edx)
273 movdqa %xmm4,48(%edi,%edx)
275 addl $64,%edx
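Lines 255-275 are the first of the misaligned-source cases: the loads at offsets -1, 15, 31, 47 and 63 are all 16-byte aligned movdqa loads that straddle the bytes actually wanted, so the stores can stay aligned. The shift/merge instructions that stitch the quadwords together do not reference %edx and therefore do not appear in this listing. A hedged intrinsics sketch of the idea for a source that is 1 byte past an aligned boundary, using SSSE3 palignr as the merge (the original may combine the quadwords differently):

    #include <tmmintrin.h>   /* SSSE3: _mm_alignr_epi8 (palignr) */
    #include <stddef.h>

    /* Assumes dst is 16-byte aligned, (src - 1) is 16-byte aligned, and len is a
     * multiple of 16. Note: the last iteration's load reads up to 15 bytes past
     * src + len; the real code handles the tail separately. */
    static void copy_src_off_by_1(unsigned char *dst, const unsigned char *src, size_t len)
    {
        const __m128i *s = (const __m128i *)(src - 1);   /* as in "movdqa -1(%esi,%edx)" */
        __m128i prev = _mm_load_si128(s);                /* prime the loop */

        for (size_t i = 0; i < len; i += 16) {
            __m128i next = _mm_load_si128(++s);             /* next aligned 16 bytes */
            __m128i out  = _mm_alignr_epi8(next, prev, 1);  /* bytes src[i..i+15] */
            _mm_store_si128((__m128i *)(dst + i), out);
            prev = next;
        }
    }
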
284 movdqa -2(%esi,%edx),%xmm0 // prime the loop by loading 1st source dq
286 movdqa 14(%esi,%edx),%xmm1
287 movdqa 30(%esi,%edx),%xmm2
288 movdqa 46(%esi,%edx),%xmm3
289 movdqa 62(%esi,%edx),%xmm4
299 movdqa %xmm1,(%edi,%edx)
300 movdqa %xmm2,16(%edi,%edx)
301 movdqa %xmm3,32(%edi,%edx)
302 movdqa %xmm4,48(%edi,%edx)
304 addl $64,%edx
313 movdqa -3(%esi,%edx),%xmm0 // prime the loop by loading 1st source dq
315 movdqa 13(%esi,%edx),%xmm1
316 movdqa 29(%esi,%edx),%xmm2
317 movdqa 45(%esi,%edx),%xmm3
318 movdqa 61(%esi,%edx),%xmm4
328 movdqa %xmm1,(%edi,%edx)
329 movdqa %xmm2,16(%edi,%edx)
330 movdqa %xmm3,32(%edi,%edx)
331 movdqa %xmm4,48(%edi,%edx)
333 addl $64,%edx
343 movaps -4(%esi,%edx),%xmm0 // 4-byte aligned: prime the loop
347 movaps 12(%esi,%edx),%xmm1
348 movaps 28(%esi,%edx),%xmm2
351 movaps 44(%esi,%edx),%xmm3
354 movaps 60(%esi,%edx),%xmm4
358 movaps %xmm0,(%edi,%edx)
361 movaps %xmm1,16(%edi,%edx)
362 movaps %xmm2,32(%edi,%edx)
364 movaps %xmm3,48(%edi,%edx)
366 addl $64,%edx
375 movdqa -5(%esi,%edx),%xmm0 // prime the loop by loading 1st source dq
377 movdqa 11(%esi,%edx),%xmm1
378 movdqa 27(%esi,%edx),%xmm2
379 movdqa 43(%esi,%edx),%xmm3
380 movdqa 59(%esi,%edx),%xmm4
390 movdqa %xmm1,(%edi,%edx)
391 movdqa %xmm2,16(%edi,%edx)
392 movdqa %xmm3,32(%edi,%edx)
393 movdqa %xmm4,48(%edi,%edx)
395 addl $64,%edx
404 movdqa -6(%esi,%edx),%xmm0 // prime the loop by loading 1st source dq
406 movdqa 10(%esi,%edx),%xmm1
407 movdqa 26(%esi,%edx),%xmm2
408 movdqa 42(%esi,%edx),%xmm3
409 movdqa 58(%esi,%edx),%xmm4
419 movdqa %xmm1,(%edi,%edx)
420 movdqa %xmm2,16(%edi,%edx)
421 movdqa %xmm3,32(%edi,%edx)
422 movdqa %xmm4,48(%edi,%edx)
424 addl $64,%edx
433 movdqa -7(%esi,%edx),%xmm0 // prime the loop by loading 1st source dq
435 movdqa 9(%esi,%edx),%xmm1
436 movdqa 25(%esi,%edx),%xmm2
437 movdqa 41(%esi,%edx),%xmm3
438 movdqa 57(%esi,%edx),%xmm4
448 movdqa %xmm1,(%edi,%edx)
449 movdqa %xmm2,16(%edi,%edx)
450 movdqa %xmm3,32(%edi,%edx)
451 movdqa %xmm4,48(%edi,%edx)
453 addl $64,%edx
463 cmpl $(-kFastUCode),%edx // %edx == -length, where (length < kVeryLong)
465 movapd -8(%esi,%edx),%xmm0 // 8-byte aligned: prime the loop
469 movapd 8(%esi,%edx),%xmm1
470 movapd 24(%esi,%edx),%xmm2
472 movapd 40(%esi,%edx),%xmm3
474 movapd 56(%esi,%edx),%xmm4
477 movapd %xmm0,(%edi,%edx)
479 movapd %xmm1,16(%edi,%edx)
480 movapd %xmm2,32(%edi,%edx)
482 movapd %xmm3,48(%edi,%edx)
484 addl $64,%edx
493 movdqa -9(%esi,%edx),%xmm0 // prime the loop by loading 1st source dq
495 movdqa 7(%esi,%edx),%xmm1
496 movdqa 23(%esi,%edx),%xmm2
497 movdqa 39(%esi,%edx),%xmm3
498 movdqa 55(%esi,%edx),%xmm4
508 movdqa %xmm1,(%edi,%edx)
509 movdqa %xmm2,16(%edi,%edx)
510 movdqa %xmm3,32(%edi,%edx)
511 movdqa %xmm4,48(%edi,%edx)
513 addl $64,%edx
522 movdqa -10(%esi,%edx),%xmm0 // prime the loop by loading 1st source dq
524 movdqa 6(%esi,%edx),%xmm1
525 movdqa 22(%esi,%edx),%xmm2
526 movdqa 38(%esi,%edx),%xmm3
527 movdqa 54(%esi,%edx),%xmm4
537 movdqa %xmm1,(%edi,%edx)
538 movdqa %xmm2,16(%edi,%edx)
539 movdqa %xmm3,32(%edi,%edx)
540 movdqa %xmm4,48(%edi,%edx)
542 addl $64,%edx
551 movdqa -11(%esi,%edx),%xmm0 // prime the loop by loading 1st source dq
553 movdqa 5(%esi,%edx),%xmm1
554 movdqa 21(%esi,%edx),%xmm2
555 movdqa 37(%esi,%edx),%xmm3
556 movdqa 53(%esi,%edx),%xmm4
566 movdqa %xmm1,(%edi,%edx)
567 movdqa %xmm2,16(%edi,%edx)
568 movdqa %xmm3,32(%edi,%edx)
569 movdqa %xmm4,48(%edi,%edx)
571 addl $64,%edx
581 movss (%esi,%edx),%xmm0 // prefetch 1st four bytes of source, right justified
585 pshufd $(0x93),4(%esi,%edx),%xmm1 // load and rotate right 12 bytes (mask -- 10 01 00 11)
586 pshufd $(0x93),20(%esi,%edx),%xmm2
587 pshufd $(0x93),36(%esi,%edx),%xmm3
588 pshufd $(0x93),52(%esi,%edx),%xmm4
596 movaps %xmm1,(%edi,%edx)
597 movaps %xmm2,16(%edi,%edx)
599 movaps %xmm3,32(%edi,%edx)
600 movaps %xmm4,48(%edi,%edx)
602 addl $64,%edx
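Lines 581-602 are one of the dword-aligned cases: instead of byte-granularity merging, each aligned 16-byte block is rotated by whole dwords with `pshufd $0x93` (mask 10 01 00 11, i.e. a rotate right by 12 bytes), and the carried dword, loaded "right justified" by the movss at line 581, is merged in before each aligned store; the 4- and 8-byte cases at lines 343-366 and 465-484 appear to use analogous single- and double-precision shuffles. A small self-contained demo of what the 0x93 shuffle mask does (the merge step itself is omitted here):

    #include <emmintrin.h>   /* SSE2: _mm_shuffle_epi32 (pshufd) */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t in[4]  = { 0x00000000, 0x11111111, 0x22222222, 0x33333333 };
        uint32_t out[4];

        __m128i v = _mm_loadu_si128((const __m128i *)in);
        __m128i r = _mm_shuffle_epi32(v, 0x93);   /* pshufd $0x93 : dst dword i = src dword {3,0,1,2}[i] */
        _mm_storeu_si128((__m128i *)out, r);

        /* prints: 33333333 00000000 11111111 22222222 (block rotated right 12 bytes) */
        printf("%08x %08x %08x %08x\n", out[0], out[1], out[2], out[3]);
        return 0;
    }
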
611 movdqa -13(%esi,%edx),%xmm0 // prime the loop by loading 1st source dq
613 movdqa 3(%esi,%edx),%xmm1
614 movdqa 19(%esi,%edx),%xmm2
615 movdqa 35(%esi,%edx),%xmm3
616 movdqa 51(%esi,%edx),%xmm4
626 movdqa %xmm1,(%edi,%edx)
627 movdqa %xmm2,16(%edi,%edx)
628 movdqa %xmm3,32(%edi,%edx)
629 movdqa %xmm4,48(%edi,%edx)
631 addl $64,%edx
640 movdqa -14(%esi,%edx),%xmm0 // prime the loop by loading 1st source dq
642 movdqa 2(%esi,%edx),%xmm1
643 movdqa 18(%esi,%edx),%xmm2
644 movdqa 34(%esi,%edx),%xmm3
645 movdqa 50(%esi,%edx),%xmm4
655 movdqa %xmm1,(%edi,%edx)
656 movdqa %xmm2,16(%edi,%edx)
657 movdqa %xmm3,32(%edi,%edx)
658 movdqa %xmm4,48(%edi,%edx)
660 addl $64,%edx
669 movdqa -15(%esi,%edx),%xmm0 // prime the loop by loading 1st source dq
671 movdqa 1(%esi,%edx),%xmm1
672 movdqa 17(%esi,%edx),%xmm2
673 movdqa 33(%esi,%edx),%xmm3
674 movdqa 49(%esi,%edx),%xmm4
684 movdqa %xmm1,(%edi,%edx)
685 movdqa %xmm2,16(%edi,%edx)
686 movdqa %xmm3,32(%edi,%edx)
687 movdqa %xmm4,48(%edi,%edx)
689 addl $64,%edx
713 movl %ecx,%edx // copy length
724 andl $3,%edx // bytes?
731 dec %edx
746 movl %edi,%edx // copy destination
747 andl $15,%edx // get #bytes to align destination
749 subl %edx,%ecx // adjust length
755 dec %edx
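Lines 713-755 mirror the short-copy and alignment logic for the reverse (downward) direction. Note that line 747 masks with $15 without negating first: assuming the pointers have already been advanced to the end of the buffers (that setup does not reference %edx, so it is not shown here), dst & 15 is exactly the number of trailing bytes to peel off before the remaining destination end sits on a 16-byte boundary. A sketch under that assumption:

    #include <stddef.h>
    #include <stdint.h>

    /* dstp/srcp are assumed to point one past the end of their buffers. */
    static void align_dest_end_16(unsigned char **dstp, const unsigned char **srcp, size_t *lenp)
    {
        size_t tail = (uintptr_t)*dstp & 15;  /* andl $15,%edx : no negate in the reverse path */
        *lenp -= tail;                        /* subl %edx,%ecx : adjust length */
        while (tail--)                        /* dec %edx byte loop, moving downward */
            *--(*dstp) = *--(*srcp);
    }
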
761 movl %ecx,%edx // copy length
763 andl $-64,%edx // get number of bytes we will copy in inner loop
764 subl %edx,%esi // point to endpoint of copy
765 subl %edx,%edi
770 movdqa -16(%esi,%edx),%xmm0
771 movdqa -32(%esi,%edx),%xmm1
772 movdqa -48(%esi,%edx),%xmm2
773 movdqa -64(%esi,%edx),%xmm3
775 movdqa %xmm0,-16(%edi,%edx)
776 movdqa %xmm1,-32(%edi,%edx)
777 movdqa %xmm2,-48(%edi,%edx)
778 movdqa %xmm3,-64(%edi,%edx)
780 subl $64,%edx
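Lines 761-780 are the reverse-direction counterpart of the aligned inner loop: %edx starts at the 64-byte-multiple block size and counts down, the base pointers having been pulled down by that amount ("point to endpoint of copy"), so each iteration moves the four highest remaining quadwords. An intrinsics sketch, assuming 16-byte alignment throughout and pointers that enter pointing one past the end of the region:

    #include <emmintrin.h>
    #include <stddef.h>

    static void copy_aligned_64_reverse(unsigned char *dst, const unsigned char *src, size_t len)
    {
        size_t block = len & ~(size_t)63;   /* andl $-64,%edx */
        src -= block;                       /* subl %edx,%esi : point to endpoint of copy */
        dst -= block;                       /* subl %edx,%edi */

        for (size_t idx = block; idx != 0; idx -= 64) {   /* subl $64,%edx ; loop while nonzero */
            __m128i x0 = _mm_load_si128((const __m128i *)(src + idx - 16));
            __m128i x1 = _mm_load_si128((const __m128i *)(src + idx - 32));
            __m128i x2 = _mm_load_si128((const __m128i *)(src + idx - 48));
            __m128i x3 = _mm_load_si128((const __m128i *)(src + idx - 64));
            _mm_store_si128((__m128i *)(dst + idx - 16), x0);
            _mm_store_si128((__m128i *)(dst + idx - 32), x1);
            _mm_store_si128((__m128i *)(dst + idx - 48), x2);
            _mm_store_si128((__m128i *)(dst + idx - 64), x3);
        }
    }
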
789 movdqu -16(%esi,%edx),%xmm0
790 movdqu -32(%esi,%edx),%xmm1
791 movdqu -48(%esi,%edx),%xmm2
792 movdqu -64(%esi,%edx),%xmm3
794 movdqa %xmm0,-16(%edi,%edx)
795 movdqa %xmm1,-32(%edi,%edx)
796 movdqa %xmm2,-48(%edi,%edx)
797 movdqa %xmm3,-64(%edi,%edx)
799 subl $64,%edx
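Lines 789-799 are the same reverse loop shape for a source that is not 16-byte aligned: movdqu unaligned loads are paired with movdqa aligned stores, so only the destination alignment matters. A sketch assuming the pointers have already been lowered to the base of the block (presumably by the same kind of adjustment as at lines 764-765) and that dst is 16-byte aligned:

    #include <emmintrin.h>
    #include <stddef.h>

    /* block must be a multiple of 64; dst must be 16-byte aligned, src need not be. */
    static void copy_64_reverse_unaligned_src(unsigned char *dst, const unsigned char *src, size_t block)
    {
        for (size_t idx = block; idx != 0; idx -= 64) {          /* subl $64,%edx */
            __m128i x0 = _mm_loadu_si128((const __m128i *)(src + idx - 16));
            __m128i x1 = _mm_loadu_si128((const __m128i *)(src + idx - 32));
            __m128i x2 = _mm_loadu_si128((const __m128i *)(src + idx - 48));
            __m128i x3 = _mm_loadu_si128((const __m128i *)(src + idx - 64));
            _mm_store_si128((__m128i *)(dst + idx - 16), x0);
            _mm_store_si128((__m128i *)(dst + idx - 32), x1);
            _mm_store_si128((__m128i *)(dst + idx - 48), x2);
            _mm_store_si128((__m128i *)(dst + idx - 64), x3);
        }
    }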