Lines Matching refs:edx in /macosx-10.5.8/xnu-1228.15.4/osfmk/i386/commpage/

77 movl %edi,%edx
78 subl %esi,%edx // (dest - source)
79 cmpl %ecx,%edx // must move in reverse if (dest - source) < length
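
Lines 77-79 are the overlap test that decides between the forward and reverse copy paths: (dest - source) is compared against the length as an unsigned value, so a single branch covers both "destination starts inside the source" and "destination below the source". A minimal C sketch of that dispatch follows; the function name is illustrative, not taken from the commpage source.

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of the test at lines 77-79: if (dest - source), taken as an
       unsigned number, is smaller than the length, the destination starts
       inside the source buffer, so the bytes must be moved in reverse. */
    void *sketch_memmove(void *dst, const void *src, size_t len)
    {
        unsigned char *d = dst;
        const unsigned char *s = src;
        if ((uintptr_t)d - (uintptr_t)s < len) {    /* overlap: copy backwards */
            while (len--)
                d[len] = s[len];
        } else {                                    /* safe to copy forwards */
            for (size_t i = 0; i < len; i++)
                d[i] = s[i];
        }
        return dst;
    }
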
91 movl %ecx,%edx // copy length
102 andl $3,%edx // any leftover bytes?
109 dec %edx
131 movl %edi,%edx // copy destination
133 negl %edx
134 andl $15,%edx // get #bytes to align destination
136 subl %edx,%ecx // decrement length
142 dec %edx
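
Lines 131-142 peel off leading bytes one at a time until the destination reaches a 16-byte boundary; negating the pointer and masking with 15 yields the count directly. A C sketch of that arithmetic, with an illustrative function name:

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of the negl / andl $15 idiom at lines 133-134: the number of
       bytes needed to reach the next 16-byte boundary is (-dest) mod 16.
       For example, a destination of 0x1003 yields 13 (0x1003 + 13 = 0x1010). */
    static size_t bytes_to_align16(const void *dst)
    {
        return (size_t)(-(uintptr_t)dst & 15);
    }
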
149 movl %ecx,%edx // copy length
152 andl $-64,%edx // get number of bytes we will copy in inner loop
153 addl %edx,%esi // point to 1st byte not copied
154 addl %edx,%edi
155 negl %edx // now generate offset to 1st byte to be copied
165 addl %edx,%esi // restore ptrs to 1st byte of source and dest
166 addl %edx,%edi
169 movl %eax,%edx // original length
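
Lines 149-166 set up the unrolled copy so the loop needs no separate compare: both pointers are advanced past the region the loop will handle, and %edx becomes a negative byte offset that counts up toward zero; adding the still-negative offset back restores the original pointers (lines 165-166). A sketch of that indexing scheme, using a plain byte copy in place of the SSE body for clarity:

    #include <stddef.h>

    /* Sketch of the negative-index trick at lines 149-155 (byte copy instead
       of the 64-byte SSE body).  dst/src end up pointing one past the region
       to be copied, and idx runs from -count up to 0, so reaching zero is the
       loop's only exit condition. */
    static void copy_with_negative_index(unsigned char *dst,
                                         const unsigned char *src,
                                         size_t count)
    {
        dst += count;                        /* point to 1st byte not copied */
        src += count;
        for (ptrdiff_t idx = -(ptrdiff_t)count; idx != 0; idx++)
            dst[idx] = src[idx];
    }
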
177 movdqa (%esi,%edx),%xmm0
178 movdqa 16(%esi,%edx),%xmm1
179 movdqa 32(%esi,%edx),%xmm2
180 movdqa 48(%esi,%edx),%xmm3
182 movdqa %xmm0,(%edi,%edx)
183 movdqa %xmm1,16(%edi,%edx)
184 movdqa %xmm2,32(%edi,%edx)
185 movdqa %xmm3,48(%edi,%edx)
187 addl $64,%edx
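
Lines 177-187 are the aligned inner loop: four 16-byte movdqa loads, four movdqa stores, and the negative index stepped by 64. A rough SSE2-intrinsic equivalent, assuming both pointers are 16-byte aligned and the count is a multiple of 64 (a sketch, not the commpage implementation):

    #include <emmintrin.h>   /* SSE2: __m128i, _mm_load_si128, _mm_store_si128 */
    #include <stddef.h>

    /* Sketch of the aligned 64-byte loop at lines 177-187, using the same
       negative-index scheme as above. */
    static void copy_aligned_64(unsigned char *dst, const unsigned char *src,
                                size_t count)
    {
        dst += count;
        src += count;
        for (ptrdiff_t idx = -(ptrdiff_t)count; idx != 0; idx += 64) {
            __m128i x0 = _mm_load_si128((const __m128i *)(src + idx));
            __m128i x1 = _mm_load_si128((const __m128i *)(src + idx + 16));
            __m128i x2 = _mm_load_si128((const __m128i *)(src + idx + 32));
            __m128i x3 = _mm_load_si128((const __m128i *)(src + idx + 48));
            _mm_store_si128((__m128i *)(dst + idx),      x0);
            _mm_store_si128((__m128i *)(dst + idx + 16), x1);
            _mm_store_si128((__m128i *)(dst + idx + 32), x2);
            _mm_store_si128((__m128i *)(dst + idx + 48), x3);
        }
    }
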
199 movdqu (%esi,%edx),%xmm0 // the loads are unaligned
200 movdqu 16(%esi,%edx),%xmm1
201 movdqu 32(%esi,%edx),%xmm2
202 movdqu 48(%esi,%edx),%xmm3
204 movdqa %xmm0,(%edi,%edx) // we can use aligned stores
205 movdqa %xmm1,16(%edi,%edx)
206 movdqa %xmm2,32(%edi,%edx)
207 movdqa %xmm3,48(%edi,%edx)
209 addl $64,%edx
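
Lines 199-209 handle the case where only the destination could be aligned by the prologue: the loads use movdqu, but the stores stay aligned. The only change from the previous sketch is the load intrinsic:

    #include <emmintrin.h>
    #include <stddef.h>

    /* Sketch of the unaligned-load loop at lines 199-209: unaligned source
       loads, aligned destination stores.  Pointer setup is the same as in
       copy_aligned_64 above. */
    static void copy_dst_aligned_64(unsigned char *dst, const unsigned char *src,
                                    size_t count)
    {
        dst += count;
        src += count;
        for (ptrdiff_t idx = -(ptrdiff_t)count; idx != 0; idx += 64) {
            __m128i x0 = _mm_loadu_si128((const __m128i *)(src + idx));
            __m128i x1 = _mm_loadu_si128((const __m128i *)(src + idx + 16));
            __m128i x2 = _mm_loadu_si128((const __m128i *)(src + idx + 32));
            __m128i x3 = _mm_loadu_si128((const __m128i *)(src + idx + 48));
            _mm_store_si128((__m128i *)(dst + idx),      x0);   /* aligned stores */
            _mm_store_si128((__m128i *)(dst + idx + 16), x1);
            _mm_store_si128((__m128i *)(dst + idx + 32), x2);
            _mm_store_si128((__m128i *)(dst + idx + 48), x3);
        }
    }
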
252 movl $(kBigChunk),%edx // assume we can do a full chunk
253 cmpl %edx,%ecx // do we have a full chunk left to do?
254 cmovbl %ecx,%edx // if not, only move what we have left
255 andl $-4096,%edx // we work in page multiples
263 // edx = chunk length (multiples of pages)
293 cmpl %eax,%edx // done with this chunk?
300 addl %edx,%esi // increment ptrs by chunk length
301 addl %edx,%edi
302 subl %edx,%ecx // adjust remaining length
303 negl %edx // prepare loop index (counts up to 0)
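
Lines 252-303 split a very long operand into page-multiple chunks: take kBigChunk unless less than that remains, round the result down to a 4096-byte multiple, copy that much with the streaming loop below, then advance the pointers and shrink the remaining length. A sketch of just the chunk-size arithmetic; KBIG stands in for the source's kBigChunk constant, whose value is not shown in this listing:

    #include <stddef.h>

    /* Sketch of the arithmetic at lines 252-255.  A zero result (less than a
       page remaining) is assumed to be handled by a separate tail path. */
    static size_t next_chunk(size_t remaining, size_t KBIG)
    {
        size_t chunk = remaining < KBIG ? remaining : KBIG;  /* cmovb */
        return chunk & ~(size_t)4095;                        /* page multiples */
    }
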
310 movdqa (%esi,%edx),%xmm0
311 movdqa 16(%esi,%edx),%xmm1
312 movdqa 32(%esi,%edx),%xmm2
313 movdqa 48(%esi,%edx),%xmm3
314 movdqa 64(%esi,%edx),%xmm4
315 movdqa 80(%esi,%edx),%xmm5
316 movdqa 96(%esi,%edx),%xmm6
317 movdqa 112(%esi,%edx),%xmm7
319 movntdq %xmm0,(%edi,%edx)
320 movntdq %xmm1,16(%edi,%edx)
321 movntdq %xmm2,32(%edi,%edx)
322 movntdq %xmm3,48(%edi,%edx)
323 movntdq %xmm4,64(%edi,%edx)
324 movntdq %xmm5,80(%edi,%edx)
325 movntdq %xmm6,96(%edi,%edx)
326 movntdq %xmm7,112(%edi,%edx)
328 subl $-128,%edx // add 128 with an 8-bit immediate
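
Lines 310-328 are the inner loop for those big chunks: eight aligned loads, eight movntdq stores that bypass the caches, and the index stepped by 128 (subl $-128 is used because +128 does not fit in a sign-extended 8-bit immediate while -128 does, so the subtract encodes shorter). The same loop repeats at lines 334-352 with movdqu loads for the unaligned-source case. A hedged intrinsic sketch of one chunk, assuming 16-byte alignment and a 128-byte-multiple chunk length:

    #include <emmintrin.h>   /* SSE2: _mm_stream_si128 compiles to movntdq */
    #include <stddef.h>

    /* Sketch of the non-temporal chunk loop at lines 310-328.  The original
       explicitly unrolls eight movdqa loads followed by eight movntdq stores;
       this sketch folds that into an inner loop.  The trailing sfence is this
       sketch's addition: non-temporal stores are weakly ordered, and where the
       original routine fences is not visible among the lines matched here. */
    static void copy_chunk_nontemporal(unsigned char *dst,
                                       const unsigned char *src, size_t chunk)
    {
        dst += chunk;
        src += chunk;
        for (ptrdiff_t idx = -(ptrdiff_t)chunk; idx != 0; idx += 128) {
            for (int off = 0; off < 128; off += 16) {
                __m128i x = _mm_load_si128((const __m128i *)(src + idx + off));
                _mm_stream_si128((__m128i *)(dst + idx + off), x);
            }
        }
        _mm_sfence();
    }
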
334 movdqu (%esi,%edx),%xmm0
335 movdqu 16(%esi,%edx),%xmm1
336 movdqu 32(%esi,%edx),%xmm2
337 movdqu 48(%esi,%edx),%xmm3
338 movdqu 64(%esi,%edx),%xmm4
339 movdqu 80(%esi,%edx),%xmm5
340 movdqu 96(%esi,%edx),%xmm6
341 movdqu 112(%esi,%edx),%xmm7
343 movntdq %xmm0,(%edi,%edx)
344 movntdq %xmm1,16(%edi,%edx)
345 movntdq %xmm2,32(%edi,%edx)
346 movntdq %xmm3,48(%edi,%edx)
347 movntdq %xmm4,64(%edi,%edx)
348 movntdq %xmm5,80(%edi,%edx)
349 movntdq %xmm6,96(%edi,%edx)
350 movntdq %xmm7,112(%edi,%edx)
352 subl $-128,%edx // add 128 with an 8-bit immediate
381 movl %ecx,%edx // copy length
392 andl $3,%edx // bytes?
399 dec %edx
414 movl %edi,%edx // copy destination
415 andl $15,%edx // get #bytes to align destination
417 subl %edx,%ecx // adjust length
423 dec %edx
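
Lines 381-423 are the reverse-path equivalents of the short copy and the alignment peel. Assuming the reverse path addresses the destination one past its last byte, the low four bits of that end pointer give the number of trailing bytes to copy individually before the aligned bulk loop; a small sketch of that count, with an illustrative name:

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of the andl $15 at lines 414-415, under the assumption that
       dst_end points one past the last destination byte. */
    static size_t reverse_bytes_to_align16(const void *dst_end)
    {
        return (size_t)((uintptr_t)dst_end & 15);
    }
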
429 movl %ecx,%edx // copy length
431 andl $-64,%edx // get number of bytes we will copy in inner loop
432 subl %edx,%esi // point to endpoint of copy
433 subl %edx,%edi
440 movdqa -16(%esi,%edx),%xmm0
441 movdqa -32(%esi,%edx),%xmm1
442 movdqa -48(%esi,%edx),%xmm2
443 movdqa -64(%esi,%edx),%xmm3
445 movdqa %xmm0,-16(%edi,%edx)
446 movdqa %xmm1,-32(%edi,%edx)
447 movdqa %xmm2,-48(%edi,%edx)
448 movdqa %xmm3,-64(%edi,%edx)
450 subl $64,%edx
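
Lines 429-450 mirror the forward inner loop in reverse: the end pointers are pulled back by the number of bytes the unrolled loop will cover (lines 432-433), and the index counts down by 64, so each iteration copies the 64 bytes just below ptr+idx, working from the top of the region toward the bottom. Lines 460-470 repeat the pattern with movdqu loads. A C sketch with SSE2 intrinsics, assuming 16-byte alignment and a 64-byte-multiple count:

    #include <emmintrin.h>
    #include <stddef.h>

    /* Sketch of the reverse aligned loop at lines 440-450.  src_end/dst_end
       point one past the highest byte still to be copied. */
    static void copy_reverse_aligned_64(unsigned char *dst_end,
                                        const unsigned char *src_end,
                                        size_t count)
    {
        dst_end -= count;                    /* point to endpoint of copy */
        src_end -= count;
        for (ptrdiff_t idx = (ptrdiff_t)count; idx != 0; idx -= 64) {
            __m128i x0 = _mm_load_si128((const __m128i *)(src_end + idx - 16));
            __m128i x1 = _mm_load_si128((const __m128i *)(src_end + idx - 32));
            __m128i x2 = _mm_load_si128((const __m128i *)(src_end + idx - 48));
            __m128i x3 = _mm_load_si128((const __m128i *)(src_end + idx - 64));
            _mm_store_si128((__m128i *)(dst_end + idx - 16), x0);
            _mm_store_si128((__m128i *)(dst_end + idx - 32), x1);
            _mm_store_si128((__m128i *)(dst_end + idx - 48), x2);
            _mm_store_si128((__m128i *)(dst_end + idx - 64), x3);
        }
    }
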
460 movdqu -16(%esi,%edx),%xmm0
461 movdqu -32(%esi,%edx),%xmm1
462 movdqu -48(%esi,%edx),%xmm2
463 movdqu -64(%esi,%edx),%xmm3
465 movdqa %xmm0,-16(%edi,%edx)
466 movdqa %xmm1,-32(%edi,%edx)
467 movdqa %xmm2,-48(%edi,%edx)
468 movdqa %xmm3,-64(%edi,%edx)
470 subl $64,%edx