/* AMD64 (x86_64) rshift -- Right shift a limb vector and store
 * result in a second limb vector.
 *
 * Copyright (C) 1992, 1994, 1995, 1998,
 *               2001, 2002, 2006 Free Software Foundation, Inc.
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 *
 * Note: This code is heavily based on the GNU MP Library.
 *	 Actually it's the same code with only minor changes in the
 *	 way the data is stored; this is to support the abstraction
 *	 of an optional secure memory allocation which may be used
 *	 to avoid revealing of sensitive data due to paging etc.
 */

#include "sysdep.h"
#include "asm-syntax.h"

/*******************
 * mpi_limb_t
 * _gcry_mpih_rshift( mpi_ptr_t wp,	rdi
 *		   mpi_ptr_t up,	rsi
 *		   mpi_size_t usize,	rdx
 *		   unsigned cnt)	rcx
 *
 * Shift UP[0..usize-1] right by CNT bits and store the result in
 * WP[0..usize-1].  Returns (in rax) the bits shifted out of the low
 * limb, left-justified: up[0] << (64 - cnt).
 *
 * ABI: SysV AMD64.  Clobbers rax, rcx(read-only here), rdx, rsi, rdi,
 * MMX regs mm0-mm3, mm6, mm7 and flags; ends with emms so the x87/MMX
 * state is clean for the caller.
 *
 * NOTE(review): assumes usize >= 1 and, per the GMP mpn_rshift
 * convention this code derives from, presumably 1 <= cnt <= 63 --
 * confirm against callers.  In-place operation (wp == up) appears
 * safe since each source limb is read before its slot is written;
 * other overlaps are presumably unsupported.
 */

.text
	.globl C_SYMBOL_NAME(_gcry_mpih_rshift)
C_SYMBOL_NAME(_gcry_mpih_rshift:)
	movq	(%rsi), %mm7		/* mm7 = up[0] (lowest limb) */
	movd	%ecx, %mm1		/* mm1 = cnt (right-shift count) */
	movl	$64, %eax
	subl	%ecx, %eax
	movd	%eax, %mm0		/* mm0 = 64 - cnt (left-shift count) */
	movq	%mm7, %mm3		/* mm3 = up[0], carried into the loop */
	psllq	%mm0, %mm7		/* low cnt bits of up[0], left-justified */
	movd	%mm7, %rax		/* ... become the return value */
	leaq	(%rsi,%rdx,8), %rsi	/* rsi = up + usize (one past the end) */
	leaq	(%rdi,%rdx,8), %rdi	/* rdi = wp + usize */
	negq	%rdx
	addq	$2, %rdx		/* rdx = 2 - usize: loop counter, steps
					   by 2 (two limbs per iteration) */
	jg	.Lendo			/* usize == 1: only the top limb remains */

	ALIGN(8)			/* minimal alignment for claimed speed */
	/* Loop invariant: mm3 holds the current (not yet shifted) limb;
	   each output limb is (cur >> cnt) | (next << (64-cnt)).
	   MMX ops do not touch flags, so the je/jle below consume the
	   ZF/SF set by the most recent addq. */
.Loop:	movq	-8(%rsi,%rdx,8), %mm6	/* mm6 = next-higher source limb */
	movq	%mm6, %mm2		/* keep a copy for the second half-step */
	psllq	%mm0, %mm6		/* mm6 = next << (64-cnt) */
	psrlq	%mm1, %mm3		/* mm3 = cur >> cnt */
	por	%mm6, %mm3		/* combine into finished output limb */
	movq	%mm3, -16(%rdi,%rdx,8)	/* store it */
	je	.Lende			/* rdx == 0 (flags from addq): even
					   usize, top limb already in mm2 */
	movq	(%rsi,%rdx,8), %mm7	/* second limb of the pair, same pattern */
	movq	%mm7, %mm3
	psllq	%mm0, %mm7
	psrlq	%mm1, %mm2
	por	%mm7, %mm2
	movq	%mm2, -8(%rdi,%rdx,8)
	addq	$2, %rdx
	jle	.Loop

.Lendo:	movq	%mm3, %mm2		/* odd usize: top limb still in mm3 */
.Lende:	psrlq	%mm1, %mm2		/* wp[usize-1] = up[usize-1] >> cnt
					   (zero fill from above) */
	movq	%mm2, -8(%rdi)
	emms				/* restore x87/MMX state for the caller */
	ret