/* AMD64 (x86_64) lshift -- Left shift a limb vector and store
 * result in a second limb vector.
 *
 * Copyright (C) 1992, 1994, 1995, 1998,
 *               2001, 2002, 2006 Free Software Foundation, Inc.
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 *
 * Note: This code is heavily based on the GNU MP Library.
 *       Actually it's the same code with only minor changes in the
 *       way the data is stored; this is to support the abstraction
 *       of an optional secure memory allocation which may be used
 *       to avoid revealing of sensitive data due to paging etc.
 */


#include "sysdep.h"
#include "asm-syntax.h"

/*******************
 * mpi_limb_t
 * _gcry_mpih_lshift( mpi_ptr_t wp,        rdi
 *                    mpi_ptr_t up,        rsi
 *                    mpi_size_t usize,    rdx
 *                    unsigned cnt)        rcx
 */

/*
 * Shift the limb vector {up, usize} left by cnt bits, store the result
 * at {wp, usize}, and return the bits shifted out of the most
 * significant limb.
 *
 * ABI:   SysV AMD64 (args rdi, rsi, rdx, rcx; result in rax)
 * In:    rdi = wp (destination), rsi = up (source), rdx = usize, ecx = cnt
 * Out:   rax = up[usize-1] >> (64-cnt)   -- the out-shifted high bits
 * Clobb: rax, rdx, flags, mm0-mm3, mm6, mm7 (emms executed before ret)
 *
 * NOTE(review): assumes usize >= 1 and 1 <= cnt <= 63 -- confirm at the
 * callers; cnt == 0 would make the 64-bit psrlq below shift by 64.
 * Limbs are written from the most significant end downwards, so an
 * in-place shift (wp == up) appears safe; presumably a destination
 * overlapping at higher addresses is also allowed, per GMP mpn_lshift
 * convention -- verify against callers.
 */
.text
	.globl C_SYMBOL_NAME(_gcry_mpih_lshift)
C_SYMBOL_NAME(_gcry_mpih_lshift:)
	movq	-8(%rsi,%rdx,8), %mm7	/* mm7 = up[usize-1], the top limb */
	movd	%ecx, %mm1		/* mm1 = cnt, the left-shift count */
	movl	$64, %eax
	subl	%ecx, %eax
	movd	%eax, %mm0		/* mm0 = 64-cnt, complementary right shift */
	movq	%mm7, %mm3		/* mm3 keeps the unshifted top limb */
	psrlq	%mm0, %mm7		/* mm7 = top limb >> (64-cnt) */
	movd	%mm7, %rax		/* return value: bits shifted out the top */
	subq	$2, %rdx		/* rdx = usize-2; flags feed jl/je below */
	jl	.Lendo			/* usize == 1: only shift the single limb */

	ALIGN(4)			/* minimal alignment for claimed speed */
/* Main loop, two limbs per iteration.  rdx indexes the lower limb of the
 * current pair.  Invariant at .Loop: mm3 = up[rdx+1] unshifted.  MMX
 * instructions do not modify EFLAGS, so the je below still observes the
 * flags of the most recent subq. */
.Loop:	movq	(%rsi,%rdx,8), %mm6	/* mm6 = up[rdx] */
	movq	%mm6, %mm2		/* mm2 keeps up[rdx] for the second half */
	psrlq	%mm0, %mm6		/* mm6 = up[rdx] >> (64-cnt) */
	psllq	%mm1, %mm3		/* mm3 = up[rdx+1] << cnt */
	por	%mm6, %mm3		/* combine into the result limb */
	movq	%mm3, 8(%rdi,%rdx,8)	/* wp[rdx+1] = mm3 */
	je	.Lende			/* rdx == 0 (from subq): odd limb in mm2 */
	movq	-8(%rsi,%rdx,8), %mm7	/* mm7 = up[rdx-1] */
	movq	%mm7, %mm3		/* mm3 = up[rdx-1] for the next iteration */
	psrlq	%mm0, %mm7		/* mm7 = up[rdx-1] >> (64-cnt) */
	psllq	%mm1, %mm2		/* mm2 = up[rdx] << cnt */
	por	%mm7, %mm2		/* combine into the result limb */
	movq	%mm2, (%rdi,%rdx,8)	/* wp[rdx] = mm2 */
	subq	$2, %rdx
	jge	.Loop			/* rdx < 0: only up[0] (in mm3) remains */

.Lendo:	movq	%mm3, %mm2		/* lowest source limb up[0] into mm2 */
.Lende:	psllq	%mm1, %mm2		/* lowest result limb: up[0] << cnt */
	movq	%mm2, (%rdi)		/* wp[0] */
	emms				/* leave MMX state clean for x87/FP code */
	ret