/* Intel Pentium-4 mpn_add_n -- mpn addition.
 *
 * Copyright 2001, 2002 Free Software Foundation, Inc.
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 *
 * Note: This code is heavily based on the GNU MP Library.
 *	 Actually it's the same code with only minor changes in the
 *	 way the data is stored; this is to support the abstraction
 *	 of an optional secure memory allocation which may be used
 *	 to avoid revealing of sensitive data due to paging etc.
 */


#include "sysdep.h"
#include "asm-syntax.h"



/*******************
 * mpi_limb_t
 * _gcry_mpih_add_n( mpi_ptr_t res_ptr,	(sp + 4)
 *		     mpi_ptr_t s1_ptr,	(sp + 8)
 *		     mpi_ptr_t s2_ptr,	(sp + 12)
 *		     mpi_size_t size)	(sp + 16)
 *
 * P4 Willamette, Northwood: 4.0 cycles/limb if dst!=src1 and dst!=src2
 *			     6.0 cycles/limb if dst==src1 or dst==src2
 * P4 Prescott:		     >= 5 cycles/limb
 *
 * The 4 c/l achieved here isn't particularly good, but is better than 9 c/l
 * for a basic adc loop.
 */

	TEXT
	ALIGN (3)
	GLOBL	C_SYMBOL_NAME(_gcry_mpih_add_n)
C_SYMBOL_NAME(_gcry_mpih_add_n:)

	pxor	%mm0, %mm0		/* clear carry accumulator */

	movl	8(%esp), %eax		/* s1_ptr */
	movl	%ebx, 8(%esp)		/* save caller's ebx; re-use (now dead) parameter space */
	movl	12(%esp), %ebx		/* s2_ptr (12(%esp) per the parameter table above) */
	movl	4(%esp), %edx		/* res_ptr (4(%esp) per the parameter table above) */
	movl	16(%esp), %ecx		/* size */

	leal	(%eax,%ecx,4), %eax	/* src1 end */
	leal	(%ebx,%ecx,4), %ebx	/* src2 end */
	leal	(%edx,%ecx,4), %edx	/* dst end */
	negl	%ecx			/* -size; counts up toward zero */

Ltop:
/*
	C eax	src1 end
	C ebx	src2 end
	C ecx	counter, limbs, negative
	C edx	dst end
	C mm0	carry bit
*/

	movd	(%eax,%ecx,4), %mm1	/* mm1 = s1[i], zero-extended to 64 bits */
	movd	(%ebx,%ecx,4), %mm2	/* mm2 = s2[i] */
	paddq	%mm2, %mm1		/* 64-bit add: sum fits in 33 bits, no overflow */

	paddq	%mm1, %mm0		/* add carry-in from previous limb */
	movd	%mm0, (%edx,%ecx,4)	/* store low 32 bits to res[i] */

	psrlq	$32, %mm0		/* shift carry-out down to bit 0 for next iteration */

	addl	$1, %ecx		/* addl (not incl) so as not to create partial-flag stalls */
	jnz	Ltop


	movd	%mm0, %eax		/* return value: final carry, 0 or 1 */
	movl	8(%esp), %ebx		/* restore saved EBX */
	emms				/* leave MMX state clean for any FP code that follows */
	ret