1# Pyramid __gmpn_add_n -- Add two limb vectors of the same length > 0 and store 2# sum in a third limb vector. 3 4# Copyright 1995, 2000 Free Software Foundation, Inc. 5 6# This file is part of the GNU MP Library. 7 8# The GNU MP Library is free software; you can redistribute it and/or modify 9# it under the terms of the GNU Lesser General Public License as published by 10# the Free Software Foundation; either version 3 of the License, or (at your 11# option) any later version. 12 13# The GNU MP Library is distributed in the hope that it will be useful, but 14# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY 15# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public 16# License for more details. 17 18# You should have received a copy of the GNU Lesser General Public License 19# along with the GNU MP Library. If not, see http://www.gnu.org/licenses/. 20 21.text 22 .align 2 23.globl ___gmpn_add_n 24___gmpn_add_n: 25 movw $-1,tr0 # representation for carry clear 26 27 movw pr3,tr2 28 andw $3,tr2 29 beq Lend0 30 subw tr2,pr3 31 32Loop0: rsubw $0,tr0 # restore carry bit from carry-save register 33 34 movw (pr1),tr1 35 addwc (pr2),tr1 36 movw tr1,(pr0) 37 38 subwb tr0,tr0 39 addw $4,pr0 40 addw $4,pr1 41 addw $4,pr2 42 addw $-1,tr2 43 bne Loop0 44 45 mtstw pr3,pr3 46 beq Lend 47Lend0: 48Loop: rsubw $0,tr0 # restore carry bit from carry-save register 49 50 movw (pr1),tr1 51 addwc (pr2),tr1 52 movw tr1,(pr0) 53 54 movw 4(pr1),tr1 55 addwc 4(pr2),tr1 56 movw tr1,4(pr0) 57 58 movw 8(pr1),tr1 59 addwc 8(pr2),tr1 60 movw tr1,8(pr0) 61 62 movw 12(pr1),tr1 63 addwc 12(pr2),tr1 64 movw tr1,12(pr0) 65 66 subwb tr0,tr0 67 addw $16,pr0 68 addw $16,pr1 69 addw $16,pr2 70 addw $-4,pr3 71 bne Loop 72Lend: 73 mnegw tr0,pr0 74 ret 75