/*
 * ARM NEON optimised integer operations
 * Copyright (c) 2009 Kostya Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "asm.S"

        preserve8
        .fpu neon
        .text

@ int32_t scalarproduct_int16(int16_t *v1, int16_t *v2, int order, int shift)
function ff_scalarproduct_int16_neon, export=1
        vmov.i16        q0,  #0                 @ clear the four accumulators
        vmov.i16        q1,  #0
        vmov.i16        q2,  #0
        vmov.i16        q3,  #0
        negs            r3,  r3                 @ negate shift: vshl by -n is >> n
        beq             2f                      @ shift == 0: take the vmlal loop

        vdup.s32        q12, r3                 @ q12 = negated shift, all lanes
1:      vld1.16         {d16-d17}, [r0]!
        vld1.16         {d20-d21}, [r1,:128]!
        vmull.s16       q13, d16,  d20          @ products go in q13-q15/q8 so the
        vld1.16         {d18-d19}, [r0]!        @ shift in q12 is not clobbered
        vmull.s16       q14, d17,  d21
        vld1.16         {d22-d23}, [r1,:128]!
        vmull.s16       q15, d18,  d22
        vmull.s16       q8,  d19,  d23
        vshl.s32        q9,  q13,  q12          @ arithmetic right shift by 'shift'
        vshl.s32        q10, q14,  q12
        vadd.s32        q0,  q0,   q9
        vshl.s32        q11, q15,  q12
        vadd.s32        q1,  q1,   q10
        vshl.s32        q9,  q8,   q12
        vadd.s32        q2,  q2,   q11
        vadd.s32        q3,  q3,   q9
        subs            r2,  r2,   #16          @ 16 elements per iteration
        bne             1b
        b               3f

2:      vld1.16         {d16-d17}, [r0]!
        vld1.16         {d20-d21}, [r1,:128]!
        vmlal.s16       q0,  d16,  d20          @ no shift: plain multiply-accumulate
        vld1.16         {d18-d19}, [r0]!
        vmlal.s16       q1,  d17,  d21
        vld1.16         {d22-d23}, [r1,:128]!
        vmlal.s16       q2,  d18,  d22
        vmlal.s16       q3,  d19,  d23
        subs            r2,  r2,   #16
        bne             2b

3:      vpadd.s32       d16, d0,  d1            @ horizontal sum of q0-q3
        vpadd.s32       d17, d2,  d3
        vpadd.s32       d18, d4,  d5            @ d18/d19 here: d8-d15 are
        vpadd.s32       d19, d6,  d7            @ callee-saved under the AAPCS
        vpadd.s32       d0,  d16, d17
        vpadd.s32       d1,  d18, d19
        vpadd.s32       d2,  d0,  d1
        vpaddl.s32      d3,  d2
        vmov.32         r0,  d3[0]
        bx              lr
endfunc

@ scalarproduct_and_madd_int16(/*aligned*/v0,v1,v2,order,mul)
function ff_scalarproduct_and_madd_int16_neon, export=1
        vld1.16         {d28[],d29[]}, [sp]     @ q14 = mul, broadcast to all lanes
        vmov.i16        q0,  #0
        vmov.i16        q1,  #0
        vmov.i16        q2,  #0
        vmov.i16        q3,  #0
        mov             r12, r0                 @ r12: write pointer back into v0

1:      vld1.16         {d16-d17}, [r0,:128]!
        vld1.16         {d18-d19}, [r1]!
        vld1.16         {d20-d21}, [r2]!
        vld1.16         {d22-d23}, [r0,:128]!
        vld1.16         {d24-d25}, [r1]!
        vld1.16         {d26-d27}, [r2]!
        vmul.s16        q10, q10, q14           @ v2 * mul
        vmul.s16        q13, q13, q14
        vmlal.s16       q0,  d16,  d18          @ accumulate v0 * v1
        vmlal.s16       q1,  d17,  d19
        vadd.s16        q10, q8,   q10          @ v0 += v2 * mul
        vadd.s16        q13, q11,  q13
        vmlal.s16       q2,  d22,  d24
        vmlal.s16       q3,  d23,  d25
        vst1.16         {q10}, [r12,:128]!
        subs            r3,  r3,   #16
        vst1.16         {q13}, [r12,:128]!
        bne             1b

        vpadd.s32       d16, d0,  d1            @ horizontal sum of q0-q3
        vpadd.s32       d17, d2,  d3
        vpadd.s32       d18, d4,  d5            @ d18/d19 here: d8-d15 are
        vpadd.s32       d19, d6,  d7            @ callee-saved under the AAPCS
        vpadd.s32       d0,  d16, d17
        vpadd.s32       d1,  d18, d19
        vpadd.s32       d2,  d0,  d1
        vpaddl.s32      d3,  d2
        vmov.32         r0,  d3[0]
        bx              lr
endfunc
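
/*
 * Reference C equivalents of the two functions above, as a documentation
 * sketch only: this comment block is not compiled, and the _c names and
 * parameter names below are illustrative, not exports of this file.  Both
 * loops assume 'order' is a multiple of 16, matching the NEON unrolling.
 *
 *     // sum of (v1[i] * v2[i]) >> shift over 'order' elements
 *     static int32_t scalarproduct_int16_c(const int16_t *v1,
 *                                          const int16_t *v2,
 *                                          int order, int shift)
 *     {
 *         int32_t res = 0;
 *         while (order--)
 *             res += (*v1++ * *v2++) >> shift;
 *         return res;
 *     }
 *
 *     // scalar product of v0 and v1, then v0[i] += v2[i] * mul;
 *     // the product uses the original v0 values, as in the NEON loop
 *     static int32_t scalarproduct_and_madd_int16_c(int16_t *v0,
 *                                                   const int16_t *v1,
 *                                                   const int16_t *v2,
 *                                                   int order, int mul)
 *     {
 *         int32_t res = 0;
 *         while (order--) {
 *             res   += *v0   * *v1++;
 *             *v0++ += *v2++ * mul;
 *         }
 *         return res;
 *     }
 */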