/*
 * GMC (Global Motion Compensation)
 * AltiVec-enabled
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"

#include "gcc_fixes.h"

#include "dsputil_ppc.h"
#include "util_altivec.h"

/*
  AltiVec-enhanced gmc1. At the moment this code assumes that stride is
  a multiple of 8, to preserve proper dst alignment.
*/
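/*
  For reference, a scalar sketch of the computation this routine
  vectorizes (illustrative only, not part of the codebase): each output
  pixel is a bilinear blend of four neighboring source pixels with
  1/16-pel weights, rounded and renormalized by >> 8.

  static void gmc1_scalar_ref(uint8_t *dst, const uint8_t *src,
                              int stride, int h, int x16, int y16,
                              int rounder)
  {
      const int A = (16 - x16) * (16 - y16);
      const int B = (     x16) * (16 - y16);
      const int C = (16 - x16) * (     y16);
      const int D = (     x16) * (     y16);
      int i, j;

      for (i = 0; i < h; i++) {
          for (j = 0; j < 8; j++)
              dst[j] = (A * src[j]          + B * src[j + 1] +
                        C * src[j + stride] + D * src[j + stride + 1] +
                        rounder) >> 8;
          dst += stride;
          src += stride;
      }
  }
*/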
#define GMC1_PERF_COND (h==8)
void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align 1 */, int stride, int h, int x16, int y16, int rounder)
{
POWERPC_PERF_DECLARE(altivec_gmc1_num, GMC1_PERF_COND);
    const DECLARE_ALIGNED_16(unsigned short, rounder_a[8]) =
        {rounder, rounder, rounder, rounder,
         rounder, rounder, rounder, rounder};
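    /* 1/16-pel bilinear weights; A + B + C + D == 16*16 == 256,
       which is what the final >> 8 renormalizes away. */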
    const DECLARE_ALIGNED_16(unsigned short, ABCD[8]) =
        {
            (16-x16)*(16-y16), /* A */
            (   x16)*(16-y16), /* B */
            (16-x16)*(   y16), /* C */
            (   x16)*(   y16), /* D */
            0, 0, 0, 0         /* padding */
        };
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcsr8 = (const vector unsigned short)vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD;
    register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD;
    int i;
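    /* low four address bits: dst is 8-byte aligned, so dst_odd is either
       0 or 8 and selects which half of a 16-byte vector dst points into */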
    unsigned long dst_odd = (unsigned long)dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long)src & 0x0000000F;

POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);

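    // load the four weights and broadcast each one
    // across all eight 16-bit lanes of its own vector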
    tempA = vec_ld(0, (unsigned short*)ABCD);
    Av = vec_splat(tempA, 0);
    Bv = vec_splat(tempA, 1);
    Cv = vec_splat(tempA, 2);
    Dv = vec_splat(tempA, 3);

    rounderV = vec_ld(0, (unsigned short*)rounder_a);

    // we'll be able to pick up our 9 char elements
    // at src from those 32 bytes.
    // We load the first batch here, as inside the loop
    // we can reuse 'src + stride' from one iteration
    // as the 'src' of the next.
    src_0 = vec_ld(0, src);
    src_1 = vec_ld(16, src);
    srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));
    if (src_really_odd != 0x0000000F) {
        // if src & 0xF == 0xF, then (src+1) is properly aligned
        // on the second vector.
        srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    } else {
        srcvB = src_1;
    }
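    // merging with a zero vector zero-extends the low eight bytes
    // to eight unsigned shorts, ready for vec_mladd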
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);

    for (i = 0; i < h; i++) {
        dst_odd = (unsigned long)dst & 0x0000000F;
        src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;

        dstv = vec_ld(0, dst);

        // we'll be able to pick up our 9 char elements
        // at src + stride from those 32 bytes,
        // then reuse the resulting 2 vectors srcvC and srcvD
        // as the next srcvA and srcvB
        src_0 = vec_ld(stride + 0, src);
        src_1 = vec_ld(stride + 16, src);
        srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

        if (src_really_odd != 0x0000000F) {
            // if src & 0xF == 0xF, then (src+1) is properly aligned
            // on the second vector.
            srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
        } else {
            srcvD = src_1;
        }

        srcvC = vec_mergeh(vczero, srcvC);
        srcvD = vec_mergeh(vczero, srcvD);

        // OK, now we (finally) do the math :-)
        // These four instructions replace 32 integer multiplies
        // and 32 integer adds. Isn't AltiVec nice?
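        // per 16-bit lane: tempD = A*srcvA + B*srcvB + C*srcvC + D*srcvD + rounder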
        tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV);
        tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
        tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
        tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);

        srcvA = srcvC;
        srcvB = srcvD;

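        // the weights sum to 256, so shifting right by 8
        // renormalizes the result back to the 0..255 range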
        tempD = vec_sr(tempD, vcsr8);

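        // pack the eight 16-bit results into the first eight bytes of dstv2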
        dstv2 = vec_pack(tempD, (vector unsigned short)vczero);

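        // place the eight result bytes in whichever half of the 16-byte
        // destination vector dst points into, preserving the other half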
        if (dst_odd) {
            dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1));
        } else {
            dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3));
        }

        vec_st(dstv2, 0, dst);

        dst += stride;
        src += stride;
    }

POWERPC_PERF_STOP_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
}