/*
 * Copyright (C) 2009 David Conrad
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"
#include "util_altivec.h"
#include "types_altivec.h"
#include "dsputil_altivec.h"

// 16.16 fixed-point IDCT cosines: constants[k] = 65536 * cos(k*pi/16)
static const vec_s16 constants =
    {0, 64277, 60547, 54491, 46341, 36410, 25080, 12785};
static const vec_u8 interleave_high =
    {0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29};

#define IDCT_START \
    vec_s16 A, B, C, D, Ad, Bd, Cd, Dd, E, F, G, H;\
    vec_s16 Ed, Gd, Add, Bdd, Fd, Hd;\
    vec_s16 eight = vec_splat_s16(8);\
    vec_u16 four = vec_splat_u16(4);\
\
    vec_s16 C1 = vec_splat(constants, 1);\
    vec_s16 C2 = vec_splat(constants, 2);\
    vec_s16 C3 = vec_splat(constants, 3);\
    vec_s16 C4 = vec_splat(constants, 4);\
    vec_s16 C5 = vec_splat(constants, 5);\
    vec_s16 C6 = vec_splat(constants, 6);\
    vec_s16 C7 = vec_splat(constants, 7);\
\
    vec_s16 b0 = vec_ld(0x00, block);\
    vec_s16 b1 = vec_ld(0x10, block);\
    vec_s16 b2 = vec_ld(0x20, block);\
    vec_s16 b3 = vec_ld(0x30, block);\
    vec_s16 b4 = vec_ld(0x40, block);\
    vec_s16 b5 = vec_ld(0x50, block);\
    vec_s16 b6 = vec_ld(0x60, block);\
    vec_s16 b7 = vec_ld(0x70, block);

// these functions compute (a*C)>>16
// things are tricky because a is signed, but C is unsigned.
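// (vec_mule/vec_mulo form the full 32-bit products of the even/odd
// lanes, and interleave_high gathers the high 16 bits of each product
// back into lane order, i.e. (a*C)>>16 per element. A constant
// C >= 32768 wraps to a negative value in a signed 16-bit lane, so the
// signed multiply really computes a*(C-65536), whose high half is
// ((a*C)>>16) - a; M16 adds a back to correct for that.)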
// M15 is used if C fits in 15 bits unsigned (C6, C7)
// M16 is used if C requires 16 bits unsigned
static inline vec_s16 M15(vec_s16 a, vec_s16 C)
{
    return (vec_s16)vec_perm(vec_mule(a, C), vec_mulo(a, C), interleave_high);
}
static inline vec_s16 M16(vec_s16 a, vec_s16 C)
{
    return vec_add(a, M15(a, C));
}

#define IDCT_1D(ADD, SHIFT)\
    A = vec_add(M16(b1, C1), M15(b7, C7));\
    B = vec_sub(M15(b1, C7), M16(b7, C1));\
    C = vec_add(M16(b3, C3), M16(b5, C5));\
    D = vec_sub(M16(b5, C3), M16(b3, C5));\
\
    Ad = M16(vec_sub(A, C), C4);\
    Bd = M16(vec_sub(B, D), C4);\
\
    Cd = vec_add(A, C);\
    Dd = vec_add(B, D);\
\
    E = ADD(M16(vec_add(b0, b4), C4));\
    F = ADD(M16(vec_sub(b0, b4), C4));\
\
    G = vec_add(M16(b2, C2), M15(b6, C6));\
    H = vec_sub(M15(b2, C6), M16(b6, C2));\
\
    Ed = vec_sub(E, G);\
    Gd = vec_add(E, G);\
\
    Add = vec_add(F, Ad);\
    Bdd = vec_sub(Bd, H);\
\
    Fd = vec_sub(F, Ad);\
    Hd = vec_add(Bd, H);\
\
    b0 = SHIFT(vec_add(Gd, Cd));\
    b7 = SHIFT(vec_sub(Gd, Cd));\
\
    b1 = SHIFT(vec_add(Add, Hd));\
    b2 = SHIFT(vec_sub(Add, Hd));\
\
    b3 = SHIFT(vec_add(Ed, Dd));\
    b4 = SHIFT(vec_sub(Ed, Dd));\
\
    b5 = SHIFT(vec_add(Fd, Bdd));\
    b6 = SHIFT(vec_sub(Fd, Bdd));

#define NOP(a) a
#define ADD8(a) vec_add(a, eight)
#define SHIFT4(a) vec_sra(a, four)

void ff_vp3_idct_altivec(DCTELEM block[64])
{
    IDCT_START

    IDCT_1D(NOP, NOP)
    TRANSPOSE8(b0, b1, b2, b3, b4, b5, b6, b7);
    IDCT_1D(ADD8, SHIFT4)

    vec_st(b0, 0x00, block);
    vec_st(b1, 0x10, block);
    vec_st(b2, 0x20, block);
    vec_st(b3, 0x30, block);
    vec_st(b4, 0x40, block);
    vec_st(b5, 0x50, block);
    vec_st(b6, 0x60, block);
    vec_st(b7, 0x70, block);
}

void ff_vp3_idct_put_altivec(uint8_t *dst, int stride, DCTELEM block[64])
{
    vec_u8 t;
    IDCT_START

    // pixels are signed, so add 128*16 in addition to the normal rounding 8
    vec_s16 v2048 = vec_sl(vec_splat_s16(1), vec_splat_u16(11));
    eight = vec_add(eight, v2048);

    IDCT_1D(NOP, NOP)
    TRANSPOSE8(b0, b1, b2, b3, b4, b5, b6, b7);
    IDCT_1D(ADD8, SHIFT4)

// pack to unsigned 8-bit with saturation, store the row as two 32-bit elements
#define PUT(a)\
    t = vec_packsu(a, a);\
    vec_ste((vec_u32)t, 0, (unsigned int *)dst);\
    vec_ste((vec_u32)t, 4, (unsigned int *)dst);

    PUT(b0)     dst += stride;
    PUT(b1)     dst += stride;
    PUT(b2)     dst += stride;
    PUT(b3)     dst += stride;
    PUT(b4)     dst += stride;
    PUT(b5)     dst += stride;
    PUT(b6)     dst += stride;
    PUT(b7)
}

void ff_vp3_idct_add_altivec(uint8_t *dst, int stride, DCTELEM block[64])
{
    LOAD_ZERO;
    vec_u8 t, vdst;
    vec_s16 vdst_16;
    // permute mask that picks the 8 destination pixels out of the (possibly
    // unaligned) load and zero-extends them to 16-bit lanes
    vec_u8 vdst_mask = vec_mergeh(vec_splat_u8(-1), vec_lvsl(0, dst));

    IDCT_START

    IDCT_1D(NOP, NOP)
    TRANSPOSE8(b0, b1, b2, b3, b4, b5, b6, b7);
    IDCT_1D(ADD8, SHIFT4)

// add the IDCT result to the existing pixels, then pack with saturation
#define ADD(a)\
    vdst = vec_ld(0, dst);\
    vdst_16 = (vec_s16)vec_perm(vdst, zero_u8v, vdst_mask);\
    vdst_16 = vec_adds(a, vdst_16);\
    t = vec_packsu(vdst_16, vdst_16);\
    vec_ste((vec_u32)t, 0, (unsigned int *)dst);\
    vec_ste((vec_u32)t, 4, (unsigned int *)dst);

    ADD(b0)     dst += stride;
    ADD(b1)     dst += stride;
    ADD(b2)     dst += stride;
    ADD(b3)     dst += stride;
    ADD(b4)     dst += stride;
    ADD(b5)     dst += stride;
    ADD(b6)     dst += stride;
    ADD(b7)
}
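
/*
 * Usage sketch: these functions are meant to be installed as the VP3 IDCT
 * entry points during PPC dsputil initialization when AltiVec is available
 * at runtime. The exact context field names and CPU check below are
 * assumptions for illustration, e.g.
 *
 *     if (has_altivec()) {
 *         c->vp3_idct_put = ff_vp3_idct_put_altivec;
 *         c->vp3_idct_add = ff_vp3_idct_add_altivec;
 *     }
 */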