/*
 * simple math operations
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_ARM_MATHOPS_H
#define AVCODEC_ARM_MATHOPS_H

#include <stdint.h>
#include "libavutil/common.h"

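/* MULL: signed 32x32 -> 64 multiply, returning bits [shift, shift+31] of
 * the full 64-bit product, i.e. (int)(((int64_t)a * b) >> shift);
 * valid for 0 < shift < 32. */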
#   define MULL MULL
static inline av_const int MULL(int a, int b, unsigned shift)
{
    int lo, hi;
    __asm__("smull %0, %1, %2, %3     \n\t"
            "mov   %0, %0,     lsr %4 \n\t"
            "add   %1, %0, %1, lsl %5 \n\t"
            : "=&r"(lo), "=&r"(hi)
            : "r"(b), "r"(a), "ir"(shift), "ir"(32-shift));
    return hi;
}
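/* Usage sketch (illustrative, not from FFmpeg itself): with Q16
 * fixed-point inputs, MULL(a, b, 16) yields the Q16 product; the 64-bit
 * intermediate from smull keeps the high bits that a plain 32-bit
 * multiply would discard. */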
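/* MULH: signed 32x32 -> 32 multiply returning the high 32 bits of the
 * product, i.e. (int)(((int64_t)a * b) >> 32).  ARMv6 has a single
 * instruction for this (smmul); older cores use smull and keep only
 * the high word. */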
#define MULH MULH
#if HAVE_ARMV6
static inline av_const int MULH(int a, int b)
{
    int r;
    __asm__ ("smmul %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}
#else
static inline av_const int MULH(int a, int b)
{
    int lo, hi;
    __asm__ ("smull %0, %1, %2, %3" : "=&r"(lo), "=&r"(hi) : "r"(b), "r"(a));
    return hi;
}
#endif
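/* MUL64: full signed 32x32 -> 64 multiply; the union maps the {lo, hi}
 * register pair written by smull onto the 64-bit result (little-endian
 * word layout). */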
static inline av_const int64_t MUL64(int a, int b)
{
    union { uint64_t x; unsigned hl[2]; } x;
    __asm__ ("smull %0, %1, %2, %3"
             : "=r"(x.hl[0]), "=r"(x.hl[1]) : "r"(a), "r"(b));
    return x.x;
}
#define MUL64 MUL64
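/* MAC64/MLS64: 64-bit multiply-accumulate built on smlal,
 * d += (int64_t)a * b and d -= (int64_t)a * b respectively. */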
static inline av_const int64_t MAC64(int64_t d, int a, int b)
{
    union { uint64_t x; unsigned hl[2]; } x = { d };
    __asm__ ("smlal %0, %1, %2, %3"
             : "+r"(x.hl[0]), "+r"(x.hl[1]) : "r"(a), "r"(b));
    return x.x;
}
#define MAC64(d, a, b) ((d) = MAC64(d, a, b))
#define MLS64(d, a, b) MAC64(d, -(a), b)
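/* Usage sketch (illustrative values only):
 *     int64_t acc = 0;
 *     MAC64(acc, 3, 4);   // acc += (int64_t)3 * 4  -> 12
 *     MLS64(acc, 2, 5);   // acc -= (int64_t)2 * 5  ->  2
 */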

#if HAVE_ARMV5TE

/* signed 16x16 -> 32 multiply add accumulate */
#   define MAC16(rt, ra, rb)                                            \
    __asm__ ("smlabb %0, %1, %2, %0" : "+r"(rt) : "r"(ra), "r"(rb));

/* signed 16x16 -> 32 multiply */
#   define MUL16 MUL16
static inline av_const int MUL16(int ra, int rb)
{
    int rt;
    __asm__ ("smulbb %0, %1, %2" : "=r"(rt) : "r"(ra), "r"(rb));
    return rt;
}
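/* Note: smulbb/smlabb read only the bottom 16 bits of each operand as
 * signed halfwords, e.g. MUL16(7, -3) == -21, even though the C
 * prototype takes int. */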

#endif

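/* mid_pred: median of three, e.g. mid_pred(1, 5, 3) == 3; branch-free
 * via conditional moves. */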
#define mid_pred mid_pred
static inline av_const int mid_pred(int a, int b, int c)
{
    int m;
    __asm__ volatile (
        "mov   %0, %2  \n\t"
        "cmp   %1, %2  \n\t"
        "movgt %0, %1  \n\t"
        "movgt %1, %2  \n\t"
        "cmp   %1, %3  \n\t"
        "movle %1, %3  \n\t"
        "cmp   %0, %1  \n\t"
        "movgt %0, %1  \n\t"
        : "=&r"(m), "+r"(a)
        : "r"(b), "r"(c));
    return m;
}

#endif /* AVCODEC_ARM_MATHOPS_H */