/*
 * simple math operations
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_ARM_MATHOPS_H
#define AVCODEC_ARM_MATHOPS_H

#include <stdint.h>
#include "config.h"
#include "libavutil/common.h"

#if HAVE_INLINE_ASM

#   define MULL MULL
static inline av_const int MULL(int a, int b, unsigned shift)
{
    int lo, hi;
    __asm__("smull %0, %1, %2, %3     \n\t"
            "mov   %0, %0,     lsr %4 \n\t"
            "add   %1, %0, %1, lsl %5 \n\t"
            : "=&r"(lo), "=&r"(hi)
            : "r"(b), "r"(a), "ir"(shift), "ir"(32-shift));
    return hi;
}
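
/*
 * Reference for the asm above (a sketch of the intended semantics):
 *
 *     MULL(a, b, shift) == (int)(((int64_t)a * b) >> shift)
 *
 * smull leaves the full 64-bit product in lo:hi; the lsr/lsl pair then
 * splices bits [shift, shift+31] of the product into a single register.
 */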

#define MULH MULH
#if HAVE_ARMV6
static inline av_const int MULH(int a, int b)
{
    int r;
    __asm__ ("smmul %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}
#else
static inline av_const int MULH(int a, int b)
{
    int lo, hi;
    __asm__ ("smull %0, %1, %2, %3" : "=&r"(lo), "=&r"(hi) : "r"(b), "r"(a));
    return hi;
}
#endif
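
/*
 * Both MULH variants return the high 32 bits of the signed 64-bit
 * product, i.e. (a sketch) MULH(a, b) == (int)(((int64_t)a * b) >> 32).
 * ARMv6 smmul yields the most significant word directly; the fallback
 * performs a full smull and discards the low word.
 */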

static inline av_const int64_t MUL64(int a, int b)
{
    union { uint64_t x; unsigned hl[2]; } x;
    /* early-clobber outputs: smull requires RdLo, RdHi and Rm to be
     * distinct registers on pre-ARMv6 cores */
    __asm__ ("smull %0, %1, %2, %3"
             : "=&r"(x.hl[0]), "=&r"(x.hl[1]) : "r"(a), "r"(b));
    return x.x;
}
#define MUL64 MUL64
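
/*
 * Reference sketch: MUL64(a, b) == (int64_t)a * (int64_t)b.  The union
 * relies on hl[0] holding the low word of the 64-bit value, i.e. on
 * little-endian word order, which this code appears to assume.
 */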

static inline av_const int64_t MAC64(int64_t d, int a, int b)
{
    union { uint64_t x; unsigned hl[2]; } x = { d };
    __asm__ ("smlal %0, %1, %2, %3"
             : "+r"(x.hl[0]), "+r"(x.hl[1]) : "r"(a), "r"(b));
    return x.x;
}
#define MAC64(d, a, b) ((d) = MAC64(d, a, b))
#define MLS64(d, a, b) MAC64(d, -(a), b)
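
/*
 * Reference sketches: MAC64(d, a, b) performs d += (int64_t)a * b and
 * MLS64(d, a, b) performs d -= (int64_t)a * b.  smlal accumulates the
 * 64-bit product into the lo:hi register pair in place, so MAC64 is
 * wrapped in a macro that stores the result back into d, and MLS64 is
 * expressed by negating one operand rather than a separate instruction.
 */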

#if HAVE_ARMV5TE

/* signed 16x16 -> 32 multiply add accumulate */
#   define MAC16(rt, ra, rb)                                            \
    __asm__ ("smlabb %0, %1, %2, %0" : "+r"(rt) : "r"(ra), "r"(rb));
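/* i.e. rt += (int16_t)ra * (int16_t)rb; smlabb multiplies the bottom
 * halfwords of its two source operands and accumulates into rt */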

/* signed 16x16 -> 32 multiply */
#   define MUL16 MUL16
static inline av_const int MUL16(int ra, int rb)
{
    int rt;
    __asm__ ("smulbb %0, %1, %2" : "=r"(rt) : "r"(ra), "r"(rb));
    return rt;
}
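
/* reference sketch: MUL16(ra, rb) == (int)(int16_t)ra * (int)(int16_t)rb;
 * smulbb reads only the bottom 16 bits of each operand */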

#endif

#define mid_pred mid_pred
static inline av_const int mid_pred(int a, int b, int c)
{
    int m;
    __asm__ (
        "mov   %0, %2  \n\t"
        "cmp   %1, %2  \n\t"
        "movgt %0, %1  \n\t"
        "movgt %1, %2  \n\t"
        "cmp   %1, %3  \n\t"
        "movle %1, %3  \n\t"
        "cmp   %0, %1  \n\t"
        "movgt %0, %1  \n\t"
        : "=&r"(m), "+r"(a)
        : "r"(b), "r"(c)
        : "cc");             /* cmp clobbers the condition flags */
    return m;
}
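
/*
 * mid_pred() returns the median of its three arguments.  A portable
 * sketch of the same result, using helpers from libavutil/common.h:
 *
 *     mid_pred(a, b, c) == av_clip(c, FFMIN(a, b), FFMAX(a, b))
 *
 * The asm computes min(a, b) and max(a, b) with one cmp and two
 * conditional moves, then clips c to the [min, max] range.
 */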

#endif /* HAVE_INLINE_ASM */

#endif /* AVCODEC_ARM_MATHOPS_H */