/*
 * Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVUTIL_ARM_INTMATH_H
#define AVUTIL_ARM_INTMATH_H

#include <stdint.h>

#include "config.h"
#include "libavutil/attributes.h"

#if HAVE_INLINE_ASM

#if HAVE_ARMV6

#define FASTDIV FASTDIV
static av_always_inline av_const int FASTDIV(int a, int b)
{
    int r;
    __asm__ ("cmp     %2, #2               \n\t"
             "ldr     %0, [%3, %2, lsl #2] \n\t" /* r = ff_inverse[b]         */
             "ite     le                   \n\t"
             "lsrle   %0, %1, #1           \n\t" /* b <= 2: r = a >> 1        */
             "smmulgt %0, %0, %1           \n\t" /* b >  2: r = (r * a) >> 32 */
             : "=&r"(r) : "r"(a), "r"(b), "r"(ff_inverse) : "cc");
    return r;
}
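
/* For reference: for b > 2 this should match the generic C fallback,
 * FASTDIV(a, b) = (uint32_t)(((uint64_t)a * ff_inverse[b]) >> 32),
 * for operands in the range FASTDIV supports. smmul yields the high
 * 32 bits of the product directly, so no 64-bit multiply-and-shift
 * sequence is needed on ARMv6. */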

#define av_clip_uint8 av_clip_uint8_arm
static av_always_inline av_const uint8_t av_clip_uint8_arm(int a)
{
    unsigned x;
    __asm__ ("usat %0, #8,  %1" : "=r"(x) : "r"(a));
    return x;
}

#define av_clip_int8 av_clip_int8_arm
static av_always_inline av_const int8_t av_clip_int8_arm(int a)
{
    int x;
    __asm__ ("ssat %0, #8,  %1" : "=r"(x) : "r"(a));
    return x;
}
61
62#define av_clip_uint16 av_clip_uint16_arm
63static av_always_inline av_const uint16_t av_clip_uint16_arm(int a)
64{
65    unsigned x;
66    __asm__ ("usat %0, #16, %1" : "=r"(x) : "r"(a));
67    return x;
68}
69
70#define av_clip_int16 av_clip_int16_arm
71static av_always_inline av_const int16_t av_clip_int16_arm(int a)
72{
73    int x;
74    __asm__ ("ssat %0, #16, %1" : "=r"(x) : "r"(a));
75    return x;
76}
77
78#define av_clip_uintp2 av_clip_uintp2_arm
79static av_always_inline av_const unsigned av_clip_uintp2_arm(int a, int p)
80{
81    unsigned x;
82    __asm__ ("usat %0, %2, %1" : "=r"(x) : "r"(a), "i"(p));
83    return x;
84}
85
86
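
/* The clips above map one-to-one onto the ARMv6 saturation instructions:
 * usat clamps a signed input to [0, 2^n - 1] and ssat to
 * [-2^(n-1), 2^(n-1) - 1], so e.g. av_clip_uint8_arm(300) returns 255 and
 * av_clip_uint8_arm(-5) returns 0, branch-free in a single instruction. */
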
#else /* HAVE_ARMV6 */

#define FASTDIV FASTDIV
static av_always_inline av_const int FASTDIV(int a, int b)
{
    int r, t;
    __asm__ ("umull %1, %0, %2, %3"
             : "=&r"(r), "=&r"(t) : "r"(a), "r"(ff_inverse[b]));
    return r;
}
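
/* Without smmul, umull computes the full 64-bit unsigned product: the low
 * word lands in the scratch output t and the high word in r, giving the
 * same (a * ff_inverse[b]) >> 32 result on pre-ARMv6 cores. */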

#endif /* HAVE_ARMV6 */

#define av_clipl_int32 av_clipl_int32_arm
static av_always_inline av_const int32_t av_clipl_int32_arm(int64_t a)
{
    int x, y;
    __asm__ ("adds   %1, %R2, %Q2, lsr #31  \n\t"
             "itet   ne                     \n\t"
             "mvnne  %1, #1<<31             \n\t"
             "moveq  %0, %Q2                \n\t"
             "eorne  %0, %1,  %R2, asr #31  \n\t"
             : "=r"(x), "=&r"(y) : "r"(a) : "cc");
    return x;
}
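
/* How the clip works: adds folds the high word and the low word's sign bit
 * together, so the result is zero exactly when the 64-bit value already
 * fits in 32 bits (moveq then returns the low word unchanged); otherwise
 * mvn/eor construct INT32_MAX or INT32_MIN from the sign of the high word. */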

#endif /* HAVE_INLINE_ASM */

#endif /* AVUTIL_ARM_INTMATH_H */