/*
 * simple math operations
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_X86_MATHOPS_H
#define AVCODEC_X86_MATHOPS_H

#include "config.h"
#include "libavutil/common.h"

#if ARCH_X86_32
#define MULL(ra, rb, shift) \
        ({ int rt, dummy; __asm__ (\
            "imull %3               \n\t"\
            "shrdl %4, %%edx, %%eax \n\t"\
            : "=a"(rt), "=d"(dummy)\
            : "a" ((int)(ra)), "rm" ((int)(rb)), "i"(shift));\
         rt; })
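/* For reference, MULL(a, b, shift) behaves like the portable expression
 * (int)(((int64_t)(a) * (int)(b)) >> (shift)): imull produces the signed
 * 64-bit product in edx:eax, and shrdl extracts the 32 result bits
 * starting at bit (shift). */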

#define MULH(ra, rb) \
    ({ int rt, dummy;\
     __asm__ ("imull %3\n\t" : "=d"(rt), "=a"(dummy): "a" ((int)(ra)), "rm" ((int)(rb)));\
     rt; })
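/* MULH(a, b) is the high half of the signed 64-bit product, roughly
 * (int)(((int64_t)(a) * (int)(b)) >> 32), read from edx. */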

#define MUL64(ra, rb) \
    ({ int64_t rt;\
     __asm__ ("imull %2\n\t" : "=A"(rt) : "a" ((int)(ra)), "g" ((int)(rb)));\
     rt; })
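/* MUL64(a, b) is the full signed 64-bit product, roughly
 * (int64_t)(int)(a) * (int)(b); the "=A" constraint reads edx:eax. */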
#endif

#if HAVE_CMOV
/* median of 3 */
#define mid_pred mid_pred
static inline av_const int mid_pred(int a, int b, int c)
{
    int i=b;
    __asm__ volatile(
        "cmp    %2, %1 \n\t" // compare a with b
        "cmovg  %1, %0 \n\t" // i = max(a, b)
        "cmovg  %2, %1 \n\t" // a = min(a, b)
        "cmp    %3, %1 \n\t" // compare min(a, b) with c
        "cmovl  %3, %1 \n\t" // a = max(min(a, b), c)
        "cmp    %1, %0 \n\t" // compare the two partial results
        "cmovg  %1, %0 \n\t" // i = the smaller one, i.e. the median
        :"+&r"(i), "+&r"(a)
        :"r"(b), "r"(c)
    );
    return i;
}
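/* The cmov sequence above is a branchless form of, roughly,
 * FFMIN(FFMAX(a, b), FFMAX(FFMIN(a, b), c)). */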
#endif

#if HAVE_CMOV
#define COPY3_IF_LT(x, y, a, b, c, d)\
__asm__ volatile(\
    "cmpl  %0, %3       \n\t"\
    "cmovl %3, %0       \n\t"\
    "cmovl %4, %1       \n\t"\
    "cmovl %5, %2       \n\t"\
    : "+&r" (x), "+&r" (a), "+r" (c)\
    : "r" (y), "r" (b), "r" (d)\
);
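/* Roughly equivalent to
 *   if ((y) < (x)) { (x) = (y); (a) = (b); (c) = (d); }
 * with a single cmpl setting the flags that all three cmovl reuse. */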
#endif

// shift right by (32 - s) without computing the "+32": x86 masks shift counts
// to 5 bits, so -s is equivalent (gcc should do that optimization itself ...)
#define NEG_SSR32 NEG_SSR32
static inline  int32_t NEG_SSR32( int32_t a, int8_t s){
    __asm__ ("sarl %1, %0\n\t"
         : "+r" (a)
         : "ic" ((uint8_t)(-s))
    );
    return a;
}
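/* i.e. roughly the arithmetic shift (a) >> (32 - (s)). */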

#define NEG_USR32 NEG_USR32
static inline uint32_t NEG_USR32(uint32_t a, int8_t s){
    __asm__ ("shrl %1, %0\n\t"
         : "+r" (a)
         : "ic" ((uint8_t)(-s))
    );
    return a;
}
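/* i.e. roughly the logical shift (uint32_t)(a) >> (32 - (s)). */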

#endif /* AVCODEC_X86_MATHOPS_H */