/*===---- smmintrin.h - SSE intrinsics -------------------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef _SMMINTRIN_H
#define _SMMINTRIN_H

#ifndef __SSE4_1__
#error "SSE4.1 instruction set not enabled"
#else

#include <tmmintrin.h>

/* Type defines.  */
typedef double __v2df __attribute__ ((__vector_size__ (16)));

/* SSE4 Rounding macros. */
#define _MM_FROUND_TO_NEAREST_INT    0x00
#define _MM_FROUND_TO_NEG_INF        0x01
#define _MM_FROUND_TO_POS_INF        0x02
#define _MM_FROUND_TO_ZERO           0x03
#define _MM_FROUND_CUR_DIRECTION     0x04

#define _MM_FROUND_RAISE_EXC         0x00
#define _MM_FROUND_NO_EXC            0x08

#define _MM_FROUND_NINT      (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
#define _MM_FROUND_FLOOR     (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
#define _MM_FROUND_CEIL      (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
#define _MM_FROUND_TRUNC     (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
#define _MM_FROUND_RINT      (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
#define _MM_FROUND_NEARBYINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)

#define _mm_ceil_ps(X)       _mm_round_ps((X), _MM_FROUND_CEIL)
#define _mm_ceil_pd(X)       _mm_round_pd((X), _MM_FROUND_CEIL)
#define _mm_ceil_ss(X, Y)    _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
#define _mm_ceil_sd(X, Y)    _mm_round_sd((X), (Y), _MM_FROUND_CEIL)

#define _mm_floor_ps(X)      _mm_round_ps((X), _MM_FROUND_FLOOR)
#define _mm_floor_pd(X)      _mm_round_pd((X), _MM_FROUND_FLOOR)
#define _mm_floor_ss(X, Y)   _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
#define _mm_floor_sd(X, Y)   _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
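
/* Usage sketch (illustrative; not part of the original header): the ceil/floor
 * macros round every element of a vector by expanding to _mm_round_* with a
 * fixed mode.  Floor rounds toward negative infinity in each lane:
 *
 *   __m128 v = _mm_set_ps(2.5f, -1.5f, 0.75f, -0.25f);
 *   __m128 f = _mm_floor_ps(v);   // {2.0f, -2.0f, 0.0f, -1.0f} in set order
 */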

/* SSE4 Rounding Intrinsics.  */
static inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_round_ps (__m128 __V, const int __M)
{
  return (__m128) __builtin_ia32_roundps ((__v4sf)__V, __M);
}

static inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_round_ss (__m128 __V1, __m128 __V2, const int __M)
{
  return (__m128) __builtin_ia32_roundss ((__v4sf)__V1, (__v4sf)__V2, __M);
}

static inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm_round_pd (__m128d __V, const int __M)
{
  return (__m128d) __builtin_ia32_roundpd ((__v2df)__V, __M);
}

static inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm_round_sd(__m128d __V1, __m128d __V2, const int __M)
{
  return (__m128d) __builtin_ia32_roundsd ((__v2df)__V1, (__v2df)__V2, __M);
}
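
/* Usage sketch (illustrative; not part of the original header): a rounding
 * mode can also be passed explicitly instead of using the convenience macros.
 * The scalar variants round only the low element of __V2 and copy the upper
 * element(s) from __V1:
 *
 *   __m128d hi = _mm_set_pd(9.0, 9.0);
 *   __m128d lo = _mm_set_pd(0.0, -2.7);
 *   __m128d r  = _mm_round_sd(hi, lo,
 *                             _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
 *   // r = {hi: 9.0, lo: -2.0}
 */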

/* SSE4 Packed Blending Intrinsics.  */
static inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm_blend_pd (__m128d __V1, __m128d __V2, const int __M)
{
  return (__m128d) __builtin_ia32_blendpd ((__v2df)__V1, (__v2df)__V2, __M);
}

static inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_blend_ps (__m128 __V1, __m128 __V2, const int __M)
{
  return (__m128) __builtin_ia32_blendps ((__v4sf)__V1, (__v4sf)__V2, __M);
}
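
/* Usage sketch (illustrative; not part of the original header): in the
 * immediate blends, bit i of the mask selects element i from __V2; clear
 * bits keep the corresponding element of __V1:
 *
 *   __m128d a = _mm_set_pd(1.0, 2.0);        // a = {hi: 1.0, lo: 2.0}
 *   __m128d b = _mm_set_pd(3.0, 4.0);        // b = {hi: 3.0, lo: 4.0}
 *   __m128d r = _mm_blend_pd(a, b, 0x1);     // r = {hi: 1.0, lo: 4.0}
 */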

static inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
{
  return (__m128d) __builtin_ia32_blendvpd ((__v2df)__V1, (__v2df)__V2,
                                            (__v2df)__M);
}

static inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
{
  return (__m128) __builtin_ia32_blendvps ((__v4sf)__V1, (__v4sf)__V2,
                                           (__v4sf)__M);
}

static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
{
  return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__V1, (__v16qi)__V2,
                                               (__v16qi)__M);
}
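
/* Usage sketch (illustrative; not part of the original header): the variable
 * blends select per element based on the most significant bit of the
 * corresponding mask element (the sign bit for the float/double forms), which
 * makes them a natural match for comparison results:
 *
 *   __m128 a = _mm_set_ps(1.0f, 2.0f, 3.0f, 4.0f);
 *   __m128 b = _mm_set_ps(5.0f, 6.0f, 7.0f, 8.0f);
 *   __m128 m = _mm_cmplt_ps(a, b);           // all-ones where a < b
 *   __m128 r = _mm_blendv_ps(a, b, m);       // b where a < b, else a
 */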

static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_blend_epi16 (__m128i __V1, __m128i __V2, const int __M)
{
  return (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__V1, (__v8hi)__V2, __M);
}
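
/* Usage sketch (illustrative; not part of the original header): each of the
 * eight immediate bits selects one 16-bit word, lowest bit = lowest word:
 *
 *   __m128i x = _mm_set1_epi16(0);
 *   __m128i y = _mm_set1_epi16(-1);
 *   __m128i r = _mm_blend_epi16(x, y, 0x0F); // low four words from y
 */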

#endif /* __SSE4_1__ */

#endif /* _SMMINTRIN_H */