smmintrin.h revision 204962
/*===---- smmintrin.h - SSE intrinsics -------------------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef _SMMINTRIN_H
#define _SMMINTRIN_H

#ifndef __SSE4_1__
#error "SSE4.1 instruction set not enabled"
#else

#include <tmmintrin.h>

/* Type defines. */
typedef double __v2df __attribute__ ((__vector_size__ (16)));
typedef long long __v2di __attribute__ ((__vector_size__ (16)));

/* SSE4 Rounding macros. */
#define _MM_FROUND_TO_NEAREST_INT    0x00
#define _MM_FROUND_TO_NEG_INF        0x01
#define _MM_FROUND_TO_POS_INF        0x02
#define _MM_FROUND_TO_ZERO           0x03
#define _MM_FROUND_CUR_DIRECTION     0x04

#define _MM_FROUND_RAISE_EXC         0x00
#define _MM_FROUND_NO_EXC            0x08

#define _MM_FROUND_NINT      (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
#define _MM_FROUND_FLOOR     (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
#define _MM_FROUND_CEIL      (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
#define _MM_FROUND_TRUNC     (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
#define _MM_FROUND_RINT      (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
#define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION)

#define _mm_ceil_ps(X)     _mm_round_ps((X), _MM_FROUND_CEIL)
#define _mm_ceil_pd(X)     _mm_round_pd((X), _MM_FROUND_CEIL)
#define _mm_ceil_ss(X, Y)  _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
#define _mm_ceil_sd(X, Y)  _mm_round_sd((X), (Y), _MM_FROUND_CEIL)

#define _mm_floor_ps(X)    _mm_round_ps((X), _MM_FROUND_FLOOR)
#define _mm_floor_pd(X)    _mm_round_pd((X), _MM_FROUND_FLOOR)
#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)

#define _mm_round_ps(X, M)    __builtin_ia32_roundps((X), (M))
#define _mm_round_ss(X, Y, M) __builtin_ia32_roundss((X), (Y), (M))
#define _mm_round_pd(X, M)    __builtin_ia32_roundpd((X), (M))
#define _mm_round_sd(X, Y, M) __builtin_ia32_roundsd((X), (Y), (M))
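/* A minimal usage sketch for the rounding macros, assuming SSE4.1 codegen is
 * enabled (e.g. -msse4.1); the variable names are illustrative only:
 *
 *   __m128 __v     = _mm_set_ps(2.5f, -1.5f, 0.7f, -0.2f);
 *   __m128 __down  = _mm_floor_ps(__v);                    // per-lane floor
 *   __m128 __trunc = _mm_round_ps(__v, _MM_FROUND_TRUNC);  // toward zero
 *
 * The scalar forms (_mm_round_ss, _mm_round_sd) round only the lowest lane
 * of Y and copy the remaining lanes of the result from X.
 */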
/* SSE4 Packed Blending Intrinsics. */
static inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm_blend_pd (__m128d __V1, __m128d __V2, const int __M)
{
  return (__m128d) __builtin_ia32_blendpd ((__v2df)__V1, (__v2df)__V2, __M);
}

static inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_blend_ps (__m128 __V1, __m128 __V2, const int __M)
{
  return (__m128) __builtin_ia32_blendps ((__v4sf)__V1, (__v4sf)__V2, __M);
}

static inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
{
  return (__m128d) __builtin_ia32_blendvpd ((__v2df)__V1, (__v2df)__V2,
                                            (__v2df)__M);
}

static inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
{
  return (__m128) __builtin_ia32_blendvps ((__v4sf)__V1, (__v4sf)__V2,
                                           (__v4sf)__M);
}

static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
{
  return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__V1, (__v16qi)__V2,
                                               (__v16qi)__M);
}

static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_blend_epi16 (__m128i __V1, __m128i __V2, const int __M)
{
  return (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__V1, (__v8hi)__V2, __M);
}

/* SSE4 Dword Multiply Instructions. */
static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mullo_epi32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmulld128 ((__v4si)__V1, (__v4si)__V2);
}

static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mul_epi32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__V1, (__v4si)__V2);
}

/* SSE4 Floating Point Dot Product Instructions. */
#define _mm_dp_ps(X, Y, M) __builtin_ia32_dpps ((X), (Y), (M))
#define _mm_dp_pd(X, Y, M) __builtin_ia32_dppd ((X), (Y), (M))
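/* A minimal usage sketch for the blend and dot-product forms; variable names
 * are illustrative only.  _mm_blendv_ps takes each lane from __V2 where the
 * sign bit of the corresponding mask lane is set, otherwise from __V1.  For
 * _mm_dp_ps, the high nibble of the immediate selects which lanes enter the
 * dot product and the low nibble selects which result lanes receive the sum,
 * so 0xFF sums all four products and broadcasts the result:
 *
 *   __m128 __a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
 *   __m128 __b = _mm_set_ps(8.0f, 7.0f, 6.0f, 5.0f);
 *   __m128 __d = _mm_dp_ps(__a, __b, 0xFF);   // every lane holds 70.0f
 */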
/* SSE4 Streaming Load Hint Instruction. */
static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_stream_load_si128 (__m128i *__V)
{
  return (__m128i) __builtin_ia32_movntdqa ((__v2di *) __V);
}

/* SSE4 Packed Integer Min/Max Instructions. */
static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epi8 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pminsb128 ((__v16qi) __V1, (__v16qi) __V2);
}

static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epi8 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi) __V1, (__v16qi) __V2);
}

static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epu16 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pminuw128 ((__v8hi) __V1, (__v8hi) __V2);
}

static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epu16 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi) __V1, (__v8hi) __V2);
}

static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epi32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pminsd128 ((__v4si) __V1, (__v4si) __V2);
}

static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epi32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si) __V1, (__v4si) __V2);
}

static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epu32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pminud128 ((__v4si) __V1, (__v4si) __V2);
}

static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epu32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmaxud128 ((__v4si) __V1, (__v4si) __V2);
}

/* SSE4 Insertion and Extraction from XMM Register Instructions. */
#define _mm_insert_ps(X, Y, N) __builtin_ia32_insertps128((X), (Y), (N))
#define _mm_extract_ps(X, N) (__extension__                      \
                              ({ union { int i; float f; } __t;  \
                                 __v4sf __a = (__v4sf)(X);       \
                                 __t.f = __a[N];                 \
                                 __t.i;}))

/* Miscellaneous insert and extract macros. */
/* Extract a single-precision float from X at index N into D. */
#define _MM_EXTRACT_FLOAT(D, X, N) (__extension__ ({ __v4sf __a = (__v4sf)(X); \
                                                     (D) = __a[N]; }))

/* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create
   an index suitable for _mm_insert_ps. */
#define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z))

/* Extract a float from X at index N into the first index of the return. */
#define _MM_PICK_OUT_PS(X, N) _mm_insert_ps (_mm_setzero_ps(), (X), \
                                             _MM_MK_INSERTPS_NDX((N), 0, 0x0e))

#endif /* __SSE4_1__ */

#endif /* _SMMINTRIN_H */
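/* A minimal usage sketch for the insert/extract macros above; __x and __y
 * are illustrative vector variables.  The immediate built by
 * _MM_MK_INSERTPS_NDX places the source lane index (from Y) in bits 7:6,
 * the destination lane index (in X) in bits 5:4, and a zero-mask for the
 * result lanes in bits 3:0.  Copying lane 2 of __y into lane 0 of __x
 * without zeroing anything:
 *
 *   __m128 __r = _mm_insert_ps(__x, __y, _MM_MK_INSERTPS_NDX(2, 0, 0x0));
 *
 * Similarly, _MM_PICK_OUT_PS(__x, 2) returns lane 2 of __x in lane 0 with
 * the other three lanes zeroed (zero-mask 0x0e = 0b1110).
 */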