/* avx512dqintrin.h, revision 283627 */
/*===---- avx512dqintrin.h - AVX512DQ intrinsics ---------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __IMMINTRIN_H
#error "Never use <avx512dqintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef __AVX512DQINTRIN_H
#define __AVX512DQINTRIN_H

31static __inline__ __m512i __attribute__ ((__always_inline__, __nodebug__))
32_mm512_mullo_epi64 (__m512i __A, __m512i __B) {
33  return (__m512i) ((__v8di) __A * (__v8di) __B);
34}
35
36static __inline__ __m512i __attribute__ ((__always_inline__, __nodebug__))
37_mm512_mask_mullo_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) {
38  return (__m512i) __builtin_ia32_pmullq512_mask ((__v8di) __A,
39              (__v8di) __B,
40              (__v8di) __W,
41              (__mmask8) __U);
42}
43
44static __inline__ __m512i __attribute__ ((__always_inline__, __nodebug__))
45_mm512_maskz_mullo_epi64 (__mmask8 __U, __m512i __A, __m512i __B) {
46  return (__m512i) __builtin_ia32_pmullq512_mask ((__v8di) __A,
47              (__v8di) __B,
48              (__v8di)
49              _mm512_setzero_si512 (),
50              (__mmask8) __U);
51}
52
53static __inline__ __m512d __attribute__ ((__always_inline__, __nodebug__))
54_mm512_xor_pd (__m512d __A, __m512d __B) {
55  return (__m512d) ((__v8di) __A ^ (__v8di) __B);
56}
57
58static __inline__ __m512d __attribute__ ((__always_inline__, __nodebug__))
59_mm512_mask_xor_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
60  return (__m512d) __builtin_ia32_xorpd512_mask ((__v8df) __A,
61             (__v8df) __B,
62             (__v8df) __W,
63             (__mmask8) __U);
64}
65
66static __inline__ __m512d __attribute__ ((__always_inline__, __nodebug__))
67_mm512_maskz_xor_pd (__mmask8 __U, __m512d __A, __m512d __B) {
68  return (__m512d) __builtin_ia32_xorpd512_mask ((__v8df) __A,
69             (__v8df) __B,
70             (__v8df)
71             _mm512_setzero_pd (),
72             (__mmask8) __U);
73}
74
75static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__))
76_mm512_xor_ps (__m512 __A, __m512 __B) {
77  return (__m512) ((__v16si) __A ^ (__v16si) __B);
78}
79
80static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__))
81_mm512_mask_xor_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
82  return (__m512) __builtin_ia32_xorps512_mask ((__v16sf) __A,
83            (__v16sf) __B,
84            (__v16sf) __W,
85            (__mmask16) __U);
86}
87
88static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__))
89_mm512_maskz_xor_ps (__mmask16 __U, __m512 __A, __m512 __B) {
90  return (__m512) __builtin_ia32_xorps512_mask ((__v16sf) __A,
91            (__v16sf) __B,
92            (__v16sf)
93            _mm512_setzero_ps (),
94            (__mmask16) __U);
95}
96
97static __inline__ __m512d __attribute__ ((__always_inline__, __nodebug__))
98_mm512_or_pd (__m512d __A, __m512d __B) {
99  return (__m512d) ((__v8di) __A | (__v8di) __B);
100}
101
102static __inline__ __m512d __attribute__ ((__always_inline__, __nodebug__))
103_mm512_mask_or_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
104  return (__m512d) __builtin_ia32_orpd512_mask ((__v8df) __A,
105            (__v8df) __B,
106            (__v8df) __W,
107            (__mmask8) __U);
108}
109
110static __inline__ __m512d __attribute__ ((__always_inline__, __nodebug__))
111_mm512_maskz_or_pd (__mmask8 __U, __m512d __A, __m512d __B) {
112  return (__m512d) __builtin_ia32_orpd512_mask ((__v8df) __A,
113            (__v8df) __B,
114            (__v8df)
115            _mm512_setzero_pd (),
116            (__mmask8) __U);
117}
118
119static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__))
120_mm512_or_ps (__m512 __A, __m512 __B) {
121  return (__m512) ((__v16si) __A | (__v16si) __B);
122}
123
124static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__))
125_mm512_mask_or_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
126  return (__m512) __builtin_ia32_orps512_mask ((__v16sf) __A,
127                 (__v16sf) __B,
128                 (__v16sf) __W,
129                 (__mmask16) __U);
130}
131
132static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__))
133_mm512_maskz_or_ps (__mmask16 __U, __m512 __A, __m512 __B) {
134  return (__m512) __builtin_ia32_orps512_mask ((__v16sf) __A,
135                 (__v16sf) __B,
136                 (__v16sf)
137                 _mm512_setzero_ps (),
138                 (__mmask16) __U);
139}
140
141static __inline__ __m512d __attribute__ ((__always_inline__, __nodebug__))
142_mm512_and_pd (__m512d __A, __m512d __B) {
143  return (__m512d) ((__v8di) __A & (__v8di) __B);
144}
145
146static __inline__ __m512d __attribute__ ((__always_inline__, __nodebug__))
147_mm512_mask_and_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
148  return (__m512d) __builtin_ia32_andpd512_mask ((__v8df) __A,
149             (__v8df) __B,
150             (__v8df) __W,
151             (__mmask8) __U);
152}
153
154static __inline__ __m512d __attribute__ ((__always_inline__, __nodebug__))
155_mm512_maskz_and_pd (__mmask8 __U, __m512d __A, __m512d __B) {
156  return (__m512d) __builtin_ia32_andpd512_mask ((__v8df) __A,
157             (__v8df) __B,
158             (__v8df)
159             _mm512_setzero_pd (),
160             (__mmask8) __U);
161}
162
163static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__))
164_mm512_and_ps (__m512 __A, __m512 __B) {
165  return (__m512) ((__v16si) __A & (__v16si) __B);
166}
167
168static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__))
169_mm512_mask_and_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
170  return (__m512) __builtin_ia32_andps512_mask ((__v16sf) __A,
171            (__v16sf) __B,
172            (__v16sf) __W,
173            (__mmask16) __U);
174}
175
176static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__))
177_mm512_maskz_and_ps (__mmask16 __U, __m512 __A, __m512 __B) {
178  return (__m512) __builtin_ia32_andps512_mask ((__v16sf) __A,
179            (__v16sf) __B,
180            (__v16sf)
181            _mm512_setzero_ps (),
182            (__mmask16) __U);
183}
184
185static __inline__ __m512d __attribute__ ((__always_inline__, __nodebug__))
186_mm512_andnot_pd (__m512d __A, __m512d __B) {
187  return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A,
188              (__v8df) __B,
189              (__v8df)
190              _mm512_setzero_pd (),
191              (__mmask8) -1);
192}
193
194static __inline__ __m512d __attribute__ ((__always_inline__, __nodebug__))
195_mm512_mask_andnot_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
196  return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A,
197              (__v8df) __B,
198              (__v8df) __W,
199              (__mmask8) __U);
200}
201
202static __inline__ __m512d __attribute__ ((__always_inline__, __nodebug__))
203_mm512_maskz_andnot_pd (__mmask8 __U, __m512d __A, __m512d __B) {
204  return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A,
205              (__v8df) __B,
206              (__v8df)
207              _mm512_setzero_pd (),
208              (__mmask8) __U);
209}
210
211static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__))
212_mm512_andnot_ps (__m512 __A, __m512 __B) {
213  return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A,
214             (__v16sf) __B,
215             (__v16sf)
216             _mm512_setzero_ps (),
217             (__mmask16) -1);
218}
219
220static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__))
221_mm512_mask_andnot_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
222  return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A,
223             (__v16sf) __B,
224             (__v16sf) __W,
225             (__mmask16) __U);
226}
227
228static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__))
229_mm512_maskz_andnot_ps (__mmask16 __U, __m512 __A, __m512 __B) {
230  return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A,
231             (__v16sf) __B,
232             (__v16sf)
233             _mm512_setzero_ps (),
234             (__mmask16) __U);
235}
236
#endif