/* Copyright (C) 2002-2015 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.0.  */

#ifndef _XMMINTRIN_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED

/* We need type definitions from the MMX header file.  */
#include <mmintrin.h>

/* Get _mm_malloc () and _mm_free ().  */
#include <mm_malloc.h>

/* Constants for use with _mm_prefetch.  */
enum _mm_hint
{
  /* _MM_HINT_ET is _MM_HINT_T with the third bit set.  */
  _MM_HINT_ET0 = 7,
  _MM_HINT_ET1 = 6,
  _MM_HINT_T0 = 3,
  _MM_HINT_T1 = 2,
  _MM_HINT_T2 = 1,
  _MM_HINT_NTA = 0
};

/* Loads one cache line from address P to a location "closer" to the
   processor.  The selector I specifies the type of prefetch operation.  */
#ifdef __OPTIMIZE__
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_prefetch (const void *__P, enum _mm_hint __I)
{
  __builtin_prefetch (__P, (__I & 0x4) >> 2, __I & 0x3);
}
#else
#define _mm_prefetch(P, I) \
  __builtin_prefetch ((P), (((I) & 0x4) >> 2), ((I) & 0x3))
#endif
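
/* Illustrative usage of the hint encoding above (the array and index are
   hypothetical): prefetch the next cache line of some data for a read that
   is expected soon.

     _mm_prefetch ((const char *) &__data[__i] + 64, _MM_HINT_T0);

   With _MM_HINT_T0 (3), the expansion passes rw = (3 & 0x4) >> 2 = 0 and
   locality = 3 & 0x3 = 3 to __builtin_prefetch; the _MM_HINT_ET* hints set
   the rw bit instead.  */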

#ifndef __SSE__
#pragma GCC push_options
#pragma GCC target("sse")
#define __DISABLE_SSE__
#endif /* __SSE__ */

/* The Intel API is flexible enough that we must allow aliasing with other
   vector types, and their scalar components.  */
typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));

/* Internal data types for implementing the intrinsics.  */
typedef float __v4sf __attribute__ ((__vector_size__ (16)));

/* Create a selector for use with the SHUFPS instruction.  */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
 (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))
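
/* For example, _MM_SHUFFLE (3, 2, 1, 0) == 0xe4 selects every element from
   its original position (the identity shuffle), while _MM_SHUFFLE (0, 1, 2, 3)
   == 0x1b reverses the element order, as _mm_loadr_ps below uses it.  */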

/* Bits in the MXCSR.  */
#define _MM_EXCEPT_MASK       0x003f
#define _MM_EXCEPT_INVALID    0x0001
#define _MM_EXCEPT_DENORM     0x0002
#define _MM_EXCEPT_DIV_ZERO   0x0004
#define _MM_EXCEPT_OVERFLOW   0x0008
#define _MM_EXCEPT_UNDERFLOW  0x0010
#define _MM_EXCEPT_INEXACT    0x0020

#define _MM_MASK_MASK         0x1f80
#define _MM_MASK_INVALID      0x0080
#define _MM_MASK_DENORM       0x0100
#define _MM_MASK_DIV_ZERO     0x0200
#define _MM_MASK_OVERFLOW     0x0400
#define _MM_MASK_UNDERFLOW    0x0800
#define _MM_MASK_INEXACT      0x1000

#define _MM_ROUND_MASK        0x6000
#define _MM_ROUND_NEAREST     0x0000
#define _MM_ROUND_DOWN        0x2000
#define _MM_ROUND_UP          0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000

#define _MM_FLUSH_ZERO_MASK   0x8000
#define _MM_FLUSH_ZERO_ON     0x8000
#define _MM_FLUSH_ZERO_OFF    0x0000

/* Create an undefined vector.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_undefined_ps (void)
{
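  /* The self-initialization below is intentional: it yields a vector with
     unspecified contents without reading any other object.  Compilers may
     diagnose it under -Winit-self.  */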
  __m128 __Y = __Y;
  return __Y;
}

/* Create a vector of zeros.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setzero_ps (void)
{
  return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
}

/* Perform the respective operation on the lower SPFP (single-precision
   floating-point) values of A and B; the upper three SPFP values are
   passed through from A.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mul_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_div_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rcp_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rsqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform the respective operation on the four SPFP values in A and B.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A + (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A - (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mul_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A * (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_div_ps (__m128 __A, __m128 __B)
{
  return (__m128) ((__v4sf)__A / (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rcp_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_rsqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
}

/* Perform logical bit-wise operations on 128-bit values.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_and_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_andnot_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andnps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_or_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_orps (__A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_xor_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_xorps (__A, __B);
}

/* Perform a comparison on the lower SPFP values of A and B.  If the
   comparison is true, place a mask of all ones in the result, otherwise a
   mask of zeros.  The upper three SPFP values are passed through from A.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmplt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmple_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpltss ((__v4sf) __B,
								(__v4sf)
								__A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpless ((__v4sf) __B,
								(__v4sf)
								__A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpneq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnlt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnle_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpngt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpnltss ((__v4sf) __B,
								 (__v4sf)
								 __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpnless ((__v4sf) __B,
								 (__v4sf)
								 __A));
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpunord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform a comparison on the four SPFP values of A and B.  For each
   element, if the comparison is true, place a mask of all ones in the
   result, otherwise a mask of zeros.  */

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmplt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmple_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpneq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnlt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnle_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpngt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpnge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpunord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
}
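
/* The all-ones / all-zeros masks produced by the packed comparisons combine
   with the bit-wise operations above to select elements without branching.
   A minimal sketch (the operand names are illustrative):

     __m128 __mask = _mm_cmplt_ps (__a, __b);
     __m128 __sel  = _mm_or_ps (_mm_and_ps (__mask, __a),
                                _mm_andnot_ps (__mask, __b));

   Each element of __sel is taken from __a where __a < __b and from __b
   otherwise (NaN handling aside).  */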

/* Compare the lower SPFP values of A and B and return 1 if true
   and 0 if false.  */

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_comineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_ucomineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
}

/* Convert the lower SPFP value to a 32-bit integer according to the current
   rounding mode.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si32 (__m128 __A)
{
  return __builtin_ia32_cvtss2si ((__v4sf) __A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_ss2si (__m128 __A)
{
  return _mm_cvtss_si32 (__A);
}

#ifdef __x86_64__
/* Convert the lower SPFP value to a 32-bit integer according to the
   current rounding mode.  */

/* Intel intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si64 (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_si64x (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}
#endif

/* Convert the two lower SPFP values to 32-bit integers according to the
   current rounding mode.  Return the integers in packed form.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_ps2pi (__m128 __A)
{
  return _mm_cvtps_pi32 (__A);
}

/* Truncate the lower SPFP value to a 32-bit integer.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si32 (__m128 __A)
{
  return __builtin_ia32_cvttss2si ((__v4sf) __A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtt_ss2si (__m128 __A)
{
  return _mm_cvttss_si32 (__A);
}

#ifdef __x86_64__
/* Truncate the lower SPFP value to a 32-bit integer.  */

/* Intel intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si64 (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttss_si64x (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}
#endif

/* Truncate the two lower SPFP values to 32-bit integers.  Return the
   integers in packed form.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvttps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtt_ps2pi (__m128 __A)
{
  return _mm_cvttps_pi32 (__A);
}
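
/* The _mm_cvt* conversions above honour the MXCSR rounding mode (round to
   nearest-even by default), while the _mm_cvtt* forms always truncate
   toward zero.  For example, under the default mode:

     _mm_cvtss_si32 (_mm_set_ss (2.7f))   == 3
     _mm_cvttss_si32 (_mm_set_ss (2.7f))  == 2
     _mm_cvtss_si32 (_mm_set_ss (2.5f))   == 2   (ties round to even)
*/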

/* Convert B to a SPFP value and insert it as element zero in A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi32_ss (__m128 __A, int __B)
{
  return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_si2ss (__m128 __A, int __B)
{
  return _mm_cvtsi32_ss (__A, __B);
}

#ifdef __x86_64__
/* Convert B to a SPFP value and insert it as element zero in A.  */

/* Intel intrinsic.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}

/* Microsoft intrinsic.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtsi64x_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}
#endif

/* Convert the two 32-bit values in B to SPFP form and insert them
   as the two lower elements in A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi32_ps (__m128 __A, __m64 __B)
{
  return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvt_pi2ps (__m128 __A, __m64 __B)
{
  return _mm_cvtpi32_ps (__A, __B);
}

/* Convert the four signed 16-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi16_ps (__m64 __A)
{
  __v4hi __sign;
  __v2si __hisi, __losi;
  __v4sf __zero, __ra, __rb;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A);

  /* Convert the four words to doublewords.  */
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);

  /* Convert the doublewords to floating point two at a time.  */
  __zero = (__v4sf) _mm_setzero_ps ();
  __ra = __builtin_ia32_cvtpi2ps (__zero, __losi);
  __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi);

  return (__m128) __builtin_ia32_movlhps (__ra, __rb);
}

/* Convert the four unsigned 16-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpu16_ps (__m64 __A)
{
  __v2si __hisi, __losi;
  __v4sf __zero, __ra, __rb;

  /* Convert the four words to doublewords.  */
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL);
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL);

  /* Convert the doublewords to floating point two at a time.  */
  __zero = (__v4sf) _mm_setzero_ps ();
  __ra = __builtin_ia32_cvtpi2ps (__zero, __losi);
  __rb = __builtin_ia32_cvtpi2ps (__ra, __hisi);

  return (__m128) __builtin_ia32_movlhps (__ra, __rb);
}

/* Convert the low four signed 8-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi8_ps (__m64 __A)
{
  __v8qi __sign;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A);

  /* Convert the four low bytes to words.  */
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);

  return _mm_cvtpi16_ps(__A);
}

/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpu8_ps(__m64 __A)
{
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL);
  return _mm_cvtpu16_ps(__A);
}

/* Convert the four signed 32-bit values in A and B to SPFP form.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
{
  __v4sf __zero = (__v4sf) _mm_setzero_ps ();
  __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
  __v4sf __sfb = __builtin_ia32_cvtpi2ps (__sfa, (__v2si)__B);
  return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
}

/* Convert the four SPFP values in A to four signed 16-bit integers.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi16(__m128 __A)
{
  __v4sf __hisf = (__v4sf)__A;
  __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
  __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
  __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
  return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
}

/* Convert the four SPFP values in A to four signed 8-bit integers.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtps_pi8(__m128 __A)
{
  __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
  return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL);
}

/* Selects four specific SPFP values from A and B based on MASK.  */
#ifdef __OPTIMIZE__
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask)
{
  return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
}
#else
#define _mm_shuffle_ps(A, B, MASK)					\
  ((__m128) __builtin_ia32_shufps ((__v4sf)(__m128)(A),			\
				   (__v4sf)(__m128)(B), (int)(MASK)))
#endif
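
/* For example, _mm_shuffle_ps (__a, __a, _MM_SHUFFLE (0, 0, 0, 0)) broadcasts
   element 0 of __a across all four lanes, and
   _mm_shuffle_ps (__a, __a, _MM_SHUFFLE (1, 0, 3, 2)) swaps its two 64-bit
   halves; the two low result elements always come from the first operand and
   the two high result elements from the second.  */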

/* Selects and interleaves the upper two SPFP values from A and B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpackhi_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
}

/* Selects and interleaves the lower two SPFP values from A and B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpacklo_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the upper two SPFP values with 64 bits of data loaded from P;
   the lower two values are passed through from A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadh_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (const __v2sf *)__P);
}

/* Stores the upper two SPFP values of A into P.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storeh_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storehps ((__v2sf *)__P, (__v4sf)__A);
}

/* Moves the upper two values of B into the lower two values of A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movehl_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
}

/* Moves the lower two values of B into the upper two values of A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movelh_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the lower two SPFP values with 64 bits of data loaded from P;
   the upper two values are passed through from A.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadl_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (const __v2sf *)__P);
}

/* Stores the lower two SPFP values of A into P.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storel_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storelps ((__v2sf *)__P, (__v4sf)__A);
}

/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movemask_ps (__m128 __A)
{
  return __builtin_ia32_movmskps ((__v4sf)__A);
}
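
/* A common idiom is to branch on the mask of a packed comparison; for
   example (with illustrative operands),

     _mm_movemask_ps (_mm_cmplt_ps (__a, __b)) == 0xf

   holds exactly when every element of __a is less than the corresponding
   element of __b.  */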

/* Return the contents of the control register.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_getcsr (void)
{
  return __builtin_ia32_stmxcsr ();
}

/* Read exception bits from the control register.  */
extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_EXCEPTION_STATE (void)
{
  return _mm_getcsr() & _MM_EXCEPT_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_EXCEPTION_MASK (void)
{
  return _mm_getcsr() & _MM_MASK_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_ROUNDING_MODE (void)
{
  return _mm_getcsr() & _MM_ROUND_MASK;
}

extern __inline unsigned int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_GET_FLUSH_ZERO_MODE (void)
{
  return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
}

/* Set the control register to I.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setcsr (unsigned int __I)
{
  __builtin_ia32_ldmxcsr (__I);
}

/* Set exception bits in the control register.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_EXCEPTION_STATE(unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_EXCEPTION_MASK (unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_ROUNDING_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
}
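
/* A typical pattern is to switch a field of the MXCSR for a region of code
   and then restore the previous setting, e.g. (sketch only):

     unsigned int __saved = _MM_GET_ROUNDING_MODE ();
     _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO);
     ...
     _MM_SET_ROUNDING_MODE (__saved);
*/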

/* Create a vector with element 0 as F and the rest zero.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ss (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f };
}

/* Create a vector with all four elements equal to F.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set1_ps (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ps1 (float __F)
{
  return _mm_set1_ps (__F);
}

/* Create a vector with element 0 as *P and the rest zero.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ss (float const *__P)
{
  return _mm_set_ss (*__P);
}

/* Create a vector with all four elements equal to *P.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load1_ps (float const *__P)
{
  return _mm_set1_ps (*__P);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ps1 (float const *__P)
{
  return _mm_load1_ps (__P);
}

/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_load_ps (float const *__P)
{
  return (__m128) *(__v4sf *)__P;
}

/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadu_ps (float const *__P)
{
  return (__m128) __builtin_ia32_loadups (__P);
}

/* Load four SPFP values in reverse order.  The address must be aligned.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_loadr_ps (float const *__P)
{
  __v4sf __tmp = *(__v4sf *)__P;
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
}

/* Create the vector [Z Y X W].  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
{
  return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
}

/* Create the vector [W X Y Z].  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setr_ps (float __Z, float __Y, float __X, float __W)
{
  return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
}
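
/* Note the argument order: _mm_set_ps lists elements from highest to lowest,
   _mm_setr_ps from lowest to highest, so

     _mm_set_ps (3.0f, 2.0f, 1.0f, 0.0f)  and  _mm_setr_ps (0.0f, 1.0f, 2.0f, 3.0f)

   both build a vector whose element 0 is 0.0f and whose element 3 is 3.0f.  */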

/* Stores the lower SPFP value.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ss (float *__P, __m128 __A)
{
  *__P = ((__v4sf)__A)[0];
}

extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cvtss_f32 (__m128 __A)
{
  return ((__v4sf)__A)[0];
}

/* Store four SPFP values.  The address must be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ps (float *__P, __m128 __A)
{
  *(__v4sf *)__P = (__v4sf)__A;
}

/* Store four SPFP values.  The address need not be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storeu_ps (float *__P, __m128 __A)
{
  __builtin_ia32_storeups (__P, (__v4sf)__A);
}

/* Store the lower SPFP value across four words.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store1_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
  _mm_storeu_ps (__P, __tmp);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_store_ps1 (float *__P, __m128 __A)
{
  _mm_store1_ps (__P, __A);
}

/* Store four SPFP values in reverse order.  The address must be aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_storer_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
  _mm_store_ps (__P, __tmp);
}

/* Sets the low SPFP value of A from the low value of B.  */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_move_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B);
}

/* Extracts one of the four words of A.  The selector N must be immediate.  */
#ifdef __OPTIMIZE__
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_pi16 (__m64 const __A, int const __N)
{
  return __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pextrw (__m64 const __A, int const __N)
{
  return _mm_extract_pi16 (__A, __N);
}
#else
#define _mm_extract_pi16(A, N)	\
  ((int) __builtin_ia32_vec_ext_v4hi ((__v4hi)(__m64)(A), (int)(N)))

#define _m_pextrw(A, N) _mm_extract_pi16(A, N)
#endif

/* Inserts word D into one of four words of A.  The selector N must be
   immediate.  */
#ifdef __OPTIMIZE__
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
{
  return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pinsrw (__m64 const __A, int const __D, int const __N)
{
  return _mm_insert_pi16 (__A, __D, __N);
}
#else
#define _mm_insert_pi16(A, D, N)				\
  ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(__m64)(A),	\
					(int)(D), (int)(N)))

#define _m_pinsrw(A, D, N) _mm_insert_pi16(A, D, N)
#endif

/* Compute the element-wise maximum of signed 16-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmaxsw (__m64 __A, __m64 __B)
{
  return _mm_max_pi16 (__A, __B);
}

/* Compute the element-wise maximum of unsigned 8-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_max_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmaxub (__m64 __A, __m64 __B)
{
  return _mm_max_pu8 (__A, __B);
}

/* Compute the element-wise minimum of signed 16-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pminsw (__m64 __A, __m64 __B)
{
  return _mm_min_pi16 (__A, __B);
}

/* Compute the element-wise minimum of unsigned 8-bit values.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_min_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pminub (__m64 __A, __m64 __B)
{
  return _mm_min_pu8 (__A, __B);
}

/* Create an 8-bit mask of the signs of 8-bit values.  */
extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movemask_pi8 (__m64 __A)
{
  return __builtin_ia32_pmovmskb ((__v8qi)__A);
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmovmskb (__m64 __A)
{
  return _mm_movemask_pi8 (__A);
}

/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
   in B and produce the high 16 bits of the 32-bit results.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mulhi_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pmulhuw (__m64 __A, __m64 __B)
{
  return _mm_mulhi_pu16 (__A, __B);
}

/* Return a combination of the four 16-bit values in A.  The selector
   must be an immediate.  */
#ifdef __OPTIMIZE__
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shuffle_pi16 (__m64 __A, int const __N)
{
  return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pshufw (__m64 __A, int const __N)
{
  return _mm_shuffle_pi16 (__A, __N);
}
#else
#define _mm_shuffle_pi16(A, N) \
  ((__m64) __builtin_ia32_pshufw ((__v4hi)(__m64)(A), (int)(N)))

#define _m_pshufw(A, N) _mm_shuffle_pi16 (A, N)
#endif

/* Conditionally store byte elements of A into P.  The high bit of each
   byte in the selector N determines whether the corresponding byte from
   A is stored.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
{
  __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
}

extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_maskmovq (__m64 __A, __m64 __N, char *__P)
{
  _mm_maskmove_si64 (__A, __N, __P);
}
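
/* A common way to form the selector is a byte comparison; for example
   (a sketch with illustrative operands),

     _mm_maskmove_si64 (__src, _mm_cmpeq_pi8 (__sel, _mm_setzero_si64 ()), __p);

   stores only those bytes of __src whose corresponding byte in __sel is
   zero, leaving the other bytes at __p untouched.  */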

/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_avg_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pavgb (__m64 __A, __m64 __B)
{
  return _mm_avg_pu8 (__A, __B);
}

/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_avg_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_pavgw (__m64 __A, __m64 __B)
{
  return _mm_avg_pu16 (__A, __B);
}

/* Compute the sum of the absolute differences of the unsigned 8-bit
   values in A and B.  Return the value in the lower 16-bit word; the
   upper words are cleared.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sad_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
}

extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_m_psadbw (__m64 __A, __m64 __B)
{
  return _mm_sad_pu8 (__A, __B);
}
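
/* For example, if the bytes of A are 1,2,3,4,5,6,7,8 and those of B are
   8,7,6,5,4,3,2,1, the absolute differences are 7,5,3,1,1,3,5,7 and the
   result holds their sum, 32, in its low 16 bits with the rest zero.  */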

/* Stores the data in A to the address P without polluting the caches.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_pi (__m64 *__P, __m64 __A)
{
  __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
}

/* Likewise.  The address must be 16-byte aligned.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_stream_ps (float *__P, __m128 __A)
{
  __builtin_ia32_movntps (__P, (__v4sf)__A);
}

/* Guarantees that every preceding store is globally visible before
   any subsequent store.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sfence (void)
{
  __builtin_ia32_sfence ();
}

/* Transpose the 4x4 matrix composed of row[0-3].  */
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3)			\
do {									\
  __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3);	\
  __v4sf __t0 = __builtin_ia32_unpcklps (__r0, __r1);			\
  __v4sf __t1 = __builtin_ia32_unpcklps (__r2, __r3);			\
  __v4sf __t2 = __builtin_ia32_unpckhps (__r0, __r1);			\
  __v4sf __t3 = __builtin_ia32_unpckhps (__r2, __r3);			\
  (row0) = __builtin_ia32_movlhps (__t0, __t1);				\
  (row1) = __builtin_ia32_movhlps (__t1, __t0);				\
  (row2) = __builtin_ia32_movlhps (__t2, __t3);				\
  (row3) = __builtin_ia32_movhlps (__t3, __t2);				\
} while (0)
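
/* Typical use (a sketch; __m is a hypothetical 16-float array holding the
   matrix in row-major order):

     __m128 __row0 = _mm_load_ps (&__m[0]);
     __m128 __row1 = _mm_load_ps (&__m[4]);
     __m128 __row2 = _mm_load_ps (&__m[8]);
     __m128 __row3 = _mm_load_ps (&__m[12]);
     _MM_TRANSPOSE4_PS (__row0, __row1, __row2, __row3);

   Afterwards __row0 holds the original column 0, __row1 column 1, and so on.  */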

/* For backward source compatibility.  */
# include <emmintrin.h>

#ifdef __DISABLE_SSE__
#undef __DISABLE_SSE__
#pragma GCC pop_options
#endif /* __DISABLE_SSE__ */

/* The execution of the next instruction is delayed by an
   implementation-specific amount of time.  The instruction does not modify
   the architectural state.  This is after the pop_options pragma because it
   does not require SSE support in the processor--the encoding is a nop on
   processors that do not support it.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_pause (void)
{
  __builtin_ia32_pause ();
}

#endif /* _XMMINTRIN_H_INCLUDED */