/* Copyright (C) 2002 Free Software Foundation, Inc.

   This file is part of GNU CC.

   GNU CC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GNU CC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GNU CC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 5.0.  */

#ifndef _XMMINTRIN_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED

#ifndef __SSE__
# error "SSE instruction set not enabled"
#else

/* We need type definitions from the MMX header file.  */
#include <mmintrin.h>

/* The data type intended for user use.  */
typedef int __m128 __attribute__ ((__mode__(__V4SF__)));

/* Internal data types for implementing the intrinsics.  */
typedef int __v4sf __attribute__ ((__mode__(__V4SF__)));
typedef int __v4si __attribute__ ((__mode__(__V4SI__)));

/* Create a selector for use with the SHUFPS instruction.  */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
 (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))
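/* For example, _MM_SHUFFLE (3,2,1,0) encodes to 0xE4, the identity
   selector, while _MM_SHUFFLE (0,1,2,3) encodes to
   (0 << 6) | (1 << 4) | (2 << 2) | 3 == 0x1B, which reverses the
   four elements.  */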

/* Constants for use with _mm_prefetch.  */
enum _mm_hint
{
  _MM_HINT_T0 = 3,
  _MM_HINT_T1 = 2,
  _MM_HINT_T2 = 1,
  _MM_HINT_NTA = 0
};

/* Bits in the MXCSR.  */
#define _MM_EXCEPT_MASK       0x003f
#define _MM_EXCEPT_INVALID    0x0001
#define _MM_EXCEPT_DENORM     0x0002
#define _MM_EXCEPT_DIV_ZERO   0x0004
#define _MM_EXCEPT_OVERFLOW   0x0008
#define _MM_EXCEPT_UNDERFLOW  0x0010
#define _MM_EXCEPT_INEXACT    0x0020

#define _MM_MASK_MASK         0x1f80
#define _MM_MASK_INVALID      0x0080
#define _MM_MASK_DENORM       0x0100
#define _MM_MASK_DIV_ZERO     0x0200
#define _MM_MASK_OVERFLOW     0x0400
#define _MM_MASK_UNDERFLOW    0x0800
#define _MM_MASK_INEXACT      0x1000

#define _MM_ROUND_MASK        0x6000
#define _MM_ROUND_NEAREST     0x0000
#define _MM_ROUND_DOWN        0x2000
#define _MM_ROUND_UP          0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000

#define _MM_FLUSH_ZERO_MASK   0x8000
#define _MM_FLUSH_ZERO_ON     0x8000
#define _MM_FLUSH_ZERO_OFF    0x0000

/* Perform the respective operation on the lower SPFP (single-precision
   floating-point) values of A and B; the upper three SPFP values are
   passed through from A.  */

static __inline __m128
_mm_add_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_sub_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_mul_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_div_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_sqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
}

static __inline __m128
_mm_rcp_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
}

static __inline __m128
_mm_rsqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
}

static __inline __m128
_mm_min_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_max_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform the respective operation on the four SPFP values in A and B.  */

static __inline __m128
_mm_add_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_sub_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_mul_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_div_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_sqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
}

static __inline __m128
_mm_rcp_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
}

static __inline __m128
_mm_rsqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
}

static __inline __m128
_mm_min_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_max_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
}

/* Perform logical bit-wise operations on 128-bit values.  */

static __inline __m128
_mm_and_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andps (__A, __B);
}

static __inline __m128
_mm_andnot_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andnps (__A, __B);
}

static __inline __m128
_mm_or_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_orps (__A, __B);
}

static __inline __m128
_mm_xor_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_xorps (__A, __B);
}

/* Perform a comparison on the lower SPFP values of A and B.  If the
   comparison is true, place a mask of all ones in the result, otherwise a
   mask of zeros.  The upper three SPFP values are passed through from A.  */

static __inline __m128
_mm_cmpeq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmplt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmple_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpgt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpltss ((__v4sf) __B,
								(__v4sf)
								__A));
}

static __inline __m128
_mm_cmpge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpless ((__v4sf) __B,
								(__v4sf)
								__A));
}

static __inline __m128
_mm_cmpneq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnlt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnle_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpngt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpnltss ((__v4sf) __B,
								 (__v4sf)
								 __A));
}

static __inline __m128
_mm_cmpnge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpnless ((__v4sf) __B,
								 (__v4sf)
								 __A));
}

static __inline __m128
_mm_cmpord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpunord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform a comparison on the four SPFP values of A and B.  For each
   element, if the comparison is true, place a mask of all ones in the
   result, otherwise a mask of zeros.  */

static __inline __m128
_mm_cmpeq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmplt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmple_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpgt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpneq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnlt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnle_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpngt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpunord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
}

/* Compare the lower SPFP values of A and B and return 1 if true
   and 0 if false.  */

static __inline int
_mm_comieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
}

/* Convert the lower SPFP value to a 32-bit integer according to the current
   rounding mode.  */
static __inline int
_mm_cvtss_si32 (__m128 __A)
{
  return __builtin_ia32_cvtss2si ((__v4sf) __A);
}

#ifdef __x86_64__
/* Convert the lower SPFP value to a 64-bit integer according to the current
   rounding mode.  */
static __inline long long
_mm_cvtss_si64x (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}
#endif

/* Convert the two lower SPFP values to 32-bit integers according to the
   current rounding mode.  Return the integers in packed form.  */
static __inline __m64
_mm_cvtps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
}

/* Truncate the lower SPFP value to a 32-bit integer.  */
static __inline int
_mm_cvttss_si32 (__m128 __A)
{
  return __builtin_ia32_cvttss2si ((__v4sf) __A);
}

#ifdef __x86_64__
/* Truncate the lower SPFP value to a 64-bit integer.  */
static __inline long long
_mm_cvttss_si64x (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}
#endif

/* Truncate the two lower SPFP values to 32-bit integers.  Return the
   integers in packed form.  */
static __inline __m64
_mm_cvttps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
}

/* Convert B to a SPFP value and insert it as element zero in A.  */
static __inline __m128
_mm_cvtsi32_ss (__m128 __A, int __B)
{
  return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
}

#ifdef __x86_64__
/* Convert B to a SPFP value and insert it as element zero in A.  */
static __inline __m128
_mm_cvtsi64x_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}
#endif

/* Convert the two 32-bit values in B to SPFP form and insert them
   as the two lower elements in A.  */
static __inline __m128
_mm_cvtpi32_ps (__m128 __A, __m64 __B)
{
  return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
}

/* Convert the four signed 16-bit values in A to SPFP form.  */
static __inline __m128
_mm_cvtpi16_ps (__m64 __A)
{
  __v4hi __sign;
  __v2si __hisi, __losi;
  __v4sf __r;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = (__v4hi) __builtin_ia32_mmx_zero ();
  __sign = __builtin_ia32_pcmpgtw (__sign, (__v4hi)__A);

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);

  /* Convert the doublewords to floating point two at a time.  */
  __r = (__v4sf) __builtin_ia32_setzerops ();
  __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
  __r = __builtin_ia32_movlhps (__r, __r);
  __r = __builtin_ia32_cvtpi2ps (__r, __losi);

  return (__m128) __r;
}
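/* As a worked instance of the sign trick above: for an element equal
   to -1, pcmpgtw (0, -1) yields the mask 0xffff, so unpacking the word
   against that mask produces the doubleword 0xffffffff, which is again
   -1; for a non-negative element the mask is zero and the unpack
   simply zero-extends.  */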

/* Convert the four unsigned 16-bit values in A to SPFP form.  */
static __inline __m128
_mm_cvtpu16_ps (__m64 __A)
{
  __v4hi __zero = (__v4hi) __builtin_ia32_mmx_zero ();
  __v2si __hisi, __losi;
  __v4sf __r;

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __zero);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __zero);

  /* Convert the doublewords to floating point two at a time.  */
  __r = (__v4sf) __builtin_ia32_setzerops ();
  __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
  __r = __builtin_ia32_movlhps (__r, __r);
  __r = __builtin_ia32_cvtpi2ps (__r, __losi);

  return (__m128) __r;
}

/* Convert the low four signed 8-bit values in A to SPFP form.  */
static __inline __m128
_mm_cvtpi8_ps (__m64 __A)
{
  __v8qi __sign;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = (__v8qi) __builtin_ia32_mmx_zero ();
  __sign = __builtin_ia32_pcmpgtb (__sign, (__v8qi)__A);

  /* Convert the four low bytes to words.  */
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);

  return _mm_cvtpi16_ps(__A);
}

/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
static __inline __m128
_mm_cvtpu8_ps(__m64 __A)
{
  __v8qi __zero = (__v8qi) __builtin_ia32_mmx_zero ();
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __zero);
  return _mm_cvtpu16_ps(__A);
}

/* Convert the four signed 32-bit values in A and B to SPFP form.  */
static __inline __m128
_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
{
  __v4sf __zero = (__v4sf) __builtin_ia32_setzerops ();
  __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
  __v4sf __sfb = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__B);
  return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
}

/* Convert the four SPFP values in A to four signed 16-bit integers.  */
static __inline __m64
_mm_cvtps_pi16(__m128 __A)
{
  __v4sf __hisf = (__v4sf)__A;
  __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
  __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
  __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
  return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
}

/* Convert the four SPFP values in A to four signed 8-bit integers.  */
static __inline __m64
_mm_cvtps_pi8(__m128 __A)
{
  __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
  __v4hi __zero = (__v4hi) __builtin_ia32_mmx_zero ();
  return (__m64) __builtin_ia32_packsswb (__tmp, __zero);
}

/* Selects four specific SPFP values from A and B based on MASK.  */
#if 0
static __inline __m128
_mm_shuffle_ps (__m128 __A, __m128 __B, int __mask)
{
  return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
}
#else
#define _mm_shuffle_ps(A, B, MASK) \
 ((__m128) __builtin_ia32_shufps ((__v4sf)(A), (__v4sf)(B), (MASK)))
#endif
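/* With SHUFPS the two low elements of the result are selected from A
   and the two high elements from B, so for example
   _mm_shuffle_ps (A, B, _MM_SHUFFLE (3,2,1,0)) yields
   {A[0], A[1], B[2], B[3]}.  */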


/* Selects and interleaves the upper two SPFP values from A and B.  */
static __inline __m128
_mm_unpackhi_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
}

/* Selects and interleaves the lower two SPFP values from A and B.  */
static __inline __m128
_mm_unpacklo_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the upper two SPFP values with 64-bits of data loaded from P;
   the lower two values are passed through from A.  */
static __inline __m128
_mm_loadh_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the upper two SPFP values of A into P.  */
static __inline void
_mm_storeh_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storehps ((__v2si *)__P, (__v4sf)__A);
}

/* Moves the upper two values of B into the lower two values of A.  */
static __inline __m128
_mm_movehl_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
}

/* Moves the lower two values of B into the upper two values of A.  */
static __inline __m128
_mm_movelh_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the lower two SPFP values with 64-bits of data loaded from P;
   the upper two values are passed through from A.  */
static __inline __m128
_mm_loadl_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the lower two SPFP values of A into P.  */
static __inline void
_mm_storel_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storelps ((__v2si *)__P, (__v4sf)__A);
}

/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
static __inline int
_mm_movemask_ps (__m128 __A)
{
  return __builtin_ia32_movmskps ((__v4sf)__A);
}
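/* Bit I of the result is the sign bit of element I of A; for example a
   vector whose four elements are all negative yields the mask 0xf.  */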

/* Return the contents of the control register.  */
static __inline unsigned int
_mm_getcsr (void)
{
  return __builtin_ia32_stmxcsr ();
}

/* Read exception bits from the control register.  */
static __inline unsigned int
_MM_GET_EXCEPTION_STATE (void)
{
  return _mm_getcsr() & _MM_EXCEPT_MASK;
}

static __inline unsigned int
_MM_GET_EXCEPTION_MASK (void)
{
  return _mm_getcsr() & _MM_MASK_MASK;
}

static __inline unsigned int
_MM_GET_ROUNDING_MODE (void)
{
  return _mm_getcsr() & _MM_ROUND_MASK;
}

static __inline unsigned int
_MM_GET_FLUSH_ZERO_MODE (void)
{
  return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
}

/* Set the control register to I.  */
static __inline void
_mm_setcsr (unsigned int __I)
{
  __builtin_ia32_ldmxcsr (__I);
}

/* Set exception bits in the control register.  */
static __inline void
_MM_SET_EXCEPTION_STATE(unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask);
}

static __inline void
_MM_SET_EXCEPTION_MASK (unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask);
}

static __inline void
_MM_SET_ROUNDING_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
}

static __inline void
_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
}
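/* A typical read-modify-write of the MXCSR with the accessors above,
   e.g. to compute with truncation and then restore the old mode:

     unsigned int __saved = _MM_GET_ROUNDING_MODE ();
     _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO);
     ...
     _MM_SET_ROUNDING_MODE (__saved);  */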

/* Create a vector with element 0 as *P and the rest zero.  */
static __inline __m128
_mm_load_ss (float const *__P)
{
  return (__m128) __builtin_ia32_loadss (__P);
}

/* Create a vector with all four elements equal to *P.  */
static __inline __m128
_mm_load1_ps (float const *__P)
{
  __v4sf __tmp = __builtin_ia32_loadss (__P);
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,0,0,0));
}

static __inline __m128
_mm_load_ps1 (float const *__P)
{
  return _mm_load1_ps (__P);
}

/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
static __inline __m128
_mm_load_ps (float const *__P)
{
  return (__m128) __builtin_ia32_loadaps (__P);
}

/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
static __inline __m128
_mm_loadu_ps (float const *__P)
{
  return (__m128) __builtin_ia32_loadups (__P);
}

/* Load four SPFP values in reverse order.  The address must be aligned.  */
static __inline __m128
_mm_loadr_ps (float const *__P)
{
  __v4sf __tmp = __builtin_ia32_loadaps (__P);
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
}

/* Create a vector with element 0 as F and the rest zero.  */
static __inline __m128
_mm_set_ss (float __F)
{
  return (__m128) __builtin_ia32_loadss (&__F);
}

/* Create a vector with all four elements equal to F.  */
static __inline __m128
_mm_set1_ps (float __F)
{
  __v4sf __tmp = __builtin_ia32_loadss (&__F);
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,0,0,0));
}

static __inline __m128
_mm_set_ps1 (float __F)
{
  return _mm_set1_ps (__F);
}

/* Create the vector [Z Y X W].  */
static __inline __m128
_mm_set_ps (float __Z, float __Y, float __X, float __W)
{
  union {
    float __a[4];
    __m128 __v;
  } __u;

  __u.__a[0] = __W;
  __u.__a[1] = __X;
  __u.__a[2] = __Y;
  __u.__a[3] = __Z;

  return __u.__v;
}
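/* Note the argument order above: __W becomes element 0, so storing
   _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f) with _mm_storeu_ps writes
   {1.0, 2.0, 3.0, 4.0} to memory, lowest address first.  */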

/* Create the vector [W X Y Z].  */
static __inline __m128
_mm_setr_ps (float __Z, float __Y, float __X, float __W)
{
  return _mm_set_ps (__W, __X, __Y, __Z);
}

/* Create a vector of zeros.  */
static __inline __m128
_mm_setzero_ps (void)
{
  return (__m128) __builtin_ia32_setzerops ();
}

/* Stores the lower SPFP value.  */
static __inline void
_mm_store_ss (float *__P, __m128 __A)
{
  __builtin_ia32_storess (__P, (__v4sf)__A);
}

/* Store the lower SPFP value across four words.  */
static __inline void
_mm_store1_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
  __builtin_ia32_storeaps (__P, __tmp);
}

static __inline void
_mm_store_ps1 (float *__P, __m128 __A)
{
  _mm_store1_ps (__P, __A);
}

/* Store four SPFP values.  The address must be 16-byte aligned.  */
static __inline void
_mm_store_ps (float *__P, __m128 __A)
{
  __builtin_ia32_storeaps (__P, (__v4sf)__A);
}

/* Store four SPFP values.  The address need not be 16-byte aligned.  */
static __inline void
_mm_storeu_ps (float *__P, __m128 __A)
{
  __builtin_ia32_storeups (__P, (__v4sf)__A);
}

/* Store four SPFP values in reverse order.  The address must be aligned.  */
static __inline void
_mm_storer_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
  __builtin_ia32_storeaps (__P, __tmp);
}

/* Sets the low SPFP value of A from the low value of B.  */
static __inline __m128
_mm_move_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B);
}

/* Extracts one of the four words of A.  The selector N must be immediate.  */
#if 0
static __inline int
_mm_extract_pi16 (__m64 __A, int __N)
{
  return __builtin_ia32_pextrw ((__v4hi)__A, __N);
}
#else
#define _mm_extract_pi16(A, N) \
  __builtin_ia32_pextrw ((__v4hi)(A), (N))
#endif

/* Inserts word D into one of four words of A.  The selector N must be
   immediate.  */
#if 0
static __inline __m64
_mm_insert_pi16 (__m64 __A, int __D, int __N)
{
  return (__m64)__builtin_ia32_pinsrw ((__v4hi)__A, __D, __N);
}
#else
#define _mm_insert_pi16(A, D, N) \
  ((__m64) __builtin_ia32_pinsrw ((__v4hi)(A), (D), (N)))
#endif

/* Compute the element-wise maximum of signed 16-bit values.  */
static __inline __m64
_mm_max_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
}

/* Compute the element-wise maximum of unsigned 8-bit values.  */
static __inline __m64
_mm_max_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
}

/* Compute the element-wise minimum of signed 16-bit values.  */
static __inline __m64
_mm_min_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
}

/* Compute the element-wise minimum of unsigned 8-bit values.  */
static __inline __m64
_mm_min_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
}

/* Create an 8-bit mask of the signs of 8-bit values.  */
static __inline int
_mm_movemask_pi8 (__m64 __A)
{
  return __builtin_ia32_pmovmskb ((__v8qi)__A);
}

/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
   in B and produce the high 16 bits of the 32-bit results.  */
static __inline __m64
_mm_mulhi_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
}

/* Return a combination of the four 16-bit values in A.  The selector
   must be an immediate.  */
#if 0
static __inline __m64
_mm_shuffle_pi16 (__m64 __A, int __N)
{
  return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
}
#else
#define _mm_shuffle_pi16(A, N) \
  ((__m64) __builtin_ia32_pshufw ((__v4hi)(A), (N)))
#endif

/* Conditionally store byte elements of A into P.  The high bit of each
   byte in the selector N determines whether the corresponding byte from
   A is stored.  */
static __inline void
_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
{
  __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
}

/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
static __inline __m64
_mm_avg_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
}

/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
static __inline __m64
_mm_avg_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
}

/* Compute the sum of the absolute differences of the unsigned 8-bit
   values in A and B.  Return the value in the lower 16-bit word; the
   upper words are cleared.  */
static __inline __m64
_mm_sad_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
}

/* Loads one cache line from address P to a location "closer" to the
   processor.  The selector I specifies the type of prefetch operation.  */
#if 0
static __inline void
_mm_prefetch (void *__P, enum _mm_hint __I)
{
  __builtin_prefetch (__P, 0, __I);
}
#else
#define _mm_prefetch(P, I) \
  __builtin_prefetch ((P), 0, (I))
#endif

/* Stores the data in A to the address P without polluting the caches.  */
static __inline void
_mm_stream_pi (__m64 *__P, __m64 __A)
{
  __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
}

/* Likewise.  The address must be 16-byte aligned.  */
static __inline void
_mm_stream_ps (float *__P, __m128 __A)
{
  __builtin_ia32_movntps (__P, (__v4sf)__A);
}

/* Guarantees that every preceding store is globally visible before
   any subsequent store.  */
static __inline void
_mm_sfence (void)
{
  __builtin_ia32_sfence ();
}

/* The execution of the next instruction is delayed by an
   implementation-specific amount of time.  The instruction does not
   modify the architectural state.  */
static __inline void
_mm_pause (void)
{
  __asm__ __volatile__ ("rep; nop" : : );
}
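/* "rep; nop" is the encoding of the PAUSE instruction, which earlier
   processors execute as an ordinary NOP, so this spin-wait hint is
   safe on every IA-32 implementation.  */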

/* Transpose the 4x4 matrix composed of row[0-3].  */
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3)			\
do {									\
  __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3);	\
  __v4sf __t0 = __builtin_ia32_shufps (__r0, __r1, 0x44);		\
  __v4sf __t2 = __builtin_ia32_shufps (__r0, __r1, 0xEE);		\
  __v4sf __t1 = __builtin_ia32_shufps (__r2, __r3, 0x44);		\
  __v4sf __t3 = __builtin_ia32_shufps (__r2, __r3, 0xEE);		\
  (row0) = __builtin_ia32_shufps (__t0, __t1, 0x88);			\
  (row1) = __builtin_ia32_shufps (__t0, __t1, 0xDD);			\
  (row2) = __builtin_ia32_shufps (__t2, __t3, 0x88);			\
  (row3) = __builtin_ia32_shufps (__t2, __t3, 0xDD);			\
} while (0)
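/* The hex selectors above are _MM_SHUFFLE values: 0x44 ==
   _MM_SHUFFLE (1,0,1,0) and 0xEE == _MM_SHUFFLE (3,2,3,2) gather the
   low and high halves of each row pair, then 0x88 ==
   _MM_SHUFFLE (2,0,2,0) and 0xDD == _MM_SHUFFLE (3,1,3,1) pick the
   even and odd elements to complete the transpose.  */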

#ifdef __SSE2__
/* SSE2 */
typedef int __v2df __attribute__ ((mode (V2DF)));
typedef int __v2di __attribute__ ((mode (V2DI)));
typedef int __v4si __attribute__ ((mode (V4SI)));
typedef int __v8hi __attribute__ ((mode (V8HI)));
typedef int __v16qi __attribute__ ((mode (V16QI)));

/* Create a selector for use with the SHUFPD instruction.  */
#define _MM_SHUFFLE2(fp1,fp0) \
 (((fp1) << 1) | (fp0))
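/* For example, _MM_SHUFFLE2 (0,1) encodes to 1: SHUFPD then selects
   element 1 of its first operand and element 0 of its second, which
   swaps the two halves when both operands are the same vector.  */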

#define __m128i __v2di
#define __m128d __v2df

/* Create a vector with element 0 as *P and the rest zero.  */
static __inline __m128d
_mm_load_sd (double const *__P)
{
  return (__m128d) __builtin_ia32_loadsd (__P);
}

/* Create a vector with both elements equal to *P.  */
static __inline __m128d
_mm_load1_pd (double const *__P)
{
  __v2df __tmp = __builtin_ia32_loadsd (__P);
  return (__m128d) __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,0));
}

static __inline __m128d
_mm_load_pd1 (double const *__P)
{
  return _mm_load1_pd (__P);
}

/* Load two DPFP values from P.  The address must be 16-byte aligned.  */
static __inline __m128d
_mm_load_pd (double const *__P)
{
  return (__m128d) __builtin_ia32_loadapd (__P);
}

/* Load two DPFP values from P.  The address need not be 16-byte aligned.  */
static __inline __m128d
_mm_loadu_pd (double const *__P)
{
  return (__m128d) __builtin_ia32_loadupd (__P);
}

/* Load two DPFP values in reverse order.  The address must be aligned.  */
static __inline __m128d
_mm_loadr_pd (double const *__P)
{
  __v2df __tmp = __builtin_ia32_loadapd (__P);
  return (__m128d) __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,1));
}

/* Create a vector with element 0 as F and the rest zero.  */
static __inline __m128d
_mm_set_sd (double __F)
{
  return (__m128d) __builtin_ia32_loadsd (&__F);
}

/* Create a vector with both elements equal to F.  */
static __inline __m128d
_mm_set1_pd (double __F)
{
  __v2df __tmp = __builtin_ia32_loadsd (&__F);
  return (__m128d) __builtin_ia32_shufpd (__tmp, __tmp, _MM_SHUFFLE2 (0,0));
}

static __inline __m128d
_mm_set_pd1 (double __F)
{
  return _mm_set1_pd (__F);
}

/* Create the vector [Z Y].  */
static __inline __m128d
_mm_set_pd (double __Z, double __Y)
{
  union {
    double __a[2];
    __m128d __v;
  } __u;

  __u.__a[0] = __Y;
  __u.__a[1] = __Z;

  return __u.__v;
}

/* Create the vector [Y Z].  */
static __inline __m128d
_mm_setr_pd (double __Z, double __Y)
{
  return _mm_set_pd (__Y, __Z);
}

/* Create a vector of zeros.  */
static __inline __m128d
_mm_setzero_pd (void)
{
  return (__m128d) __builtin_ia32_setzeropd ();
}

/* Stores the lower DPFP value.  */
static __inline void
_mm_store_sd (double *__P, __m128d __A)
{
  __builtin_ia32_storesd (__P, (__v2df)__A);
}

/* Store the lower DPFP value across two words.  */
static __inline void
_mm_store1_pd (double *__P, __m128d __A)
{
  __v2df __va = (__v2df)__A;
  __v2df __tmp = __builtin_ia32_shufpd (__va, __va, _MM_SHUFFLE2 (0,0));
  __builtin_ia32_storeapd (__P, __tmp);
}

static __inline void
_mm_store_pd1 (double *__P, __m128d __A)
{
  _mm_store1_pd (__P, __A);
}

/* Store two DPFP values.  The address must be 16-byte aligned.  */
static __inline void
_mm_store_pd (double *__P, __m128d __A)
{
  __builtin_ia32_storeapd (__P, (__v2df)__A);
}

/* Store two DPFP values.  The address need not be 16-byte aligned.  */
static __inline void
_mm_storeu_pd (double *__P, __m128d __A)
{
  __builtin_ia32_storeupd (__P, (__v2df)__A);
}

/* Store two DPFP values in reverse order.  The address must be aligned.  */
static __inline void
_mm_storer_pd (double *__P, __m128d __A)
{
  __v2df __va = (__v2df)__A;
  __v2df __tmp = __builtin_ia32_shufpd (__va, __va, _MM_SHUFFLE2 (0,1));
  __builtin_ia32_storeapd (__P, __tmp);
}

/* Sets the low DPFP value of A from the low value of B.  */
static __inline __m128d
_mm_move_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B);
}


static __inline __m128d
_mm_add_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_addpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_add_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_addsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_sub_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_subpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_sub_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_subsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_mul_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_mulpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_mul_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_mulsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_div_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_divpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_div_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_divsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_sqrt_pd (__m128d __A)
{
  return (__m128d)__builtin_ia32_sqrtpd ((__v2df)__A);
}

/* Return pair {sqrt (B[0]), A[1]}.  */
static __inline __m128d
_mm_sqrt_sd (__m128d __A, __m128d __B)
{
  __v2df __tmp = __builtin_ia32_movsd ((__v2df)__A, (__v2df)__B);
  return (__m128d)__builtin_ia32_sqrtsd ((__v2df)__tmp);
}

static __inline __m128d
_mm_min_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_minpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_min_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_minsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_max_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_maxpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_max_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_maxsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_and_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_andpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_andnot_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_andnpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_or_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_orpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_xor_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_xorpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpeq_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpeqpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmplt_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpltpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmple_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmplepd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpgt_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpgtpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpge_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpgepd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpneq_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpneqpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpnlt_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpnltpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpnle_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpnlepd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpngt_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpngtpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpnge_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpngepd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpord_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpordpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpunord_pd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpunordpd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpeq_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpeqsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmplt_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpltsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmple_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmplesd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpgt_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
					 (__v2df)
					 __builtin_ia32_cmpltsd ((__v2df) __B,
								 (__v2df)
								 __A));
}

static __inline __m128d
_mm_cmpge_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
					 (__v2df)
					 __builtin_ia32_cmplesd ((__v2df) __B,
								 (__v2df)
								 __A));
}

static __inline __m128d
_mm_cmpneq_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpneqsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpnlt_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpnltsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpnle_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpnlesd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpngt_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
					 (__v2df)
					 __builtin_ia32_cmpnltsd ((__v2df) __B,
								  (__v2df)
								  __A));
}

static __inline __m128d
_mm_cmpnge_sd (__m128d __A, __m128d __B)
{
  return (__m128d) __builtin_ia32_movsd ((__v2df) __A,
					 (__v2df)
					 __builtin_ia32_cmpnlesd ((__v2df) __B,
								  (__v2df)
								  __A));
}

static __inline __m128d
_mm_cmpord_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpordsd ((__v2df)__A, (__v2df)__B);
}

static __inline __m128d
_mm_cmpunord_sd (__m128d __A, __m128d __B)
{
  return (__m128d)__builtin_ia32_cmpunordsd ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_comieq_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdeq ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_comilt_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdlt ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_comile_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdle ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_comigt_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdgt ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_comige_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdge ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_comineq_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_comisdneq ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_ucomieq_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdeq ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_ucomilt_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdlt ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_ucomile_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdle ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_ucomigt_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdgt ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_ucomige_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdge ((__v2df)__A, (__v2df)__B);
}

static __inline int
_mm_ucomineq_sd (__m128d __A, __m128d __B)
{
  return __builtin_ia32_ucomisdneq ((__v2df)__A, (__v2df)__B);
}

/* Load 128 bits of integer data from P.  For _mm_load_si128 the address
   must be 16-byte aligned; _mm_loadl_epi64 loads 64 bits into element 0
   and zeroes the rest.  */

static __inline __m128i
_mm_load_si128 (__m128i const *__P)
{
  return (__m128i) __builtin_ia32_loaddqa ((char const *)__P);
}

static __inline __m128i
_mm_loadu_si128 (__m128i const *__P)
{
  return (__m128i) __builtin_ia32_loaddqu ((char const *)__P);
}

static __inline __m128i
_mm_loadl_epi64 (__m128i const *__P)
{
  return (__m128i) __builtin_ia32_movq2dq (*(unsigned long long *)__P);
}

static __inline void
_mm_store_si128 (__m128i *__P, __m128i __B)
{
  __builtin_ia32_storedqa ((char *)__P, (__v16qi)__B);
}

static __inline void
_mm_storeu_si128 (__m128i *__P, __m128i __B)
{
  __builtin_ia32_storedqu ((char *)__P, (__v16qi)__B);
}

static __inline void
_mm_storel_epi64 (__m128i *__P, __m128i __B)
{
  *(long long *)__P = __builtin_ia32_movdq2q ((__v2di)__B);
}

static __inline __m64
_mm_movepi64_pi64 (__m128i __B)
{
  return (__m64) __builtin_ia32_movdq2q ((__v2di)__B);
}

static __inline __m128i
_mm_move_epi64 (__m128i __A)
{
  return (__m128i) __builtin_ia32_movq ((__v2di)__A);
}

/* Create a vector of zeros.  */
static __inline __m128i
_mm_setzero_si128 (void)
{
  return (__m128i) __builtin_ia32_setzero128 ();
}

static __inline __m128i
_mm_set_epi64 (__m64 __A,  __m64 __B)
{
  __v2di __tmp = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__A);
  __v2di __tmp2 = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__B);
  return (__m128i)__builtin_ia32_punpcklqdq128 (__tmp2, __tmp);
}

/* Create the vector [Z Y X W].  */
static __inline __m128i
_mm_set_epi32 (int __Z, int __Y, int __X, int __W)
{
  union {
    int __a[4];
    __m128i __v;
  } __u;

  __u.__a[0] = __W;
  __u.__a[1] = __X;
  __u.__a[2] = __Y;
  __u.__a[3] = __Z;

  return __u.__v;
}

#ifdef __x86_64__
/* Create the vector [Z Y].  */
static __inline __m128i
_mm_set_epi64x (long long __Z, long long __Y)
{
  union {
    long __a[2];
    __m128i __v;
  } __u;

  __u.__a[0] = __Y;
  __u.__a[1] = __Z;

  return __u.__v;
}
#endif

/* Create the vector [Z Y X W V U T S].  */
static __inline __m128i
_mm_set_epi16 (short __Z, short __Y, short __X, short __W,
	       short __V, short __U, short __T, short __S)
{
  union {
    short __a[8];
    __m128i __v;
  } __u;

  __u.__a[0] = __S;
  __u.__a[1] = __T;
  __u.__a[2] = __U;
  __u.__a[3] = __V;
  __u.__a[4] = __W;
  __u.__a[5] = __X;
  __u.__a[6] = __Y;
  __u.__a[7] = __Z;

  return __u.__v;
}

/* Create the vector [Z Y X W V U T S Z1 Y1 X1 W1 V1 U1 T1 S1].  */
static __inline __m128i
_mm_set_epi8 (char __Z, char __Y, char __X, char __W,
	      char __V, char __U, char __T, char __S,
	      char __Z1, char __Y1, char __X1, char __W1,
	      char __V1, char __U1, char __T1, char __S1)
{
  union {
    char __a[16];
    __m128i __v;
  } __u;

  __u.__a[0] = __S1;
  __u.__a[1] = __T1;
  __u.__a[2] = __U1;
  __u.__a[3] = __V1;
  __u.__a[4] = __W1;
  __u.__a[5] = __X1;
  __u.__a[6] = __Y1;
  __u.__a[7] = __Z1;
  __u.__a[8] = __S;
  __u.__a[9] = __T;
  __u.__a[10] = __U;
  __u.__a[11] = __V;
  __u.__a[12] = __W;
  __u.__a[13] = __X;
  __u.__a[14] = __Y;
  __u.__a[15] = __Z;

  return __u.__v;
}

static __inline __m128i
_mm_set1_epi64 (__m64 __A)
{
  __v2di __tmp = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__A);
  return (__m128i)__builtin_ia32_punpcklqdq128 (__tmp, __tmp);
}

static __inline __m128i
_mm_set1_epi32 (int __A)
{
  __v4si __tmp = (__v4si)__builtin_ia32_loadd (&__A);
  return (__m128i) __builtin_ia32_pshufd ((__v4si)__tmp, _MM_SHUFFLE (0,0,0,0));
}

#ifdef __x86_64__
static __inline __m128i
_mm_set1_epi64x (long long __A)
{
  __v2di __tmp = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__A);
  return (__m128i) __builtin_ia32_shufpd ((__v2df)__tmp, (__v2df)__tmp, _MM_SHUFFLE2 (0,0));
}
#endif

static __inline __m128i
_mm_set1_epi16 (short __A)
{
  int __Acopy = (unsigned short)__A;
  __v4si __tmp = (__v4si)__builtin_ia32_loadd (&__Acopy);
  __tmp = (__v4si)__builtin_ia32_punpcklwd128 ((__v8hi)__tmp, (__v8hi)__tmp);
  return (__m128i) __builtin_ia32_pshufd ((__v4si)__tmp, _MM_SHUFFLE (0,0,0,0));
}

static __inline __m128i
_mm_set1_epi8 (char __A)
{
  int __Acopy = (unsigned char)__A;
  __v4si __tmp = (__v4si)__builtin_ia32_loadd (&__Acopy);
  __tmp = (__v4si)__builtin_ia32_punpcklbw128 ((__v16qi)__tmp, (__v16qi)__tmp);
  __tmp = (__v4si)__builtin_ia32_punpcklbw128 ((__v16qi)__tmp, (__v16qi)__tmp);
  return (__m128i) __builtin_ia32_pshufd ((__v4si)__tmp, _MM_SHUFFLE (0,0,0,0));
}

static __inline __m128i
_mm_setr_epi64 (__m64 __A,  __m64 __B)
{
  __v2di __tmp = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__A);
  __v2di __tmp2 = (__v2di)__builtin_ia32_movq2dq ((unsigned long long)__B);
  return (__m128i)__builtin_ia32_punpcklqdq128 (__tmp, __tmp2);
}

/* Create the vector [Z Y X W].  */
static __inline __m128i
_mm_setr_epi32 (int __W, int __X, int __Y, int __Z)
{
  union {
    int __a[4];
    __m128i __v;
  } __u;

  __u.__a[0] = __W;
  __u.__a[1] = __X;
  __u.__a[2] = __Y;
  __u.__a[3] = __Z;

  return __u.__v;
}

/* Create the vector [Z Y X W V U T S].  */
static __inline __m128i
_mm_setr_epi16 (short __S, short __T, short __U, short __V,
	        short __W, short __X, short __Y, short __Z)
{
  union {
    short __a[8];
    __m128i __v;
  } __u;

  __u.__a[0] = __S;
  __u.__a[1] = __T;
  __u.__a[2] = __U;
  __u.__a[3] = __V;
  __u.__a[4] = __W;
  __u.__a[5] = __X;
  __u.__a[6] = __Y;
  __u.__a[7] = __Z;

  return __u.__v;
}

/* Create the vector [Z Y X W V U T S Z1 Y1 X1 W1 V1 U1 T1 S1].  */
static __inline __m128i
_mm_setr_epi8 (char __S1, char __T1, char __U1, char __V1,
	       char __W1, char __X1, char __Y1, char __Z1,
	       char __S, char __T, char __U, char __V,
	       char __W, char __X, char __Y, char __Z)
{
  union {
    char __a[16];
    __m128i __v;
  } __u;

  __u.__a[0] = __S1;
  __u.__a[1] = __T1;
  __u.__a[2] = __U1;
  __u.__a[3] = __V1;
  __u.__a[4] = __W1;
  __u.__a[5] = __X1;
  __u.__a[6] = __Y1;
  __u.__a[7] = __Z1;
  __u.__a[8] = __S;
  __u.__a[9] = __T;
  __u.__a[10] = __U;
  __u.__a[11] = __V;
  __u.__a[12] = __W;
  __u.__a[13] = __X;
  __u.__a[14] = __Y;
  __u.__a[15] = __Z;

  return __u.__v;
}
1884
1885static __inline __m128d
1886_mm_cvtepi32_pd (__m128i __A)
1887{
1888  return (__m128d)__builtin_ia32_cvtdq2pd ((__v4si) __A);
1889}
1890
1891static __inline __m128
1892_mm_cvtepi32_ps (__m128i __A)
1893{
1894  return (__m128)__builtin_ia32_cvtdq2ps ((__v4si) __A);
1895}
1896
1897static __inline __m128i
1898_mm_cvtpd_epi32 (__m128d __A)
1899{
1900  return (__m128i)__builtin_ia32_cvtpd2dq ((__v2df) __A);
1901}
1902
1903static __inline __m64
1904_mm_cvtpd_pi32 (__m128d __A)
1905{
1906  return (__m64)__builtin_ia32_cvtpd2pi ((__v2df) __A);
1907}
1908
1909static __inline __m128
1910_mm_cvtpd_ps (__m128d __A)
1911{
1912  return (__m128)__builtin_ia32_cvtpd2ps ((__v2df) __A);
1913}
1914
1915static __inline __m128i
1916_mm_cvttpd_epi32 (__m128d __A)
1917{
1918  return (__m128i)__builtin_ia32_cvttpd2dq ((__v2df) __A);
1919}
1920
1921static __inline __m64
1922_mm_cvttpd_pi32 (__m128d __A)
1923{
1924  return (__m64)__builtin_ia32_cvttpd2pi ((__v2df) __A);
1925}
1926
1927static __inline __m128d
1928_mm_cvtpi32_pd (__m64 __A)
1929{
1930  return (__m128d)__builtin_ia32_cvtpi2pd ((__v2si) __A);
1931}
1932
1933static __inline __m128i
1934_mm_cvtps_epi32 (__m128 __A)
1935{
1936  return (__m128i)__builtin_ia32_cvtps2dq ((__v4sf) __A);
1937}
1938
1939static __inline __m128i
1940_mm_cvttps_epi32 (__m128 __A)
1941{
1942  return (__m128i)__builtin_ia32_cvttps2dq ((__v4sf) __A);
1943}
1944
1945static __inline __m128d
1946_mm_cvtps_pd (__m128 __A)
1947{
1948  return (__m128d)__builtin_ia32_cvtps2pd ((__v4sf) __A);
1949}
1950
1951static __inline int
1952_mm_cvtsd_si32 (__m128d __A)
1953{
1954  return __builtin_ia32_cvtsd2si ((__v2df) __A);
1955}
1956
1957#ifdef __x86_64__
1958static __inline long long
1959_mm_cvtsd_si64x (__m128d __A)
1960{
1961  return __builtin_ia32_cvtsd2si64 ((__v2df) __A);
1962}
1963#endif
1964
1965static __inline int
1966_mm_cvttsd_si32 (__m128d __A)
1967{
1968  return __builtin_ia32_cvttsd2si ((__v2df) __A);
1969}
1970
1971#ifdef __x86_64__
1972static __inline long long
1973_mm_cvttsd_si64x (__m128d __A)
1974{
1975  return __builtin_ia32_cvttsd2si64 ((__v2df) __A);
1976}
1977#endif
1978
1979static __inline __m128
1980_mm_cvtsd_ss (__m128 __A, __m128d __B)
1981{
1982  return (__m128)__builtin_ia32_cvtsd2ss ((__v4sf) __A, (__v2df) __B);
1983}
1984
1985static __inline __m128d
1986_mm_cvtsi32_sd (__m128d __A, int __B)
1987{
1988  return (__m128d)__builtin_ia32_cvtsi2sd ((__v2df) __A, __B);
1989}
1990
1991#ifdef __x86_64__
1992static __inline __m128d
1993_mm_cvtsi64x_sd (__m128d __A, long long __B)
1994{
1995  return (__m128d)__builtin_ia32_cvtsi642sd ((__v2df) __A, __B);
1996}
1997#endif
1998
1999static __inline __m128d
2000_mm_cvtss_sd (__m128d __A, __m128 __B)
2001{
2002  return (__m128d)__builtin_ia32_cvtss2sd ((__v2df) __A, (__v4sf)__B);
2003}
2004
2005#define _mm_shuffle_pd(__A, __B, __C) ((__m128d)__builtin_ia32_shufpd ((__v2df)__A, (__v2df)__B, (__C)))
2006
2007static __inline __m128d
2008_mm_unpackhi_pd (__m128d __A, __m128d __B)
2009{
2010  return (__m128d)__builtin_ia32_unpckhpd ((__v2df)__A, (__v2df)__B);
2011}
2012
2013static __inline __m128d
2014_mm_unpacklo_pd (__m128d __A, __m128d __B)
2015{
2016  return (__m128d)__builtin_ia32_unpcklpd ((__v2df)__A, (__v2df)__B);
2017}
2018
static __inline __m128d
_mm_loadh_pd (__m128d __A, double const *__B)
{
  return (__m128d)__builtin_ia32_loadhpd ((__v2df)__A, (__v2si *)__B);
}

static __inline void
_mm_storeh_pd (double *__A, __m128d __B)
{
  __builtin_ia32_storehpd ((__v2si *)__A, (__v2df)__B);
}

static __inline __m128d
_mm_loadl_pd (__m128d __A, double const *__B)
{
  return (__m128d)__builtin_ia32_loadlpd ((__v2df)__A, (__v2si *)__B);
}

static __inline void
_mm_storel_pd (double *__A, __m128d __B)
{
  __builtin_ia32_storelpd ((__v2si *)__A, (__v2df)__B);
}

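/* Create a two-bit mask from the sign bits of the DPFP values of A.  */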
static __inline int
_mm_movemask_pd (__m128d __A)
{
  return __builtin_ia32_movmskpd ((__v2df)__A);
}

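/* Pack the elements of A and B into elements of half the width, with
   signed (_mm_packs_*) or unsigned (_mm_packus_*) saturation.  */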
static __inline __m128i
_mm_packs_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_packsswb128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_packs_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_packssdw128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i
_mm_packus_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_packuswb128 ((__v8hi)__A, (__v8hi)__B);
}

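/* Interleave the elements from the high (unpackhi) or low (unpacklo)
   halves of A and B.  */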
static __inline __m128i
_mm_unpackhi_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckhbw128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_unpackhi_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckhwd128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_unpackhi_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckhdq128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i
_mm_unpackhi_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckhqdq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i
_mm_unpacklo_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpcklbw128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_unpacklo_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpcklwd128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_unpacklo_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpckldq128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i
_mm_unpacklo_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_punpcklqdq128 ((__v2di)__A, (__v2di)__B);
}

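/* Element-wise integer addition and subtraction.  The plain forms wrap
   around; the adds/subs forms saturate in the signed (epi) or unsigned
   (epu) range.  */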
static __inline __m128i
_mm_add_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_add_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_add_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddd128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i
_mm_add_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i
_mm_adds_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddsb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_adds_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddsw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_adds_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddusb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_adds_epu16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_paddusw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_sub_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_sub_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_sub_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubd128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i
_mm_sub_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i
_mm_subs_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubsb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_subs_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubsw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_subs_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubusb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_subs_epu16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psubusw128 ((__v8hi)__A, (__v8hi)__B);
}

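/* Integer multiplies.  _mm_madd_epi16 sums adjacent pairs of signed
   16-bit products into 32-bit elements; the mulhi/mullo forms keep the
   high or low half of each 16-bit product; mul_su32 and mul_epu32
   produce full 64-bit products of unsigned 32-bit operands.  */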
static __inline __m128i
_mm_madd_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmaddwd128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_mulhi_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmulhw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_mullo_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmullw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m64
_mm_mul_su32 (__m64 __A, __m64 __B)
{
  return (__m64)__builtin_ia32_pmuludq ((__v2si)__A, (__v2si)__B);
}

static __inline __m128i
_mm_mul_epu32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmuludq128 ((__v4si)__A, (__v4si)__B);
}

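/* Shift each element of A by the count held in the low quadword of B,
   or by an immediate count in the _mm_slli/_mm_srai/_mm_srli forms.
   Logical shifts by more than the element width yield zero; arithmetic
   right shifts fill with the sign bit.  */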
static __inline __m128i
_mm_sll_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psllw128 ((__v8hi)__A, (__v2di)__B);
}

static __inline __m128i
_mm_sll_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pslld128 ((__v4si)__A, (__v2di)__B);
}

static __inline __m128i
_mm_sll_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psllq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i
_mm_sra_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psraw128 ((__v8hi)__A, (__v2di)__B);
}

static __inline __m128i
_mm_sra_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psrad128 ((__v4si)__A, (__v2di)__B);
}

static __inline __m128i
_mm_srl_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psrlw128 ((__v8hi)__A, (__v2di)__B);
}

static __inline __m128i
_mm_srl_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psrld128 ((__v4si)__A, (__v2di)__B);
}

static __inline __m128i
_mm_srl_epi64 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psrlq128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i
_mm_slli_epi16 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psllwi128 ((__v8hi)__A, __B);
}

static __inline __m128i
_mm_slli_epi32 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_pslldi128 ((__v4si)__A, __B);
}

static __inline __m128i
_mm_slli_epi64 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psllqi128 ((__v2di)__A, __B);
}

static __inline __m128i
_mm_srai_epi16 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psrawi128 ((__v8hi)__A, __B);
}

static __inline __m128i
_mm_srai_epi32 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psradi128 ((__v4si)__A, __B);
}

#if 0
static __m128i __attribute__((__always_inline__))
_mm_srli_si128 (__m128i __A, const int __B)
{
  return ((__m128i)__builtin_ia32_psrldqi128 (__A, __B));
}

static __m128i __attribute__((__always_inline__))
_mm_slli_si128 (__m128i __A, const int __B)
{
  return ((__m128i)__builtin_ia32_pslldqi128 (__A, __B));
}
#endif
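/* Shift the whole 128-bit value right (srli) or left (slli) by B bytes;
   the count must be an immediate.  */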
#define _mm_srli_si128(__A, __B) ((__m128i)__builtin_ia32_psrldqi128 (__A, __B))
#define _mm_slli_si128(__A, __B) ((__m128i)__builtin_ia32_pslldqi128 (__A, __B))

static __inline __m128i
_mm_srli_epi16 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psrlwi128 ((__v8hi)__A, __B);
}

static __inline __m128i
_mm_srli_epi32 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psrldi128 ((__v4si)__A, __B);
}

static __inline __m128i
_mm_srli_epi64 (__m128i __A, int __B)
{
  return (__m128i)__builtin_ia32_psrlqi128 ((__v2di)__A, __B);
}

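/* Bitwise logical operations on the full 128 bits; the andnot form
   computes (~A) & B.  */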
static __inline __m128i
_mm_and_si128 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pand128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i
_mm_andnot_si128 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pandn128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i
_mm_or_si128 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_por128 ((__v2di)__A, (__v2di)__B);
}

static __inline __m128i
_mm_xor_si128 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pxor128 ((__v2di)__A, (__v2di)__B);
}

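/* Element-wise comparisons, setting each element to all ones where the
   relation holds and to zero elsewhere.  The cmplt forms are obtained by
   swapping the operands of the corresponding pcmpgt instruction.  */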
static __inline __m128i
_mm_cmpeq_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpeqb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_cmpeq_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpeqw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_cmpeq_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpeqd128 ((__v4si)__A, (__v4si)__B);
}

static __inline __m128i
_mm_cmplt_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__B, (__v16qi)__A);
}

static __inline __m128i
_mm_cmplt_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__B, (__v8hi)__A);
}

static __inline __m128i
_mm_cmplt_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__B, (__v4si)__A);
}

static __inline __m128i
_mm_cmpgt_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_cmpgt_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_cmpgt_epi32 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pcmpgtd128 ((__v4si)__A, (__v4si)__B);
}

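/* Extract (zero-extended) or replace the 16-bit element of A selected by
   an immediate index.  */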
#define _mm_extract_epi16(__A, __B) __builtin_ia32_pextrw128 ((__v8hi)__A, __B)

#define _mm_insert_epi16(__A, __B, __C) ((__m128i)__builtin_ia32_pinsrw128 ((__v8hi)__A, __B, __C))

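/* Element-wise minimum and maximum: signed for 16-bit elements, unsigned
   for bytes.  */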
static __inline __m128i
_mm_max_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmaxsw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_max_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmaxub128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_min_epi16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pminsw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_min_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pminub128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline int
_mm_movemask_epi8 (__m128i __A)
{
  return __builtin_ia32_pmovmskb128 ((__v16qi)__A);
}

static __inline __m128i
_mm_mulhi_epu16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pmulhuw128 ((__v8hi)__A, (__v8hi)__B);
}

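/* Shuffle the 16-bit elements within the high (hi) or low (lo) quadword,
   or the 32-bit elements of the whole vector, according to the selector
   B; the other quadword, where present, is passed through unchanged.  */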
#define _mm_shufflehi_epi16(__A, __B) ((__m128i)__builtin_ia32_pshufhw ((__v8hi)__A, __B))
#define _mm_shufflelo_epi16(__A, __B) ((__m128i)__builtin_ia32_pshuflw ((__v8hi)__A, __B))
#define _mm_shuffle_epi32(__A, __B) ((__m128i)__builtin_ia32_pshufd ((__v4si)__A, __B))

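/* Conditionally store the bytes of A to the unaligned address C, using
   the most significant bit of each byte of B as the write mask.  */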
static __inline void
_mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C)
{
  __builtin_ia32_maskmovdqu ((__v16qi)__A, (__v16qi)__B, __C);
}

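/* Rounded averages of unsigned elements, and the sum of absolute
   differences of the unsigned bytes, accumulated into each 64-bit
   half.  */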
static __inline __m128i
_mm_avg_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pavgb128 ((__v16qi)__A, (__v16qi)__B);
}

static __inline __m128i
_mm_avg_epu16 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_pavgw128 ((__v8hi)__A, (__v8hi)__B);
}

static __inline __m128i
_mm_sad_epu8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_psadbw128 ((__v16qi)__A, (__v16qi)__B);
}

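/* Non-temporal stores: write directly to memory with minimal cache
   pollution.  These stores are weakly ordered; use a fence when ordering
   against other memory operations matters.  */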
static __inline void
_mm_stream_si32 (int *__A, int __B)
{
  __builtin_ia32_movnti (__A, __B);
}

static __inline void
_mm_stream_si128 (__m128i *__A, __m128i __B)
{
  __builtin_ia32_movntdq ((__v2di *)__A, (__v2di)__B);
}

static __inline void
_mm_stream_pd (double *__A, __m128d __B)
{
  __builtin_ia32_movntpd (__A, (__v2df)__B);
}

static __inline __m128i
_mm_movpi64_epi64 (__m64 __A)
{
  return (__m128i)__builtin_ia32_movq2dq ((unsigned long long)__A);
}

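/* Flush the cache line containing A from all levels of the cache
   hierarchy.  */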
static __inline void
_mm_clflush (void const *__A)
{
  __builtin_ia32_clflush (__A);
}

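/* Memory fences: lfence orders load operations, mfence orders both loads
   and stores.  */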
static __inline void
_mm_lfence (void)
{
  __builtin_ia32_lfence ();
}

static __inline void
_mm_mfence (void)
{
  __builtin_ia32_mfence ();
}

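/* Move an integer into the lowest element of an XMM register, clearing
   the upper elements, or extract the lowest element again.  */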
static __inline __m128i
_mm_cvtsi32_si128 (int __A)
{
  return (__m128i) __builtin_ia32_loadd (&__A);
}

#ifdef __x86_64__
static __inline __m128i
_mm_cvtsi64x_si128 (long long __A)
{
  return (__m128i) __builtin_ia32_movq2dq (__A);
}
#endif

static __inline int
_mm_cvtsi128_si32 (__m128i __A)
{
  int __tmp;
  __builtin_ia32_stored (&__tmp, (__v4si)__A);
  return __tmp;
}

#ifdef __x86_64__
static __inline long long
_mm_cvtsi128_si64x (__m128i __A)
{
  return __builtin_ia32_movdq2q ((__v2di)__A);
}
#endif

#endif /* __SSE2__ */

#endif /* __SSE__ */
#endif /* _XMMINTRIN_H_INCLUDED */