xmmintrin.h revision 122180
/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.

   This file is part of GNU CC.

   GNU CC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GNU CC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GNU CC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 8.0.  */

#ifndef _XMMINTRIN_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED

#ifndef __SSE__
# error "SSE instruction set not enabled"
#else

/* We need type definitions from the MMX header file.  */
#include <mmintrin.h>

/* The data type intended for user use.  */
typedef int __m128 __attribute__ ((__mode__(__V4SF__)));

/* Internal data types for implementing the intrinsics.  */
typedef int __v4sf __attribute__ ((__mode__(__V4SF__)));
typedef int __v4si __attribute__ ((__mode__(__V4SI__)));

/* Create a selector for use with the SHUFPS instruction.  */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
 (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))

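/* Usage sketch: each two-bit field selects one source element.  For
   example, with __a = _mm_setr_ps (1, 2, 3, 4) and
   __b = _mm_setr_ps (5, 6, 7, 8) (both defined below),

     _mm_shuffle_ps (__a, __b, _MM_SHUFFLE (3, 2, 1, 0))

   encodes the immediate 0xE4 and yields {1, 2, 7, 8}: the two low
   fields pick elements 0 and 1 of the first operand, the two high
   fields pick elements 2 and 3 of the second.  */
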
/* Constants for use with _mm_prefetch.  */
enum _mm_hint
{
  _MM_HINT_T0 = 3,
  _MM_HINT_T1 = 2,
  _MM_HINT_T2 = 1,
  _MM_HINT_NTA = 0
};

/* Bits in the MXCSR.  */
#define _MM_EXCEPT_MASK       0x003f
#define _MM_EXCEPT_INVALID    0x0001
#define _MM_EXCEPT_DENORM     0x0002
#define _MM_EXCEPT_DIV_ZERO   0x0004
#define _MM_EXCEPT_OVERFLOW   0x0008
#define _MM_EXCEPT_UNDERFLOW  0x0010
#define _MM_EXCEPT_INEXACT    0x0020

#define _MM_MASK_MASK         0x1f80
#define _MM_MASK_INVALID      0x0080
#define _MM_MASK_DENORM       0x0100
#define _MM_MASK_DIV_ZERO     0x0200
#define _MM_MASK_OVERFLOW     0x0400
#define _MM_MASK_UNDERFLOW    0x0800
#define _MM_MASK_INEXACT      0x1000

#define _MM_ROUND_MASK        0x6000
#define _MM_ROUND_NEAREST     0x0000
#define _MM_ROUND_DOWN        0x2000
#define _MM_ROUND_UP          0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000

#define _MM_FLUSH_ZERO_MASK   0x8000
#define _MM_FLUSH_ZERO_ON     0x8000
#define _MM_FLUSH_ZERO_OFF    0x0000

/* Perform the respective operation on the lower SPFP (single-precision
   floating-point) values of A and B; the upper three SPFP values are
   passed through from A.  */

static __inline __m128
_mm_add_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_sub_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_mul_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_div_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_sqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
}

static __inline __m128
_mm_rcp_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
}

static __inline __m128
_mm_rsqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
}

static __inline __m128
_mm_min_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_max_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
}

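/* For example, _mm_add_ss (_mm_setr_ps (1.0f, 2.0f, 3.0f, 4.0f),
   _mm_setr_ps (10.0f, 20.0f, 30.0f, 40.0f)) yields {11, 2, 3, 4}:
   only element 0 is added; the other elements come from the first
   argument.  */
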
/* Perform the respective operation on the four SPFP values in A and B.  */

static __inline __m128
_mm_add_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_sub_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_mul_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_div_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_sqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
}

static __inline __m128
_mm_rcp_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
}

static __inline __m128
_mm_rsqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
}

static __inline __m128
_mm_min_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_max_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
}

/* Perform logical bit-wise operations on 128-bit values.  */

static __inline __m128
_mm_and_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andps (__A, __B);
}

static __inline __m128
_mm_andnot_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andnps (__A, __B);
}

static __inline __m128
_mm_or_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_orps (__A, __B);
}

static __inline __m128
_mm_xor_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_xorps (__A, __B);
}

/* Perform a comparison on the lower SPFP values of A and B.  If the
   comparison is true, place a mask of all ones in the result, otherwise a
   mask of zeros.  The upper three SPFP values are passed through from A.  */

static __inline __m128
_mm_cmpeq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmplt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmple_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpgt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpltss ((__v4sf) __B,
                                                                (__v4sf)
                                                                __A));
}

static __inline __m128
_mm_cmpge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpless ((__v4sf) __B,
                                                                (__v4sf)
                                                                __A));
}

static __inline __m128
_mm_cmpneq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnlt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnle_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpngt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpnltss ((__v4sf) __B,
                                                                 (__v4sf)
                                                                 __A));
}

static __inline __m128
_mm_cmpnge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpnless ((__v4sf) __B,
                                                                 (__v4sf)
                                                                 __A));
}

static __inline __m128
_mm_cmpord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpunord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform a comparison on the four SPFP values of A and B.  For each
   element, if the comparison is true, place a mask of all ones in the
   result, otherwise a mask of zeros.  */

static __inline __m128
_mm_cmpeq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmplt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmple_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpgt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpneq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnlt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnle_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpngt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpunord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
}

/* Compare the lower SPFP values of A and B and return 1 if true
   and 0 if false.  */

static __inline int
_mm_comieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
}

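/* For example, _mm_comilt_ss (_mm_set_ss (1.0f), _mm_set_ss (2.0f))
   returns 1; only the lowest elements take part in the comparison.
   The _mm_ucomi* variants behave the same except that they raise the
   invalid exception only for signaling NaN operands.  */
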
/* Convert the lower SPFP value to a 32-bit integer according to the current
   rounding mode.  */
static __inline int
_mm_cvtss_si32 (__m128 __A)
{
  return __builtin_ia32_cvtss2si ((__v4sf) __A);
}

static __inline int
_mm_cvt_ss2si (__m128 __A)
{
  return _mm_cvtss_si32 (__A);
}

#ifdef __x86_64__
/* Convert the lower SPFP value to a 64-bit integer according to the current
   rounding mode.  */
static __inline long long
_mm_cvtss_si64x (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}
#endif

/* Convert the two lower SPFP values to 32-bit integers according to the
   current rounding mode.  Return the integers in packed form.  */
static __inline __m64
_mm_cvtps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
}

static __inline __m64
_mm_cvt_ps2pi (__m128 __A)
{
  return _mm_cvtps_pi32 (__A);
}

/* Truncate the lower SPFP value to a 32-bit integer.  */
static __inline int
_mm_cvttss_si32 (__m128 __A)
{
  return __builtin_ia32_cvttss2si ((__v4sf) __A);
}

static __inline int
_mm_cvtt_ss2si (__m128 __A)
{
  return _mm_cvttss_si32 (__A);
}

#ifdef __x86_64__
/* Truncate the lower SPFP value to a 64-bit integer.  */
static __inline long long
_mm_cvttss_si64x (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}
#endif

/* Truncate the two lower SPFP values to 32-bit integers.  Return the
   integers in packed form.  */
static __inline __m64
_mm_cvttps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
}

static __inline __m64
_mm_cvtt_ps2pi (__m128 __A)
{
  return _mm_cvttps_pi32 (__A);
}

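/* For example, under the default round-to-nearest mode
   _mm_cvtss_si32 (_mm_set_ss (2.7f)) yields 3, whereas
   _mm_cvttss_si32 (_mm_set_ss (2.7f)) truncates and yields 2.  */
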
/* Convert B to a SPFP value and insert it as element zero in A.  */
static __inline __m128
_mm_cvtsi32_ss (__m128 __A, int __B)
{
  return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
}

static __inline __m128
_mm_cvt_si2ss (__m128 __A, int __B)
{
  return _mm_cvtsi32_ss (__A, __B);
}

#ifdef __x86_64__
/* Convert B to a SPFP value and insert it as element zero in A.  */
static __inline __m128
_mm_cvtsi64x_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}
#endif

/* Convert the two 32-bit values in B to SPFP form and insert them
   as the two lower elements in A.  */
static __inline __m128
_mm_cvtpi32_ps (__m128 __A, __m64 __B)
{
  return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
}

static __inline __m128
_mm_cvt_pi2ps (__m128 __A, __m64 __B)
{
  return _mm_cvtpi32_ps (__A, __B);
}

/* Convert the four signed 16-bit values in A to SPFP form.  */
static __inline __m128
_mm_cvtpi16_ps (__m64 __A)
{
  __v4hi __sign;
  __v2si __hisi, __losi;
  __v4sf __r;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = (__v4hi) __builtin_ia32_mmx_zero ();
  __sign = __builtin_ia32_pcmpgtw (__sign, (__v4hi)__A);

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);

  /* Convert the doublewords to floating point two at a time.  */
  __r = (__v4sf) __builtin_ia32_setzerops ();
  __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
  __r = __builtin_ia32_movlhps (__r, __r);
  __r = __builtin_ia32_cvtpi2ps (__r, __losi);

  return (__m128) __r;
}

/* Convert the four unsigned 16-bit values in A to SPFP form.  */
static __inline __m128
_mm_cvtpu16_ps (__m64 __A)
{
  __v4hi __zero = (__v4hi) __builtin_ia32_mmx_zero ();
  __v2si __hisi, __losi;
  __v4sf __r;

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __zero);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __zero);

  /* Convert the doublewords to floating point two at a time.  */
  __r = (__v4sf) __builtin_ia32_setzerops ();
  __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
  __r = __builtin_ia32_movlhps (__r, __r);
  __r = __builtin_ia32_cvtpi2ps (__r, __losi);

  return (__m128) __r;
}

/* Convert the low four signed 8-bit values in A to SPFP form.  */
static __inline __m128
_mm_cvtpi8_ps (__m64 __A)
{
  __v8qi __sign;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = (__v8qi) __builtin_ia32_mmx_zero ();
  __sign = __builtin_ia32_pcmpgtb (__sign, (__v8qi)__A);

  /* Convert the four low bytes to words.  */
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);

  return _mm_cvtpi16_ps(__A);
}

/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
static __inline __m128
_mm_cvtpu8_ps(__m64 __A)
{
  __v8qi __zero = (__v8qi) __builtin_ia32_mmx_zero ();
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __zero);
  return _mm_cvtpu16_ps(__A);
}

/* Convert the four signed 32-bit values in A and B to SPFP form.  */
static __inline __m128
_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
{
  __v4sf __zero = (__v4sf) __builtin_ia32_setzerops ();
  __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
  __v4sf __sfb = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__B);
  return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
}

/* Convert the four SPFP values in A to four signed 16-bit integers.  */
static __inline __m64
_mm_cvtps_pi16(__m128 __A)
{
  __v4sf __hisf = (__v4sf)__A;
  __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
  __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
  __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
  return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
}

/* Convert the four SPFP values in A to four signed 8-bit integers.  */
static __inline __m64
_mm_cvtps_pi8(__m128 __A)
{
  __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
  __v4hi __zero = (__v4hi) __builtin_ia32_mmx_zero ();
  return (__m64) __builtin_ia32_packsswb (__tmp, __zero);
}

/* Selects four specific SPFP values from A and B based on MASK.  */
#if 0
static __inline __m128
_mm_shuffle_ps (__m128 __A, __m128 __B, int __mask)
{
  return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
}
#else
#define _mm_shuffle_ps(A, B, MASK) \
 ((__m128) __builtin_ia32_shufps ((__v4sf)(A), (__v4sf)(B), (MASK)))
#endif


/* Selects and interleaves the upper two SPFP values from A and B.  */
static __inline __m128
_mm_unpackhi_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
}

/* Selects and interleaves the lower two SPFP values from A and B.  */
static __inline __m128
_mm_unpacklo_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the upper two SPFP values with 64 bits of data loaded from P;
   the lower two values are passed through from A.  */
static __inline __m128
_mm_loadh_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the upper two SPFP values of A into P.  */
static __inline void
_mm_storeh_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storehps ((__v2si *)__P, (__v4sf)__A);
}

/* Moves the upper two values of B into the lower two values of A.  */
static __inline __m128
_mm_movehl_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
}

/* Moves the lower two values of B into the upper two values of A.  */
static __inline __m128
_mm_movelh_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the lower two SPFP values with 64 bits of data loaded from P;
   the upper two values are passed through from A.  */
static __inline __m128
_mm_loadl_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the lower two SPFP values of A into P.  */
static __inline void
_mm_storel_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storelps ((__v2si *)__P, (__v4sf)__A);
}

/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
static __inline int
_mm_movemask_ps (__m128 __A)
{
  return __builtin_ia32_movmskps ((__v4sf)__A);
}

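/* Illustrative use: combined with a packed comparison, the mask makes
   it easy to test all four elements at once, e.g.

     if (_mm_movemask_ps (_mm_cmplt_ps (__a, __b)) == 0xf)
       ... every element of __a is less than the matching element of __b ...

   since each of the four result bits is the sign bit of one element.  */
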
/* Return the contents of the control register.  */
static __inline unsigned int
_mm_getcsr (void)
{
  return __builtin_ia32_stmxcsr ();
}

/* Read exception bits from the control register.  */
static __inline unsigned int
_MM_GET_EXCEPTION_STATE (void)
{
  return _mm_getcsr() & _MM_EXCEPT_MASK;
}

static __inline unsigned int
_MM_GET_EXCEPTION_MASK (void)
{
  return _mm_getcsr() & _MM_MASK_MASK;
}

static __inline unsigned int
_MM_GET_ROUNDING_MODE (void)
{
  return _mm_getcsr() & _MM_ROUND_MASK;
}

static __inline unsigned int
_MM_GET_FLUSH_ZERO_MODE (void)
{
  return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
}

/* Set the control register to I.  */
static __inline void
_mm_setcsr (unsigned int __I)
{
  __builtin_ia32_ldmxcsr (__I);
}

/* Set exception bits in the control register.  */
static __inline void
_MM_SET_EXCEPTION_STATE(unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask);
}

static __inline void
_MM_SET_EXCEPTION_MASK (unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask);
}

static __inline void
_MM_SET_ROUNDING_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
}

static __inline void
_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
}

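/* Illustrative use: to make _mm_cvtss_si32 and friends truncate instead
   of using the default round-to-nearest mode, switch the rounding field
   and restore it afterwards:

     unsigned int __saved = _MM_GET_ROUNDING_MODE ();
     _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO);
     ...
     _MM_SET_ROUNDING_MODE (__saved);

   Here __saved is a hypothetical local used only for the example.  */
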
/* Create a vector with element 0 as *P and the rest zero.  */
static __inline __m128
_mm_load_ss (float const *__P)
{
  return (__m128) __builtin_ia32_loadss (__P);
}

/* Create a vector with all four elements equal to *P.  */
static __inline __m128
_mm_load1_ps (float const *__P)
{
  __v4sf __tmp = __builtin_ia32_loadss (__P);
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,0,0,0));
}

static __inline __m128
_mm_load_ps1 (float const *__P)
{
  return _mm_load1_ps (__P);
}

/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
static __inline __m128
_mm_load_ps (float const *__P)
{
  return (__m128) __builtin_ia32_loadaps (__P);
}

/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
static __inline __m128
_mm_loadu_ps (float const *__P)
{
  return (__m128) __builtin_ia32_loadups (__P);
}

/* Load four SPFP values in reverse order.  The address must be aligned.  */
static __inline __m128
_mm_loadr_ps (float const *__P)
{
  __v4sf __tmp = __builtin_ia32_loadaps (__P);
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
}

/* Create a vector with element 0 as F and the rest zero.  */
static __inline __m128
_mm_set_ss (float __F)
{
  return (__m128) __builtin_ia32_loadss (&__F);
}

/* Create a vector with all four elements equal to F.  */
static __inline __m128
_mm_set1_ps (float __F)
{
  __v4sf __tmp = __builtin_ia32_loadss (&__F);
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,0,0,0));
}

static __inline __m128
_mm_set_ps1 (float __F)
{
  return _mm_set1_ps (__F);
}

/* Create the vector [Z Y X W].  */
static __inline __m128
_mm_set_ps (float __Z, float __Y, float __X, float __W)
{
  union {
    float __a[4];
    __m128 __v;
  } __u;

  __u.__a[0] = __W;
  __u.__a[1] = __X;
  __u.__a[2] = __Y;
  __u.__a[3] = __Z;

  return __u.__v;
}

/* Create the vector [W X Y Z].  */
static __inline __m128
_mm_setr_ps (float __Z, float __Y, float __X, float __W)
{
  return _mm_set_ps (__W, __X, __Y, __Z);
}

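/* For example, _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f) and
   _mm_setr_ps (1.0f, 2.0f, 3.0f, 4.0f) build the same vector: in both
   cases element 0, the one operated on by the _ss intrinsics and the
   first value written by _mm_storeu_ps, is 1.0f.  */
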
/* Create a vector of zeros.  */
static __inline __m128
_mm_setzero_ps (void)
{
  return (__m128) __builtin_ia32_setzerops ();
}

/* Stores the lower SPFP value.  */
static __inline void
_mm_store_ss (float *__P, __m128 __A)
{
  __builtin_ia32_storess (__P, (__v4sf)__A);
}

/* Store the lower SPFP value across four words.  */
static __inline void
_mm_store1_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
  __builtin_ia32_storeaps (__P, __tmp);
}

static __inline void
_mm_store_ps1 (float *__P, __m128 __A)
{
  _mm_store1_ps (__P, __A);
}

/* Store four SPFP values.  The address must be 16-byte aligned.  */
static __inline void
_mm_store_ps (float *__P, __m128 __A)
{
  __builtin_ia32_storeaps (__P, (__v4sf)__A);
}

/* Store four SPFP values.  The address need not be 16-byte aligned.  */
static __inline void
_mm_storeu_ps (float *__P, __m128 __A)
{
  __builtin_ia32_storeups (__P, (__v4sf)__A);
}

/* Store four SPFP values in reverse order.  The address must be aligned.  */
static __inline void
_mm_storer_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
  __builtin_ia32_storeaps (__P, __tmp);
}

/* Sets the low SPFP value of A from the low value of B.  */
static __inline __m128
_mm_move_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B);
}

/* Extracts one of the four words of A.  The selector N must be immediate.  */
#if 0
static __inline int
_mm_extract_pi16 (__m64 __A, int __N)
{
  return __builtin_ia32_pextrw ((__v4hi)__A, __N);
}

static __inline int
_m_pextrw (__m64 __A, int __N)
{
  return _mm_extract_pi16 (__A, __N);
}
#else
#define _mm_extract_pi16(A, N) \
  __builtin_ia32_pextrw ((__v4hi)(A), (N))
#define _m_pextrw(A, N)		_mm_extract_pi16((A), (N))
#endif

/* Inserts word D into one of four words of A.  The selector N must be
   immediate.  */
#if 0
static __inline __m64
_mm_insert_pi16 (__m64 __A, int __D, int __N)
{
  return (__m64)__builtin_ia32_pinsrw ((__v4hi)__A, __D, __N);
}

static __inline __m64
_m_pinsrw (__m64 __A, int __D, int __N)
{
  return _mm_insert_pi16 (__A, __D, __N);
}
#else
#define _mm_insert_pi16(A, D, N) \
  ((__m64) __builtin_ia32_pinsrw ((__v4hi)(A), (D), (N)))
#define _m_pinsrw(A, D, N)	 _mm_insert_pi16((A), (D), (N))
#endif

/* Compute the element-wise maximum of signed 16-bit values.  */
static __inline __m64
_mm_max_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64
_m_pmaxsw (__m64 __A, __m64 __B)
{
  return _mm_max_pi16 (__A, __B);
}

/* Compute the element-wise maximum of unsigned 8-bit values.  */
static __inline __m64
_mm_max_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64
_m_pmaxub (__m64 __A, __m64 __B)
{
  return _mm_max_pu8 (__A, __B);
}

/* Compute the element-wise minimum of signed 16-bit values.  */
static __inline __m64
_mm_min_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64
_m_pminsw (__m64 __A, __m64 __B)
{
  return _mm_min_pi16 (__A, __B);
}

/* Compute the element-wise minimum of unsigned 8-bit values.  */
static __inline __m64
_mm_min_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64
_m_pminub (__m64 __A, __m64 __B)
{
  return _mm_min_pu8 (__A, __B);
}

/* Create an 8-bit mask of the signs of 8-bit values.  */
static __inline int
_mm_movemask_pi8 (__m64 __A)
{
  return __builtin_ia32_pmovmskb ((__v8qi)__A);
}

static __inline int
_m_pmovmskb (__m64 __A)
{
  return _mm_movemask_pi8 (__A);
}

/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
   in B and produce the high 16 bits of the 32-bit results.  */
static __inline __m64
_mm_mulhi_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64
_m_pmulhuw (__m64 __A, __m64 __B)
{
  return _mm_mulhi_pu16 (__A, __B);
}

/* Return a combination of the four 16-bit values in A.  The selector
   must be an immediate.  */
#if 0
static __inline __m64
_mm_shuffle_pi16 (__m64 __A, int __N)
{
  return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
}

static __inline __m64
_m_pshufw (__m64 __A, int __N)
{
  return _mm_shuffle_pi16 (__A, __N);
}
#else
#define _mm_shuffle_pi16(A, N) \
  ((__m64) __builtin_ia32_pshufw ((__v4hi)(A), (N)))
#define _m_pshufw(A, N)		_mm_shuffle_pi16 ((A), (N))
#endif

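/* For example, _mm_shuffle_pi16 (__a, _MM_SHUFFLE (0, 1, 2, 3)) reverses
   the order of the four 16-bit values in __a.  */
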
/* Conditionally store byte elements of A into P.  The high bit of each
   byte in the selector N determines whether the corresponding byte from
   A is stored.  */
static __inline void
_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
{
  __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
}

static __inline void
_m_maskmovq (__m64 __A, __m64 __N, char *__P)
{
  _mm_maskmove_si64 (__A, __N, __P);
}

/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
static __inline __m64
_mm_avg_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64
_m_pavgb (__m64 __A, __m64 __B)
{
  return _mm_avg_pu8 (__A, __B);
}

/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
static __inline __m64
_mm_avg_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64
_m_pavgw (__m64 __A, __m64 __B)
{
  return _mm_avg_pu16 (__A, __B);
}

/* Compute the sum of the absolute differences of the unsigned 8-bit
   values in A and B.  Return the value in the lower 16-bit word; the
   upper words are cleared.  */
static __inline __m64
_mm_sad_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64
_m_psadbw (__m64 __A, __m64 __B)
{
  return _mm_sad_pu8 (__A, __B);
}

/* Loads one cache line from address P to a location "closer" to the
   processor.  The selector I specifies the type of prefetch operation.  */
#if 0
static __inline void
_mm_prefetch (void *__P, enum _mm_hint __I)
{
  __builtin_prefetch (__P, 0, __I);
}
#else
#define _mm_prefetch(P, I) \
  __builtin_prefetch ((P), 0, (I))
#endif

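/* Illustrative use: when streaming through a large array, data can be
   prefetched a fixed (tuning-dependent) distance ahead of the current
   element, e.g.

     _mm_prefetch (__p + 16, _MM_HINT_T0);

   where __p is a hypothetical pointer into the array being processed.  */
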
/* Stores the data in A to the address P without polluting the caches.  */
static __inline void
_mm_stream_pi (__m64 *__P, __m64 __A)
{
  __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
}

/* Likewise.  The address must be 16-byte aligned.  */
static __inline void
_mm_stream_ps (float *__P, __m128 __A)
{
  __builtin_ia32_movntps (__P, (__v4sf)__A);
}

/* Guarantees that every preceding store is globally visible before
   any subsequent store.  */
static __inline void
_mm_sfence (void)
{
  __builtin_ia32_sfence ();
}

/* The execution of the next instruction is delayed by an
   implementation-specific amount of time.  The instruction does not
   modify the architectural state.  */
static __inline void
_mm_pause (void)
{
  __asm__ __volatile__ ("rep; nop" : : );
}

/* Transpose the 4x4 matrix composed of row[0-3].  */
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3)			\
do {									\
  __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3);	\
  __v4sf __t0 = __builtin_ia32_shufps (__r0, __r1, 0x44);		\
  __v4sf __t2 = __builtin_ia32_shufps (__r0, __r1, 0xEE);		\
  __v4sf __t1 = __builtin_ia32_shufps (__r2, __r3, 0x44);		\
  __v4sf __t3 = __builtin_ia32_shufps (__r2, __r3, 0xEE);		\
  (row0) = __builtin_ia32_shufps (__t0, __t1, 0x88);			\
  (row1) = __builtin_ia32_shufps (__t0, __t1, 0xDD);			\
  (row2) = __builtin_ia32_shufps (__t2, __t3, 0x88);			\
  (row3) = __builtin_ia32_shufps (__t2, __t3, 0xDD);			\
} while (0)

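/* Illustrative use: after loading four rows of a row-major 4x4 matrix
   (here __m is a hypothetical array of 16 floats),

     __m128 __row0 = _mm_loadu_ps (__m + 0);
     __m128 __row1 = _mm_loadu_ps (__m + 4);
     __m128 __row2 = _mm_loadu_ps (__m + 8);
     __m128 __row3 = _mm_loadu_ps (__m + 12);
     _MM_TRANSPOSE4_PS (__row0, __row1, __row2, __row3);

   the four variables end up holding the columns of the original
   matrix.  */
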
/* For backward source compatibility.  */
#include <emmintrin.h>

#endif /* __SSE__ */
#endif /* _XMMINTRIN_H_INCLUDED */