/* xmmintrin.h -- revision 169689.  */
/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.0.  */

#ifndef _XMMINTRIN_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED

#ifndef __SSE__
# error "SSE instruction set not enabled"
#else

/* We need type definitions from the MMX header file.  */
#include <mmintrin.h>

/* Get _mm_malloc () and _mm_free ().  */
#include <mm_malloc.h>

/* The Intel API is flexible enough that we must allow aliasing with other
   vector types, and their scalar components.  */
typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));

/* Internal data types for implementing the intrinsics.  */
typedef float __v4sf __attribute__ ((__vector_size__ (16)));

/* Create a selector for use with the SHUFPS instruction.  */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
 (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))

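/* For example (illustrative only): _MM_SHUFFLE (0, 1, 2, 3)
   == (0 << 6) | (1 << 4) | (2 << 2) | 3 == 0x1b, which selects the
   elements in reverse order, while _MM_SHUFFLE (3, 2, 1, 0) == 0xe4
   is the identity selector.  */
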
/* Constants for use with _mm_prefetch.  */
enum _mm_hint
{
  _MM_HINT_T0 = 3,
  _MM_HINT_T1 = 2,
  _MM_HINT_T2 = 1,
  _MM_HINT_NTA = 0
};

/* Bits in the MXCSR.  */
#define _MM_EXCEPT_MASK       0x003f
#define _MM_EXCEPT_INVALID    0x0001
#define _MM_EXCEPT_DENORM     0x0002
#define _MM_EXCEPT_DIV_ZERO   0x0004
#define _MM_EXCEPT_OVERFLOW   0x0008
#define _MM_EXCEPT_UNDERFLOW  0x0010
#define _MM_EXCEPT_INEXACT    0x0020

#define _MM_MASK_MASK         0x1f80
#define _MM_MASK_INVALID      0x0080
#define _MM_MASK_DENORM       0x0100
#define _MM_MASK_DIV_ZERO     0x0200
#define _MM_MASK_OVERFLOW     0x0400
#define _MM_MASK_UNDERFLOW    0x0800
#define _MM_MASK_INEXACT      0x1000

#define _MM_ROUND_MASK        0x6000
#define _MM_ROUND_NEAREST     0x0000
#define _MM_ROUND_DOWN        0x2000
#define _MM_ROUND_UP          0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000

#define _MM_FLUSH_ZERO_MASK   0x8000
#define _MM_FLUSH_ZERO_ON     0x8000
#define _MM_FLUSH_ZERO_OFF    0x0000

/* Create a vector of zeros.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_setzero_ps (void)
{
  return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
}

/* Perform the respective operation on the lower SPFP (single-precision
   floating-point) values of A and B; the upper three SPFP values are
   passed through from A.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_add_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_sub_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_mul_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_div_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_sqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_rcp_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_rsqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_min_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_max_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
}

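/* Illustrative usage sketch of the _ss forms above; guarded with #if 0
   so it never affects compilation, and the example_* name is a
   placeholder rather than part of the API.  Only element 0 is computed;
   elements 1..3 of the result come from the first operand.  */
#if 0
static __m128
example_add_ss (void)
{
  __m128 a = _mm_set_ps (40.0f, 30.0f, 20.0f, 2.0f); /* elements {2,20,30,40} */
  __m128 b = _mm_set_ps (4.0f, 3.0f, 2.0f, 8.0f);    /* elements {8,2,3,4} */
  /* Result elements are {10, 20, 30, 40}: only the low element is added.  */
  return _mm_add_ss (a, b);
}
#endif
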
/* Perform the respective operation on the four SPFP values in A and B.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_add_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_sub_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_mul_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_div_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_sqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_rcp_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_rsqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_min_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_max_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
}

/* Perform logical bit-wise operations on 128-bit values.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_and_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andps (__A, __B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_andnot_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andnps (__A, __B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_or_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_orps (__A, __B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_xor_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_xorps (__A, __B);
}

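/* Illustrative usage sketch (guarded with #if 0; the example_* names are
   placeholders): the bit-wise operations are typically used for sign
   manipulation, since an SPFP sign bit is just the top bit of each
   32-bit element.  */
#if 0
static __m128
example_abs_ps (__m128 x)
{
  /* -0.0f has only the sign bit set; (~mask) & x clears every sign bit.  */
  return _mm_andnot_ps (_mm_set1_ps (-0.0f), x);
}

static __m128
example_negate_ps (__m128 x)
{
  /* Toggling the sign bit negates each element.  */
  return _mm_xor_ps (_mm_set1_ps (-0.0f), x);
}
#endif
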
/* Perform a comparison on the lower SPFP values of A and B.  If the
   comparison is true, place a mask of all ones in the result, otherwise a
   mask of zeros.  The upper three SPFP values are passed through from A.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpeq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmplt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmple_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpgt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpltss ((__v4sf) __B,
								(__v4sf)
								__A));
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpless ((__v4sf) __B,
								(__v4sf)
								__A));
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpneq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnlt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnle_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpngt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpnltss ((__v4sf) __B,
								 (__v4sf)
								 __A));
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpnless ((__v4sf) __B,
								 (__v4sf)
								 __A));
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpunord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform a comparison on the four SPFP values of A and B.  For each
   element, if the comparison is true, place a mask of all ones in the
   result, otherwise a mask of zeros.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpeq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmplt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmple_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpgt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpneq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnlt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnle_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpngt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpunord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
}

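/* Illustrative usage sketch (guarded with #if 0; the example_* name is a
   placeholder): the all-ones/all-zeros masks produced by the packed
   comparisons combine with the bit-wise operations above to select
   between two vectors without branching.  */
#if 0
static __m128
example_select_min_ps (__m128 a, __m128 b)
{
  /* For each element, take a where a < b and b elsewhere,
     i.e. an element-wise minimum written out by hand.  */
  __m128 mask = _mm_cmplt_ps (a, b);
  return _mm_or_ps (_mm_and_ps (mask, a), _mm_andnot_ps (mask, b));
}
#endif
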
/* Compare the lower SPFP values of A and B and return 1 if true
   and 0 if false.  */

static __inline int __attribute__((__always_inline__))
_mm_comieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
}

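/* Illustrative usage sketch (guarded with #if 0; the example_* name is a
   placeholder): unlike the _mm_cmp*_ss intrinsics, these return an
   ordinary int, so they slot directly into scalar control flow; the
   ucomi forms differ only in their quiet-NaN exception behaviour.  */
#if 0
static int
example_low_is_less (__m128 a, __m128 b)
{
  /* 1 if the low element of a compares less than the low element of b.  */
  return _mm_comilt_ss (a, b);
}
#endif
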
/* Convert the lower SPFP value to a 32-bit integer according to the current
   rounding mode.  */
static __inline int __attribute__((__always_inline__))
_mm_cvtss_si32 (__m128 __A)
{
  return __builtin_ia32_cvtss2si ((__v4sf) __A);
}

static __inline int __attribute__((__always_inline__))
_mm_cvt_ss2si (__m128 __A)
{
  return _mm_cvtss_si32 (__A);
}

#ifdef __x86_64__
/* Convert the lower SPFP value to a 32-bit integer according to the
   current rounding mode.  */

/* Intel intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvtss_si64 (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvtss_si64x (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}
#endif

/* Convert the two lower SPFP values to 32-bit integers according to the
   current rounding mode.  Return the integers in packed form.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_cvtps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_cvt_ps2pi (__m128 __A)
{
  return _mm_cvtps_pi32 (__A);
}

/* Truncate the lower SPFP value to a 32-bit integer.  */
static __inline int __attribute__((__always_inline__))
_mm_cvttss_si32 (__m128 __A)
{
  return __builtin_ia32_cvttss2si ((__v4sf) __A);
}

static __inline int __attribute__((__always_inline__))
_mm_cvtt_ss2si (__m128 __A)
{
  return _mm_cvttss_si32 (__A);
}

#ifdef __x86_64__
/* Truncate the lower SPFP value to a 32-bit integer.  */

/* Intel intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvttss_si64 (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvttss_si64x (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}
#endif

/* Truncate the two lower SPFP values to 32-bit integers.  Return the
   integers in packed form.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_cvttps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_cvtt_ps2pi (__m128 __A)
{
  return _mm_cvttps_pi32 (__A);
}

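/* Illustrative usage sketch (guarded with #if 0; the example_* name is a
   placeholder): _mm_cvtss_si32 honours the current MXCSR rounding mode
   (round-to-nearest by default), while _mm_cvttss_si32 always truncates
   toward zero.  */
#if 0
static void
example_convert (int *rounded, int *truncated)
{
  __m128 v = _mm_set_ss (2.75f);
  *rounded = _mm_cvtss_si32 (v);    /* 3 under the default rounding mode.  */
  *truncated = _mm_cvttss_si32 (v); /* Always 2.  */
}
#endif
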
/* Convert B to a SPFP value and insert it as element zero in A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtsi32_ss (__m128 __A, int __B)
{
  return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cvt_si2ss (__m128 __A, int __B)
{
  return _mm_cvtsi32_ss (__A, __B);
}

#ifdef __x86_64__
/* Convert B to a SPFP value and insert it as element zero in A.  */

/* Intel intrinsic.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtsi64_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}

/* Microsoft intrinsic.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtsi64x_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}
#endif

/* Convert the two 32-bit values in B to SPFP form and insert them
   as the two lower elements in A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpi32_ps (__m128 __A, __m64 __B)
{
  return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cvt_pi2ps (__m128 __A, __m64 __B)
{
  return _mm_cvtpi32_ps (__A, __B);
}

/* Convert the four signed 16-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpi16_ps (__m64 __A)
{
  __v4hi __sign;
  __v2si __hisi, __losi;
  __v4sf __r;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A);

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);

  /* Convert the doublewords to floating point two at a time.  */
  __r = (__v4sf) _mm_setzero_ps ();
  __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
  __r = __builtin_ia32_movlhps (__r, __r);
  __r = __builtin_ia32_cvtpi2ps (__r, __losi);

  return (__m128) __r;
}

/* Convert the four unsigned 16-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpu16_ps (__m64 __A)
{
  __v2si __hisi, __losi;
  __v4sf __r;

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL);

  /* Convert the doublewords to floating point two at a time.  */
  __r = (__v4sf) _mm_setzero_ps ();
  __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
  __r = __builtin_ia32_movlhps (__r, __r);
  __r = __builtin_ia32_cvtpi2ps (__r, __losi);

  return (__m128) __r;
}

/* Convert the low four signed 8-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpi8_ps (__m64 __A)
{
  __v8qi __sign;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A);

  /* Convert the four low bytes to words.  */
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);

  return _mm_cvtpi16_ps(__A);
}

/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpu8_ps(__m64 __A)
{
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL);
  return _mm_cvtpu16_ps(__A);
}

/* Convert the four signed 32-bit values in A and B to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
{
  __v4sf __zero = (__v4sf) _mm_setzero_ps ();
  __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
  __v4sf __sfb = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__B);
  return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
}

/* Convert the four SPFP values in A to four signed 16-bit integers.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_cvtps_pi16(__m128 __A)
{
  __v4sf __hisf = (__v4sf)__A;
  __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
  __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
  __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
  return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
}

/* Convert the four SPFP values in A to four signed 8-bit integers.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_cvtps_pi8(__m128 __A)
{
  __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
  return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL);
}

/* Selects four specific SPFP values from A and B based on MASK.  */
#if 0
static __inline __m128 __attribute__((__always_inline__))
_mm_shuffle_ps (__m128 __A, __m128 __B, int __mask)
{
  return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
}
#else
#define _mm_shuffle_ps(A, B, MASK) \
 ((__m128) __builtin_ia32_shufps ((__v4sf)(A), (__v4sf)(B), (MASK)))
#endif


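/* Illustrative usage sketch (guarded with #if 0; the example_* name is a
   placeholder): the MASK argument is built with _MM_SHUFFLE, whose four
   2-bit fields pick the two low result elements from A and the two high
   result elements from B.  */
#if 0
static __m128
example_shuffles (__m128 a, __m128 b)
{
  /* Broadcast element 0 of a into all four elements.  */
  __m128 splat = _mm_shuffle_ps (a, a, _MM_SHUFFLE (0, 0, 0, 0));
  /* Result elements are { a[2], a[3], b[0], b[1] }.  */
  __m128 mixed = _mm_shuffle_ps (a, b, _MM_SHUFFLE (1, 0, 3, 2));
  return _mm_add_ps (splat, mixed);
}
#endif
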
/* Selects and interleaves the upper two SPFP values from A and B.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_unpackhi_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
}

/* Selects and interleaves the lower two SPFP values from A and B.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_unpacklo_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the upper two SPFP values with 64-bits of data loaded from P;
   the lower two values are passed through from A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_loadh_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the upper two SPFP values of A into P.  */
static __inline void __attribute__((__always_inline__))
_mm_storeh_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storehps ((__v2si *)__P, (__v4sf)__A);
}

/* Moves the upper two values of B into the lower two values of A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_movehl_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
}

/* Moves the lower two values of B into the upper two values of A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_movelh_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the lower two SPFP values with 64-bits of data loaded from P;
   the upper two values are passed through from A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_loadl_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the lower two SPFP values of A into P.  */
static __inline void __attribute__((__always_inline__))
_mm_storel_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storelps ((__v2si *)__P, (__v4sf)__A);
}

/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
static __inline int __attribute__((__always_inline__))
_mm_movemask_ps (__m128 __A)
{
  return __builtin_ia32_movmskps ((__v4sf)__A);
}

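/* Illustrative usage sketch (guarded with #if 0; the example_* names are
   placeholders): combining a packed comparison with _mm_movemask_ps
   gives cheap "any"/"all" tests over the four elements.  */
#if 0
static int
example_any_sign_set (__m128 x)
{
  /* Nonzero if any element has its sign bit set.  */
  return _mm_movemask_ps (x) != 0;
}

static int
example_all_less (__m128 a, __m128 b)
{
  /* All four mask bits set iff a[i] < b[i] for every element.  */
  return _mm_movemask_ps (_mm_cmplt_ps (a, b)) == 0xf;
}
#endif
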
/* Return the contents of the control register.  */
static __inline unsigned int __attribute__((__always_inline__))
_mm_getcsr (void)
{
  return __builtin_ia32_stmxcsr ();
}

/* Read exception bits from the control register.  */
static __inline unsigned int __attribute__((__always_inline__))
_MM_GET_EXCEPTION_STATE (void)
{
  return _mm_getcsr() & _MM_EXCEPT_MASK;
}

static __inline unsigned int __attribute__((__always_inline__))
_MM_GET_EXCEPTION_MASK (void)
{
  return _mm_getcsr() & _MM_MASK_MASK;
}

static __inline unsigned int __attribute__((__always_inline__))
_MM_GET_ROUNDING_MODE (void)
{
  return _mm_getcsr() & _MM_ROUND_MASK;
}

static __inline unsigned int __attribute__((__always_inline__))
_MM_GET_FLUSH_ZERO_MODE (void)
{
  return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
}

/* Set the control register to I.  */
static __inline void __attribute__((__always_inline__))
_mm_setcsr (unsigned int __I)
{
  __builtin_ia32_ldmxcsr (__I);
}

/* Set exception bits in the control register.  */
static __inline void __attribute__((__always_inline__))
_MM_SET_EXCEPTION_STATE(unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask);
}

static __inline void __attribute__((__always_inline__))
_MM_SET_EXCEPTION_MASK (unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask);
}

static __inline void __attribute__((__always_inline__))
_MM_SET_ROUNDING_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
}

static __inline void __attribute__((__always_inline__))
_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
}

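/* Illustrative usage sketch (guarded with #if 0; the example_* name is a
   placeholder): the _MM_SET_* helpers are read-modify-write wrappers
   around _mm_getcsr/_mm_setcsr, so they compose with the _MM_ROUND_*
   and _MM_FLUSH_ZERO_* masks defined above.  */
#if 0
static void
example_truncating_region (void)
{
  unsigned int saved = _MM_GET_ROUNDING_MODE ();
  _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO);
  /* ... SPFP code that relies on truncation ... */
  _MM_SET_ROUNDING_MODE (saved);
}
#endif
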
/* Create a vector with element 0 as F and the rest zero.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_set_ss (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, 0, 0, 0 };
}

/* Create a vector with all four elements equal to F.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_set1_ps (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
}

static __inline __m128 __attribute__((__always_inline__))
_mm_set_ps1 (float __F)
{
  return _mm_set1_ps (__F);
}

/* Create a vector with element 0 as *P and the rest zero.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_load_ss (float const *__P)
{
  return _mm_set_ss (*__P);
}

/* Create a vector with all four elements equal to *P.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_load1_ps (float const *__P)
{
  return _mm_set1_ps (*__P);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_load_ps1 (float const *__P)
{
  return _mm_load1_ps (__P);
}

/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_load_ps (float const *__P)
{
  return (__m128) *(__v4sf *)__P;
}

/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_loadu_ps (float const *__P)
{
  return (__m128) __builtin_ia32_loadups (__P);
}

/* Load four SPFP values in reverse order.  The address must be aligned.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_loadr_ps (float const *__P)
{
  __v4sf __tmp = *(__v4sf *)__P;
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
}

/* Create the vector [Z Y X W].  */
static __inline __m128 __attribute__((__always_inline__))
_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
{
  return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
}

/* Create the vector [W X Y Z].  */
static __inline __m128 __attribute__((__always_inline__))
_mm_setr_ps (float __Z, float __Y, float __X, float __W)
{
  return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
}

/* Stores the lower SPFP value.  */
static __inline void __attribute__((__always_inline__))
_mm_store_ss (float *__P, __m128 __A)
{
  *__P = __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
}

static __inline float __attribute__((__always_inline__))
_mm_cvtss_f32 (__m128 __A)
{
  return __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
}

/* Store four SPFP values.  The address must be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_store_ps (float *__P, __m128 __A)
{
  *(__v4sf *)__P = (__v4sf)__A;
}

/* Store four SPFP values.  The address need not be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_storeu_ps (float *__P, __m128 __A)
{
  __builtin_ia32_storeups (__P, (__v4sf)__A);
}

/* Store the lower SPFP value across four words.  */
static __inline void __attribute__((__always_inline__))
_mm_store1_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
  _mm_storeu_ps (__P, __tmp);
}

static __inline void __attribute__((__always_inline__))
_mm_store_ps1 (float *__P, __m128 __A)
{
  _mm_store1_ps (__P, __A);
}

/* Store four SPFP values in reverse order.  The address must be aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_storer_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
  _mm_store_ps (__P, __tmp);
}

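/* Illustrative usage sketch (guarded with #if 0; the example_* name is a
   placeholder): _mm_set_ps lists its arguments from element 3 down to
   element 0, whereas _mm_setr_ps and the load/store intrinsics follow
   memory order.  */
#if 0
static void
example_element_order (float out[4])
{
  __m128 v = _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f);
  /* out[] becomes { 1.0f, 2.0f, 3.0f, 4.0f }; the same vector would be
     produced by _mm_setr_ps (1.0f, 2.0f, 3.0f, 4.0f).  */
  _mm_storeu_ps (out, v);
}
#endif
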
/* Sets the low SPFP value of A from the low value of B.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_move_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B);
}

/* Extracts one of the four words of A.  The selector N must be immediate.  */
#if 0
static __inline int __attribute__((__always_inline__))
_mm_extract_pi16 (__m64 const __A, int const __N)
{
  return __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N);
}

static __inline int __attribute__((__always_inline__))
_m_pextrw (__m64 const __A, int const __N)
{
  return _mm_extract_pi16 (__A, __N);
}
#else
#define _mm_extract_pi16(A, N)	__builtin_ia32_vec_ext_v4hi ((__v4hi)(A), (N))
#define _m_pextrw(A, N)		_mm_extract_pi16((A), (N))
#endif

/* Inserts word D into one of four words of A.  The selector N must be
   immediate.  */
#if 0
static __inline __m64 __attribute__((__always_inline__))
_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
{
  return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pinsrw (__m64 const __A, int const __D, int const __N)
{
  return _mm_insert_pi16 (__A, __D, __N);
}
#else
#define _mm_insert_pi16(A, D, N) \
  ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(A), (D), (N)))
#define _m_pinsrw(A, D, N)	 _mm_insert_pi16((A), (D), (N))
#endif

/* Compute the element-wise maximum of signed 16-bit values.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_max_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pmaxsw (__m64 __A, __m64 __B)
{
  return _mm_max_pi16 (__A, __B);
}

/* Compute the element-wise maximum of unsigned 8-bit values.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_max_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pmaxub (__m64 __A, __m64 __B)
{
  return _mm_max_pu8 (__A, __B);
}

/* Compute the element-wise minimum of signed 16-bit values.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_min_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pminsw (__m64 __A, __m64 __B)
{
  return _mm_min_pi16 (__A, __B);
}

/* Compute the element-wise minimum of unsigned 8-bit values.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_min_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pminub (__m64 __A, __m64 __B)
{
  return _mm_min_pu8 (__A, __B);
}

/* Create an 8-bit mask of the signs of 8-bit values.  */
static __inline int __attribute__((__always_inline__))
_mm_movemask_pi8 (__m64 __A)
{
  return __builtin_ia32_pmovmskb ((__v8qi)__A);
}

static __inline int __attribute__((__always_inline__))
_m_pmovmskb (__m64 __A)
{
  return _mm_movemask_pi8 (__A);
}

/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
   in B and produce the high 16 bits of the 32-bit results.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_mulhi_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pmulhuw (__m64 __A, __m64 __B)
{
  return _mm_mulhi_pu16 (__A, __B);
}

/* Return a combination of the four 16-bit values in A.  The selector
   must be an immediate.  */
#if 0
static __inline __m64 __attribute__((__always_inline__))
_mm_shuffle_pi16 (__m64 __A, int __N)
{
  return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pshufw (__m64 __A, int __N)
{
  return _mm_shuffle_pi16 (__A, __N);
}
#else
#define _mm_shuffle_pi16(A, N) \
  ((__m64) __builtin_ia32_pshufw ((__v4hi)(A), (N)))
#define _m_pshufw(A, N)		_mm_shuffle_pi16 ((A), (N))
#endif

/* Conditionally store byte elements of A into P.  The high bit of each
   byte in the selector N determines whether the corresponding byte from
   A is stored.  */
static __inline void __attribute__((__always_inline__))
_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
{
  __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
}

static __inline void __attribute__((__always_inline__))
_m_maskmovq (__m64 __A, __m64 __N, char *__P)
{
  _mm_maskmove_si64 (__A, __N, __P);
}

/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_avg_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pavgb (__m64 __A, __m64 __B)
{
  return _mm_avg_pu8 (__A, __B);
}

/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_avg_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pavgw (__m64 __A, __m64 __B)
{
  return _mm_avg_pu16 (__A, __B);
}

/* Compute the sum of the absolute differences of the unsigned 8-bit
   values in A and B.  Return the value in the lower 16-bit word; the
   upper words are cleared.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_sad_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_psadbw (__m64 __A, __m64 __B)
{
  return _mm_sad_pu8 (__A, __B);
}

/* Loads one cache line from address P to a location "closer" to the
   processor.  The selector I specifies the type of prefetch operation.  */
#if 0
static __inline void __attribute__((__always_inline__))
_mm_prefetch (void *__P, enum _mm_hint __I)
{
  __builtin_prefetch (__P, 0, __I);
}
#else
#define _mm_prefetch(P, I) \
  __builtin_prefetch ((P), 0, (I))
#endif

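/* Illustrative usage sketch (guarded with #if 0; the example_* name and
   the prefetch distance are placeholders): since _mm_prefetch expands to
   __builtin_prefetch, the usual pattern is to prefetch a fixed distance
   ahead in a streaming loop.  */
#if 0
static float
example_prefetched_sum (const float *p, int n)
{
  float sum = 0.0f;
  int i;
  for (i = 0; i < n; i++)
    {
      if (i + 16 < n)
	_mm_prefetch ((const char *) (p + i + 16), _MM_HINT_T0);
      sum += p[i];
    }
  return sum;
}
#endif
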
/* Stores the data in A to the address P without polluting the caches.  */
static __inline void __attribute__((__always_inline__))
_mm_stream_pi (__m64 *__P, __m64 __A)
{
  __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
}

/* Likewise.  The address must be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_stream_ps (float *__P, __m128 __A)
{
  __builtin_ia32_movntps (__P, (__v4sf)__A);
}

/* Guarantees that every preceding store is globally visible before
   any subsequent store.  */
static __inline void __attribute__((__always_inline__))
_mm_sfence (void)
{
  __builtin_ia32_sfence ();
}

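/* Illustrative usage sketch (guarded with #if 0; the example_* name is a
   placeholder): non-temporal stores bypass the caches, so a producer
   normally ends with _mm_sfence before another agent reads the buffer.
   Assumes dst is 16-byte aligned and n is a multiple of 4.  */
#if 0
static void
example_stream_fill (float *dst, int n, __m128 value)
{
  int i;
  for (i = 0; i < n; i += 4)
    _mm_stream_ps (dst + i, value);
  /* Make the streamed stores globally visible.  */
  _mm_sfence ();
}
#endif
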
/* The execution of the next instruction is delayed by an implementation
   specific amount of time.  The instruction does not modify the
   architectural state.  */
static __inline void __attribute__((__always_inline__))
_mm_pause (void)
{
  __asm__ __volatile__ ("rep; nop" : : );
}

/* Transpose the 4x4 matrix composed of row[0-3].  */
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3)			\
do {									\
  __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3);	\
  __v4sf __t0 = __builtin_ia32_unpcklps (__r0, __r1);			\
  __v4sf __t1 = __builtin_ia32_unpcklps (__r2, __r3);			\
  __v4sf __t2 = __builtin_ia32_unpckhps (__r0, __r1);			\
  __v4sf __t3 = __builtin_ia32_unpckhps (__r2, __r3);			\
  (row0) = __builtin_ia32_movlhps (__t0, __t1);				\
  (row1) = __builtin_ia32_movhlps (__t1, __t0);				\
  (row2) = __builtin_ia32_movlhps (__t2, __t3);				\
  (row3) = __builtin_ia32_movhlps (__t3, __t2);				\
} while (0)

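/* Illustrative usage sketch (guarded with #if 0; the example_* name is a
   placeholder): the macro rewrites its four row arguments in place, so
   they must be modifiable __m128 lvalues.  */
#if 0
static void
example_transpose (float m[4][4])
{
  __m128 r0 = _mm_loadu_ps (m[0]);
  __m128 r1 = _mm_loadu_ps (m[1]);
  __m128 r2 = _mm_loadu_ps (m[2]);
  __m128 r3 = _mm_loadu_ps (m[3]);
  _MM_TRANSPOSE4_PS (r0, r1, r2, r3);
  _mm_storeu_ps (m[0], r0);
  _mm_storeu_ps (m[1], r1);
  _mm_storeu_ps (m[2], r2);
  _mm_storeu_ps (m[3], r3);
}
#endif
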
/* For backward source compatibility.  */
#include <emmintrin.h>

#endif /* __SSE__ */
#endif /* _XMMINTRIN_H_INCLUDED */