/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to the Free
   Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */

#ifndef _MMINTRIN_H_INCLUDED
#define _MMINTRIN_H_INCLUDED

/* The data type intended for user use.  */
typedef unsigned long long __m64, __int64;

/* Internal data types for implementing the intrinsics.  */
typedef int __v2si __attribute__ ((vector_size (8)));
typedef short __v4hi __attribute__ ((vector_size (8)));
typedef char __v8qi __attribute__ ((vector_size (8)));

/* "Convert" __m64 and __int64 into each other.  */
static __inline __m64
_mm_cvtsi64_m64 (__int64 __i)
{
  return __i;
}

static __inline __int64
_mm_cvtm64_si64 (__m64 __i)
{
  return __i;
}

static __inline int
_mm_cvtsi64_si32 (__int64 __i)
{
  return __i;
}

static __inline __int64
_mm_cvtsi32_si64 (int __i)
{
  return __i;
}

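/* Usage sketch (illustrative only, not part of the API): these
   "conversions" are plain value copies, so a round trip through
   _mm_cvtsi64_m64 and _mm_cvtm64_si64 preserves all 64 bits, while
   _mm_cvtsi64_si32 keeps only the low 32 bits.

     __int64 __x = 0x0123456789abcdefULL;
     __m64 __v = _mm_cvtsi64_m64 (__x);
     __int64 __y = _mm_cvtm64_si64 (__v);
     int __lo = _mm_cvtsi64_si32 (__x);

   Here __y equals __x, and __lo holds the low-order bits
   0x89abcdef.  */
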
/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
   the result, and the four 16-bit values from M2 into the upper four 8-bit
   values of the result, all with signed saturation.  */
static __inline __m64
_mm_packs_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wpackhss ((__v4hi)__m1, (__v4hi)__m2);
}

/* Pack the two 32-bit values from M1 into the lower two 16-bit values of
   the result, and the two 32-bit values from M2 into the upper two 16-bit
   values of the result, all with signed saturation.  */
static __inline __m64
_mm_packs_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wpackwss ((__v2si)__m1, (__v2si)__m2);
}

/* Copy the 64-bit value from M1 into the lower 32-bits of the result, and
   the 64-bit value from M2 into the upper 32-bits of the result, all with
   signed saturation for values that do not fit exactly into 32-bits.  */
static __inline __m64
_mm_packs_pi64 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wpackdss ((long long)__m1, (long long)__m2);
}

/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
   the result, and the four 16-bit values from M2 into the upper four 8-bit
   values of the result, all with unsigned saturation.  */
static __inline __m64
_mm_packs_pu16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wpackhus ((__v4hi)__m1, (__v4hi)__m2);
}

/* Pack the two 32-bit values from M1 into the lower two 16-bit values of
   the result, and the two 32-bit values from M2 into the upper two 16-bit
   values of the result, all with unsigned saturation.  */
static __inline __m64
_mm_packs_pu32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wpackwus ((__v2si)__m1, (__v2si)__m2);
}

/* Copy the 64-bit value from M1 into the lower 32-bits of the result, and
   the 64-bit value from M2 into the upper 32-bits of the result, all with
   unsigned saturation for values that do not fit exactly into 32-bits.  */
static __inline __m64
_mm_packs_pu64 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wpackdus ((long long)__m1, (long long)__m2);
}

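/* Usage sketch (illustrative only; the variable names are
   hypothetical): packing narrows each source lane to half its width
   and clamps values that do not fit.

     __m64 __lo = _mm_set_pi16 (300, -300, 5, -5);
     __m64 __hi = _mm_set_pi16 (4, 3, 2, 1);
     __m64 __r = _mm_packs_pi16 (__lo, __hi);

   With signed saturation, 300 clamps to 127 and -300 to -128, so __r
   holds -5, 5, -128, 127, 1, 2, 3, 4 from least to most significant
   byte.  */
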
/* Interleave the four 8-bit values from the high half of M1 with the four
   8-bit values from the high half of M2.  */
static __inline __m64
_mm_unpackhi_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wunpckihb ((__v8qi)__m1, (__v8qi)__m2);
}

/* Interleave the two 16-bit values from the high half of M1 with the two
   16-bit values from the high half of M2.  */
static __inline __m64
_mm_unpackhi_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wunpckihh ((__v4hi)__m1, (__v4hi)__m2);
}

/* Interleave the 32-bit value from the high half of M1 with the 32-bit
   value from the high half of M2.  */
static __inline __m64
_mm_unpackhi_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wunpckihw ((__v2si)__m1, (__v2si)__m2);
}

/* Interleave the four 8-bit values from the low half of M1 with the four
   8-bit values from the low half of M2.  */
static __inline __m64
_mm_unpacklo_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wunpckilb ((__v8qi)__m1, (__v8qi)__m2);
}

/* Interleave the two 16-bit values from the low half of M1 with the two
   16-bit values from the low half of M2.  */
static __inline __m64
_mm_unpacklo_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wunpckilh ((__v4hi)__m1, (__v4hi)__m2);
}

/* Interleave the 32-bit value from the low half of M1 with the 32-bit
   value from the low half of M2.  */
static __inline __m64
_mm_unpacklo_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wunpckilw ((__v2si)__m1, (__v2si)__m2);
}

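/* Usage sketch (illustrative only, assuming the MMX-style interleave
   order with M1 supplying the even output lanes): interleaving with a
   zero vector is the classic idiom for widening unsigned data.

     __m64 __a = _mm_set_pi8 (7, 6, 5, 4, 3, 2, 1, 0);
     __m64 __zero = _mm_setzero_si64 ();
     __m64 __w = _mm_unpacklo_pi8 (__a, __zero);

   From least to most significant byte, __w holds 0, 0, 1, 0, 2, 0,
   3, 0, i.e. the low four bytes of __a zero extended to 16-bit
   lanes.  */
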
/* Take the four 8-bit values from the low half of M1, sign extend them,
   and return the result as a vector of four 16-bit quantities.  */
static __inline __m64
_mm_unpackel_pi8 (__m64 __m1)
{
  return (__m64) __builtin_arm_wunpckelsb ((__v8qi)__m1);
}

/* Take the two 16-bit values from the low half of M1, sign extend them,
   and return the result as a vector of two 32-bit quantities.  */
static __inline __m64
_mm_unpackel_pi16 (__m64 __m1)
{
  return (__m64) __builtin_arm_wunpckelsh ((__v4hi)__m1);
}

/* Take the 32-bit value from the low half of M1, and return it sign extended
   to 64 bits.  */
static __inline __m64
_mm_unpackel_pi32 (__m64 __m1)
{
  return (__m64) __builtin_arm_wunpckelsw ((__v2si)__m1);
}

/* Take the four 8-bit values from the high half of M1, sign extend them,
   and return the result as a vector of four 16-bit quantities.  */
static __inline __m64
_mm_unpackeh_pi8 (__m64 __m1)
{
  return (__m64) __builtin_arm_wunpckehsb ((__v8qi)__m1);
}

/* Take the two 16-bit values from the high half of M1, sign extend them,
   and return the result as a vector of two 32-bit quantities.  */
static __inline __m64
_mm_unpackeh_pi16 (__m64 __m1)
{
  return (__m64) __builtin_arm_wunpckehsh ((__v4hi)__m1);
}

/* Take the 32-bit value from the high half of M1, and return it sign extended
   to 64 bits.  */
static __inline __m64
_mm_unpackeh_pi32 (__m64 __m1)
{
  return (__m64) __builtin_arm_wunpckehsw ((__v2si)__m1);
}

/* Take the four 8-bit values from the low half of M1, zero extend them,
   and return the result as a vector of four 16-bit quantities.  */
static __inline __m64
_mm_unpackel_pu8 (__m64 __m1)
{
  return (__m64) __builtin_arm_wunpckelub ((__v8qi)__m1);
}

/* Take the two 16-bit values from the low half of M1, zero extend them,
   and return the result as a vector of two 32-bit quantities.  */
static __inline __m64
_mm_unpackel_pu16 (__m64 __m1)
{
  return (__m64) __builtin_arm_wunpckeluh ((__v4hi)__m1);
}

/* Take the 32-bit value from the low half of M1, and return it zero extended
   to 64 bits.  */
static __inline __m64
_mm_unpackel_pu32 (__m64 __m1)
{
  return (__m64) __builtin_arm_wunpckeluw ((__v2si)__m1);
}

/* Take the four 8-bit values from the high half of M1, zero extend them,
   and return the result as a vector of four 16-bit quantities.  */
static __inline __m64
_mm_unpackeh_pu8 (__m64 __m1)
{
  return (__m64) __builtin_arm_wunpckehub ((__v8qi)__m1);
}

/* Take the two 16-bit values from the high half of M1, zero extend them,
   and return the result as a vector of two 32-bit quantities.  */
static __inline __m64
_mm_unpackeh_pu16 (__m64 __m1)
{
  return (__m64) __builtin_arm_wunpckehuh ((__v4hi)__m1);
}

/* Take the 32-bit value from the high half of M1, and return it zero extended
   to 64 bits.  */
static __inline __m64
_mm_unpackeh_pu32 (__m64 __m1)
{
  return (__m64) __builtin_arm_wunpckehuw ((__v2si)__m1);
}

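/* Usage sketch (illustrative only): unlike the interleaving unpacks,
   these single-operand forms widen one half of a vector directly.

     __m64 __a = _mm_set_pi8 (0, 0, 0, 0, -1, 2, -3, 4);
     __m64 __s = _mm_unpackel_pi8 (__a);
     __m64 __u = _mm_unpackel_pu8 (__a);

   Read as four 16-bit lanes from least to most significant, __s holds
   4, -3, 2, -1 (sign extended), while __u holds 4, 253, 2, 255 (zero
   extended).  */
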
/* Add the 8-bit values in M1 to the 8-bit values in M2.  */
static __inline __m64
_mm_add_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_waddb ((__v8qi)__m1, (__v8qi)__m2);
}

/* Add the 16-bit values in M1 to the 16-bit values in M2.  */
static __inline __m64
_mm_add_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_waddh ((__v4hi)__m1, (__v4hi)__m2);
}

/* Add the 32-bit values in M1 to the 32-bit values in M2.  */
static __inline __m64
_mm_add_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_waddw ((__v2si)__m1, (__v2si)__m2);
}

/* Add the 8-bit values in M1 to the 8-bit values in M2 using signed
   saturated arithmetic.  */
static __inline __m64
_mm_adds_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_waddbss ((__v8qi)__m1, (__v8qi)__m2);
}

/* Add the 16-bit values in M1 to the 16-bit values in M2 using signed
   saturated arithmetic.  */
static __inline __m64
_mm_adds_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_waddhss ((__v4hi)__m1, (__v4hi)__m2);
}

/* Add the 32-bit values in M1 to the 32-bit values in M2 using signed
   saturated arithmetic.  */
static __inline __m64
_mm_adds_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_waddwss ((__v2si)__m1, (__v2si)__m2);
}

/* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned
   saturated arithmetic.  */
static __inline __m64
_mm_adds_pu8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_waddbus ((__v8qi)__m1, (__v8qi)__m2);
}

/* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned
   saturated arithmetic.  */
static __inline __m64
_mm_adds_pu16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_waddhus ((__v4hi)__m1, (__v4hi)__m2);
}

/* Add the 32-bit values in M1 to the 32-bit values in M2 using unsigned
   saturated arithmetic.  */
static __inline __m64
_mm_adds_pu32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_waddwus ((__v2si)__m1, (__v2si)__m2);
}

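/* Usage sketch (illustrative only): wraparound versus saturating
   addition on the same inputs.

     __m64 __a = _mm_set1_pi8 (100);
     __m64 __w = _mm_add_pi8 (__a, __a);
     __m64 __s = _mm_adds_pi8 (__a, __a);

   Each lane of __w wraps to -56 (200 modulo 256, read as signed),
   while each lane of __s saturates to 127.  */
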
/* Subtract the 8-bit values in M2 from the 8-bit values in M1.  */
static __inline __m64
_mm_sub_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wsubb ((__v8qi)__m1, (__v8qi)__m2);
}

/* Subtract the 16-bit values in M2 from the 16-bit values in M1.  */
static __inline __m64
_mm_sub_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wsubh ((__v4hi)__m1, (__v4hi)__m2);
}

/* Subtract the 32-bit values in M2 from the 32-bit values in M1.  */
static __inline __m64
_mm_sub_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wsubw ((__v2si)__m1, (__v2si)__m2);
}

/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed
   saturating arithmetic.  */
static __inline __m64
_mm_subs_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wsubbss ((__v8qi)__m1, (__v8qi)__m2);
}

/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
   signed saturating arithmetic.  */
static __inline __m64
_mm_subs_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wsubhss ((__v4hi)__m1, (__v4hi)__m2);
}

/* Subtract the 32-bit values in M2 from the 32-bit values in M1 using
   signed saturating arithmetic.  */
static __inline __m64
_mm_subs_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wsubwss ((__v2si)__m1, (__v2si)__m2);
}

/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using
   unsigned saturating arithmetic.  */
static __inline __m64
_mm_subs_pu8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wsubbus ((__v8qi)__m1, (__v8qi)__m2);
}

/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
   unsigned saturating arithmetic.  */
static __inline __m64
_mm_subs_pu16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wsubhus ((__v4hi)__m1, (__v4hi)__m2);
}

/* Subtract the 32-bit values in M2 from the 32-bit values in M1 using
   unsigned saturating arithmetic.  */
static __inline __m64
_mm_subs_pu32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wsubwus ((__v2si)__m1, (__v2si)__m2);
}

/* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing
   four 32-bit intermediate results, which are then summed by pairs to
   produce two 32-bit results.  */
static __inline __m64
_mm_madd_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wmadds ((__v4hi)__m1, (__v4hi)__m2);
}

/* Multiply four unsigned 16-bit values in M1 by four unsigned 16-bit values
   in M2 producing four 32-bit intermediate results, which are then summed by
   pairs to produce two 32-bit results.  */
static __inline __m64
_mm_madd_pu16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wmaddu ((__v4hi)__m1, (__v4hi)__m2);
}

/* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
   M2 and produce the high 16 bits of the 32-bit results.  */
static __inline __m64
_mm_mulhi_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wmulsm ((__v4hi)__m1, (__v4hi)__m2);
}

/* Multiply four unsigned 16-bit values in M1 by four unsigned 16-bit values
   in M2 and produce the high 16 bits of the 32-bit results.  */
static __inline __m64
_mm_mulhi_pu16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wmulum ((__v4hi)__m1, (__v4hi)__m2);
}

/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
   the low 16 bits of the results.  */
static __inline __m64
_mm_mullo_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wmulul ((__v4hi)__m1, (__v4hi)__m2);
}

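/* Usage sketch (illustrative only, assuming the pairs are summed from
   the least significant lane upward): _mm_madd_pi16 is the usual
   building block for small dot products.

     __m64 __a = _mm_set_pi16 (4, 3, 2, 1);
     __m64 __b = _mm_set_pi16 (8, 7, 6, 5);
     __m64 __r = _mm_madd_pi16 (__a, __b);

   The low 32-bit lane of __r holds 1*5 + 2*6 = 17 and the high lane
   holds 3*7 + 4*8 = 53.  _mm_mulhi_pi16 and _mm_mullo_pi16 instead
   keep the high or low 16 bits of each individual product.  */
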
/* Shift four 16-bit values in M left by COUNT.  */
static __inline __m64
_mm_sll_pi16 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_arm_wsllh ((__v4hi)__m, __count);
}

static __inline __m64
_mm_slli_pi16 (__m64 __m, int __count)
{
  return (__m64) __builtin_arm_wsllhi ((__v4hi)__m, __count);
}

/* Shift two 32-bit values in M left by COUNT.  */
static __inline __m64
_mm_sll_pi32 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_arm_wsllw ((__v2si)__m, __count);
}

static __inline __m64
_mm_slli_pi32 (__m64 __m, int __count)
{
  return (__m64) __builtin_arm_wsllwi ((__v2si)__m, __count);
}

/* Shift the 64-bit value in M left by COUNT.  */
static __inline __m64
_mm_sll_si64 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_arm_wslld (__m, __count);
}

static __inline __m64
_mm_slli_si64 (__m64 __m, int __count)
{
  return (__m64) __builtin_arm_wslldi (__m, __count);
}

/* Shift four 16-bit values in M right by COUNT; shift in the sign bit.  */
static __inline __m64
_mm_sra_pi16 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_arm_wsrah ((__v4hi)__m, __count);
}

static __inline __m64
_mm_srai_pi16 (__m64 __m, int __count)
{
  return (__m64) __builtin_arm_wsrahi ((__v4hi)__m, __count);
}

/* Shift two 32-bit values in M right by COUNT; shift in the sign bit.  */
static __inline __m64
_mm_sra_pi32 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_arm_wsraw ((__v2si)__m, __count);
}

static __inline __m64
_mm_srai_pi32 (__m64 __m, int __count)
{
  return (__m64) __builtin_arm_wsrawi ((__v2si)__m, __count);
}

/* Shift the 64-bit value in M right by COUNT; shift in the sign bit.  */
static __inline __m64
_mm_sra_si64 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_arm_wsrad (__m, __count);
}

static __inline __m64
_mm_srai_si64 (__m64 __m, int __count)
{
  return (__m64) __builtin_arm_wsradi (__m, __count);
}

/* Shift four 16-bit values in M right by COUNT; shift in zeros.  */
static __inline __m64
_mm_srl_pi16 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_arm_wsrlh ((__v4hi)__m, __count);
}

static __inline __m64
_mm_srli_pi16 (__m64 __m, int __count)
{
  return (__m64) __builtin_arm_wsrlhi ((__v4hi)__m, __count);
}

/* Shift two 32-bit values in M right by COUNT; shift in zeros.  */
static __inline __m64
_mm_srl_pi32 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_arm_wsrlw ((__v2si)__m, __count);
}

static __inline __m64
_mm_srli_pi32 (__m64 __m, int __count)
{
  return (__m64) __builtin_arm_wsrlwi ((__v2si)__m, __count);
}

/* Shift the 64-bit value in M right by COUNT; shift in zeros.  */
static __inline __m64
_mm_srl_si64 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_arm_wsrld (__m, __count);
}

static __inline __m64
_mm_srli_si64 (__m64 __m, int __count)
{
  return (__m64) __builtin_arm_wsrldi (__m, __count);
}

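/* Usage sketch (illustrative only): arithmetic right shifts replicate
   the sign bit, logical right shifts insert zeros.

     __m64 __a = _mm_set1_pi16 (-16);
     __m64 __sra = _mm_srai_pi16 (__a, 2);
     __m64 __srl = _mm_srli_pi16 (__a, 2);

   Each 16-bit lane of __sra holds -4, while each lane of __srl holds
   0x3ffc, the sign bits having been shifted out and replaced with
   zeros.  */
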
/* Rotate four 16-bit values in M right by COUNT.  */
static __inline __m64
_mm_ror_pi16 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_arm_wrorh ((__v4hi)__m, __count);
}

static __inline __m64
_mm_rori_pi16 (__m64 __m, int __count)
{
  return (__m64) __builtin_arm_wrorhi ((__v4hi)__m, __count);
}

/* Rotate two 32-bit values in M right by COUNT.  */
static __inline __m64
_mm_ror_pi32 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_arm_wrorw ((__v2si)__m, __count);
}

static __inline __m64
_mm_rori_pi32 (__m64 __m, int __count)
{
  return (__m64) __builtin_arm_wrorwi ((__v2si)__m, __count);
}

/* Rotate the 64-bit value in M right by COUNT.  */
static __inline __m64
_mm_ror_si64 (__m64 __m, __m64 __count)
{
  return (__m64) __builtin_arm_wrord (__m, __count);
}

static __inline __m64
_mm_rori_si64 (__m64 __m, int __count)
{
  return (__m64) __builtin_arm_wrordi (__m, __count);
}

/* Bit-wise AND the 64-bit values in M1 and M2.  */
static __inline __m64
_mm_and_si64 (__m64 __m1, __m64 __m2)
{
  return __builtin_arm_wand (__m1, __m2);
}

/* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
   64-bit value in M2.  */
static __inline __m64
_mm_andnot_si64 (__m64 __m1, __m64 __m2)
{
  return __builtin_arm_wandn (__m1, __m2);
}

/* Bit-wise inclusive OR the 64-bit values in M1 and M2.  */
static __inline __m64
_mm_or_si64 (__m64 __m1, __m64 __m2)
{
  return __builtin_arm_wor (__m1, __m2);
}

/* Bit-wise exclusive OR the 64-bit values in M1 and M2.  */
static __inline __m64
_mm_xor_si64 (__m64 __m1, __m64 __m2)
{
  return __builtin_arm_wxor (__m1, __m2);
}

/* Compare eight 8-bit values.  The result of the comparison is 0xFF if the
   test is true and zero if false.  */
static __inline __m64
_mm_cmpeq_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wcmpeqb ((__v8qi)__m1, (__v8qi)__m2);
}

static __inline __m64
_mm_cmpgt_pi8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wcmpgtsb ((__v8qi)__m1, (__v8qi)__m2);
}

static __inline __m64
_mm_cmpgt_pu8 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wcmpgtub ((__v8qi)__m1, (__v8qi)__m2);
}

/* Compare four 16-bit values.  The result of the comparison is 0xFFFF if
   the test is true and zero if false.  */
static __inline __m64
_mm_cmpeq_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wcmpeqh ((__v4hi)__m1, (__v4hi)__m2);
}

static __inline __m64
_mm_cmpgt_pi16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wcmpgtsh ((__v4hi)__m1, (__v4hi)__m2);
}

static __inline __m64
_mm_cmpgt_pu16 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wcmpgtuh ((__v4hi)__m1, (__v4hi)__m2);
}

/* Compare two 32-bit values.  The result of the comparison is 0xFFFFFFFF if
   the test is true and zero if false.  */
static __inline __m64
_mm_cmpeq_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wcmpeqw ((__v2si)__m1, (__v2si)__m2);
}

static __inline __m64
_mm_cmpgt_pi32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wcmpgtsw ((__v2si)__m1, (__v2si)__m2);
}

static __inline __m64
_mm_cmpgt_pu32 (__m64 __m1, __m64 __m2)
{
  return (__m64) __builtin_arm_wcmpgtuw ((__v2si)__m1, (__v2si)__m2);
}

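/* Usage sketch (illustrative only): because each comparison lane comes
   back as all ones or all zeros, the compares combine with the logical
   operations above into a branchless per-lane select; here a signed
   16-bit maximum built by hand.

     __m64 __a = _mm_set_pi16 (1, 8, 3, 9);
     __m64 __b = _mm_set_pi16 (2, 7, 4, 6);
     __m64 __mask = _mm_cmpgt_pi16 (__a, __b);
     __m64 __max = _mm_or_si64 (_mm_and_si64 (__mask, __a),
                                _mm_andnot_si64 (__mask, __b));

   _mm_andnot_si64 keeps the lanes of __b where the mask is zero, so
   __max holds 9, 4, 8, 2 from least to most significant.  (The
   dedicated _mm_max_pi16 further down does this in one
   instruction.)  */
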
/* Element-wise multiplication of the unsigned 16-bit values in __B and
   __C; the four products are then summed and added to __A.  */
static __inline __m64
_mm_mac_pu16 (__m64 __A, __m64 __B, __m64 __C)
{
  return __builtin_arm_wmacu (__A, (__v4hi)__B, (__v4hi)__C);
}

/* Element-wise multiplication of the signed 16-bit values in __B and
   __C; the four products are then summed and added to __A.  */
static __inline __m64
_mm_mac_pi16 (__m64 __A, __m64 __B, __m64 __C)
{
  return __builtin_arm_wmacs (__A, (__v4hi)__B, (__v4hi)__C);
}

/* Element-wise multiplication of the unsigned 16-bit values in __A and
   __B; the four products are then summed, starting from a zeroed
   accumulator.  */
static __inline __m64
_mm_macz_pu16 (__m64 __A, __m64 __B)
{
  return __builtin_arm_wmacuz ((__v4hi)__A, (__v4hi)__B);
}

/* Element-wise multiplication of the signed 16-bit values in __A and
   __B; the four products are then summed, starting from a zeroed
   accumulator.  */
static __inline __m64
_mm_macz_pi16 (__m64 __A, __m64 __B)
{
  return __builtin_arm_wmacsz ((__v4hi)__A, (__v4hi)__B);
}

/* Accumulate across all unsigned 8-bit values in __A.  */
static __inline __m64
_mm_acc_pu8 (__m64 __A)
{
  return __builtin_arm_waccb ((__v8qi)__A);
}

/* Accumulate across all unsigned 16-bit values in __A.  */
static __inline __m64
_mm_acc_pu16 (__m64 __A)
{
  return __builtin_arm_wacch ((__v4hi)__A);
}

/* Accumulate across all unsigned 32-bit values in __A.  */
static __inline __m64
_mm_acc_pu32 (__m64 __A)
{
  return __builtin_arm_waccw ((__v2si)__A);
}

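/* The _mm_mia*_si64 intrinsics below wrap the iWMMXt TMIA family of
   multiply-accumulate instructions.  As a rough guide (the iWMMXt
   reference manual is authoritative): _mm_mia_si64 adds the 32x32
   product of __B and __C to the 64-bit accumulator __A;
   _mm_miaph_si64 adds both 16x16 products of the packed halfwords;
   and the bb/bt/tb/tt forms multiply the chosen bottom (b) or top (t)
   16-bit halves of __B and __C.  */
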
static __inline __m64
_mm_mia_si64 (__m64 __A, int __B, int __C)
{
  return __builtin_arm_tmia (__A, __B, __C);
}

static __inline __m64
_mm_miaph_si64 (__m64 __A, int __B, int __C)
{
  return __builtin_arm_tmiaph (__A, __B, __C);
}

static __inline __m64
_mm_miabb_si64 (__m64 __A, int __B, int __C)
{
  return __builtin_arm_tmiabb (__A, __B, __C);
}

static __inline __m64
_mm_miabt_si64 (__m64 __A, int __B, int __C)
{
  return __builtin_arm_tmiabt (__A, __B, __C);
}

static __inline __m64
_mm_miatb_si64 (__m64 __A, int __B, int __C)
{
  return __builtin_arm_tmiatb (__A, __B, __C);
}

static __inline __m64
_mm_miatt_si64 (__m64 __A, int __B, int __C)
{
  return __builtin_arm_tmiatt (__A, __B, __C);
}

/* Extract one of the elements of A and sign extend.  The selector N must
   be immediate.  */
#define _mm_extract_pi8(A, N) __builtin_arm_textrmsb ((__v8qi)(A), (N))
#define _mm_extract_pi16(A, N) __builtin_arm_textrmsh ((__v4hi)(A), (N))
#define _mm_extract_pi32(A, N) __builtin_arm_textrmsw ((__v2si)(A), (N))

/* Extract one of the elements of A and zero extend.  The selector N must
   be immediate.  */
#define _mm_extract_pu8(A, N) __builtin_arm_textrmub ((__v8qi)(A), (N))
#define _mm_extract_pu16(A, N) __builtin_arm_textrmuh ((__v4hi)(A), (N))
#define _mm_extract_pu32(A, N) __builtin_arm_textrmuw ((__v2si)(A), (N))

/* Inserts word D into one of the elements of A.  The selector N must be
   immediate.  */
#define _mm_insert_pi8(A, D, N) \
  ((__m64) __builtin_arm_tinsrb ((__v8qi)(A), (D), (N)))
#define _mm_insert_pi16(A, D, N) \
  ((__m64) __builtin_arm_tinsrh ((__v4hi)(A), (D), (N)))
#define _mm_insert_pi32(A, D, N) \
  ((__m64) __builtin_arm_tinsrw ((__v2si)(A), (D), (N)))

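/* Usage sketch (illustrative only, assuming lanes are numbered from
   the least significant end): the selector must be a compile-time
   constant.

     __m64 __a = _mm_set_pi16 (4, 3, 2, 1);
     int __x = _mm_extract_pi16 (__a, 2);
     __m64 __b = _mm_insert_pi16 (__a, 9, 0);

   Here __x holds 3, and __b holds the lanes 9, 2, 3, 4 from least to
   most significant.  */
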
/* Compute the element-wise maximum of signed 8-bit values.  */
static __inline __m64
_mm_max_pi8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_arm_wmaxsb ((__v8qi)__A, (__v8qi)__B);
}

/* Compute the element-wise maximum of signed 16-bit values.  */
static __inline __m64
_mm_max_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_arm_wmaxsh ((__v4hi)__A, (__v4hi)__B);
}

/* Compute the element-wise maximum of signed 32-bit values.  */
static __inline __m64
_mm_max_pi32 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_arm_wmaxsw ((__v2si)__A, (__v2si)__B);
}

/* Compute the element-wise maximum of unsigned 8-bit values.  */
static __inline __m64
_mm_max_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_arm_wmaxub ((__v8qi)__A, (__v8qi)__B);
}

/* Compute the element-wise maximum of unsigned 16-bit values.  */
static __inline __m64
_mm_max_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_arm_wmaxuh ((__v4hi)__A, (__v4hi)__B);
}

/* Compute the element-wise maximum of unsigned 32-bit values.  */
static __inline __m64
_mm_max_pu32 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_arm_wmaxuw ((__v2si)__A, (__v2si)__B);
}

/* Compute the element-wise minimum of signed 8-bit values.  */
static __inline __m64
_mm_min_pi8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_arm_wminsb ((__v8qi)__A, (__v8qi)__B);
}

/* Compute the element-wise minimum of signed 16-bit values.  */
static __inline __m64
_mm_min_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_arm_wminsh ((__v4hi)__A, (__v4hi)__B);
}

/* Compute the element-wise minimum of signed 32-bit values.  */
static __inline __m64
_mm_min_pi32 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_arm_wminsw ((__v2si)__A, (__v2si)__B);
}

/* Compute the element-wise minimum of unsigned 8-bit values.  */
static __inline __m64
_mm_min_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_arm_wminub ((__v8qi)__A, (__v8qi)__B);
}

/* Compute the element-wise minimum of unsigned 16-bit values.  */
static __inline __m64
_mm_min_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_arm_wminuh ((__v4hi)__A, (__v4hi)__B);
}

/* Compute the element-wise minimum of unsigned 32-bit values.  */
static __inline __m64
_mm_min_pu32 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_arm_wminuw ((__v2si)__A, (__v2si)__B);
}

/* Create an 8-bit mask of the signs of 8-bit values.  */
static __inline int
_mm_movemask_pi8 (__m64 __A)
{
  return __builtin_arm_tmovmskb ((__v8qi)__A);
}

/* Create an 8-bit mask of the signs of 16-bit values.  */
static __inline int
_mm_movemask_pi16 (__m64 __A)
{
  return __builtin_arm_tmovmskh ((__v4hi)__A);
}

/* Create an 8-bit mask of the signs of 32-bit values.  */
static __inline int
_mm_movemask_pi32 (__m64 __A)
{
  return __builtin_arm_tmovmskw ((__v2si)__A);
}

/* Return a combination of the four 16-bit values in A.  The selector
   must be an immediate.  */
#define _mm_shuffle_pi16(A, N) \
  ((__m64) __builtin_arm_wshufh ((__v4hi)(A), (N)))

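/* Usage sketch (illustrative only, assuming the selector uses the same
   encoding as the x86 pshufw intrinsic): N is read as four two-bit
   source-lane indices, least significant pair first.

     __m64 __a = _mm_set_pi16 (4, 3, 2, 1);
     __m64 __r = _mm_shuffle_pi16 (__a, 0x1b);

   0x1b is binary 00 01 10 11, so lane 0 of __r comes from lane 3 of
   __a and so on, reversing the vector to 4, 3, 2, 1 from least to
   most significant.  */
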
/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
static __inline __m64
_mm_avg_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_arm_wavg2br ((__v8qi)__A, (__v8qi)__B);
}

/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
static __inline __m64
_mm_avg_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_arm_wavg2hr ((__v4hi)__A, (__v4hi)__B);
}

/* Compute the averages of the unsigned 8-bit values in A and B.  */
static __inline __m64
_mm_avg2_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_arm_wavg2b ((__v8qi)__A, (__v8qi)__B);
}

/* Compute the averages of the unsigned 16-bit values in A and B.  */
static __inline __m64
_mm_avg2_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_arm_wavg2h ((__v4hi)__A, (__v4hi)__B);
}

/* Compute the sum of the absolute differences of the unsigned 8-bit
   values in A and B.  Return the value in the lower 16-bit word; the
   upper words are cleared.  */
static __inline __m64
_mm_sad_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_arm_wsadb ((__v8qi)__A, (__v8qi)__B);
}

/* Compute the sum of the absolute differences of the unsigned 16-bit
   values in A and B.  Return the value in the lower 32-bit word; the
   upper words are cleared.  */
static __inline __m64
_mm_sad_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_arm_wsadh ((__v4hi)__A, (__v4hi)__B);
}

/* Compute the sum of the absolute differences of the unsigned 8-bit
   values in A and B.  Return the value in the lower 16-bit word; the
   upper words are cleared.  */
static __inline __m64
_mm_sadz_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_arm_wsadbz ((__v8qi)__A, (__v8qi)__B);
}

/* Compute the sum of the absolute differences of the unsigned 16-bit
   values in A and B.  Return the value in the lower 32-bit word; the
   upper words are cleared.  */
static __inline __m64
_mm_sadz_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_arm_wsadhz ((__v4hi)__A, (__v4hi)__B);
}

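/* Usage sketch (illustrative only): the sum of absolute differences is
   the usual kernel for block matching in motion estimation.

     __m64 __a = _mm_set1_pi8 (9);
     __m64 __b = _mm_set1_pi8 (8);
     __m64 __r = _mm_sadz_pu8 (__a, __b);

   The low word of __r holds 8 (eight lanes, each differing by 1).
   The z suffix mirrors the WSADZ instruction, which zeroes the
   accumulator first; the plain forms map to the accumulating WSAD
   encoding.

   _mm_align_si64 below is, roughly, a byte-granular extract: it pulls
   eight consecutive bytes out of the concatenation of its two
   operands, starting at byte offset __C (a WALIGN operation; see the
   iWMMXt reference for the exact operand order).  */
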
static __inline __m64
_mm_align_si64 (__m64 __A, __m64 __B, int __C)
{
  return (__m64) __builtin_arm_walign ((__v8qi)__A, (__v8qi)__B, __C);
}

/* Creates a 64-bit zero.  */
static __inline __m64
_mm_setzero_si64 (void)
{
  return __builtin_arm_wzero ();
}

/* Set and get arbitrary iWMMXt control registers.  Note that only
   registers 0-3 and 8-11 are currently defined; the rest are
   reserved.  */

static __inline void
_mm_setwcx (const int __value, const int __regno)
{
  switch (__regno)
    {
    case 0:  __builtin_arm_setwcx (__value, 0); break;
    case 1:  __builtin_arm_setwcx (__value, 1); break;
    case 2:  __builtin_arm_setwcx (__value, 2); break;
    case 3:  __builtin_arm_setwcx (__value, 3); break;
    case 8:  __builtin_arm_setwcx (__value, 8); break;
    case 9:  __builtin_arm_setwcx (__value, 9); break;
    case 10: __builtin_arm_setwcx (__value, 10); break;
    case 11: __builtin_arm_setwcx (__value, 11); break;
    default: break;
    }
}

static __inline int
_mm_getwcx (const int __regno)
{
  switch (__regno)
    {
    case 0:  return __builtin_arm_getwcx (0);
    case 1:  return __builtin_arm_getwcx (1);
    case 2:  return __builtin_arm_getwcx (2);
    case 3:  return __builtin_arm_getwcx (3);
    case 8:  return __builtin_arm_getwcx (8);
    case 9:  return __builtin_arm_getwcx (9);
    case 10: return __builtin_arm_getwcx (10);
    case 11: return __builtin_arm_getwcx (11);
    default: return 0;
    }
}

/* Creates a vector of two 32-bit values; I0 is least significant.  */
static __inline __m64
_mm_set_pi32 (int __i1, int __i0)
{
  union {
    __m64 __q;
    struct {
      unsigned int __i0;
      unsigned int __i1;
    } __s;
  } __u;

  __u.__s.__i0 = __i0;
  __u.__s.__i1 = __i1;

  return __u.__q;
}

/* Creates a vector of four 16-bit values; W0 is least significant.  */
static __inline __m64
_mm_set_pi16 (short __w3, short __w2, short __w1, short __w0)
{
  unsigned int __i1 = (unsigned short)__w3 << 16 | (unsigned short)__w2;
  unsigned int __i0 = (unsigned short)__w1 << 16 | (unsigned short)__w0;
  return _mm_set_pi32 (__i1, __i0);
}

/* Creates a vector of eight 8-bit values; B0 is least significant.  */
static __inline __m64
_mm_set_pi8 (char __b7, char __b6, char __b5, char __b4,
	     char __b3, char __b2, char __b1, char __b0)
{
  unsigned int __i1, __i0;

  __i1 = (unsigned char)__b7;
  __i1 = __i1 << 8 | (unsigned char)__b6;
  __i1 = __i1 << 8 | (unsigned char)__b5;
  __i1 = __i1 << 8 | (unsigned char)__b4;

  __i0 = (unsigned char)__b3;
  __i0 = __i0 << 8 | (unsigned char)__b2;
  __i0 = __i0 << 8 | (unsigned char)__b1;
  __i0 = __i0 << 8 | (unsigned char)__b0;

  return _mm_set_pi32 (__i1, __i0);
}

/* Similar, but with the arguments in reverse order.  */
static __inline __m64
_mm_setr_pi32 (int __i0, int __i1)
{
  return _mm_set_pi32 (__i1, __i0);
}

static __inline __m64
_mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3)
{
  return _mm_set_pi16 (__w3, __w2, __w1, __w0);
}

static __inline __m64
_mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3,
	      char __b4, char __b5, char __b6, char __b7)
{
  return _mm_set_pi8 (__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
}

/* Creates a vector of two 32-bit values, both elements containing I.  */
static __inline __m64
_mm_set1_pi32 (int __i)
{
  return _mm_set_pi32 (__i, __i);
}

/* Creates a vector of four 16-bit values, all elements containing W.  */
static __inline __m64
_mm_set1_pi16 (short __w)
{
  unsigned int __i = (unsigned short)__w << 16 | (unsigned short)__w;
  return _mm_set1_pi32 (__i);
}

/* Creates a vector of eight 8-bit values, all elements containing B.  */
static __inline __m64
_mm_set1_pi8 (char __b)
{
  unsigned int __w = (unsigned char)__b << 8 | (unsigned char)__b;
  unsigned int __i = __w << 16 | __w;
  return _mm_set1_pi32 (__i);
}

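/* Usage sketch (illustrative only): _mm_set_* takes its arguments most
   significant lane first, _mm_setr_* least significant first, and
   _mm_set1_* replicates one value across every lane.

     __m64 __a = _mm_set_pi16 (4, 3, 2, 1);
     __m64 __b = _mm_setr_pi16 (1, 2, 3, 4);

   __a and __b hold identical bit patterns: lane 0 (the least
   significant) is 1 and lane 3 is 4 in both.  */
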
/* Convert an integer to a __m64 object.  */
static __inline __m64
_m_from_int (int __a)
{
  return (__m64)__a;
}

#define _m_packsswb _mm_packs_pi16
#define _m_packssdw _mm_packs_pi32
#define _m_packuswb _mm_packs_pu16
#define _m_packusdw _mm_packs_pu32
#define _m_packssqd _mm_packs_pi64
#define _m_packusqd _mm_packs_pu64
#define _mm_packs_si64 _mm_packs_pi64
#define _mm_packs_su64 _mm_packs_pu64
#define _m_punpckhbw _mm_unpackhi_pi8
#define _m_punpckhwd _mm_unpackhi_pi16
#define _m_punpckhdq _mm_unpackhi_pi32
#define _m_punpcklbw _mm_unpacklo_pi8
#define _m_punpcklwd _mm_unpacklo_pi16
#define _m_punpckldq _mm_unpacklo_pi32
#define _m_punpckehsbw _mm_unpackeh_pi8
#define _m_punpckehswd _mm_unpackeh_pi16
#define _m_punpckehsdq _mm_unpackeh_pi32
#define _m_punpckehubw _mm_unpackeh_pu8
#define _m_punpckehuwd _mm_unpackeh_pu16
#define _m_punpckehudq _mm_unpackeh_pu32
#define _m_punpckelsbw _mm_unpackel_pi8
#define _m_punpckelswd _mm_unpackel_pi16
#define _m_punpckelsdq _mm_unpackel_pi32
#define _m_punpckelubw _mm_unpackel_pu8
#define _m_punpckeluwd _mm_unpackel_pu16
#define _m_punpckeludq _mm_unpackel_pu32
#define _m_paddb _mm_add_pi8
#define _m_paddw _mm_add_pi16
#define _m_paddd _mm_add_pi32
#define _m_paddsb _mm_adds_pi8
#define _m_paddsw _mm_adds_pi16
#define _m_paddsd _mm_adds_pi32
#define _m_paddusb _mm_adds_pu8
#define _m_paddusw _mm_adds_pu16
#define _m_paddusd _mm_adds_pu32
#define _m_psubb _mm_sub_pi8
#define _m_psubw _mm_sub_pi16
#define _m_psubd _mm_sub_pi32
#define _m_psubsb _mm_subs_pi8
#define _m_psubsw _mm_subs_pi16
#define _m_psubsd _mm_subs_pi32
#define _m_psubusb _mm_subs_pu8
#define _m_psubusw _mm_subs_pu16
#define _m_psubusd _mm_subs_pu32
#define _m_pmaddwd _mm_madd_pi16
#define _m_pmadduwd _mm_madd_pu16
#define _m_pmulhw _mm_mulhi_pi16
#define _m_pmulhuw _mm_mulhi_pu16
#define _m_pmullw _mm_mullo_pi16
#define _m_pmacsw _mm_mac_pi16
#define _m_pmacuw _mm_mac_pu16
#define _m_pmacszw _mm_macz_pi16
#define _m_pmacuzw _mm_macz_pu16
#define _m_paccb _mm_acc_pu8
#define _m_paccw _mm_acc_pu16
#define _m_paccd _mm_acc_pu32
#define _m_pmia _mm_mia_si64
#define _m_pmiaph _mm_miaph_si64
#define _m_pmiabb _mm_miabb_si64
#define _m_pmiabt _mm_miabt_si64
#define _m_pmiatb _mm_miatb_si64
#define _m_pmiatt _mm_miatt_si64
#define _m_psllw _mm_sll_pi16
#define _m_psllwi _mm_slli_pi16
#define _m_pslld _mm_sll_pi32
#define _m_pslldi _mm_slli_pi32
#define _m_psllq _mm_sll_si64
#define _m_psllqi _mm_slli_si64
#define _m_psraw _mm_sra_pi16
#define _m_psrawi _mm_srai_pi16
#define _m_psrad _mm_sra_pi32
#define _m_psradi _mm_srai_pi32
#define _m_psraq _mm_sra_si64
#define _m_psraqi _mm_srai_si64
#define _m_psrlw _mm_srl_pi16
#define _m_psrlwi _mm_srli_pi16
#define _m_psrld _mm_srl_pi32
#define _m_psrldi _mm_srli_pi32
#define _m_psrlq _mm_srl_si64
#define _m_psrlqi _mm_srli_si64
#define _m_prorw _mm_ror_pi16
#define _m_prorwi _mm_rori_pi16
#define _m_prord _mm_ror_pi32
#define _m_prordi _mm_rori_pi32
#define _m_prorq _mm_ror_si64
#define _m_prorqi _mm_rori_si64
#define _m_pand _mm_and_si64
#define _m_pandn _mm_andnot_si64
#define _m_por _mm_or_si64
#define _m_pxor _mm_xor_si64
#define _m_pcmpeqb _mm_cmpeq_pi8
#define _m_pcmpeqw _mm_cmpeq_pi16
#define _m_pcmpeqd _mm_cmpeq_pi32
#define _m_pcmpgtb _mm_cmpgt_pi8
#define _m_pcmpgtub _mm_cmpgt_pu8
#define _m_pcmpgtw _mm_cmpgt_pi16
#define _m_pcmpgtuw _mm_cmpgt_pu16
#define _m_pcmpgtd _mm_cmpgt_pi32
#define _m_pcmpgtud _mm_cmpgt_pu32
#define _m_pextrb _mm_extract_pi8
#define _m_pextrw _mm_extract_pi16
#define _m_pextrd _mm_extract_pi32
#define _m_pextrub _mm_extract_pu8
#define _m_pextruw _mm_extract_pu16
#define _m_pextrud _mm_extract_pu32
#define _m_pinsrb _mm_insert_pi8
#define _m_pinsrw _mm_insert_pi16
#define _m_pinsrd _mm_insert_pi32
#define _m_pmaxsb _mm_max_pi8
#define _m_pmaxsw _mm_max_pi16
#define _m_pmaxsd _mm_max_pi32
#define _m_pmaxub _mm_max_pu8
#define _m_pmaxuw _mm_max_pu16
#define _m_pmaxud _mm_max_pu32
#define _m_pminsb _mm_min_pi8
#define _m_pminsw _mm_min_pi16
#define _m_pminsd _mm_min_pi32
#define _m_pminub _mm_min_pu8
#define _m_pminuw _mm_min_pu16
#define _m_pminud _mm_min_pu32
#define _m_pmovmskb _mm_movemask_pi8
#define _m_pmovmskw _mm_movemask_pi16
#define _m_pmovmskd _mm_movemask_pi32
#define _m_pshufw _mm_shuffle_pi16
#define _m_pavgb _mm_avg_pu8
#define _m_pavgw _mm_avg_pu16
#define _m_pavg2b _mm_avg2_pu8
#define _m_pavg2w _mm_avg2_pu16
#define _m_psadbw _mm_sad_pu8
#define _m_psadwd _mm_sad_pu16
#define _m_psadzbw _mm_sadz_pu8
#define _m_psadzwd _mm_sadz_pu16
#define _m_paligniq _mm_align_si64
#define _m_cvt_si2pi _mm_cvtsi64_m64
#define _m_cvt_pi2si _mm_cvtm64_si64

#endif /* _MMINTRIN_H_INCLUDED */