/* xmmintrin.h, revision 132718.  */
/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 8.0.  */

#ifndef _XMMINTRIN_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED

#ifndef __SSE__
# error "SSE instruction set not enabled"
#else

/* We need type definitions from the MMX header file.  */
#include <mmintrin.h>

/* The data type intended for user use.  */
typedef int __m128 __attribute__ ((__mode__(__V4SF__)));

/* Internal data types for implementing the intrinsics.  */
typedef int __v4sf __attribute__ ((__mode__(__V4SF__)));

/* Create a selector for use with the SHUFPS instruction.  */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
 (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))

/* Constants for use with _mm_prefetch.  */
enum _mm_hint
{
  _MM_HINT_T0 = 3,
  _MM_HINT_T1 = 2,
  _MM_HINT_T2 = 1,
  _MM_HINT_NTA = 0
};

/* Bits in the MXCSR.  */
#define _MM_EXCEPT_MASK       0x003f
#define _MM_EXCEPT_INVALID    0x0001
#define _MM_EXCEPT_DENORM     0x0002
#define _MM_EXCEPT_DIV_ZERO   0x0004
#define _MM_EXCEPT_OVERFLOW   0x0008
#define _MM_EXCEPT_UNDERFLOW  0x0010
#define _MM_EXCEPT_INEXACT    0x0020

#define _MM_MASK_MASK         0x1f80
#define _MM_MASK_INVALID      0x0080
#define _MM_MASK_DENORM       0x0100
#define _MM_MASK_DIV_ZERO     0x0200
#define _MM_MASK_OVERFLOW     0x0400
#define _MM_MASK_UNDERFLOW    0x0800
#define _MM_MASK_INEXACT      0x1000

#define _MM_ROUND_MASK        0x6000
#define _MM_ROUND_NEAREST     0x0000
#define _MM_ROUND_DOWN        0x2000
#define _MM_ROUND_UP          0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000

#define _MM_FLUSH_ZERO_MASK   0x8000
#define _MM_FLUSH_ZERO_ON     0x8000
#define _MM_FLUSH_ZERO_OFF    0x0000
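
/* Illustrative sketch (not part of the original header): _MM_SHUFFLE packs
   four 2-bit source-element indices into the 8-bit immediate expected by
   the SHUFPS-style intrinsics, highest-numbered destination element first.
   Guarded out so it adds no declarations.  */
#if 0
/* _MM_SHUFFLE (3, 2, 1, 0) == 0xE4, the identity permutation: destination
   element 0 comes from source index 0, element 1 from index 1, and so on.
   _MM_SHUFFLE (0, 1, 2, 3) == 0x1B reverses the four elements.  */
static __inline __m128
__example_reverse (__m128 __A)
{
  return _mm_shuffle_ps (__A, __A, _MM_SHUFFLE (0, 1, 2, 3));
}
#endif
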
/* Perform the respective operation on the lower SPFP (single-precision
   floating-point) values of A and B; the upper three SPFP values are
   passed through from A.  */

static __inline __m128
_mm_add_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_sub_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_mul_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_div_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_sqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
}

static __inline __m128
_mm_rcp_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
}

static __inline __m128
_mm_rsqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
}

static __inline __m128
_mm_min_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_max_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform the respective operation on the four SPFP values in A and B.  */

static __inline __m128
_mm_add_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_sub_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_mul_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_div_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_sqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
}

static __inline __m128
_mm_rcp_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
}

static __inline __m128
_mm_rsqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
}

static __inline __m128
_mm_min_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_max_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
}

/* Perform logical bit-wise operations on 128-bit values.  */

static __inline __m128
_mm_and_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andps (__A, __B);
}

static __inline __m128
_mm_andnot_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andnps (__A, __B);
}

static __inline __m128
_mm_or_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_orps (__A, __B);
}

static __inline __m128
_mm_xor_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_xorps (__A, __B);
}
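
/* Illustrative sketch (not part of the original header): the _ss forms
   operate on element 0 only and pass A's upper elements through, while
   the _ps forms apply the operation to all four lanes.  Guarded out so
   it adds no declarations.  */
#if 0
static __inline __m128
__example_axpy (__m128 __a, __m128 __x, __m128 __y)
{
  /* Computes a*x + y in all four lanes.  */
  return _mm_add_ps (_mm_mul_ps (__a, __x), __y);
}
#endif
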
/* Perform a comparison on the lower SPFP values of A and B.  If the
   comparison is true, place a mask of all ones in the result, otherwise a
   mask of zeros.  The upper three SPFP values are passed through from A.  */

static __inline __m128
_mm_cmpeq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmplt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmple_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpgt_ss (__m128 __A, __m128 __B)
{
  /* There is no CMPGTSS instruction; compute B < A with the operands
     swapped, then use MOVSS to recover A's upper three elements.  */
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpltss ((__v4sf) __B,
                                                                (__v4sf) __A));
}

static __inline __m128
_mm_cmpge_ss (__m128 __A, __m128 __B)
{
  /* Likewise, via the swapped-operand CMPLESS.  */
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpless ((__v4sf) __B,
                                                                (__v4sf) __A));
}

static __inline __m128
_mm_cmpneq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnlt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnle_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpngt_ss (__m128 __A, __m128 __B)
{
  /* Likewise, via the swapped-operand CMPNLTSS.  */
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpnltss ((__v4sf) __B,
                                                                 (__v4sf) __A));
}

static __inline __m128
_mm_cmpnge_ss (__m128 __A, __m128 __B)
{
  /* Likewise, via the swapped-operand CMPNLESS.  */
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpnless ((__v4sf) __B,
                                                                 (__v4sf) __A));
}

static __inline __m128
_mm_cmpord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpunord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
}
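
/* Illustrative sketch (not part of the original header): the _ss compare
   intrinsics produce an all-ones or all-zeros bit mask in element 0, not
   a 0/1 integer; use the comi/ucomi family below when an int result is
   wanted.  Guarded out so it adds no declarations.  */
#if 0
static __inline int
__example_scalar_less (__m128 __a, __m128 __b)
{
  /* Bit 0 of the movemask reflects the sign bit of element 0, which the
     compare sets to all ones exactly when a < b.  */
  return _mm_movemask_ps (_mm_cmplt_ss (__a, __b)) & 1;
}
#endif
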
/* Perform a comparison on the four SPFP values of A and B.  For each
   element, if the comparison is true, place a mask of all ones in the
   result, otherwise a mask of zeros.  */

static __inline __m128
_mm_cmpeq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmplt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmple_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpgt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpneq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnlt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnle_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpngt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpnge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128
_mm_cmpunord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
}
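
/* Illustrative sketch (not part of the original header): a common use of
   the packed compare masks is a branchless per-element select built from
   the logical intrinsics above.  Guarded out so it adds no declarations.  */
#if 0
static __inline __m128
__example_select_min (__m128 __a, __m128 __b)
{
  /* Per-element minimum via mask blending; equivalent to _mm_min_ps for
     ordinary (non-NaN) inputs.  */
  __m128 __mask = _mm_cmplt_ps (__a, __b);
  return _mm_or_ps (_mm_and_ps (__mask, __a),
                    _mm_andnot_ps (__mask, __b));
}
#endif
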
/* Compare the lower SPFP values of A and B and return 1 if true
   and 0 if false.  */

static __inline int
_mm_comieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_comineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
}

static __inline int
_mm_ucomineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
}

/* Convert the lower SPFP value to a 32-bit integer according to the current
   rounding mode.  */
static __inline int
_mm_cvtss_si32 (__m128 __A)
{
  return __builtin_ia32_cvtss2si ((__v4sf) __A);
}

static __inline int
_mm_cvt_ss2si (__m128 __A)
{
  return _mm_cvtss_si32 (__A);
}

#ifdef __x86_64__
/* Convert the lower SPFP value to a 64-bit integer according to the current
   rounding mode.  */
static __inline long long
_mm_cvtss_si64x (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}
#endif

/* Convert the two lower SPFP values to 32-bit integers according to the
   current rounding mode.  Return the integers in packed form.  */
static __inline __m64
_mm_cvtps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
}

static __inline __m64
_mm_cvt_ps2pi (__m128 __A)
{
  return _mm_cvtps_pi32 (__A);
}

/* Truncate the lower SPFP value to a 32-bit integer.  */
static __inline int
_mm_cvttss_si32 (__m128 __A)
{
  return __builtin_ia32_cvttss2si ((__v4sf) __A);
}

static __inline int
_mm_cvtt_ss2si (__m128 __A)
{
  return _mm_cvttss_si32 (__A);
}

#ifdef __x86_64__
/* Truncate the lower SPFP value to a 64-bit integer.  */
static __inline long long
_mm_cvttss_si64x (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}
#endif
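
/* Illustrative sketch (not part of the original header): _mm_cvtss_si32
   rounds according to the current MXCSR rounding mode (round-to-nearest-
   even by default), while _mm_cvttss_si32 always truncates toward zero,
   so the two can disagree.  Guarded out so it adds no declarations.  */
#if 0
static __inline int
__example_rounding_difference (void)
{
  __m128 __v = _mm_set_ss (1.5f);
  /* Under the default mode: cvt -> 2 (nearest even), cvtt -> 1
     (truncation), so this returns 1.  */
  return _mm_cvtss_si32 (__v) - _mm_cvttss_si32 (__v);
}
#endif
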
/* Truncate the two lower SPFP values to 32-bit integers.  Return the
   integers in packed form.  */
static __inline __m64
_mm_cvttps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
}

static __inline __m64
_mm_cvtt_ps2pi (__m128 __A)
{
  return _mm_cvttps_pi32 (__A);
}

/* Convert B to a SPFP value and insert it as element zero in A.  */
static __inline __m128
_mm_cvtsi32_ss (__m128 __A, int __B)
{
  return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
}

static __inline __m128
_mm_cvt_si2ss (__m128 __A, int __B)
{
  return _mm_cvtsi32_ss (__A, __B);
}

#ifdef __x86_64__
/* Convert B to a SPFP value and insert it as element zero in A.  */
static __inline __m128
_mm_cvtsi64x_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}
#endif

/* Convert the two 32-bit values in B to SPFP form and insert them
   as the two lower elements in A.  */
static __inline __m128
_mm_cvtpi32_ps (__m128 __A, __m64 __B)
{
  return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
}

static __inline __m128
_mm_cvt_pi2ps (__m128 __A, __m64 __B)
{
  return _mm_cvtpi32_ps (__A, __B);
}

/* Convert the four signed 16-bit values in A to SPFP form.  */
static __inline __m128
_mm_cvtpi16_ps (__m64 __A)
{
  __v4hi __sign;
  __v2si __hisi, __losi;
  __v4sf __r;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = (__v4hi) __builtin_ia32_mmx_zero ();
  __sign = __builtin_ia32_pcmpgtw (__sign, (__v4hi)__A);

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);

  /* Convert the doublewords to floating point two at a time.  */
  __r = (__v4sf) __builtin_ia32_setzerops ();
  __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
  __r = __builtin_ia32_movlhps (__r, __r);
  __r = __builtin_ia32_cvtpi2ps (__r, __losi);

  return (__m128) __r;
}

/* Convert the four unsigned 16-bit values in A to SPFP form.  */
static __inline __m128
_mm_cvtpu16_ps (__m64 __A)
{
  __v4hi __zero = (__v4hi) __builtin_ia32_mmx_zero ();
  __v2si __hisi, __losi;
  __v4sf __r;

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __zero);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __zero);

  /* Convert the doublewords to floating point two at a time.  */
  __r = (__v4sf) __builtin_ia32_setzerops ();
  __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
  __r = __builtin_ia32_movlhps (__r, __r);
  __r = __builtin_ia32_cvtpi2ps (__r, __losi);

  return (__m128) __r;
}

/* Convert the low four signed 8-bit values in A to SPFP form.  */
static __inline __m128
_mm_cvtpi8_ps (__m64 __A)
{
  __v8qi __sign;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = (__v8qi) __builtin_ia32_mmx_zero ();
  __sign = __builtin_ia32_pcmpgtb (__sign, (__v8qi)__A);

  /* Convert the four low bytes to words.  */
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);

  return _mm_cvtpi16_ps(__A);
}
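
/* Illustrative sketch (not part of the original header): the MMX<->SSE
   conversion helpers above execute MMX instructions, so callers mixing
   them with x87 floating-point code should issue _mm_empty () (from
   mmintrin.h) afterwards.  Guarded out so it adds no declarations.  */
#if 0
static __inline __m128
__example_widen_pixels (__m64 __four_bytes)
{
  /* Zero-extend four unsigned 8-bit samples to floats in [0, 255].  */
  __m128 __result = _mm_cvtpu8_ps (__four_bytes);
  _mm_empty ();
  return __result;
}
#endif
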
/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
static __inline __m128
_mm_cvtpu8_ps(__m64 __A)
{
  __v8qi __zero = (__v8qi) __builtin_ia32_mmx_zero ();
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __zero);
  return _mm_cvtpu16_ps(__A);
}

/* Convert the four signed 32-bit values in A and B to SPFP form.  */
static __inline __m128
_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
{
  __v4sf __zero = (__v4sf) __builtin_ia32_setzerops ();
  __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
  __v4sf __sfb = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__B);
  return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
}

/* Convert the four SPFP values in A to four signed 16-bit integers.  */
static __inline __m64
_mm_cvtps_pi16(__m128 __A)
{
  __v4sf __hisf = (__v4sf)__A;
  __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
  __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
  __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
  return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
}

/* Convert the four SPFP values in A to four signed 8-bit integers.  */
static __inline __m64
_mm_cvtps_pi8(__m128 __A)
{
  __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
  __v4hi __zero = (__v4hi) __builtin_ia32_mmx_zero ();
  return (__m64) __builtin_ia32_packsswb (__tmp, __zero);
}

/* Selects four specific SPFP values from A and B based on MASK.  */
#if 0
static __inline __m128
_mm_shuffle_ps (__m128 __A, __m128 __B, int __mask)
{
  return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
}
#else
#define _mm_shuffle_ps(A, B, MASK) \
 ((__m128) __builtin_ia32_shufps ((__v4sf)(A), (__v4sf)(B), (MASK)))
#endif

/* Selects and interleaves the upper two SPFP values from A and B.  */
static __inline __m128
_mm_unpackhi_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
}

/* Selects and interleaves the lower two SPFP values from A and B.  */
static __inline __m128
_mm_unpacklo_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the upper two SPFP values with 64-bits of data loaded from P;
   the lower two values are passed through from A.  */
static __inline __m128
_mm_loadh_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the upper two SPFP values of A into P.  */
static __inline void
_mm_storeh_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storehps ((__v2si *)__P, (__v4sf)__A);
}

/* Moves the upper two values of B into the lower two values of A.  */
static __inline __m128
_mm_movehl_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
}

/* Moves the lower two values of B into the upper two values of A.  */
static __inline __m128
_mm_movelh_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
}
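
/* Illustrative sketch (not part of the original header): _mm_loadl_pi and
   _mm_loadh_pi together assemble a vector from two 64-bit halves, neither
   of which needs 16-byte alignment.  Guarded out so it adds no
   declarations.  */
#if 0
static __inline __m128
__example_load_two_halves (__m64 const *__lo, __m64 const *__hi)
{
  __m128 __v = _mm_setzero_ps ();
  __v = _mm_loadl_pi (__v, __lo);  /* Elements 0 and 1.  */
  return _mm_loadh_pi (__v, __hi); /* Elements 2 and 3.  */
}
#endif
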
/* Sets the lower two SPFP values with 64-bits of data loaded from P;
   the upper two values are passed through from A.  */
static __inline __m128
_mm_loadl_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the lower two SPFP values of A into P.  */
static __inline void
_mm_storel_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storelps ((__v2si *)__P, (__v4sf)__A);
}

/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
static __inline int
_mm_movemask_ps (__m128 __A)
{
  return __builtin_ia32_movmskps ((__v4sf)__A);
}

/* Return the contents of the control register.  */
static __inline unsigned int
_mm_getcsr (void)
{
  return __builtin_ia32_stmxcsr ();
}

/* Read exception bits from the control register.  */
static __inline unsigned int
_MM_GET_EXCEPTION_STATE (void)
{
  return _mm_getcsr() & _MM_EXCEPT_MASK;
}

static __inline unsigned int
_MM_GET_EXCEPTION_MASK (void)
{
  return _mm_getcsr() & _MM_MASK_MASK;
}

static __inline unsigned int
_MM_GET_ROUNDING_MODE (void)
{
  return _mm_getcsr() & _MM_ROUND_MASK;
}

static __inline unsigned int
_MM_GET_FLUSH_ZERO_MODE (void)
{
  return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
}

/* Set the control register to I.  */
static __inline void
_mm_setcsr (unsigned int __I)
{
  __builtin_ia32_ldmxcsr (__I);
}

/* Set exception bits in the control register.  */
static __inline void
_MM_SET_EXCEPTION_STATE(unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask);
}

static __inline void
_MM_SET_EXCEPTION_MASK (unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask);
}

static __inline void
_MM_SET_ROUNDING_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
}

static __inline void
_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
}

/* Create a vector with element 0 as *P and the rest zero.  */
static __inline __m128
_mm_load_ss (float const *__P)
{
  return (__m128) __builtin_ia32_loadss (__P);
}

/* Create a vector with all four elements equal to *P.  */
static __inline __m128
_mm_load1_ps (float const *__P)
{
  __v4sf __tmp = __builtin_ia32_loadss (__P);
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,0,0,0));
}

static __inline __m128
_mm_load_ps1 (float const *__P)
{
  return _mm_load1_ps (__P);
}

/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
static __inline __m128
_mm_load_ps (float const *__P)
{
  return (__m128) __builtin_ia32_loadaps (__P);
}

/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
static __inline __m128
_mm_loadu_ps (float const *__P)
{
  return (__m128) __builtin_ia32_loadups (__P);
}

/* Load four SPFP values in reverse order.  The address must be aligned.  */
static __inline __m128
_mm_loadr_ps (float const *__P)
{
  __v4sf __tmp = __builtin_ia32_loadaps (__P);
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
}

/* Create a vector with element 0 as F and the rest zero.  */
static __inline __m128
_mm_set_ss (float __F)
{
  return (__m128) __builtin_ia32_loadss (&__F);
}
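
/* Illustrative sketch (not part of the original header): the _MM_GET_* /
   _MM_SET_* helpers compose read-modify-write accesses to the MXCSR,
   e.g. switching the rounding mode temporarily and restoring it.
   Guarded out so it adds no declarations.  */
#if 0
static __inline void
__example_round_toward_zero_section (void)
{
  unsigned int __saved = _MM_GET_ROUNDING_MODE ();
  _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO);
  /* ... code that relies on truncating arithmetic ... */
  _MM_SET_ROUNDING_MODE (__saved);
}
#endif
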
/* Create a vector with all four elements equal to F.  */
static __inline __m128
_mm_set1_ps (float __F)
{
  __v4sf __tmp = __builtin_ia32_loadss (&__F);
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,0,0,0));
}

static __inline __m128
_mm_set_ps1 (float __F)
{
  return _mm_set1_ps (__F);
}

/* Create the vector [Z Y X W].  */
static __inline __m128
_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
{
  return (__v4sf) {__W, __X, __Y, __Z};
}

/* Create the vector [W X Y Z].  */
static __inline __m128
_mm_setr_ps (float __Z, float __Y, float __X, float __W)
{
  return _mm_set_ps (__W, __X, __Y, __Z);
}

/* Create a vector of zeros.  */
static __inline __m128
_mm_setzero_ps (void)
{
  return (__m128) __builtin_ia32_setzerops ();
}

/* Stores the lower SPFP value.  */
static __inline void
_mm_store_ss (float *__P, __m128 __A)
{
  __builtin_ia32_storess (__P, (__v4sf)__A);
}

/* Store the lower SPFP value across four words.  */
static __inline void
_mm_store1_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
  __builtin_ia32_storeaps (__P, __tmp);
}

static __inline void
_mm_store_ps1 (float *__P, __m128 __A)
{
  _mm_store1_ps (__P, __A);
}

/* Store four SPFP values.  The address must be 16-byte aligned.  */
static __inline void
_mm_store_ps (float *__P, __m128 __A)
{
  __builtin_ia32_storeaps (__P, (__v4sf)__A);
}

/* Store four SPFP values.  The address need not be 16-byte aligned.  */
static __inline void
_mm_storeu_ps (float *__P, __m128 __A)
{
  __builtin_ia32_storeups (__P, (__v4sf)__A);
}

/* Store four SPFP values in reverse order.  The address must be aligned.  */
static __inline void
_mm_storer_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
  __builtin_ia32_storeaps (__P, __tmp);
}

/* Sets the low SPFP value of A from the low value of B.  */
static __inline __m128
_mm_move_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B);
}

/* Extracts one of the four words of A.  The selector N must be immediate.  */
#if 0
static __inline int
_mm_extract_pi16 (__m64 __A, int __N)
{
  return __builtin_ia32_pextrw ((__v4hi)__A, __N);
}

static __inline int
_m_pextrw (__m64 __A, int __N)
{
  return _mm_extract_pi16 (__A, __N);
}
#else
#define _mm_extract_pi16(A, N) \
  __builtin_ia32_pextrw ((__v4hi)(A), (N))
#define _m_pextrw(A, N) _mm_extract_pi16((A), (N))
#endif

/* Inserts word D into one of four words of A.  The selector N must be
   immediate.  */
#if 0
static __inline __m64
_mm_insert_pi16 (__m64 __A, int __D, int __N)
{
  return (__m64)__builtin_ia32_pinsrw ((__v4hi)__A, __D, __N);
}

static __inline __m64
_m_pinsrw (__m64 __A, int __D, int __N)
{
  return _mm_insert_pi16 (__A, __D, __N);
}
#else
#define _mm_insert_pi16(A, D, N) \
  ((__m64) __builtin_ia32_pinsrw ((__v4hi)(A), (D), (N)))
#define _m_pinsrw(A, D, N) _mm_insert_pi16((A), (D), (N))
#endif
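
/* Illustrative sketch (not part of the original header): _mm_set_ps takes
   its arguments highest element first, so _mm_set_ps (z, y, x, w) places
   W in element 0; _mm_setr_ps is the memory-order variant.  Guarded out
   so it adds no declarations.  */
#if 0
static __inline int
__example_element_order (void)
{
  __m128 __v = _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f);
  /* Element 0 holds 1.0f, so the truncating conversion yields 1.  */
  return _mm_cvttss_si32 (__v);
}
#endif
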
/* Compute the element-wise maximum of signed 16-bit values.  */
static __inline __m64
_mm_max_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64
_m_pmaxsw (__m64 __A, __m64 __B)
{
  return _mm_max_pi16 (__A, __B);
}

/* Compute the element-wise maximum of unsigned 8-bit values.  */
static __inline __m64
_mm_max_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64
_m_pmaxub (__m64 __A, __m64 __B)
{
  return _mm_max_pu8 (__A, __B);
}

/* Compute the element-wise minimum of signed 16-bit values.  */
static __inline __m64
_mm_min_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64
_m_pminsw (__m64 __A, __m64 __B)
{
  return _mm_min_pi16 (__A, __B);
}

/* Compute the element-wise minimum of unsigned 8-bit values.  */
static __inline __m64
_mm_min_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64
_m_pminub (__m64 __A, __m64 __B)
{
  return _mm_min_pu8 (__A, __B);
}

/* Create an 8-bit mask of the signs of 8-bit values.  */
static __inline int
_mm_movemask_pi8 (__m64 __A)
{
  return __builtin_ia32_pmovmskb ((__v8qi)__A);
}

static __inline int
_m_pmovmskb (__m64 __A)
{
  return _mm_movemask_pi8 (__A);
}

/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
   in B and produce the high 16 bits of the 32-bit results.  */
static __inline __m64
_mm_mulhi_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64
_m_pmulhuw (__m64 __A, __m64 __B)
{
  return _mm_mulhi_pu16 (__A, __B);
}

/* Return a combination of the four 16-bit values in A.  The selector
   must be an immediate.  */
#if 0
static __inline __m64
_mm_shuffle_pi16 (__m64 __A, int __N)
{
  return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
}

static __inline __m64
_m_pshufw (__m64 __A, int __N)
{
  return _mm_shuffle_pi16 (__A, __N);
}
#else
#define _mm_shuffle_pi16(A, N) \
  ((__m64) __builtin_ia32_pshufw ((__v4hi)(A), (N)))
#define _m_pshufw(A, N) _mm_shuffle_pi16 ((A), (N))
#endif

/* Conditionally store byte elements of A into P.  The high bit of each
   byte in the selector N determines whether the corresponding byte from
   A is stored.  */
static __inline void
_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
{
  __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
}

static __inline void
_m_maskmovq (__m64 __A, __m64 __N, char *__P)
{
  _mm_maskmove_si64 (__A, __N, __P);
}

/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
static __inline __m64
_mm_avg_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64
_m_pavgb (__m64 __A, __m64 __B)
{
  return _mm_avg_pu8 (__A, __B);
}
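
/* Illustrative sketch (not part of the original header): _mm_maskmove_si64
   stores only the bytes of A whose selector byte has its high bit set,
   writing the selected bytes directly without a read-modify-write of the
   destination.  Guarded out so it adds no declarations.  */
#if 0
static __inline void
__example_store_low_four_bytes (__m64 __a, char *__p)
{
  /* _mm_set_pi8 takes bytes highest first, so bytes 0-3 of the selector
     have the high bit set and only the low four bytes of A are stored.  */
  __m64 __mask = _mm_set_pi8 (0, 0, 0, 0, -1, -1, -1, -1);
  _mm_maskmove_si64 (__a, __mask, __p);
}
#endif
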
/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
static __inline __m64
_mm_avg_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64
_m_pavgw (__m64 __A, __m64 __B)
{
  return _mm_avg_pu16 (__A, __B);
}

/* Compute the sum of the absolute differences of the unsigned 8-bit
   values in A and B.  Return the value in the lower 16-bit word; the
   upper words are cleared.  */
static __inline __m64
_mm_sad_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64
_m_psadbw (__m64 __A, __m64 __B)
{
  return _mm_sad_pu8 (__A, __B);
}

/* Loads one cache line from address P to a location "closer" to the
   processor.  The selector I specifies the type of prefetch operation.  */
#if 0
static __inline void
_mm_prefetch (void *__P, enum _mm_hint __I)
{
  __builtin_prefetch (__P, 0, __I);
}
#else
#define _mm_prefetch(P, I) \
  __builtin_prefetch ((P), 0, (I))
#endif

/* Stores the data in A to the address P without polluting the caches.  */
static __inline void
_mm_stream_pi (__m64 *__P, __m64 __A)
{
  __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
}

/* Likewise.  The address must be 16-byte aligned.  */
static __inline void
_mm_stream_ps (float *__P, __m128 __A)
{
  __builtin_ia32_movntps (__P, (__v4sf)__A);
}

/* Guarantees that every preceding store is globally visible before
   any subsequent store.  */
static __inline void
_mm_sfence (void)
{
  __builtin_ia32_sfence ();
}

/* The execution of the next instruction is delayed by an implementation-
   specific amount of time.  The instruction does not modify the
   architectural state.  */
static __inline void
_mm_pause (void)
{
  __asm__ __volatile__ ("rep; nop" : : );
}

/* Transpose the 4x4 matrix composed of row[0-3].  */
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3)                       \
do {                                                                    \
  __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3);    \
  __v4sf __t0 = __builtin_ia32_shufps (__r0, __r1, 0x44);               \
  __v4sf __t2 = __builtin_ia32_shufps (__r0, __r1, 0xEE);               \
  __v4sf __t1 = __builtin_ia32_shufps (__r2, __r3, 0x44);               \
  __v4sf __t3 = __builtin_ia32_shufps (__r2, __r3, 0xEE);               \
  (row0) = __builtin_ia32_shufps (__t0, __t1, 0x88);                    \
  (row1) = __builtin_ia32_shufps (__t0, __t1, 0xDD);                    \
  (row2) = __builtin_ia32_shufps (__t2, __t3, 0x88);                    \
  (row3) = __builtin_ia32_shufps (__t2, __t3, 0xDD);                    \
} while (0)

/* For backward source compatibility.  */
#include <emmintrin.h>

#endif /* __SSE__ */
#endif /* _XMMINTRIN_H_INCLUDED */