Lines Matching defs:__m128d

17    Since the X86 SSE2 intrinsics mainly handle the __m128i and __m128d types,
61 typedef double __m128d __attribute__((__vector_size__(16), __may_alias__));
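
The typedef at line 61 makes __m128d an ordinary GCC/clang vector of two doubles, so lane access and arithmetic come from the generic vector extensions rather than from any target intrinsic. A minimal sketch (illustrative names, assuming a GCC-compatible compiler):

  /* __m128d modeled as a plain 2 x double vector */
  typedef double v2df __attribute__((__vector_size__(16), __may_alias__));

  double low_plus_high(v2df v) {
    return v[0] + v[1]; /* [] lane access is part of the vector extension */
  }
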
73 extern __inline __m128d
76 return __extension__(__m128d){__F, 0.0};
80 extern __inline __m128d
83 return __extension__(__m128d){__F, __F};
86 extern __inline __m128d
93 extern __inline __m128d
96 return __extension__(__m128d){__X, __W};
100 extern __inline __m128d
103 return __extension__(__m128d){__W, __X};
107 extern __inline __m128d
110 __m128d __Y = __Y; /* self-initialization: deliberately undefined value */
115 extern __inline __m128d
118 return (__m128d)vec_splats(0);
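
The set family above fills lanes directly: _mm_set_pd(__W, __X) places __X in lane 0 and __W in lane 1 (the {__X, __W} initializer at line 103), _mm_setr_pd keeps argument order, _mm_set_sd zeroes the upper lane, and _mm_undefined_pd self-initializes to produce an unspecified value. A small usage check, assuming the header is reachable as <emmintrin.h> (on POWER, compile with something like -mvsx -DNO_WARN_X86_INTRINSICS):

  #include <emmintrin.h>
  #include <stdio.h>

  int main(void) {
    double d[2];
    _mm_storeu_pd(d, _mm_set_pd(1.0, 2.0));
    printf("%f %f\n", d[0], d[1]); /* 2.000000 1.000000 */
    _mm_storeu_pd(d, _mm_set_sd(5.0));
    printf("%f %f\n", d[0], d[1]); /* 5.000000 0.000000 */
    return 0;
  }
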
122 extern __inline __m128d
124 _mm_move_sd(__m128d __A, __m128d __B) {
127 return (__m128d)__result;
131 extern __inline __m128d
134 return ((__m128d)vec_ld(0, (__v16qu *)__P));
138 extern __inline __m128d
145 extern __inline __m128d
152 extern __inline __m128d
158 extern __inline __m128d
165 extern __inline __m128d
169 return (__m128d)vec_xxpermdi(__tmp, __tmp, 2);
175 _mm_store_pd(double *__P, __m128d __A) {
182 _mm_storeu_pd(double *__P, __m128d __A) {
189 _mm_store_sd(double *__P, __m128d __A) {
195 _mm_cvtsd_f64(__m128d __A) {
201 _mm_storel_pd(double *__P, __m128d __A) {
208 _mm_storeh_pd(double *__P, __m128d __A) {
215 _mm_store1_pd(double *__P, __m128d __A) {
221 _mm_store_pd1(double *__P, __m128d __A) {
228 _mm_storer_pd(double *__P, __m128d __A) {
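
For the loads and stores, the plain _mm_load_pd/_mm_store_pd variants expect a 16-byte-aligned pointer (the vec_ld path at line 134 assumes one), while the unaligned variants accept any address and the _sd/_storel_pd/_storeh_pd variants move a single lane. A brief example, same build assumptions as above:

  #include <emmintrin.h>
  #include <stdio.h>

  int main(void) {
    double buf[2] = {3.0, 4.0};          /* no 16-byte alignment guaranteed */
    __m128d v = _mm_loadu_pd(buf);       /* unaligned load */
    _mm_store_sd(buf, _mm_add_pd(v, v)); /* writes lane 0 only */
    printf("%f %f\n", buf[0], buf[1]);   /* 6.000000 4.000000 */
    return 0;
  }
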
246 extern __inline __m128d
248 _mm_add_pd(__m128d __A, __m128d __B) {
249 return (__m128d)((__v2df)__A + (__v2df)__B);
255 extern __inline __m128d
257 _mm_add_sd(__m128d __A, __m128d __B) {
262 extern __inline __m128d
264 _mm_sub_pd(__m128d __A, __m128d __B) {
265 return (__m128d)((__v2df)__A - (__v2df)__B);
268 extern __inline __m128d
270 _mm_sub_sd(__m128d __A, __m128d __B) {
275 extern __inline __m128d
277 _mm_mul_pd(__m128d __A, __m128d __B) {
278 return (__m128d)((__v2df)__A * (__v2df)__B);
281 extern __inline __m128d
283 _mm_mul_sd(__m128d __A, __m128d __B) {
288 extern __inline __m128d
290 _mm_div_pd(__m128d __A, __m128d __B) {
291 return (__m128d)((__v2df)__A / (__v2df)__B);
294 extern __inline __m128d
296 _mm_div_sd(__m128d __A, __m128d __B) {
301 extern __inline __m128d
303 _mm_sqrt_pd(__m128d __A) {
308 extern __inline __m128d
310 _mm_sqrt_sd(__m128d __A, __m128d __B) {
313 return (__m128d)_mm_setr_pd(__c[0], __A[1]);
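
The arithmetic pairs follow one convention throughout: the _pd form operates on both lanes (plain vector +, -, *, / as at lines 249 and 265), and the _sd form operates on lane 0 only, copying lane 1 from the first operand. A quick demonstration:

  #include <emmintrin.h>
  #include <stdio.h>

  int main(void) {
    __m128d a = _mm_setr_pd(1.0, 10.0);
    __m128d b = _mm_setr_pd(2.0, 20.0);
    double full[2], scal[2];
    _mm_storeu_pd(full, _mm_add_pd(a, b)); /* both lanes: {3, 30}  */
    _mm_storeu_pd(scal, _mm_add_sd(a, b)); /* lane 0 only: {3, 10} */
    printf("%f %f | %f %f\n", full[0], full[1], scal[0], scal[1]);
    return 0;
  }
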
316 extern __inline __m128d
318 _mm_min_pd(__m128d __A, __m128d __B) {
322 extern __inline __m128d
324 _mm_min_sd(__m128d __A, __m128d __B) {
329 return (__m128d)_mm_setr_pd(__c[0], __A[1]);
332 extern __inline __m128d
334 _mm_max_pd(__m128d __A, __m128d __B) {
338 extern __inline __m128d
340 _mm_max_sd(__m128d __A, __m128d __B) {
345 return (__m128d)_mm_setr_pd(__c[0], __A[1]);
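
The repeated _mm_setr_pd(__c[0], __A[1]) returns at lines 313, 329, and 345 are the scalar-merge idiom: compute the operation for lane 0, then rebuild the result with __A's untouched high lane. A sketch of the pattern (illustrative helper name, not the header's exact code, which splats the low lanes first):

  #include <emmintrin.h>

  static inline __m128d scalar_min_sketch(__m128d __A, __m128d __B) {
    __m128d __c = _mm_min_pd(__A, __B); /* lane-wise minimum */
    return _mm_setr_pd(__c[0], __A[1]); /* keep __A's high lane */
  }
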
348 extern __inline __m128d
350 _mm_cmpeq_pd(__m128d __A, __m128d __B) {
351 return ((__m128d)vec_cmpeq((__v2df)__A, (__v2df)__B));
354 extern __inline __m128d
356 _mm_cmplt_pd(__m128d __A, __m128d __B) {
357 return ((__m128d)vec_cmplt((__v2df)__A, (__v2df)__B));
360 extern __inline __m128d
362 _mm_cmple_pd(__m128d __A, __m128d __B) {
363 return ((__m128d)vec_cmple((__v2df)__A, (__v2df)__B));
366 extern __inline __m128d
368 _mm_cmpgt_pd(__m128d __A, __m128d __B) {
369 return ((__m128d)vec_cmpgt((__v2df)__A, (__v2df)__B));
372 extern __inline __m128d
374 _mm_cmpge_pd(__m128d __A, __m128d __B) {
375 return ((__m128d)vec_cmpge((__v2df)__A, (__v2df)__B));
378 extern __inline __m128d
380 _mm_cmpneq_pd(__m128d __A, __m128d __B) {
382 return ((__m128d)vec_nor(__temp, __temp));
385 extern __inline __m128d
387 _mm_cmpnlt_pd(__m128d __A, __m128d __B) {
388 return ((__m128d)vec_cmpge((__v2df)__A, (__v2df)__B));
391 extern __inline __m128d
393 _mm_cmpnle_pd(__m128d __A, __m128d __B) {
394 return ((__m128d)vec_cmpgt((__v2df)__A, (__v2df)__B));
397 extern __inline __m128d
399 _mm_cmpngt_pd(__m128d __A, __m128d __B) {
400 return ((__m128d)vec_cmple((__v2df)__A, (__v2df)__B));
403 extern __inline __m128d
405 _mm_cmpnge_pd(__m128d __A, __m128d __B) {
406 return ((__m128d)vec_cmplt((__v2df)__A, (__v2df)__B));
409 extern __inline __m128d
411 _mm_cmpord_pd(__m128d __A, __m128d __B) {
417 return ((__m128d)vec_and(__c, __d));
420 extern __inline __m128d
422 _mm_cmpunord_pd(__m128d __A, __m128d __B) {
431 return ((__m128d)vec_orc(__c, __d));
440 return ((__m128d)vec_or(__c, __d));
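
All the packed comparisons return per-lane masks, all-ones for true and all-zeros for false, so they map directly onto the vec_cmp* primitives; _mm_cmpneq_pd is the NOR of an equality compare (line 382), and _mm_cmpord_pd ANDs two self-comparisons, since a NaN lane compares unequal to itself (lines 411-417). A small check of the mask semantics:

  #include <emmintrin.h>
  #include <math.h>
  #include <stdio.h>

  int main(void) {
    __m128d a = _mm_setr_pd(1.0, NAN);
    __m128d eq = _mm_cmpeq_pd(a, a);  /* {all-ones, all-zeros} */
    __m128d ne = _mm_cmpneq_pd(a, a); /* NOR of the above      */
    printf("%d %d\n", _mm_movemask_pd(eq), _mm_movemask_pd(ne)); /* 1 2 */
    return 0;
  }
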
444 extern __inline __m128d
446 _mm_cmpeq_sd(__m128d __A, __m128d __B) {
457 return (__m128d)_mm_setr_pd(__c[0], __A[1]);
460 extern __inline __m128d
462 _mm_cmplt_sd(__m128d __A, __m128d __B) {
467 return (__m128d)_mm_setr_pd(__c[0], __A[1]);
470 extern __inline __m128d
472 _mm_cmple_sd(__m128d __A, __m128d __B) {
477 return (__m128d)_mm_setr_pd(__c[0], __A[1]);
480 extern __inline __m128d
482 _mm_cmpgt_sd(__m128d __A, __m128d __B) {
487 return (__m128d)_mm_setr_pd(__c[0], __A[1]);
490 extern __inline __m128d
492 _mm_cmpge_sd(__m128d __A, __m128d __B) {
497 return (__m128d)_mm_setr_pd(__c[0], __A[1]);
500 extern __inline __m128d
502 _mm_cmpneq_sd(__m128d __A, __m128d __B) {
508 return (__m128d)_mm_setr_pd(__c[0], __A[1]);
511 extern __inline __m128d
513 _mm_cmpnlt_sd(__m128d __A, __m128d __B) {
519 return (__m128d)_mm_setr_pd(__c[0], __A[1]);
522 extern __inline __m128d
524 _mm_cmpnle_sd(__m128d __A, __m128d __B) {
530 return (__m128d)_mm_setr_pd(__c[0], __A[1]);
533 extern __inline __m128d
535 _mm_cmpngt_sd(__m128d __A, __m128d __B) {
541 return (__m128d)_mm_setr_pd(__c[0], __A[1]);
544 extern __inline __m128d
546 _mm_cmpnge_sd(__m128d __A, __m128d __B) {
552 return (__m128d)_mm_setr_pd(__c[0], __A[1]);
555 extern __inline __m128d
557 _mm_cmpord_sd(__m128d __A, __m128d __B) {
560 return (__m128d)_mm_setr_pd(__r[0], ((__v2df)__A)[1]);
563 extern __inline __m128d
565 _mm_cmpunord_sd(__m128d __A, __m128d __B) {
568 return (__m128d)_mm_setr_pd(__r[0], __A[1]);
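
The scalar comparisons reuse the same merge idiom: lane 0 carries the full-width mask for the low-lane compare, and lane 1 is copied from __A unchanged (the _mm_setr_pd(__c[0], __A[1]) returns at lines 457 through 552). For example:

  #include <emmintrin.h>
  #include <stdio.h>

  int main(void) {
    __m128d a = _mm_setr_pd(1.0, 7.0);
    __m128d b = _mm_setr_pd(2.0, -1.0);
    __m128d r = _mm_cmplt_sd(a, b); /* lane 0: mask of 1.0 < 2.0 */
    double d[2];
    _mm_storeu_pd(d, r);
    printf("%d %f\n", _mm_movemask_pd(r) & 1, d[1]); /* 1 7.000000 */
    return 0;
  }
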
580 _mm_comieq_sd(__m128d __A, __m128d __B) {
586 _mm_comilt_sd(__m128d __A, __m128d __B) {
592 _mm_comile_sd(__m128d __A, __m128d __B) {
598 _mm_comigt_sd(__m128d __A, __m128d __B) {
604 _mm_comige_sd(__m128d __A, __m128d __B) {
610 _mm_comineq_sd(__m128d __A, __m128d __B) {
616 _mm_ucomieq_sd(__m128d __A, __m128d __B) {
622 _mm_ucomilt_sd(__m128d __A, __m128d __B) {
628 _mm_ucomile_sd(__m128d __A, __m128d __B) {
634 _mm_ucomigt_sd(__m128d __A, __m128d __B) {
640 _mm_ucomige_sd(__m128d __A, __m128d __B) {
646 _mm_ucomineq_sd(__m128d __A, __m128d __B) {
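
Unlike the mask-returning forms, the comi/ucomi group compares only the low lanes and returns a plain int, implemented in this port as direct C comparisons on lane 0:

  #include <emmintrin.h>
  #include <stdio.h>

  int main(void) {
    __m128d a = _mm_set_sd(1.0), b = _mm_set_sd(2.0);
    printf("%d %d\n", _mm_comilt_sd(a, b), _mm_comieq_sd(a, b)); /* 1 0 */
    return 0;
  }
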
822 extern __inline __m128d
830 return (__m128d)vec_ctf(__val, 0);
842 _mm_cvtpd_epi32(__m128d __A) {
871 _mm_cvtpd_pi32(__m128d __A) {
879 _mm_cvtpd_ps(__m128d __A) {
906 _mm_cvttpd_epi32(__m128d __A) {
936 _mm_cvttpd_pi32(__m128d __A) {
949 extern __inline __m128d
959 return (__m128d)__result;
983 extern __inline __m128d
988 return (__m128d)vec_doubleh((__v4sf)__A);
1010 return (__m128d)__result;
1016 _mm_cvtsd_si32(__m128d __A) {
1025 _mm_cvtsd_si64(__m128d __A) {
1035 _mm_cvtsd_si64x(__m128d __A) {
1041 _mm_cvttsd_si32(__m128d __A) {
1050 _mm_cvttsd_si64(__m128d __A) {
1059 _mm_cvttsd_si64x(__m128d __A) {
1065 _mm_cvtsd_ss(__m128 __A, __m128d __B) {
1085 extern __inline __m128d
1087 _mm_cvtsi32_sd(__m128d __A, int __B) {
1091 return (__m128d)__result;
1095 extern __inline __m128d
1097 _mm_cvtsi64_sd(__m128d __A, long long __B) {
1101 return (__m128d)__result;
1105 extern __inline __m128d
1107 _mm_cvtsi64x_sd(__m128d __A, long long __B) {
1111 extern __inline __m128d
1113 _mm_cvtss_sd(__m128d __A, __m128 __B) {
1120 return (__m128d)vec_mergel(__res, (__v2df)__A);
1124 return (__m128d)__res;
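
The conversion block distinguishes the rounding forms (_mm_cvtpd_epi32 and friends, which honor the current rounding mode, round-to-nearest-even by default) from the truncating _mm_cvttpd_* forms; both leave the upper two int lanes zero. A short demonstration:

  #include <emmintrin.h>
  #include <stdio.h>

  int main(void) {
    __m128d a = _mm_setr_pd(1.5, -2.5);
    int out[4];
    _mm_storeu_si128((__m128i *)out, _mm_cvtpd_epi32(a));
    printf("%d %d\n", out[0], out[1]); /* 2 -2 (nearest even) */
    _mm_storeu_si128((__m128i *)out, _mm_cvttpd_epi32(a));
    printf("%d %d\n", out[0], out[1]); /* 1 -2 (truncated)    */
    return 0;
  }
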
1128 extern __inline __m128d
1130 _mm_shuffle_pd(__m128d __A, __m128d __B, const int __mask) {
1153 extern __inline __m128d
1155 _mm_unpackhi_pd(__m128d __A, __m128d __B) {
1156 return (__m128d)vec_mergel((__v2df)__A, (__v2df)__B);
1159 extern __inline __m128d
1161 _mm_unpacklo_pd(__m128d __A, __m128d __B) {
1162 return (__m128d)vec_mergeh((__v2df)__A, (__v2df)__B);
1165 extern __inline __m128d
1167 _mm_loadh_pd(__m128d __A, double const *__B) {
1170 return (__m128d)__result;
1173 extern __inline __m128d
1175 _mm_loadl_pd(__m128d __A, double const *__B) {
1178 return (__m128d)__result;
1187 _mm_movemask_pd(__m128d __A) {
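
The unpack pair maps cleanly onto the merge primitives: _mm_unpacklo_pd is vec_mergeh (low lanes of each input, line 1162) and _mm_unpackhi_pd is vec_mergel (high lanes, line 1156), while _mm_movemask_pd packs the two sign bits into an int. For instance:

  #include <emmintrin.h>
  #include <stdio.h>

  int main(void) {
    __m128d a = _mm_setr_pd(1.0, 2.0);
    __m128d b = _mm_setr_pd(3.0, 4.0);
    double d[2];
    _mm_storeu_pd(d, _mm_unpacklo_pd(a, b));
    printf("%f %f\n", d[0], d[1]); /* 1.000000 3.000000 */
    _mm_storeu_pd(d, _mm_unpackhi_pd(a, b));
    printf("%f %f\n", d[0], d[1]); /* 2.000000 4.000000 */
    return 0;
  }
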
1812 extern __inline __m128d
1814 _mm_and_pd(__m128d __A, __m128d __B) {
1818 extern __inline __m128d
1820 _mm_andnot_pd(__m128d __A, __m128d __B) {
1824 extern __inline __m128d
1826 _mm_or_pd(__m128d __A, __m128d __B) {
1830 extern __inline __m128d
1832 _mm_xor_pd(__m128d __A, __m128d __B) {
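
The logical group treats each __m128d purely as 128 bits. The classic use is sign-bit manipulation, for example a branch-free per-lane fabs with _mm_andnot_pd:

  #include <emmintrin.h>
  #include <stdio.h>

  int main(void) {
    __m128d sign = _mm_set1_pd(-0.0);     /* only the sign bits set  */
    __m128d v = _mm_setr_pd(-3.0, 4.0);
    __m128d abs = _mm_andnot_pd(sign, v); /* (~sign) & v clears them */
    double d[2];
    _mm_storeu_pd(d, abs);
    printf("%f %f\n", d[0], d[1]); /* 3.000000 4.000000 */
    return 0;
  }
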
2179 _mm_stream_pd(double *__A, __m128d __B) {
2182 *(__m128d *)__A = __B;
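
Lines 2179-2182 show that this port lowers _mm_stream_pd to a cache-touch-for-store hint (dcbtstt) followed by an ordinary store, keeping the x86 contract: aligned destination, data unlikely to be re-read soon, but no true non-temporal store. A sketch with a hypothetical helper:

  #include <emmintrin.h>

  /* fill2 is illustrative; dst must be 16-byte aligned. */
  void fill2(double *dst, __m128d v, long n) {
    for (long i = 0; i + 1 < n; i += 2)
      _mm_stream_pd(dst + i, v);
  }
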
2229 _mm_castpd_ps(__m128d __A) {
2235 _mm_castpd_si128(__m128d __A) {
2239 extern __inline __m128d
2242 return (__m128d)__A;
2257 extern __inline __m128d
2260 return (__m128d)__A;
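
The cast group at the end performs no conversion at all: each is a bit-pattern reinterpretation that compiles to nothing (the bare return (__m128d)__A; bodies at lines 2242 and 2260). For example:

  #include <emmintrin.h>
  #include <stdio.h>

  int main(void) {
    __m128i bits = _mm_castpd_si128(_mm_set_sd(1.0));
    long long out[2];
    _mm_storeu_si128((__m128i *)out, bits);
    printf("%llx\n", (unsigned long long)out[0]); /* 3ff0000000000000 */
    return 0;
  }
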