fma4intrin.h revision 1.1.1.1
/* Copyright (C) 2007, 2008, 2009 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _X86INTRIN_H_INCLUDED
# error "Never use <fma4intrin.h> directly; include <x86intrin.h> instead."
#endif

#ifndef _FMA4INTRIN_H_INCLUDED
#define _FMA4INTRIN_H_INCLUDED

#ifndef __FMA4__
# error "FMA4 instruction set not enabled"
#else

/* We need definitions from the SSE4A, SSE3, SSE2 and SSE header files.  */
#include <ammintrin.h>

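/* Usage sketch (illustrative): these wrappers are reached through
   <x86intrin.h> when FMA4 code generation is enabled (e.g. -mfma4).
   The function and variable names below are hypothetical examples.

       #include <x86intrin.h>

       __m128 scale_and_add (__m128 a, __m128 b, __m128 c)
       {
         return _mm_macc_ps (a, b, c);	// per-element a*b + c, single rounding
       }
*/
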
/* 128b Floating point multiply/add type instructions.  */
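/* _mm_macc_{ps,pd,ss,sd}: fused multiply-accumulate, computing
   __A * __B + __C per element with a single rounding; the _ss/_sd
   forms compute the scalar result in the low element.  */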
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_macc_ps (__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128) __builtin_ia32_vfmaddps ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_macc_pd (__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d) __builtin_ia32_vfmaddpd ((__v2df)__A, (__v2df)__B, (__v2df)__C);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_macc_ss (__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128) __builtin_ia32_vfmaddss ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_macc_sd (__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d) __builtin_ia32_vfmaddsd ((__v2df)__A, (__v2df)__B, (__v2df)__C);
}

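/* _mm_msub_{ps,pd,ss,sd}: fused multiply-subtract, computing
   __A * __B - __C.  */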
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_msub_ps (__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128) __builtin_ia32_vfmsubps ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_msub_pd (__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d) __builtin_ia32_vfmsubpd ((__v2df)__A, (__v2df)__B, (__v2df)__C);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_msub_ss (__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128) __builtin_ia32_vfmsubss ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_msub_sd (__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d) __builtin_ia32_vfmsubsd ((__v2df)__A, (__v2df)__B, (__v2df)__C);
}

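/* _mm_nmacc_{ps,pd,ss,sd}: negated multiply-accumulate, computing
   -(__A * __B) + __C.  */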
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_nmacc_ps (__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128) __builtin_ia32_vfnmaddps ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_nmacc_pd (__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d) __builtin_ia32_vfnmaddpd ((__v2df)__A, (__v2df)__B, (__v2df)__C);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_nmacc_ss (__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128) __builtin_ia32_vfnmaddss ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_nmacc_sd (__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d) __builtin_ia32_vfnmaddsd ((__v2df)__A, (__v2df)__B, (__v2df)__C);
}

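/* _mm_nmsub_{ps,pd,ss,sd}: negated multiply-subtract, computing
   -(__A * __B) - __C.  */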
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_nmsub_ps (__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128) __builtin_ia32_vfnmsubps ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_nmsub_pd (__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d) __builtin_ia32_vfnmsubpd ((__v2df)__A, (__v2df)__B, (__v2df)__C);
}

extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_nmsub_ss (__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128) __builtin_ia32_vfnmsubss ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_nmsub_sd (__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d) __builtin_ia32_vfnmsubsd ((__v2df)__A, (__v2df)__B, (__v2df)__C);
}

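/* _mm_maddsub_{ps,pd}: alternating multiply-add/subtract; odd-indexed
   elements get __A * __B + __C, even-indexed elements get __A * __B - __C.  */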
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maddsub_ps (__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128) __builtin_ia32_vfmaddsubps ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maddsub_pd (__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d) __builtin_ia32_vfmaddsubpd ((__v2df)__A, (__v2df)__B, (__v2df)__C);
}

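/* _mm_msubadd_{ps,pd}: alternating multiply-subtract/add; odd-indexed
   elements get __A * __B - __C, even-indexed elements get __A * __B + __C.  */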
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_msubadd_ps (__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128) __builtin_ia32_vfmsubaddps ((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}

extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_msubadd_pd (__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d) __builtin_ia32_vfmsubaddpd ((__v2df)__A, (__v2df)__B, (__v2df)__C);
}

/* 256b Floating point multiply/add type instructions.  */
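/* _mm256_macc_{ps,pd}: 256-bit fused multiply-accumulate,
   __A * __B + __C per element.  */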
extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_macc_ps (__m256 __A, __m256 __B, __m256 __C)
{
  return (__m256) __builtin_ia32_vfmaddps256 ((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}

extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_macc_pd (__m256d __A, __m256d __B, __m256d __C)
{
  return (__m256d) __builtin_ia32_vfmaddpd256 ((__v4df)__A, (__v4df)__B, (__v4df)__C);
}

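/* _mm256_msub_{ps,pd}: 256-bit fused multiply-subtract, __A * __B - __C.  */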
extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_msub_ps (__m256 __A, __m256 __B, __m256 __C)
{
  return (__m256) __builtin_ia32_vfmsubps256 ((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}

extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_msub_pd (__m256d __A, __m256d __B, __m256d __C)
{
  return (__m256d) __builtin_ia32_vfmsubpd256 ((__v4df)__A, (__v4df)__B, (__v4df)__C);
}

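/* _mm256_nmacc_{ps,pd}: 256-bit negated multiply-accumulate,
   -(__A * __B) + __C.  */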
extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_nmacc_ps (__m256 __A, __m256 __B, __m256 __C)
{
  return (__m256) __builtin_ia32_vfnmaddps256 ((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}

extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_nmacc_pd (__m256d __A, __m256d __B, __m256d __C)
{
  return (__m256d) __builtin_ia32_vfnmaddpd256 ((__v4df)__A, (__v4df)__B, (__v4df)__C);
}

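/* _mm256_nmsub_{ps,pd}: 256-bit negated multiply-subtract,
   -(__A * __B) - __C.  */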
extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_nmsub_ps (__m256 __A, __m256 __B, __m256 __C)
{
  return (__m256) __builtin_ia32_vfnmsubps256 ((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}

extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_nmsub_pd (__m256d __A, __m256d __B, __m256d __C)
{
  return (__m256d) __builtin_ia32_vfnmsubpd256 ((__v4df)__A, (__v4df)__B, (__v4df)__C);
}

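/* _mm256_maddsub_{ps,pd}: 256-bit alternating multiply-add/subtract
   (add in odd-indexed elements, subtract in even-indexed elements).  */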
extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maddsub_ps (__m256 __A, __m256 __B, __m256 __C)
{
  return (__m256) __builtin_ia32_vfmaddsubps256 ((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}

extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maddsub_pd (__m256d __A, __m256d __B, __m256d __C)
{
  return (__m256d) __builtin_ia32_vfmaddsubpd256 ((__v4df)__A, (__v4df)__B, (__v4df)__C);
}

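/* _mm256_msubadd_{ps,pd}: 256-bit alternating multiply-subtract/add
   (subtract in odd-indexed elements, add in even-indexed elements).  */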
extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_msubadd_ps (__m256 __A, __m256 __B, __m256 __C)
{
  return (__m256) __builtin_ia32_vfmsubaddps256 ((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}

extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_msubadd_pd (__m256d __A, __m256d __B, __m256d __C)
{
  return (__m256d) __builtin_ia32_vfmsubaddpd256 ((__v4df)__A, (__v4df)__B, (__v4df)__C);
}

#endif /* __FMA4__ */

#endif /* _FMA4INTRIN_H_INCLUDED */