// x86.ad revision 3142:9b8ce46870df
//
// Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

// X86 Common Architecture Description File

source %{
  // Helpers returning the addresses of the 128-bit sign-mask / sign-flip
  // constants used by the abs/neg instructs later in this file.
  // Float masks come from different places depending on platform.
#ifdef _LP64
  // 64-bit: the masks live in the x86 stub-routines area.
  static address float_signmask()  { return StubRoutines::x86::float_sign_mask(); }
  static address float_signflip()  { return StubRoutines::x86::float_sign_flip(); }
  static address double_signmask() { return StubRoutines::x86::double_sign_mask(); }
  static address double_signflip() { return StubRoutines::x86::double_sign_flip(); }
#else
  // 32-bit: the masks live in statically allocated pools.
  static address float_signmask()  { return (address)float_signmask_pool; }
  static address float_signflip()  { return (address)float_signflip_pool; }
  static address double_signmask() { return (address)double_signmask_pool; }
  static address double_signflip() { return (address)double_signflip_pool; }
#endif

#ifndef PRODUCT
  // Debug-only disassembly text for a MachNopNode: prints the pad size.
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif

  // Emit _count bytes of nop padding (size() below returns _count).
  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
    MacroAssembler _masm(&cbuf);
    __ nop(_count);
  }

  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count;
  }

#ifndef PRODUCT
  // Debug-only disassembly text for a MachBreakpointNode.
  void MachBreakpointNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("# breakpoint");
  }
#endif

  // A breakpoint node emits a single int3 trap instruction.
  void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
    MacroAssembler _masm(&cbuf);
    __ int3();
  }

  // Size is computed generically from the emitted code.
  uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
    return MachNode::size(ra_);
  }

%}

encode %{

  // Save RSP into rbp_mh_SP_save before a call that may change the stack
  // pointer.  (The name suggests method-handle call sites — confirm with
  // the users of this enc_class elsewhere in the sources.)
  enc_class preserve_SP %{
    debug_only(int off0 = cbuf.insts_size());
    MacroAssembler _masm(&cbuf);
    // RBP is preserved across all calls, even compiled calls.
    // Use it to preserve RSP in places where the callee might change the SP.
    __ movptr(rbp_mh_SP_save, rsp);
    debug_only(int off1 = cbuf.insts_size());
    // The emitted size must match preserve_SP_size()'s prediction.
    assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
  %}

  // Restore the stack pointer saved by preserve_SP above.
  enc_class restore_SP %{
    MacroAssembler _masm(&cbuf);
    __ movptr(rsp, rbp_mh_SP_save);
  %}

  // Optional post-call check: verify the frame's magic cookie is still at
  // the expected offset from RSP, i.e. the callee restored the stack depth.
  // Traps with int3 on mismatch.  Only active under -XX:+VerifyStackAtCalls.
  enc_class call_epilog %{
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP, -3*VMRegImpl::slots_per_word));
      MacroAssembler _masm(&cbuf);
      Label L;
      __ cmpptr(Address(rsp, framesize), (int32_t)0xbadb100d);
      __ jccb(Assembler::equal, L);
      // Die if stack mismatch
      __ int3();
      __ bind(L);
    }
  %}

%}

// INSTRUCTIONS -- Platform independent definitions (same for 32- and 64-bit)

// ============================================================================

// Matches the Halt ideal node; executing the emitted int3 traps, since
// control should never actually reach a Halt.
instruct ShouldNotReachHere() %{
  match(Halt);
  format %{ "int3\t# ShouldNotReachHere" %}
  ins_encode %{
    __ int3();
  %}
  ins_pipe(pipe_slow);
%}

// ============================================================================

// ---- Scalar float add (AddF) ----
// The SSE forms (UseAVX == 0) are destructive two-operand encodings:
// dst is both first source and destination.  The AVX forms (UseAVX > 0)
// are non-destructive three-operand encodings.

// dst = dst + src
instruct addF_reg(regF dst, regF src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (AddF dst src));

  format %{ "addss   $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ addss($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// Memory operand folded directly into the add.
instruct addF_mem(regF dst, memory src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (AddF dst (LoadF src)));

  format %{ "addss   $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ addss($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// Constant operand comes from the constant table.
instruct addF_imm(regF dst, immF con) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (AddF dst con));
  format %{ "addss   $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ addss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// AVX: dst = src1 + src2, sources untouched.
instruct vaddF_reg(regF dst, regF src1, regF src2) %{
  predicate(UseAVX > 0);
  match(Set dst (AddF src1 src2));

  format %{ "vaddss  $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vaddss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// AVX with the second source folded from memory.
instruct vaddF_mem(regF dst, regF src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (AddF src1 (LoadF src2)));

  format %{ "vaddss  $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vaddss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// AVX with a constant-table operand.
instruct vaddF_imm(regF dst, regF src, immF con) %{
  predicate(UseAVX > 0);
  match(Set dst (AddF src con));

  format %{ "vaddss  $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vaddss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// ---- Scalar double add (AddD) ----
// Same six shapes as the AddF family above, but using the sd (double)
// encodings; doubles require UseSSE >= 2 for the non-AVX forms.

instruct addD_reg(regD dst, regD src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (AddD dst src));

  format %{ "addsd   $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ addsd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// Memory operand folded directly into the add.
instruct addD_mem(regD dst, memory src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (AddD dst (LoadD src)));

  format %{ "addsd   $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ addsd($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// Constant operand comes from the constant table.
instruct addD_imm(regD dst, immD con) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (AddD dst con));
  format %{ "addsd   $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ addsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// AVX: dst = src1 + src2, sources untouched.
instruct vaddD_reg(regD dst, regD src1, regD src2) %{
  predicate(UseAVX > 0);
  match(Set dst (AddD src1 src2));

  format %{ "vaddsd  $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vaddsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// AVX with the second source folded from memory.
instruct vaddD_mem(regD dst, regD src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (AddD src1 (LoadD src2)));

  format %{ "vaddsd  $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vaddsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// AVX with a constant-table operand.
instruct vaddD_imm(regD dst, regD src, immD con) %{
  predicate(UseAVX > 0);
  match(Set dst (AddD src con));

  format %{ "vaddsd  $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vaddsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// ---- Scalar float subtract (SubF) ----
// Same reg/mem/imm x SSE/AVX matrix as the add families above.

instruct subF_reg(regF dst, regF src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (SubF dst src));

  format %{ "subss   $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ subss($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// Memory operand folded directly into the subtract.
instruct subF_mem(regF dst, memory src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (SubF dst (LoadF src)));

  format %{ "subss   $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ subss($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// Constant operand comes from the constant table.
instruct subF_imm(regF dst, immF con) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (SubF dst con));
  format %{ "subss   $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ subss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// AVX: dst = src1 - src2, sources untouched.
instruct vsubF_reg(regF dst, regF src1, regF src2) %{
  predicate(UseAVX > 0);
  match(Set dst (SubF src1 src2));

  format %{ "vsubss  $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vsubss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// AVX with the second source folded from memory.
instruct vsubF_mem(regF dst, regF src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (SubF src1 (LoadF src2)));

  format %{ "vsubss  $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vsubss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// AVX with a constant-table operand.
instruct vsubF_imm(regF dst, regF src, immF con) %{
  predicate(UseAVX > 0);
  match(Set dst (SubF src con));

  format %{ "vsubss  $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vsubss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// ---- Scalar double subtract (SubD) ----
// Double variants of the SubF family; UseSSE >= 2 for the non-AVX forms.

instruct subD_reg(regD dst, regD src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (SubD dst src));

  format %{ "subsd   $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ subsd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// Memory operand folded directly into the subtract.
instruct subD_mem(regD dst, memory src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (SubD dst (LoadD src)));

  format %{ "subsd   $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ subsd($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// Constant operand comes from the constant table.
instruct subD_imm(regD dst, immD con) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (SubD dst con));
  format %{ "subsd   $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ subsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// AVX: dst = src1 - src2, sources untouched.
instruct vsubD_reg(regD dst, regD src1, regD src2) %{
  predicate(UseAVX > 0);
  match(Set dst (SubD src1 src2));

  format %{ "vsubsd  $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vsubsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// AVX with the second source folded from memory.
instruct vsubD_mem(regD dst, regD src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (SubD src1 (LoadD src2)));

  format %{ "vsubsd  $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vsubsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// AVX with a constant-table operand.
instruct vsubD_imm(regD dst, regD src, immD con) %{
  predicate(UseAVX > 0);
  match(Set dst (SubD src con));

  format %{ "vsubsd  $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vsubsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// ---- Scalar float multiply (MulF) ----
// Same reg/mem/imm x SSE/AVX matrix as the families above.

instruct mulF_reg(regF dst, regF src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (MulF dst src));

  format %{ "mulss   $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ mulss($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// Memory operand folded directly into the multiply.
instruct mulF_mem(regF dst, memory src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (MulF dst (LoadF src)));

  format %{ "mulss   $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ mulss($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// Constant operand comes from the constant table.
instruct mulF_imm(regF dst, immF con) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (MulF dst con));
  format %{ "mulss   $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ mulss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// AVX: dst = src1 * src2, sources untouched.
instruct vmulF_reg(regF dst, regF src1, regF src2) %{
  predicate(UseAVX > 0);
  match(Set dst (MulF src1 src2));

  format %{ "vmulss  $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vmulss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// AVX with the second source folded from memory.
instruct vmulF_mem(regF dst, regF src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (MulF src1 (LoadF src2)));

  format %{ "vmulss  $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vmulss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// AVX with a constant-table operand.
instruct vmulF_imm(regF dst, regF src, immF con) %{
  predicate(UseAVX > 0);
  match(Set dst (MulF src con));

  format %{ "vmulss  $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vmulss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// ---- Scalar double multiply (MulD) ----
// Double variants of the MulF family; UseSSE >= 2 for the non-AVX forms.

instruct mulD_reg(regD dst, regD src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (MulD dst src));

  format %{ "mulsd   $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ mulsd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// Memory operand folded directly into the multiply.
instruct mulD_mem(regD dst, memory src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (MulD dst (LoadD src)));

  format %{ "mulsd   $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ mulsd($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// Constant operand comes from the constant table.
instruct mulD_imm(regD dst, immD con) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (MulD dst con));
  format %{ "mulsd   $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ mulsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// AVX: dst = src1 * src2, sources untouched.
instruct vmulD_reg(regD dst, regD src1, regD src2) %{
  predicate(UseAVX > 0);
  match(Set dst (MulD src1 src2));

  format %{ "vmulsd  $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vmulsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// AVX with the second source folded from memory.
instruct vmulD_mem(regD dst, regD src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (MulD src1 (LoadD src2)));

  format %{ "vmulsd  $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vmulsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// AVX with a constant-table operand.
instruct vmulD_imm(regD dst, regD src, immD con) %{
  predicate(UseAVX > 0);
  match(Set dst (MulD src con));

  format %{ "vmulsd  $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vmulsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// ---- Scalar float divide (DivF) ----
// Same reg/mem/imm x SSE/AVX matrix as the families above.

instruct divF_reg(regF dst, regF src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (DivF dst src));

  format %{ "divss   $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ divss($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// Memory operand folded directly into the divide.
instruct divF_mem(regF dst, memory src) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (DivF dst (LoadF src)));

  format %{ "divss   $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ divss($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// Constant divisor comes from the constant table.
instruct divF_imm(regF dst, immF con) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (DivF dst con));
  format %{ "divss   $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ divss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// AVX: dst = src1 / src2, sources untouched.
instruct vdivF_reg(regF dst, regF src1, regF src2) %{
  predicate(UseAVX > 0);
  match(Set dst (DivF src1 src2));

  format %{ "vdivss  $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vdivss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// AVX with the divisor folded from memory.
instruct vdivF_mem(regF dst, regF src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (DivF src1 (LoadF src2)));

  format %{ "vdivss  $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vdivss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// AVX with a constant-table divisor.
instruct vdivF_imm(regF dst, regF src, immF con) %{
  predicate(UseAVX > 0);
  match(Set dst (DivF src con));

  format %{ "vdivss  $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vdivss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// ---- Scalar double divide (DivD) ----
// Double variants of the DivF family; UseSSE >= 2 for the non-AVX forms.

instruct divD_reg(regD dst, regD src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (DivD dst src));

  format %{ "divsd   $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ divsd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// Memory operand folded directly into the divide.
instruct divD_mem(regD dst, memory src) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (DivD dst (LoadD src)));

  format %{ "divsd   $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ divsd($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// Constant divisor comes from the constant table.
instruct divD_imm(regD dst, immD con) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (DivD dst con));
  format %{ "divsd   $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ divsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// AVX: dst = src1 / src2, sources untouched.
instruct vdivD_reg(regD dst, regD src1, regD src2) %{
  predicate(UseAVX > 0);
  match(Set dst (DivD src1 src2));

  format %{ "vdivsd  $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vdivsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// AVX with the divisor folded from memory.
instruct vdivD_mem(regD dst, regD src1, memory src2) %{
  predicate(UseAVX > 0);
  match(Set dst (DivD src1 (LoadD src2)));

  format %{ "vdivsd  $dst, $src1, $src2" %}
  ins_cost(150);
  ins_encode %{
    __ vdivsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// AVX with a constant-table divisor.
instruct vdivD_imm(regD dst, regD src, immD con) %{
  predicate(UseAVX > 0);
  match(Set dst (DivD src con));

  format %{ "vdivsd  $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ vdivsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// ---- Float/double absolute value (AbsF/AbsD) ----
// Implemented by AND-ing with a sign-bit mask (0x7fff... clears the sign
// bit); the mask addresses come from the float_signmask()/double_signmask()
// helpers defined in the source block at the top of this file.

instruct absF_reg(regF dst) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (AbsF dst));
  ins_cost(150);
  format %{ "andps   $dst, [0x7fffffff]\t# abs float by sign masking" %}
  ins_encode %{
    __ andps($dst$$XMMRegister, ExternalAddress(float_signmask()));
  %}
  ins_pipe(pipe_slow);
%}

// AVX non-destructive form: dst = src & signmask.
instruct vabsF_reg(regF dst, regF src) %{
  predicate(UseAVX > 0);
  match(Set dst (AbsF src));
  ins_cost(150);
  format %{ "vandps  $dst, $src, [0x7fffffff]\t# abs float by sign masking" %}
  ins_encode %{
    __ vandps($dst$$XMMRegister, $src$$XMMRegister,
              ExternalAddress(float_signmask()));
  %}
  ins_pipe(pipe_slow);
%}

instruct absD_reg(regD dst) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (AbsD dst));
  ins_cost(150);
  format %{ "andpd   $dst, [0x7fffffffffffffff]\t"
            "# abs double by sign masking" %}
  ins_encode %{
    __ andpd($dst$$XMMRegister, ExternalAddress(double_signmask()));
  %}
  ins_pipe(pipe_slow);
%}

// AVX non-destructive form: dst = src & signmask.
instruct vabsD_reg(regD dst, regD src) %{
  predicate(UseAVX > 0);
  match(Set dst (AbsD src));
  ins_cost(150);
  format %{ "vandpd  $dst, $src, [0x7fffffffffffffff]\t"
            "# abs double by sign masking" %}
  ins_encode %{
    __ vandpd($dst$$XMMRegister, $src$$XMMRegister,
              ExternalAddress(double_signmask()));
  %}
  ins_pipe(pipe_slow);
%}

// ---- Float/double negate (NegF/NegD) ----
// Implemented by XOR-ing with a sign-flip mask (0x8000... toggles only the
// sign bit); the mask addresses come from float_signflip()/double_signflip()
// defined in the source block at the top of this file.

instruct negF_reg(regF dst) %{
  predicate((UseSSE>=1) && (UseAVX == 0));
  match(Set dst (NegF dst));
  ins_cost(150);
  format %{ "xorps   $dst, [0x80000000]\t# neg float by sign flipping" %}
  ins_encode %{
    __ xorps($dst$$XMMRegister, ExternalAddress(float_signflip()));
  %}
  ins_pipe(pipe_slow);
%}

// AVX non-destructive form: dst = src ^ signflip.
instruct vnegF_reg(regF dst, regF src) %{
  predicate(UseAVX > 0);
  match(Set dst (NegF src));
  ins_cost(150);
  format %{ "vxorps  $dst, $src, [0x80000000]\t# neg float by sign flipping" %}
  ins_encode %{
    __ vxorps($dst$$XMMRegister, $src$$XMMRegister,
              ExternalAddress(float_signflip()));
  %}
  ins_pipe(pipe_slow);
%}

instruct negD_reg(regD dst) %{
  predicate((UseSSE>=2) && (UseAVX == 0));
  match(Set dst (NegD dst));
  ins_cost(150);
  format %{ "xorpd   $dst, [0x8000000000000000]\t"
            "# neg double by sign flipping" %}
  ins_encode %{
    __ xorpd($dst$$XMMRegister, ExternalAddress(double_signflip()));
  %}
  ins_pipe(pipe_slow);
%}

// AVX non-destructive form: dst = src ^ signflip.
instruct vnegD_reg(regD dst, regD src) %{
  predicate(UseAVX > 0);
  match(Set dst (NegD src));
  ins_cost(150);
  format %{ "vxorpd  $dst, $src, [0x8000000000000000]\t"
            "# neg double by sign flipping" %}
  ins_encode %{
    __ vxorpd($dst$$XMMRegister, $src$$XMMRegister,
              ExternalAddress(double_signflip()));
  %}
  ins_pipe(pipe_slow);
%}

// ---- Square root (SqrtD) ----
// There is no SqrtF ideal node: a single-precision sqrt appears in the
// ideal graph as ConvD2F(SqrtD(ConvF2D(x))).  The sqrtF_* rules match that
// whole shape and collapse it to one sqrtss.  Note these rules only
// require UseSSE (no UseAVX == 0 term), unlike the arithmetic families.

instruct sqrtF_reg(regF dst, regF src) %{
  predicate(UseSSE>=1);
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  format %{ "sqrtss  $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtss($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// Same shape with the source folded from memory.
instruct sqrtF_mem(regF dst, memory src) %{
  predicate(UseSSE>=1);
  match(Set dst (ConvD2F (SqrtD (ConvF2D (LoadF src)))));

  format %{ "sqrtss  $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtss($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// Same shape with a constant-table source.
instruct sqrtF_imm(regF dst, immF con) %{
  predicate(UseSSE>=1);
  match(Set dst (ConvD2F (SqrtD (ConvF2D con))));
  format %{ "sqrtss  $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// Double sqrt matches SqrtD directly.
instruct sqrtD_reg(regD dst, regD src) %{
  predicate(UseSSE>=2);
  match(Set dst (SqrtD src));

  format %{ "sqrtsd  $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtsd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe(pipe_slow);
%}

// Double sqrt with the source folded from memory.
instruct sqrtD_mem(regD dst, memory src) %{
  predicate(UseSSE>=2);
  match(Set dst (SqrtD (LoadD src)));

  format %{ "sqrtsd  $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtsd($dst$$XMMRegister, $src$$Address);
  %}
  ins_pipe(pipe_slow);
%}

// Double sqrt with a constant-table source.
instruct sqrtD_imm(regD dst, immD con) %{
  predicate(UseSSE>=2);
  match(Set dst (SqrtD con));
  format %{ "sqrtsd  $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ sqrtsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}
