;; GCC machine description for i386 synchronization instructions.
;; Copyright (C) 2005-2015 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

(define_c_enum "unspec" [
  UNSPEC_LFENCE
  UNSPEC_SFENCE
  UNSPEC_MFENCE

  UNSPEC_FILD_ATOMIC
  UNSPEC_FIST_ATOMIC

  ;; __atomic support
  UNSPEC_LDA
  UNSPEC_STA
])

(define_c_enum "unspecv" [
  UNSPECV_CMPXCHG
  UNSPECV_XCHG
  UNSPECV_LOCK
])

(define_expand "sse2_lfence"
  [(set (match_dup 0)
	(unspec:BLK [(match_dup 0)] UNSPEC_LFENCE))]
  "TARGET_SSE2"
{
  operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
  MEM_VOLATILE_P (operands[0]) = 1;
})

(define_insn "*sse2_lfence"
  [(set (match_operand:BLK 0)
	(unspec:BLK [(match_dup 0)] UNSPEC_LFENCE))]
  "TARGET_SSE2"
  "lfence"
  [(set_attr "type" "sse")
   (set_attr "length_address" "0")
   (set_attr "atom_sse_attr" "lfence")
   (set_attr "memory" "unknown")])

(define_expand "sse_sfence"
  [(set (match_dup 0)
	(unspec:BLK [(match_dup 0)] UNSPEC_SFENCE))]
  "TARGET_SSE || TARGET_3DNOW_A"
{
  operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
  MEM_VOLATILE_P (operands[0]) = 1;
})

(define_insn "*sse_sfence"
  [(set (match_operand:BLK 0)
	(unspec:BLK [(match_dup 0)] UNSPEC_SFENCE))]
  "TARGET_SSE || TARGET_3DNOW_A"
  "sfence"
  [(set_attr "type" "sse")
   (set_attr "length_address" "0")
   (set_attr "atom_sse_attr" "fence")
   (set_attr "memory" "unknown")])

(define_expand "sse2_mfence"
  [(set (match_dup 0)
	(unspec:BLK [(match_dup 0)] UNSPEC_MFENCE))]
  "TARGET_SSE2"
{
  operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
  MEM_VOLATILE_P (operands[0]) = 1;
})

(define_insn "mfence_sse2"
  [(set (match_operand:BLK 0)
	(unspec:BLK [(match_dup 0)] UNSPEC_MFENCE))]
  "TARGET_64BIT || TARGET_SSE2"
  "mfence"
  [(set_attr "type" "sse")
   (set_attr "length_address" "0")
   (set_attr "atom_sse_attr" "fence")
   (set_attr "memory" "unknown")])

(define_insn "mfence_nosse"
  [(set (match_operand:BLK 0)
	(unspec:BLK [(match_dup 0)] UNSPEC_MFENCE))
   (clobber (reg:CC FLAGS_REG))]
  "!(TARGET_64BIT || TARGET_SSE2)"
  "lock{%;} or{l}\t{$0, (%%esp)|DWORD PTR [esp], 0}"
  [(set_attr "memory" "unknown")])

(define_expand "mem_thread_fence"
  [(match_operand:SI 0 "const_int_operand")]		;; model
  ""
{
  enum memmodel model = memmodel_from_int (INTVAL (operands[0]));

  /* Unless this is a SEQ_CST fence, the i386 memory model is strong
     enough not to require barriers of any kind.  */
  if (is_mm_seq_cst (model))
    {
      rtx (*mfence_insn)(rtx);
      rtx mem;

      if (TARGET_64BIT || TARGET_SSE2)
	mfence_insn = gen_mfence_sse2;
      else
	mfence_insn = gen_mfence_nosse;

      mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
      MEM_VOLATILE_P (mem) = 1;

      emit_insn (mfence_insn (mem));
    }
  DONE;
})
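
;; Illustration (a sketch added for exposition, not in the original
;; sources): a fence written in C as
;;
;;   __atomic_thread_fence (__ATOMIC_SEQ_CST);
;;
;; goes through this expander and becomes an mfence (or the lock-or
;; fallback above when SSE2 is unavailable), while
;;
;;   __atomic_thread_fence (__ATOMIC_ACQUIRE);
;;
;; emits no instruction at all, since the expander reaches DONE without
;; generating anything for non-SEQ_CST models.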

;; ??? From volume 3 section 8.1.1 Guaranteed Atomic Operations,
;; only beginning with the Pentium family of processors do we get any
;; guarantee of atomicity for aligned 64-bit quantities.  Beginning with
;; the P6 family, we get a guarantee for 64-bit accesses that do not
;; cross a cache-line boundary.
;;
;; Note that the TARGET_CMPXCHG8B test below is a stand-in for "Pentium".
;;
;; Importantly, *no* processor makes atomicity guarantees for larger
;; accesses.  In particular, there's no way to perform an atomic TImode
;; move, despite the apparent applicability of MOVDQA et al.

(define_mode_iterator ATOMIC
   [QI HI SI
    (DI "TARGET_64BIT || (TARGET_CMPXCHG8B && (TARGET_80387 || TARGET_SSE))")
   ])

(define_expand "atomic_load<mode>"
  [(set (match_operand:ATOMIC 0 "nonimmediate_operand")
	(unspec:ATOMIC [(match_operand:ATOMIC 1 "memory_operand")
			(match_operand:SI 2 "const_int_operand")]
		       UNSPEC_LDA))]
  ""
{
  /* For DImode on 32-bit, we can use the FPU to perform the load.  */
  if (<MODE>mode == DImode && !TARGET_64BIT)
    emit_insn (gen_atomic_loaddi_fpu
	       (operands[0], operands[1],
	        assign_386_stack_local (DImode, SLOT_TEMP)));
  else
    {
      rtx dst = operands[0];

      if (MEM_P (dst))
	dst = gen_reg_rtx (<MODE>mode);

      emit_move_insn (dst, operands[1]);

      /* Fix up the destination if needed.  */
      if (dst != operands[0])
	emit_move_insn (operands[0], dst);
    }
  DONE;
})
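
;; Illustration (a sketch of typical -m32 code, added for exposition):
;; on a 32-bit target that satisfies the DImode condition of the ATOMIC
;; iterator above, a 64-bit atomic load such as
;;
;;   long long v = __atomic_load_n (&x, __ATOMIC_RELAXED);  /* x: long long */
;;
;; takes the atomic_loaddi_fpu path and is typically realized as an
;; fild/fistp pair (or an SSE load) through a stack temporary, because a
;; pair of 32-bit moves would not be atomic.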

(define_insn_and_split "atomic_loaddi_fpu"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=x,m,?r")
	(unspec:DI [(match_operand:DI 1 "memory_operand" "m,m,m")]
		   UNSPEC_LDA))
   (clobber (match_operand:DI 2 "memory_operand" "=X,X,m"))
   (clobber (match_scratch:DF 3 "=X,xf,xf"))]
  "!TARGET_64BIT && (TARGET_80387 || TARGET_SSE)"
  "#"
  "&& reload_completed"
  [(const_int 0)]
{
  rtx dst = operands[0], src = operands[1];
  rtx mem = operands[2], tmp = operands[3];

  if (SSE_REG_P (dst))
    emit_move_insn (dst, src);
  else
    {
      if (MEM_P (dst))
	mem = dst;

      if (STACK_REG_P (tmp))
        {
	  emit_insn (gen_loaddi_via_fpu (tmp, src));
	  emit_insn (gen_storedi_via_fpu (mem, tmp));
	}
      else
	{
	  adjust_reg_mode (tmp, DImode);
	  emit_move_insn (tmp, src);
	  emit_move_insn (mem, tmp);
	}

      if (mem != dst)
	emit_move_insn (dst, mem);
    }
  DONE;
})

(define_expand "atomic_store<mode>"
  [(set (match_operand:ATOMIC 0 "memory_operand")
	(unspec:ATOMIC [(match_operand:ATOMIC 1 "nonimmediate_operand")
			(match_operand:SI 2 "const_int_operand")]
		       UNSPEC_STA))]
  ""
{
  enum memmodel model = memmodel_from_int (INTVAL (operands[2]));

  if (<MODE>mode == DImode && !TARGET_64BIT)
    {
      /* For DImode on 32-bit, we can use the FPU to perform the store.  */
      /* Note that while we could perform a cmpxchg8b loop, that turns
	 out to be significantly larger than this plus a barrier.  */
      emit_insn (gen_atomic_storedi_fpu
		 (operands[0], operands[1],
	          assign_386_stack_local (DImode, SLOT_TEMP)));
    }
  else
    {
      operands[1] = force_reg (<MODE>mode, operands[1]);

      /* For seq-cst stores, when we lack MFENCE, use XCHG.  */
      if (is_mm_seq_cst (model) && !(TARGET_64BIT || TARGET_SSE2))
	{
	  emit_insn (gen_atomic_exchange<mode> (gen_reg_rtx (<MODE>mode),
						operands[0], operands[1],
						operands[2]));
	  DONE;
	}

      /* Otherwise use a store.  */
      emit_insn (gen_atomic_store<mode>_1 (operands[0], operands[1],
					   operands[2]));
    }
  /* ... followed by an MFENCE, if required.  */
  if (is_mm_seq_cst (model))
    emit_insn (gen_mem_thread_fence (operands[2]));
  DONE;
})
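
;; Illustration (a sketch added for exposition): a sequentially
;; consistent store such as
;;
;;   __atomic_store_n (&x, 1, __ATOMIC_SEQ_CST);
;;
;; expands here to a plain mov followed by an mfence when mfence is
;; available, or to an xchg (implicitly locked, and a full barrier) when
;; it is not; release and weaker stores are just the mov.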

(define_insn "atomic_store<mode>_1"
  [(set (match_operand:SWI 0 "memory_operand" "=m")
	(unspec:SWI [(match_operand:SWI 1 "<nonmemory_operand>" "<r><i>")
		     (match_operand:SI 2 "const_int_operand")]
		    UNSPEC_STA))]
  ""
  "%K2mov{<imodesuffix>}\t{%1, %0|%0, %1}")

(define_insn_and_split "atomic_storedi_fpu"
  [(set (match_operand:DI 0 "memory_operand" "=m,m,m")
	(unspec:DI [(match_operand:DI 1 "nonimmediate_operand" "x,m,?r")]
		   UNSPEC_STA))
   (clobber (match_operand:DI 2 "memory_operand" "=X,X,m"))
   (clobber (match_scratch:DF 3 "=X,xf,xf"))]
  "!TARGET_64BIT && (TARGET_80387 || TARGET_SSE)"
  "#"
  "&& reload_completed"
  [(const_int 0)]
{
  rtx dst = operands[0], src = operands[1];
  rtx mem = operands[2], tmp = operands[3];

  if (!SSE_REG_P (src))
    {
      if (REG_P (src))
	{
	  emit_move_insn (mem, src);
	  src = mem;
	}

      if (STACK_REG_P (tmp))
	{
	  emit_insn (gen_loaddi_via_fpu (tmp, src));
	  emit_insn (gen_storedi_via_fpu (dst, tmp));
	  DONE;
	}
      else
	{
	  adjust_reg_mode (tmp, DImode);
	  emit_move_insn (tmp, src);
	  src = tmp;
	}
    }
  emit_move_insn (dst, src);
  DONE;
})

;; ??? You'd think that we'd be able to perform this via FLOAT + FIX_TRUNC
;; operations.  But the fix_trunc patterns want way more setup than we want
;; to provide.  Note that the scratch is DFmode instead of XFmode in order
;; to make it easy to allocate a scratch in either SSE or FP_REGs above.

(define_insn "loaddi_via_fpu"
  [(set (match_operand:DF 0 "register_operand" "=f")
	(unspec:DF [(match_operand:DI 1 "memory_operand" "m")]
		   UNSPEC_FILD_ATOMIC))]
  "TARGET_80387"
  "fild%Z1\t%1"
  [(set_attr "type" "fmov")
   (set_attr "mode" "DF")
   (set_attr "fp_int_src" "true")])

(define_insn "storedi_via_fpu"
  [(set (match_operand:DI 0 "memory_operand" "=m")
	(unspec:DI [(match_operand:DF 1 "register_operand" "f")]
		   UNSPEC_FIST_ATOMIC))]
  "TARGET_80387"
{
  gcc_assert (find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != NULL_RTX);

  return "fistp%Z0\t%0";
}
  [(set_attr "type" "fmov")
   (set_attr "mode" "DI")])

(define_expand "atomic_compare_and_swap<mode>"
  [(match_operand:QI 0 "register_operand")	;; bool success output
   (match_operand:SWI124 1 "register_operand")	;; oldval output
   (match_operand:SWI124 2 "memory_operand")	;; memory
   (match_operand:SWI124 3 "register_operand")	;; expected input
   (match_operand:SWI124 4 "register_operand")	;; newval input
   (match_operand:SI 5 "const_int_operand")	;; is_weak
   (match_operand:SI 6 "const_int_operand")	;; success model
   (match_operand:SI 7 "const_int_operand")]	;; failure model
  "TARGET_CMPXCHG"
{
  emit_insn
   (gen_atomic_compare_and_swap<mode>_1
    (operands[1], operands[2], operands[3], operands[4], operands[6]));
  ix86_expand_setcc (operands[0], EQ, gen_rtx_REG (CCZmode, FLAGS_REG),
		     const0_rtx);
  DONE;
})
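
;; Illustration (a sketch added for exposition): a call such as
;;
;;   bool ok = __atomic_compare_exchange_n (&x, &expected, desired,
;;					     false, __ATOMIC_SEQ_CST,
;;					     __ATOMIC_SEQ_CST);
;;
;; expands through this pattern to a lock cmpxchg with the expected
;; value in the accumulator, followed by ix86_expand_setcc reading ZF
;; to produce the boolean result.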

(define_mode_iterator CASMODE
  [(DI "TARGET_64BIT || TARGET_CMPXCHG8B")
   (TI "TARGET_64BIT && TARGET_CMPXCHG16B")])
(define_mode_attr CASHMODE [(DI "SI") (TI "DI")])

(define_expand "atomic_compare_and_swap<mode>"
  [(match_operand:QI 0 "register_operand")	;; bool success output
   (match_operand:CASMODE 1 "register_operand")	;; oldval output
   (match_operand:CASMODE 2 "memory_operand")	;; memory
   (match_operand:CASMODE 3 "register_operand")	;; expected input
   (match_operand:CASMODE 4 "register_operand")	;; newval input
   (match_operand:SI 5 "const_int_operand")	;; is_weak
   (match_operand:SI 6 "const_int_operand")	;; success model
   (match_operand:SI 7 "const_int_operand")]	;; failure model
  "TARGET_CMPXCHG"
{
  if (<MODE>mode == DImode && TARGET_64BIT)
    {
      emit_insn
       (gen_atomic_compare_and_swapdi_1
	(operands[1], operands[2], operands[3], operands[4], operands[6]));
    }
  else
    {
      machine_mode hmode = <CASHMODE>mode;

      emit_insn
       (gen_atomic_compare_and_swap<mode>_doubleword
        (operands[1], operands[2], operands[3],
	 gen_lowpart (hmode, operands[4]), gen_highpart (hmode, operands[4]),
	 operands[6]));
    }

  ix86_expand_setcc (operands[0], EQ, gen_rtx_REG (CCZmode, FLAGS_REG),
		     const0_rtx);
  DONE;
})

;; For double-word compare and swap, we are obliged to play tricks with
;; the input newval (op3:op4) because the Intel register numbering does
;; not match the gcc register numbering, so the pair must be CX:BX.

(define_mode_attr doublemodesuffix [(SI "8") (DI "16")])

(define_insn "atomic_compare_and_swap<dwi>_doubleword"
  [(set (match_operand:<DWI> 0 "register_operand" "=A")
	(unspec_volatile:<DWI>
	  [(match_operand:<DWI> 1 "memory_operand" "+m")
	   (match_operand:<DWI> 2 "register_operand" "0")
	   (match_operand:DWIH 3 "register_operand" "b")
	   (match_operand:DWIH 4 "register_operand" "c")
	   (match_operand:SI 5 "const_int_operand")]
	  UNSPECV_CMPXCHG))
   (set (match_dup 1)
	(unspec_volatile:<DWI> [(const_int 0)] UNSPECV_CMPXCHG))
   (set (reg:CCZ FLAGS_REG)
        (unspec_volatile:CCZ [(const_int 0)] UNSPECV_CMPXCHG))]
  "TARGET_CMPXCHG<doublemodesuffix>B"
  "lock{%;} %K5cmpxchg<doublemodesuffix>b\t%1")
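
;; Illustration (hardware convention, restated for exposition): for
;; cmpxchg8b/cmpxchg16b the expected value and the result live in
;; %edx:%eax (%rdx:%rax), while the new value must be in %ecx:%ebx
;; (%rcx:%rbx); the "A", "b" and "c" constraints above pin the operands
;; to exactly those registers.  For example, on -m32
;;
;;   unsigned long long old = __sync_val_compare_and_swap (&v, o, n);
;;
;; is implemented with lock cmpxchg8b, with o in %edx:%eax and n in
;; %ecx:%ebx.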

(define_insn "atomic_compare_and_swap<mode>_1"
  [(set (match_operand:SWI 0 "register_operand" "=a")
	(unspec_volatile:SWI
	  [(match_operand:SWI 1 "memory_operand" "+m")
	   (match_operand:SWI 2 "register_operand" "0")
	   (match_operand:SWI 3 "register_operand" "<r>")
	   (match_operand:SI 4 "const_int_operand")]
	  UNSPECV_CMPXCHG))
   (set (match_dup 1)
	(unspec_volatile:SWI [(const_int 0)] UNSPECV_CMPXCHG))
   (set (reg:CCZ FLAGS_REG)
        (unspec_volatile:CCZ [(const_int 0)] UNSPECV_CMPXCHG))]
  "TARGET_CMPXCHG"
  "lock{%;} %K4cmpxchg{<imodesuffix>}\t{%3, %1|%1, %3}")

;; For operand 2, the nonmemory_operand predicate is used instead of
;; register_operand so that the combiner can better optimize atomic
;; additions of constants.
(define_insn "atomic_fetch_add<mode>"
  [(set (match_operand:SWI 0 "register_operand" "=<r>")
	(unspec_volatile:SWI
	  [(match_operand:SWI 1 "memory_operand" "+m")
	   (match_operand:SI 3 "const_int_operand")]		;; model
	  UNSPECV_XCHG))
   (set (match_dup 1)
	(plus:SWI (match_dup 1)
		  (match_operand:SWI 2 "nonmemory_operand" "0")))
   (clobber (reg:CC FLAGS_REG))]
  "TARGET_XADD"
  "lock{%;} %K3xadd{<imodesuffix>}\t{%0, %1|%1, %0}")
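
;; Illustration (a sketch added for exposition):
;;
;;   int old = __atomic_fetch_add (&x, 5, __ATOMIC_SEQ_CST);
;;
;; expands here to roughly "mov $5, %reg; lock xadd %reg, x".  Keeping
;; operand 2 a nonmemory_operand leaves the constant visible, which is
;; what enables the peephole2 below.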

;; This peephole2 and the following insn optimize
;; __sync_fetch_and_add (x, -N) == N into just lock {add,sub,inc,dec}
;; followed by a test of the flags instead of a lock xadd and a comparison.
(define_peephole2
  [(set (match_operand:SWI 0 "register_operand")
	(match_operand:SWI 2 "const_int_operand"))
   (parallel [(set (match_dup 0)
		   (unspec_volatile:SWI
		     [(match_operand:SWI 1 "memory_operand")
		      (match_operand:SI 4 "const_int_operand")]
		     UNSPECV_XCHG))
	      (set (match_dup 1)
		   (plus:SWI (match_dup 1)
			     (match_dup 0)))
	      (clobber (reg:CC FLAGS_REG))])
   (set (reg:CCZ FLAGS_REG)
	(compare:CCZ (match_dup 0)
		     (match_operand:SWI 3 "const_int_operand")))]
  "peep2_reg_dead_p (3, operands[0])
   && (unsigned HOST_WIDE_INT) INTVAL (operands[2])
      == -(unsigned HOST_WIDE_INT) INTVAL (operands[3])
   && !reg_overlap_mentioned_p (operands[0], operands[1])"
  [(parallel [(set (reg:CCZ FLAGS_REG)
		   (compare:CCZ
		     (unspec_volatile:SWI [(match_dup 1) (match_dup 4)]
					  UNSPECV_XCHG)
		     (match_dup 3)))
	      (set (match_dup 1)
		   (plus:SWI (match_dup 1)
			     (match_dup 2)))])])

(define_insn "*atomic_fetch_add_cmp<mode>"
  [(set (reg:CCZ FLAGS_REG)
	(compare:CCZ
	  (unspec_volatile:SWI
	    [(match_operand:SWI 0 "memory_operand" "+m")
	     (match_operand:SI 3 "const_int_operand")]		;; model
	    UNSPECV_XCHG)
	  (match_operand:SWI 2 "const_int_operand" "i")))
   (set (match_dup 0)
	(plus:SWI (match_dup 0)
		  (match_operand:SWI 1 "const_int_operand" "i")))]
  "(unsigned HOST_WIDE_INT) INTVAL (operands[1])
   == -(unsigned HOST_WIDE_INT) INTVAL (operands[2])"
{
  if (incdec_operand (operands[1], <MODE>mode))
    {
      if (operands[1] == const1_rtx)
	return "lock{%;} %K3inc{<imodesuffix>}\t%0";
      else
	{
	  gcc_assert (operands[1] == constm1_rtx);
	  return "lock{%;} %K3dec{<imodesuffix>}\t%0";
	}
    }

  if (x86_maybe_negate_const_int (&operands[1], <MODE>mode))
    return "lock{%;} %K3sub{<imodesuffix>}\t{%1, %0|%0, %1}";

  return "lock{%;} %K3add{<imodesuffix>}\t{%1, %0|%0, %1}";
})
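
;; Illustration (a sketch added for exposition): the peephole2 above
;; rewrites code of the shape
;;
;;   if (__sync_fetch_and_add (&x, -1) == 1) ...
;;
;; from "mov $-1; lock xadd; cmp $1" into a single "lock dec" (or lock
;; add/sub/inc for other constants) followed by a conditional jump on
;; the resulting flags.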

;; Recall that xchg implicitly asserts LOCK#, so adding it again wastes space.
;; In addition, it is always a full barrier, so we can ignore the memory model.
(define_insn "atomic_exchange<mode>"
  [(set (match_operand:SWI 0 "register_operand" "=<r>")		;; output
	(unspec_volatile:SWI
	  [(match_operand:SWI 1 "memory_operand" "+m")		;; memory
	   (match_operand:SI 3 "const_int_operand")]		;; model
	  UNSPECV_XCHG))
   (set (match_dup 1)
	(match_operand:SWI 2 "register_operand" "0"))]		;; input
  ""
  "%K3xchg{<imodesuffix>}\t{%1, %0|%0, %1}")
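
;; Illustration (a sketch added for exposition):
;;
;;   int old = __atomic_exchange_n (&x, 1, __ATOMIC_RELAXED);
;;
;; becomes a single xchg of a register with x, with no lock prefix and
;; no trailing fence regardless of the memory model, for the reasons
;; given in the comment above.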

(define_insn "atomic_add<mode>"
  [(set (match_operand:SWI 0 "memory_operand" "+m")
	(unspec_volatile:SWI
	  [(plus:SWI (match_dup 0)
		     (match_operand:SWI 1 "nonmemory_operand" "<r><i>"))
	   (match_operand:SI 2 "const_int_operand")]		;; model
	  UNSPECV_LOCK))
   (clobber (reg:CC FLAGS_REG))]
  ""
{
  if (incdec_operand (operands[1], <MODE>mode))
    {
      if (operands[1] == const1_rtx)
	return "lock{%;} %K2inc{<imodesuffix>}\t%0";
      else
	{
	  gcc_assert (operands[1] == constm1_rtx);
	  return "lock{%;} %K2dec{<imodesuffix>}\t%0";
	}
    }

  if (x86_maybe_negate_const_int (&operands[1], <MODE>mode))
    return "lock{%;} %K2sub{<imodesuffix>}\t{%1, %0|%0, %1}";

  return "lock{%;} %K2add{<imodesuffix>}\t{%1, %0|%0, %1}";
})

(define_insn "atomic_sub<mode>"
  [(set (match_operand:SWI 0 "memory_operand" "+m")
	(unspec_volatile:SWI
	  [(minus:SWI (match_dup 0)
		      (match_operand:SWI 1 "nonmemory_operand" "<r><i>"))
	   (match_operand:SI 2 "const_int_operand")]		;; model
	  UNSPECV_LOCK))
   (clobber (reg:CC FLAGS_REG))]
  ""
{
  if (incdec_operand (operands[1], <MODE>mode))
    {
      if (operands[1] == const1_rtx)
	return "lock{%;} %K2dec{<imodesuffix>}\t%0";
      else
	{
	  gcc_assert (operands[1] == constm1_rtx);
	  return "lock{%;} %K2inc{<imodesuffix>}\t%0";
	}
    }

  if (x86_maybe_negate_const_int (&operands[1], <MODE>mode))
    return "lock{%;} %K2add{<imodesuffix>}\t{%1, %0|%0, %1}";

  return "lock{%;} %K2sub{<imodesuffix>}\t{%1, %0|%0, %1}";
})

(define_insn "atomic_<logic><mode>"
  [(set (match_operand:SWI 0 "memory_operand" "+m")
	(unspec_volatile:SWI
	  [(any_logic:SWI (match_dup 0)
			  (match_operand:SWI 1 "nonmemory_operand" "<r><i>"))
	   (match_operand:SI 2 "const_int_operand")]		;; model
	  UNSPECV_LOCK))
   (clobber (reg:CC FLAGS_REG))]
  ""
  "lock{%;} %K2<logic>{<imodesuffix>}\t{%1, %0|%0, %1}")