/*	$NetBSD: membar_ops.S,v 1.6 2022/04/09 23:32:52 riastradh Exp $	*/

/*-
 * Copyright (c) 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "atomic_op_asm.h"

__RCSID("$NetBSD: membar_ops.S,v 1.6 2022/04/09 23:32:52 riastradh Exp $")

	.text

ENTRY(_membar_acquire)
	/*
	 * It is tempting to use isync to order load-before-load/store.
	 * However, isync orders prior loads only if their value flows
	 * into a control-flow dependency prior to the isync:
	 *
	 *	`[I]f an isync follows a conditional Branch instruction
	 *	 that depends on the value returned by a preceding Load
	 *	 instruction, the load on which the Branch depends is
	 *	 performed before any loads caused by instructions
	 *	 following the isync. This applies even if the effects
	 *	 of the ``dependency'' are independent of the value
	 *	 loaded (e.g., the value is compared to itself and the
	 *	 Branch tests the EQ bit in the selected CR field), and
	 *	 even if the branch target is the sequentially next
	 *	 instruction.'
	 *
	 *	--PowerPC Virtual Environment Architecture, Book II,
	 *	  Version 2.01, December 2003, 1.7.1 `Storage Access
	 *	  Ordering', p. 7.
	 *
	 * We are required here, however, to order _all_ prior loads,
	 * even if they do not flow into any control flow dependency.
	 * For example:
	 *
	 *	x = *p;
	 *	membar_acquire();
	 *	if (x) goto foo;
	 *
	 * This can't be implemented by:
	 *
	 *	lwz	x, p
	 *	isync
	 *	cmpwi	x, 0
	 *	bne	foo
	 *
	 * isync doesn't work here because there's no conditional
	 * branch depending on x between the lwz x, p and the isync.
	 *
	 * isync would only work if it followed the branch:
	 *
	 *	lwz	x, p
	 *	cmpwi	x, 0
	 *	bne	foo
	 *	isync
	 *	...
	 * foo:	isync
	 *	...
	 *
	 * lwsync orders everything except store-before-load, so it
	 * serves here -- see below in membar_release for lwsync.
	 * Except we can't use it on booke, so use sync for now.
	 */
	sync
	blr
END(_membar_acquire)
ATOMIC_OP_ALIAS(membar_acquire,_membar_acquire)
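
/*
 * Illustrative sketch (hypothetical variable names, not taken from the
 * architecture books quoted above): membar_acquire is the consumer-side
 * barrier of a typical publish/subscribe pairing.  With a flag `ready'
 * and a payload `data', a consumer might do:
 *
 *	while (!ready)
 *		continue;
 *	membar_acquire();
 *	x = data;
 *
 * The acquire barrier orders the loads of ready before the load of
 * data, so the consumer observes everything the producer published
 * before setting ready.  The matching producer-side pattern is
 * sketched after membar_release below.
 */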

ENTRY(_membar_release)
	/*
	 *	`The memory barrier provides an ordering function for
	 *	 the storage accesses caused by Load, Store, and dcbz
	 *	 instructions that are executed by the processor
	 *	 executing the [lwsync] instruction and for which the
	 *	 specified storage location is in storage that is
	 *	 Memory Coherence Required and is neither Write Through
	 *	 Required nor Caching Inhibited.  The applicable pairs
	 *	 are all pairs a_i, b_j of such accesses except those
	 *	 in which a_i is an access caused by a Store or dcbz
	 *	 instruction and b_j is an access caused by a Load
	 *	 instruction.'
	 *
	 *	--PowerPC Virtual Environment Architecture, Book II,
	 *	  Version 2.01, December 2003, 3.3.3 `Memory Barrier
	 *	  Instructions', p. 25.
	 *
	 * In brief, lwsync is an acquire-release barrier -- it orders
	 * load-before-load/store and load/store-before-store, but not
	 * store-before-load.  Except we can't use it on booke, so use
	 * sync for now.
	 */
	sync
	blr
END(_membar_release)
ATOMIC_OP_ALIAS(membar_release,_membar_release)
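
/*
 * Illustrative sketch (hypothetical variable names, not from the quoted
 * text): membar_release is the producer-side barrier of the same
 * publish/subscribe pairing sketched after membar_acquire above:
 *
 *	data = 42;
 *	membar_release();
 *	ready = 1;
 *
 * The release barrier orders the store to data before the store to
 * ready, so a consumer that observes ready = 1 and then issues
 * membar_acquire is guaranteed to observe data = 42.
 */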

ENTRY(_membar_sync)
	/*
	 * sync, or `heavyweight sync', is a full sequential
	 * consistency barrier.
	 */
	sync
	blr
END(_membar_sync)
ATOMIC_OP_ALIAS(membar_sync,_membar_sync)
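
/*
 * Illustrative sketch (hypothetical flags, not from the quoted text):
 * membar_sync is needed when a store must be ordered before a later
 * load, e.g. a simplified flag-based mutual-exclusion handshake:
 *
 *	CPU 0			CPU 1
 *	me[0] = 1;		me[1] = 1;
 *	membar_sync();		membar_sync();
 *	if (!me[1])		if (!me[0])
 *		enter();		enter();
 *
 * Neither membar_acquire nor membar_release (nor lwsync) orders the
 * store to me[i] before the load of the other CPU's flag; only the
 * full sync does, which is what rules out both CPUs entering at once.
 */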

ATOMIC_OP_ALIAS(membar_producer,_membar_release)
STRONG_ALIAS(_membar_producer,_membar_release)
ATOMIC_OP_ALIAS(membar_consumer,_membar_acquire)
STRONG_ALIAS(_membar_consumer,_membar_acquire)
ATOMIC_OP_ALIAS(membar_enter,_membar_sync)
STRONG_ALIAS(_membar_enter,_membar_sync)
ATOMIC_OP_ALIAS(membar_exit,_membar_release)
STRONG_ALIAS(_membar_exit,_membar_release)