1/* Licensed to the Apache Software Foundation (ASF) under one or more
2 * contributor license agreements.  See the NOTICE file distributed with
3 * this work for additional information regarding copyright ownership.
4 * The ASF licenses this file to You under the Apache License, Version 2.0
5 * (the "License"); you may not use this file except in compliance with
6 * the License.  You may obtain a copy of the License at
7 *
8 *     http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "apr_arch_atomic.h"
18
19#ifdef USE_ATOMICS_S390
20
21APR_DECLARE(apr_status_t) apr_atomic_init(apr_pool_t *p)
22{
23#if defined (NEED_ATOMICS_GENERIC64)
24    return apr__atomic_generic64_init(p);
25#else
26    return APR_SUCCESS;
27#endif
28}
29
30APR_DECLARE(apr_uint32_t) apr_atomic_read32(volatile apr_uint32_t *mem)
31{
32    return *mem;
33}
34
35APR_DECLARE(void) apr_atomic_set32(volatile apr_uint32_t *mem, apr_uint32_t val)
36{
37    *mem = val;
38}
39
/* Atomically add 'val' to '*mem' with a COMPARE AND SWAP retry loop;
 * returns the value '*mem' held BEFORE the addition.
 *
 * Loop body: copy the last-seen value into a scratch register (lr),
 * form the sum with an unsigned add (alr), then attempt to publish it
 * with cs.  On mismatch, cs reloads the current '*mem' into 'prev' and
 * sets the condition code, so 'jl' branches back and the loop retries
 * against the fresh value.
 */
static APR_INLINE apr_uint32_t atomic_add(volatile apr_uint32_t *mem, apr_uint32_t val)
{
    apr_uint32_t prev = *mem, temp;

    asm volatile ("loop_%=:\n"
                  "    lr  %1,%0\n"    /* temp = prev                        */
                  "    alr %1,%3\n"    /* temp += val (add logical)          */
                  "    cs  %0,%1,%2\n" /* *mem == prev ? *mem = temp
                                        *              : prev = *mem         */
                  "    jl  loop_%=\n"  /* swap failed -> retry               */
                  : "+d" (prev), "+d" (temp), "=Q" (*mem)
                  : "d" (val), "m" (*mem)
                  : "cc", "memory");

    return prev;
}
55
56APR_DECLARE(apr_uint32_t) apr_atomic_add32(volatile apr_uint32_t *mem, apr_uint32_t val)
57{
58    return atomic_add(mem, val);
59}
60
61APR_DECLARE(apr_uint32_t) apr_atomic_inc32(volatile apr_uint32_t *mem)
62{
63    return atomic_add(mem, 1);
64}
65
/* Atomically subtract 'val' from '*mem' with a COMPARE AND SWAP retry
 * loop; returns the NEW value of '*mem' (unlike atomic_add, which
 * returns the old one) -- apr_atomic_dec32 relies on this to report
 * whether the counter has reached zero.
 *
 * Loop body: copy the last-seen value into a scratch register (lr),
 * form the difference with an unsigned subtract (slr), then attempt
 * to publish it with cs.  On mismatch, cs reloads the current '*mem'
 * into 'prev' and 'jl' retries with the fresh value.
 */
static APR_INLINE apr_uint32_t atomic_sub(volatile apr_uint32_t *mem, apr_uint32_t val)
{
    apr_uint32_t prev = *mem, temp;

    asm volatile ("loop_%=:\n"
                  "    lr  %1,%0\n"    /* temp = prev                        */
                  "    slr %1,%3\n"    /* temp -= val (subtract logical)     */
                  "    cs  %0,%1,%2\n" /* *mem == prev ? *mem = temp
                                        *              : prev = *mem         */
                  "    jl  loop_%=\n"  /* swap failed -> retry               */
                  : "+d" (prev), "+d" (temp), "=Q" (*mem)
                  : "d" (val), "m" (*mem)
                  : "cc", "memory");

    return temp;
}
81
82APR_DECLARE(void) apr_atomic_sub32(volatile apr_uint32_t *mem, apr_uint32_t val)
83{
84    atomic_sub(mem, val);
85}
86
87APR_DECLARE(int) apr_atomic_dec32(volatile apr_uint32_t *mem)
88{
89    return atomic_sub(mem, 1);
90}
91
/* Atomic 32-bit compare-and-swap: if '*mem' equals 'cmp', store 'with'
 * into '*mem'.  Returns the value '*mem' held before the operation
 * (equal to 'cmp' iff the swap succeeded).
 *
 * A single cs suffices with no retry loop: on mismatch the instruction
 * loads the current '*mem' into 'cmp', which then becomes the return
 * value.
 */
APR_DECLARE(apr_uint32_t) apr_atomic_cas32(volatile apr_uint32_t *mem, apr_uint32_t with,
                                           apr_uint32_t cmp)
{
    asm volatile ("    cs  %0,%2,%1\n" /* *mem == cmp ? *mem = with
                                        *             : cmp = *mem           */
                  : "+d" (cmp), "=Q" (*mem)
                  : "d" (with), "m" (*mem)
                  : "cc", "memory");

    return cmp;
}
102
/* Atomically replace '*mem' with 'val'; returns the previous value.
 *
 * cs only stores when '*mem' still equals 'prev'; on mismatch it
 * refreshes 'prev' with the current value and 'jl' branches back, so
 * the loop repeats until the exchange lands.
 */
APR_DECLARE(apr_uint32_t) apr_atomic_xchg32(volatile apr_uint32_t *mem, apr_uint32_t val)
{
    apr_uint32_t prev = *mem;

    asm volatile ("loop_%=:\n"
                  "    cs  %0,%2,%1\n" /* *mem == prev ? *mem = val
                                        *              : prev = *mem         */
                  "    jl  loop_%=\n"  /* swap failed -> retry               */
                  : "+d" (prev), "=Q" (*mem)
                  : "d" (val), "m" (*mem)
                  : "cc", "memory");

    return prev;
}
116
/* Pointer-sized compare-and-swap: if '*mem' equals 'cmp', store 'with'
 * into '*mem'.  Returns the value '*mem' held before the operation
 * (equal to 'cmp' iff the swap succeeded).
 *
 * Selects the 32-bit cs or the 64-bit csg instruction at compile time
 * to match the pointer width; no retry loop is needed, since on
 * mismatch the instruction loads the current '*mem' into 'prev'.
 */
APR_DECLARE(void*) apr_atomic_casptr(volatile void **mem, void *with, const void *cmp)
{
    void *prev = (void *) cmp;
#if APR_SIZEOF_VOIDP == 4
    asm volatile ("    cs  %0,%2,%1\n" /* 32-bit compare and swap */
                  : "+d" (prev), "=Q" (*mem)
                  : "d" (with), "m" (*mem)
                  : "cc", "memory");
#elif APR_SIZEOF_VOIDP == 8
    asm volatile ("    csg %0,%2,%1\n" /* 64-bit compare and swap */
                  : "+d" (prev), "=Q" (*mem)
                  : "d" (with), "m" (*mem)
                  : "cc", "memory");
#else
#error APR_SIZEOF_VOIDP value not supported
#endif
    return prev;
}
135
/* Atomically replace the pointer '*mem' with 'with'; returns the
 * previous value of '*mem'.
 *
 * Same retry-loop pattern as apr_atomic_xchg32, selecting cs (32-bit)
 * or csg (64-bit) at compile time to match the pointer width: on
 * mismatch the instruction refreshes 'prev' with the current value and
 * 'jl' retries until the exchange lands.
 */
APR_DECLARE(void*) apr_atomic_xchgptr(volatile void **mem, void *with)
{
    void *prev = (void *) *mem;
#if APR_SIZEOF_VOIDP == 4
    asm volatile ("loop_%=:\n"
                  "    cs  %0,%2,%1\n" /* *mem == prev ? *mem = with
                                        *              : prev = *mem         */
                  "    jl  loop_%=\n"  /* swap failed -> retry               */
                  : "+d" (prev), "=Q" (*mem)
                  : "d" (with), "m" (*mem)
                  : "cc", "memory");
#elif APR_SIZEOF_VOIDP == 8
    asm volatile ("loop_%=:\n"
                  "    csg %0,%2,%1\n" /* 64-bit variant of the same loop    */
                  "    jl  loop_%=\n"
                  : "+d" (prev), "=Q" (*mem)
                  : "d" (with), "m" (*mem)
                  : "cc", "memory");
#else
#error APR_SIZEOF_VOIDP value not supported
#endif
    return prev;
}
158
159#endif /* USE_ATOMICS_S390 */
160