/* Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "apr_arch_atomic.h"
#include "apr_thread_mutex.h"

#ifdef USE_ATOMICS_GENERIC

#include <stdlib.h>

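/*
 * Generic fallback for the APR atomic API: when no native or compiler
 * atomics are available, each operation is serialized through a small
 * hash table of mutexes keyed by the target address (see mutex_hash()
 * below).  Without thread support the "atomics" degrade to plain,
 * unsynchronized memory accesses.
 */
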
#if APR_HAS_THREADS
#   define DECLARE_MUTEX_LOCKED(name, mem)  \
        apr_thread_mutex_t *name = mutex_hash(mem)
#   define MUTEX_UNLOCK(name)                                   \
        do {                                                    \
            if (apr_thread_mutex_unlock(name) != APR_SUCCESS)   \
                abort();                                        \
        } while (0)
#else
#   define DECLARE_MUTEX_LOCKED(name, mem)
#   define MUTEX_UNLOCK(name)
#   warning Be warned: using stubs for all atomic operations
#endif

#if APR_HAS_THREADS

static apr_thread_mutex_t **hash_mutex;

#define NUM_ATOMIC_HASH 7
/* shift by 2 to get rid of alignment issues */
#define ATOMIC_HASH(x) (unsigned int)(((unsigned long)(x)>>2)%(unsigned int)NUM_ATOMIC_HASH)
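/*
 * Illustrative example (hypothetical address): for mem == (void *)0x1008,
 * 0x1008 >> 2 == 1026 and 1026 % 7 == 4, so that location always maps to
 * hash_mutex[4].  Because apr_uint32_t objects are 4-byte aligned, the low
 * two bits of the address carry no information; shifting them out spreads
 * neighbouring words across different buckets.
 */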

/* Pool cleanup: forget the mutex table when the pool that owns it goes away. */
static apr_status_t atomic_cleanup(void *data)
{
    if (hash_mutex == data)
        hash_mutex = NULL;

    return APR_SUCCESS;
}
APR_DECLARE(apr_status_t) apr_atomic_init(apr_pool_t *p)
{
    int i;
    apr_status_t rv;

    if (hash_mutex != NULL)
        return APR_SUCCESS;

    hash_mutex = apr_palloc(p, sizeof(apr_thread_mutex_t*) * NUM_ATOMIC_HASH);
    apr_pool_cleanup_register(p, hash_mutex, atomic_cleanup,
                              apr_pool_cleanup_null);

    for (i = 0; i < NUM_ATOMIC_HASH; i++) {
        rv = apr_thread_mutex_create(&(hash_mutex[i]),
                                     APR_THREAD_MUTEX_DEFAULT, p);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }

    return apr__atomic_generic64_init(p);
}

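/*
 * Pick the mutex bucket for a given address and return it already locked;
 * callers release it with MUTEX_UNLOCK().  Hashing on the address means
 * every operation on the same location contends on the same mutex.
 */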
static APR_INLINE apr_thread_mutex_t *mutex_hash(volatile apr_uint32_t *mem)
{
    apr_thread_mutex_t *mutex = hash_mutex[ATOMIC_HASH(mem)];

    if (apr_thread_mutex_lock(mutex) != APR_SUCCESS) {
        abort();
    }

    return mutex;
}

#else

APR_DECLARE(apr_status_t) apr_atomic_init(apr_pool_t *p)
{
    return apr__atomic_generic64_init(p);
}

#endif /* APR_HAS_THREADS */

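/*
 * The modifying operations below all follow the same pattern: take the
 * per-address mutex, perform a plain read/modify/write, release the mutex.
 * apr_atomic_read32() is just a plain load.  When APR_HAS_THREADS is 0 the
 * mutex macros expand to nothing and the accesses are unsynchronized.
 */
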
APR_DECLARE(apr_uint32_t) apr_atomic_read32(volatile apr_uint32_t *mem)
{
    return *mem;
}

APR_DECLARE(void) apr_atomic_set32(volatile apr_uint32_t *mem, apr_uint32_t val)
{
    DECLARE_MUTEX_LOCKED(mutex, mem);

    *mem = val;

    MUTEX_UNLOCK(mutex);
}

APR_DECLARE(apr_uint32_t) apr_atomic_add32(volatile apr_uint32_t *mem, apr_uint32_t val)
{
    apr_uint32_t old_value;
    DECLARE_MUTEX_LOCKED(mutex, mem);

    old_value = *mem;
    *mem += val;

    MUTEX_UNLOCK(mutex);

    return old_value;
}

APR_DECLARE(void) apr_atomic_sub32(volatile apr_uint32_t *mem, apr_uint32_t val)
{
    DECLARE_MUTEX_LOCKED(mutex, mem);
    *mem -= val;
    MUTEX_UNLOCK(mutex);
}

APR_DECLARE(apr_uint32_t) apr_atomic_inc32(volatile apr_uint32_t *mem)
{
    return apr_atomic_add32(mem, 1);
}

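/* Unlike apr_atomic_add32(), this returns the *new* value, so the result is
 * zero exactly when the decrement brought the counter to zero (the usual
 * "last reference released" test). */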
APR_DECLARE(int) apr_atomic_dec32(volatile apr_uint32_t *mem)
{
    apr_uint32_t new;
    DECLARE_MUTEX_LOCKED(mutex, mem);

    (*mem)--;
    new = *mem;

    MUTEX_UNLOCK(mutex);

    return new;
}

APR_DECLARE(apr_uint32_t) apr_atomic_cas32(volatile apr_uint32_t *mem, apr_uint32_t with,
                              apr_uint32_t cmp)
{
    apr_uint32_t prev;
    DECLARE_MUTEX_LOCKED(mutex, mem);

    prev = *mem;
    if (prev == cmp) {
        *mem = with;
    }

    MUTEX_UNLOCK(mutex);

    return prev;
}
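
/*
 * Illustrative caller-side sketch (hypothetical "counter" variable): a
 * typical compare-and-swap retry loop keeps re-reading until the swap
 * succeeds, i.e. until the returned previous value equals the expected one:
 *
 *     apr_uint32_t old;
 *     do {
 *         old = apr_atomic_read32(&counter);
 *     } while (apr_atomic_cas32(&counter, old + 1, old) != old);
 */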

APR_DECLARE(apr_uint32_t) apr_atomic_xchg32(volatile apr_uint32_t *mem, apr_uint32_t val)
{
    apr_uint32_t prev;
    DECLARE_MUTEX_LOCKED(mutex, mem);

    prev = *mem;
    *mem = val;

    MUTEX_UNLOCK(mutex);

    return prev;
}

APR_DECLARE(void*) apr_atomic_casptr(volatile void **mem, void *with, const void *cmp)
{
    void *prev;
    /* Hash on the address, not the stored value, so that all operations
     * on the same location serialize through the same mutex. */
    DECLARE_MUTEX_LOCKED(mutex, (volatile apr_uint32_t *)mem);

    prev = *(void **)mem;
    if (prev == cmp) {
        *mem = with;
    }

    MUTEX_UNLOCK(mutex);

    return prev;
}

APR_DECLARE(void*) apr_atomic_xchgptr(volatile void **mem, void *with)
{
    void *prev;
    /* Hash on the address, not the stored value (see apr_atomic_casptr). */
    DECLARE_MUTEX_LOCKED(mutex, (volatile apr_uint32_t *)mem);

    prev = *(void **)mem;
    *mem = with;

    MUTEX_UNLOCK(mutex);

    return prev;
}

#endif /* USE_ATOMICS_GENERIC */