/*	$NetBSD: stats.c,v 1.1.1.1 2009/12/13 16:54:17 kardel Exp $	*/

/*
 * Copyright (C) 2009  Internet Systems Consortium, Inc. ("ISC")
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/* Id: stats.c,v 1.3.6.2 2009/01/29 23:47:44 tbox Exp */

/*! \file */

#include <config.h>

#include <string.h>

#include <isc/atomic.h>
#include <isc/buffer.h>
#include <isc/magic.h>
#include <isc/mem.h>
#include <isc/platform.h>
#include <isc/print.h>
#include <isc/rwlock.h>
#include <isc/stats.h>
#include <isc/util.h>

#define ISC_STATS_MAGIC			ISC_MAGIC('S', 't', 'a', 't')
#define ISC_STATS_VALID(x)		ISC_MAGIC_VALID(x, ISC_STATS_MAGIC)

#ifndef ISC_STATS_USEMULTIFIELDS
#if defined(ISC_RWLOCK_USEATOMIC) && defined(ISC_PLATFORM_HAVEXADD) && \
	!defined(ISC_PLATFORM_HAVEXADDQ)
#define ISC_STATS_USEMULTIFIELDS 1
#else
#define ISC_STATS_USEMULTIFIELDS 0
#endif
#endif	/* ISC_STATS_USEMULTIFIELDS */

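/*
 * When atomic operations are usable but only a 32-bit atomic add is
 * available (no ISC_PLATFORM_HAVEXADDQ), a 64-bit counter is represented
 * as two 32-bit fields; incrementcounter() and decrementcounter() below
 * propagate carries and borrows between the fields by hand.
 */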
#if ISC_STATS_USEMULTIFIELDS
typedef struct {
	isc_uint32_t hi;
	isc_uint32_t lo;
} isc_stat_t;
#else
typedef isc_uint64_t isc_stat_t;
#endif
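/*
 * Both representations occupy 8 bytes, so sizeof(isc_stat_t) equals
 * sizeof(isc_uint64_t) in either configuration.
 */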

struct isc_stats {
	/*% Unlocked */
	unsigned int	magic;
	isc_mem_t	*mctx;
	int		ncounters;

	isc_mutex_t	lock;
	unsigned int	references; /* locked by lock */

	/*%
	 * Protected by counterlock, or unprotected if an efficient rwlock
	 * is not available.
	 */
#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_t	counterlock;
#endif
	isc_stat_t	*counters;

	/*%
	 * We don't want to lock the counters while dumping, so we first
	 * copy the current counter values into this preallocated buffer.
	 * It is allocated when the stats structure is created so that a
	 * dump cannot fail due to memory allocation failure.
	 * XXX: this approach is wasteful for a non-threaded build, where
	 * the extra memory and copy overhead could be avoided.  We prefer
	 * simplicity here, however, on the assumption that dumping is a
	 * rare operation.
	 */
	isc_uint64_t	*copiedcounters;
};

static isc_result_t
create_stats(isc_mem_t *mctx, int ncounters, isc_stats_t **statsp) {
	isc_stats_t *stats;
	isc_result_t result = ISC_R_SUCCESS;

	REQUIRE(statsp != NULL && *statsp == NULL);

	stats = isc_mem_get(mctx, sizeof(*stats));
	if (stats == NULL)
		return (ISC_R_NOMEMORY);

	result = isc_mutex_init(&stats->lock);
	if (result != ISC_R_SUCCESS)
		goto clean_stats;

	stats->counters = isc_mem_get(mctx, sizeof(isc_stat_t) * ncounters);
	if (stats->counters == NULL) {
		result = ISC_R_NOMEMORY;
		goto clean_mutex;
	}
	stats->copiedcounters = isc_mem_get(mctx,
					    sizeof(isc_uint64_t) * ncounters);
	if (stats->copiedcounters == NULL) {
		result = ISC_R_NOMEMORY;
		goto clean_counters;
	}

#ifdef ISC_RWLOCK_USEATOMIC
	result = isc_rwlock_init(&stats->counterlock, 0, 0);
	if (result != ISC_R_SUCCESS)
		goto clean_copiedcounters;
#endif

	stats->references = 1;
	memset(stats->counters, 0, sizeof(isc_stat_t) * ncounters);
	stats->mctx = NULL;
	isc_mem_attach(mctx, &stats->mctx);
	stats->ncounters = ncounters;
	stats->magic = ISC_STATS_MAGIC;

	*statsp = stats;

	return (result);

	/*
	 * Error cleanup.  The labels are ordered so that each failure case
	 * falls through and frees everything allocated before it: an
	 * rwlock-init failure frees copiedcounters, then counters, and so
	 * on down to the stats structure itself.  (Note that
	 * copiedcounters is freed with the same sizeof it was allocated
	 * with, and that a failed copiedcounters allocation no longer
	 * falls into a put of the NULL pointer.)
	 */
#ifdef ISC_RWLOCK_USEATOMIC
clean_copiedcounters:
	isc_mem_put(mctx, stats->copiedcounters,
		    sizeof(isc_uint64_t) * ncounters);
#endif

clean_counters:
	isc_mem_put(mctx, stats->counters, sizeof(isc_stat_t) * ncounters);

clean_mutex:
	DESTROYLOCK(&stats->lock);

clean_stats:
	isc_mem_put(mctx, stats, sizeof(*stats));

	return (result);
}

void
isc_stats_attach(isc_stats_t *stats, isc_stats_t **statsp) {
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(statsp != NULL && *statsp == NULL);

	LOCK(&stats->lock);
	stats->references++;
	UNLOCK(&stats->lock);

	*statsp = stats;
}

void
isc_stats_detach(isc_stats_t **statsp) {
	isc_stats_t *stats;
	unsigned int references;

	REQUIRE(statsp != NULL && ISC_STATS_VALID(*statsp));

	stats = *statsp;
	*statsp = NULL;

	/*
	 * Capture the new reference count while still holding the lock;
	 * reading stats->references after UNLOCK() would race with a
	 * concurrent detach and could touch freed memory.
	 */
	LOCK(&stats->lock);
	stats->references--;
	references = stats->references;
	UNLOCK(&stats->lock);

	if (references == 0) {
		isc_mem_put(stats->mctx, stats->copiedcounters,
			    sizeof(isc_uint64_t) * stats->ncounters);
		isc_mem_put(stats->mctx, stats->counters,
			    sizeof(isc_stat_t) * stats->ncounters);
		DESTROYLOCK(&stats->lock);
#ifdef ISC_RWLOCK_USEATOMIC
		isc_rwlock_destroy(&stats->counterlock);
#endif
		isc_mem_putanddetach(&stats->mctx, stats, sizeof(*stats));
	}
}

int
isc_stats_ncounters(isc_stats_t *stats) {
	REQUIRE(ISC_STATS_VALID(stats));

	return (stats->ncounters);
}

static inline void
incrementcounter(isc_stats_t *stats, int counter) {
	isc_int32_t prev;

#ifdef ISC_RWLOCK_USEATOMIC
	/*
	 * Lock inversion: updaters take the "read" (shared) lock, so many
	 * threads can modify counters concurrently; the atomic operation
	 * protects each individual update.  copy_counters() takes the
	 * "write" (exclusive) lock to get a consistent snapshot.
	 */
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_read);
#endif

#if ISC_STATS_USEMULTIFIELDS
	prev = isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].lo, 1);
	/*
	 * If the lower 32-bit field overflows, increment the higher field.
	 * Note that it's *theoretically* possible that the lower field
	 * overflows again before the higher field is incremented.  It
	 * doesn't matter, however, because we don't read the value until
	 * copy_counters() is called, where the whole process is protected
	 * by the write (exclusive) lock.
	 */
	if (prev == (isc_int32_t)0xffffffff)
		isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].hi, 1);
#elif defined(ISC_PLATFORM_HAVEXADDQ)
	UNUSED(prev);
	isc_atomic_xaddq((isc_int64_t *)&stats->counters[counter], 1);
#else
	UNUSED(prev);
	stats->counters[counter]++;
#endif

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
#endif
}

static inline void
decrementcounter(isc_stats_t *stats, int counter) {
	isc_int32_t prev;

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_read);
#endif

#if ISC_STATS_USEMULTIFIELDS
	prev = isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].lo, -1);
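	/*
	 * If the lower 32-bit field underflows (prev == 0 means it is
	 * about to wrap from 0 to 0xffffffff), borrow one from the
	 * higher field.
	 */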
	if (prev == 0)
		isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].hi,
				-1);
#elif defined(ISC_PLATFORM_HAVEXADDQ)
	UNUSED(prev);
	isc_atomic_xaddq((isc_int64_t *)&stats->counters[counter], -1);
#else
	UNUSED(prev);
	stats->counters[counter]--;
#endif

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
#endif
}

static void
copy_counters(isc_stats_t *stats) {
	int i;

#ifdef ISC_RWLOCK_USEATOMIC
	/*
	 * Take the "write" (exclusive) lock before reading the counters:
	 * it excludes all updaters, which hold the "read" lock, so the
	 * snapshot below is consistent.
	 */
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_write);
#endif

#if ISC_STATS_USEMULTIFIELDS
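	/*
	 * Reassemble each 64-bit value from its two 32-bit halves.
	 */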
	for (i = 0; i < stats->ncounters; i++) {
		stats->copiedcounters[i] =
				(isc_uint64_t)(stats->counters[i].hi) << 32 |
				stats->counters[i].lo;
	}
#else
	UNUSED(i);
	memcpy(stats->copiedcounters, stats->counters,
	       stats->ncounters * sizeof(isc_stat_t));
#endif

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_write);
#endif
}

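/*
 * Typical lifecycle of a counter set (hypothetical caller, sketch only;
 * "mctx", "MY_NCOUNTERS" and "MY_COUNTER_ID" are illustrative names):
 *
 *	isc_stats_t *stats = NULL;
 *	isc_result_t result;
 *
 *	result = isc_stats_create(mctx, &stats, MY_NCOUNTERS);
 *	if (result == ISC_R_SUCCESS) {
 *		isc_stats_increment(stats, MY_COUNTER_ID);
 *		...
 *		isc_stats_detach(&stats);
 *	}
 */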
isc_result_t
isc_stats_create(isc_mem_t *mctx, isc_stats_t **statsp, int ncounters) {
	REQUIRE(statsp != NULL && *statsp == NULL);

	return (create_stats(mctx, ncounters, statsp));
}

void
isc_stats_increment(isc_stats_t *stats, isc_statscounter_t counter) {
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(counter < stats->ncounters);

	incrementcounter(stats, (int)counter);
}

void
isc_stats_decrement(isc_stats_t *stats, isc_statscounter_t counter) {
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(counter < stats->ncounters);

	decrementcounter(stats, (int)counter);
}

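/*
 * The dumper callback receives one (counter, value) pair per counter.  A
 * minimal sketch of a caller (hypothetical; "print_counter" and the use of
 * printf are illustrative only):
 *
 *	static void
 *	print_counter(isc_statscounter_t counter, isc_uint64_t val,
 *		      void *arg) {
 *		UNUSED(arg);
 *		printf("counter %d = %lu\n", (int)counter,
 *		       (unsigned long)val);  (may truncate if long is 32-bit)
 *	}
 *
 *	isc_stats_dump(stats, print_counter, NULL, ISC_STATSDUMP_VERBOSE);
 *
 * Without ISC_STATSDUMP_VERBOSE, counters whose value is zero are skipped.
 */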
void
isc_stats_dump(isc_stats_t *stats, isc_stats_dumper_t dump_fn,
	       void *arg, unsigned int options)
{
	int i;

	REQUIRE(ISC_STATS_VALID(stats));

	copy_counters(stats);

	for (i = 0; i < stats->ncounters; i++) {
		if ((options & ISC_STATSDUMP_VERBOSE) == 0 &&
		    stats->copiedcounters[i] == 0)
			continue;
		dump_fn((isc_statscounter_t)i, stats->copiedcounters[i], arg);
	}
}