1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms
5 * of the Common Development and Distribution License
6 * (the "License").  You may not use this file except
7 * in compliance with the License.
8 *
9 * You can obtain a copy of the license at
10 * src/OPENSOLARIS.LICENSE
11 * or http://www.opensolaris.org/os/licensing.
12 * See the License for the specific language governing
13 * permissions and limitations under the License.
14 *
15 * When distributing Covered Code, include this CDDL
16 * HEADER in each file and include the License file at
17 * usr/src/OPENSOLARIS.LICENSE.  If applicable,
18 * add the following below this CDDL HEADER, with the
19 * fields enclosed by brackets "[]" replaced with your
20 * own identifying information: Portions Copyright [yyyy]
21 * [name of copyright owner]
22 *
23 * CDDL HEADER END
24 */
25
26/*
27 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
28 * Use is subject to license terms.
29 */
30
31/*
32 * The "cascade" test case is a multiprocess/multithread batten-passing model
33 * using lock primitives alone for synchronisation. Threads are arranged in a
34 * ring. Each thread has two locks of its own on which it blocks, and is able
35 * to manipulate the two locks belonging to the thread which follows it in the
36 * ring.
37 *
38 * The number of threads (nthreads) is specified by the generic libMicro -P/-T
39 * options. With nthreads == 1 (the default) the uncontended case can be timed.
40 *
41 * The main logic is generic and allows any simple blocking API to be tested.
42 * The API-specific component is clearly indicated.
43 */
44
45#include <unistd.h>
46#include <stdlib.h>
47#include <stdio.h>
48#include <pthread.h>
49#include <sys/mman.h>
50
51#include "libmicro.h"
52
typedef struct {
	int			ts_once;	/* nonzero once lock indices are assigned */
	int			ts_id;		/* our position in the ring */
	int			ts_us0;		/* our lock indices */
	int			ts_us1;
	int			ts_them0;	/* their lock indices */
	int			ts_them1;
} tsd_t;
61
static int			nthreads;	/* ring size: lm_optP * lm_optT */

/*
 * API-specific code BEGINS here
 */

static int			opts = 0;	/* -s: force PTHREAD_PROCESS_SHARED */
static int			nlocks;		/* nthreads * 2 (two locks per thread) */
static pthread_mutex_t	*locks;		/* mmap'd (MAP_SHARED) array of mutexes */
71
72int
73benchmark_init()
74{
75	lm_tsdsize = sizeof (tsd_t);
76
77	(void) sprintf(lm_optstr, "s");
78
79	lm_defN = "cscd_mutex";
80
81	(void) sprintf(lm_usage,
82	    "       [-s] (force PTHREAD_PROCESS_SHARED)\n"
83	    "notes: thread cascade using pthread_mutexes\n");
84
85	return (0);
86}
87
88/*ARGSUSED*/
89int
90benchmark_optswitch(int opt, char *optarg)
91{
92	switch (opt) {
93	case 's':
94		opts = 1;
95		break;
96	default:
97		return (-1);
98	}
99	return (0);
100}
101
102int
103benchmark_initrun()
104{
105	int			i;
106	int			e = 0;
107	pthread_mutexattr_t	ma;
108
109	nthreads = lm_optP * lm_optT;
110	nlocks = nthreads * 2;
111	/*LINTED*/
112	locks = (pthread_mutex_t *)mmap(NULL,
113	    nlocks * sizeof (pthread_mutex_t),
114	    PROT_READ | PROT_WRITE,
115	    MAP_ANON | MAP_SHARED,
116	    -1, 0L);
117	if (locks == MAP_FAILED) {
118		return (1);
119	}
120
121	(void) pthread_mutexattr_init(&ma);
122	if (lm_optP > 1 || opts) {
123		(void) pthread_mutexattr_setpshared(&ma,
124		    PTHREAD_PROCESS_SHARED);
125	} else {
126		(void) pthread_mutexattr_setpshared(&ma,
127		    PTHREAD_PROCESS_PRIVATE);
128	}
129
130	for (i = 0; i < nlocks; i++) {
131		(void) pthread_mutex_init(&locks[i], &ma);
132	}
133
134	return (e);
135}
136
137int
138block(int index)
139{
140	return (pthread_mutex_lock(&locks[index]) == -1);
141}
142
143int
144unblock(int index)
145{
146	return (pthread_mutex_unlock(&locks[index]) == -1);
147}
148
149/*
150 * API-specific code ENDS here
151 */
152
153int
154benchmark_initbatch(void *tsd)
155{
156	tsd_t			*ts = (tsd_t *)tsd;
157	int			e = 0;
158
159	if (ts->ts_once == 0) {
160		int		us, them;
161
162		us = (getpindex() * lm_optT) + gettindex();
163		them = (us + 1) % (lm_optP * lm_optT);
164
165		ts->ts_id = us;
166
167		/* lock index asignment for us and them */
168		ts->ts_us0 = (us * 2);
169		ts->ts_us1 = (us * 2) + 1;
170		if (us < nthreads - 1) {
171			/* straight-thru connection to them */
172			ts->ts_them0 = (them * 2);
173			ts->ts_them1 = (them * 2) + 1;
174		} else {
175			/* cross-over connection to them */
176			ts->ts_them0 = (them * 2) + 1;
177			ts->ts_them1 = (them * 2);
178		}
179
180		ts->ts_once = 1;
181	}
182
183	/* block their first move */
184	e += block(ts->ts_them0);
185
186	return (e);
187}
188
/*
 * One batch of the cascade.  The batten is passed around the ring by
 * strictly ordered lock/unlock operations; each loop iteration covers
 * two hand-offs (hence i += 2), alternating which of the two lock
 * pairs is used.  NOTE(review): the statement order below is what
 * guarantees forward progress -- do not reorder.
 */
int
benchmark(void *tsd, result_t *res)
{
	tsd_t			*ts = (tsd_t *)tsd;
	int			i;
	int			e = 0;	/* accumulated lock-primitive errors */

	/* wait to be unblocked (id == 0 will not block) */
	e += block(ts->ts_us0);

	for (i = 0; i < lm_optB; i += 2) {
		/* allow them to block us again */
		e += unblock(ts->ts_us0);

		/* block their next + 1 move */
		e += block(ts->ts_them1);

		/* unblock their next move */
		e += unblock(ts->ts_them0);

		/* wait for them to unblock us */
		e += block(ts->ts_us1);

		/* repeat with locks reversed */
		e += unblock(ts->ts_us1);
		e += block(ts->ts_them0);
		e += unblock(ts->ts_them1);
		e += block(ts->ts_us0);
	}

	/* finish batch with nothing blocked */
	e += unblock(ts->ts_them0);
	e += unblock(ts->ts_us0);

	/* i is lm_optB rounded up to an even count of operations */
	res->re_count = i;
	res->re_errors = e;

	return (0);
}
228