/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifndef _NETINET_SCTP_LOCK_BSD_H_
#define _NETINET_SCTP_LOCK_BSD_H_

/*
 * General locking concepts: The goal of our locking is to provide
 * consistency while minimizing overhead. We attempt to use non-recursive
 * locks, which are supposed to be quite inexpensive. In order to do this,
 * most functions are not aware of locking. Once we have a TCB we lock it
 * and unlock it when we are through. This means that the TCB lock is
 * kind-of a "global" lock when working on an association. Caution must be
 * used when acquiring a TCB_LOCK, since if we recurse we deadlock.
 *
 * Most other locks (INP and INFO) attempt to localize the locking, i.e. we
 * try to contain the lock and unlock within the function that needs to
 * lock it. This sometimes means we do extra locks and unlocks and lose a
 * bit of efficiency, but if the performance claims about non-recursive
 * locks are true this should not be a problem. One issue that arises with
 * this lock-only-when-needed approach is implicit association setup. If,
 * at the time I look up an association, the TCB comes back NULL, some
 * other processor could create the association before I get around to
 * creating it myself. This is what the CREATE lock on the endpoint is for.
 * Places where we implicitly create an association, OR simply create one
 * (the connect call), acquire the CREATE_INP lock. This assures us that
 * while we look up the INP and INFO, any other creator doing the same
 * lookup is gated so the two can synchronize. The CREATE_INP lock is thus
 * another one we must use with extreme caution to make sure we don't hit a
 * re-entrancy issue.
 */
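
/*
 * Illustrative sketch only (not actual stack code) of how the CREATE lock
 * is meant to be used on an implicit-setup path.  The helpers
 * lookup_assoc() and create_assoc() are hypothetical stand-ins for the
 * real lookup/creation routines; the point is the re-check under the
 * CREATE lock and the TCB lock held for the duration of association work:
 *
 *	stcb = lookup_assoc(inp, addr);			// returns TCB locked
 *	if (stcb == NULL) {
 *		SCTP_ASOC_CREATE_LOCK(inp);
 *		// Re-check: another CPU may have created the association
 *		// between our lookup and taking the CREATE lock.
 *		stcb = lookup_assoc(inp, addr);
 *		if (stcb == NULL)
 *			stcb = create_assoc(inp, addr);	// returns TCB locked
 *		SCTP_ASOC_CREATE_UNLOCK(inp);
 *	}
 *	if (stcb != NULL) {
 *		// ... work on the association under its "global" lock ...
 *		SCTP_TCB_UNLOCK(stcb);
 *	}
 */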

/*
 * When working with the global SCTP lists we lock and unlock the INP_INFO
 * lock. So when we go to look up an association we do a
 * SCTP_INP_INFO_RLOCK(), and when we want to add a new association to the
 * SCTP_BASE_INFO() lists we do a SCTP_INP_INFO_WLOCK().
 */
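
/*
 * Illustrative sketch only (not actual stack code): reads of the global
 * tables take the INFO lock shared, insertions take it exclusively.  The
 * hash-chain walk and insertion below are simplified placeholders:
 *
 *	// Looking something up in the global tables: shared is enough.
 *	SCTP_INP_INFO_RLOCK();
 *	// ... walk a SCTP_BASE_INFO() hash chain, bump a refcount ...
 *	SCTP_INP_INFO_RUNLOCK();
 *
 *	// Inserting a new endpoint/association into the global lists:
 *	SCTP_INP_INFO_WLOCK();
 *	// ... LIST_INSERT_HEAD() into the SCTP_BASE_INFO() lists ...
 *	SCTP_INP_INFO_WUNLOCK();
 */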

#define SCTP_IPI_COUNT_INIT()

#define SCTP_STATLOG_INIT_LOCK()
#define SCTP_STATLOG_DESTROY()
#define SCTP_STATLOG_LOCK()
#define SCTP_STATLOG_UNLOCK()

#define SCTP_INP_INFO_LOCK_INIT() do {					\
	rw_init(&SCTP_BASE_INFO(ipi_ep_mtx), "sctp-info");		\
} while (0)

#define SCTP_INP_INFO_LOCK_DESTROY() do {				\
	if (rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx))) {			\
		rw_wunlock(&SCTP_BASE_INFO(ipi_ep_mtx));		\
	}								\
	rw_destroy(&SCTP_BASE_INFO(ipi_ep_mtx));			\
} while (0)

#define SCTP_INP_INFO_RLOCK() do {					\
	rw_rlock(&SCTP_BASE_INFO(ipi_ep_mtx));				\
} while (0)

#define SCTP_INP_INFO_WLOCK() do {					\
	rw_wlock(&SCTP_BASE_INFO(ipi_ep_mtx));				\
} while (0)

#define SCTP_INP_INFO_RUNLOCK() do {					\
	rw_runlock(&SCTP_BASE_INFO(ipi_ep_mtx));			\
} while (0)

#define SCTP_INP_INFO_WUNLOCK() do {					\
	rw_wunlock(&SCTP_BASE_INFO(ipi_ep_mtx));			\
} while (0)

#define SCTP_MCORE_QLOCK_INIT(cpstr) do {				\
	mtx_init(&(cpstr)->que_mtx, "sctp-mcore_queue", "queue_lock",	\
	         MTX_DEF | MTX_DUPOK);					\
} while (0)

#define SCTP_MCORE_QDESTROY(cpstr) do {					\
	if (mtx_owned(&(cpstr)->que_mtx)) {				\
		mtx_unlock(&(cpstr)->que_mtx);				\
	}								\
	mtx_destroy(&(cpstr)->que_mtx);					\
} while (0)

#define SCTP_MCORE_QLOCK(cpstr) do {					\
	mtx_lock(&(cpstr)->que_mtx);					\
} while (0)

#define SCTP_MCORE_QUNLOCK(cpstr) do {					\
	mtx_unlock(&(cpstr)->que_mtx);					\
} while (0)

#define SCTP_MCORE_LOCK_INIT(cpstr) do {				\
	mtx_init(&(cpstr)->core_mtx, "sctp-cpulck", "cpu_proc_lock",	\
	         MTX_DEF | MTX_DUPOK);					\
} while (0)

#define SCTP_MCORE_DESTROY(cpstr) do {					\
	if (mtx_owned(&(cpstr)->core_mtx)) {				\
		mtx_unlock(&(cpstr)->core_mtx);				\
	}								\
	mtx_destroy(&(cpstr)->core_mtx);				\
} while (0)

#define SCTP_MCORE_LOCK(cpstr) do {					\
	mtx_lock(&(cpstr)->core_mtx);					\
} while (0)

#define SCTP_MCORE_UNLOCK(cpstr) do {					\
	mtx_unlock(&(cpstr)->core_mtx);					\
} while (0)

#define SCTP_IPI_ADDR_INIT() do {					\
	rw_init(&SCTP_BASE_INFO(ipi_addr_mtx), "sctp-addr");		\
} while (0)

#define SCTP_IPI_ADDR_DESTROY() do {					\
	if (rw_wowned(&SCTP_BASE_INFO(ipi_addr_mtx))) {			\
		rw_wunlock(&SCTP_BASE_INFO(ipi_addr_mtx));		\
	}								\
	rw_destroy(&SCTP_BASE_INFO(ipi_addr_mtx));			\
} while (0)

#define SCTP_IPI_ADDR_RLOCK() do {					\
	rw_rlock(&SCTP_BASE_INFO(ipi_addr_mtx));			\
} while (0)

#define SCTP_IPI_ADDR_WLOCK() do {					\
	rw_wlock(&SCTP_BASE_INFO(ipi_addr_mtx));			\
} while (0)

#define SCTP_IPI_ADDR_RUNLOCK() do {					\
	rw_runlock(&SCTP_BASE_INFO(ipi_addr_mtx));			\
} while (0)

#define SCTP_IPI_ADDR_WUNLOCK() do {					\
	rw_wunlock(&SCTP_BASE_INFO(ipi_addr_mtx));			\
} while (0)

#define SCTP_IPI_ADDR_LOCK_ASSERT() do {				\
	rw_assert(&SCTP_BASE_INFO(ipi_addr_mtx), RA_LOCKED);		\
} while (0)

#define SCTP_IPI_ADDR_WLOCK_ASSERT() do {				\
	rw_assert(&SCTP_BASE_INFO(ipi_addr_mtx), RA_WLOCKED);		\
} while (0)

#define SCTP_IPI_ITERATOR_WQ_INIT() do {				\
	mtx_init(&sctp_it_ctl.ipi_iterator_wq_mtx, "sctp-it-wq",	\
	         "sctp_it_wq", MTX_DEF);				\
} while (0)

#define SCTP_IPI_ITERATOR_WQ_DESTROY() do {				\
	mtx_destroy(&sctp_it_ctl.ipi_iterator_wq_mtx);			\
} while (0)

#define SCTP_IPI_ITERATOR_WQ_LOCK() do {				\
	mtx_lock(&sctp_it_ctl.ipi_iterator_wq_mtx);			\
} while (0)

#define SCTP_IPI_ITERATOR_WQ_UNLOCK() do {				\
	mtx_unlock(&sctp_it_ctl.ipi_iterator_wq_mtx);			\
} while (0)

#define SCTP_IP_PKTLOG_INIT() do {					\
	mtx_init(&SCTP_BASE_INFO(ipi_pktlog_mtx), "sctp-pktlog",	\
	         "packetlog", MTX_DEF);					\
} while (0)

#define SCTP_IP_PKTLOG_DESTROY() do {					\
	mtx_destroy(&SCTP_BASE_INFO(ipi_pktlog_mtx));			\
} while (0)

#define SCTP_IP_PKTLOG_LOCK() do {					\
	mtx_lock(&SCTP_BASE_INFO(ipi_pktlog_mtx));			\
} while (0)

#define SCTP_IP_PKTLOG_UNLOCK() do {					\
	mtx_unlock(&SCTP_BASE_INFO(ipi_pktlog_mtx));			\
} while (0)

/*
 * The INP locks are used for locking an SCTP endpoint. For example, if we
 * want to change something at the endpoint level, such as the random_store
 * or the cookie secrets, we lock at the INP level.
 */
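
/*
 * Illustrative sketch only (not actual stack code): an endpoint-level
 * change is made under the INP write lock; the commented bodies stand in
 * for whatever endpoint state is being touched:
 *
 *	SCTP_INP_WLOCK(inp);
 *	// ... regenerate a cookie secret / update endpoint-wide state ...
 *	SCTP_INP_WUNLOCK(inp);
 *
 *	// Read-mostly paths take the lock in "read" mode instead (note
 *	// that on this platform both map to the same mutex, inp_mtx):
 *	SCTP_INP_RLOCK(inp);
 *	// ... inspect endpoint flags/features ...
 *	SCTP_INP_RUNLOCK(inp);
 */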

#define SCTP_INP_READ_INIT(_inp) do {					\
	mtx_init(&(_inp)->inp_rdata_mtx, "sctp-read", "inpr",		\
	         MTX_DEF | MTX_DUPOK);					\
} while (0)

#define SCTP_INP_READ_DESTROY(_inp) do {				\
	mtx_destroy(&(_inp)->inp_rdata_mtx);				\
} while (0)

#define SCTP_INP_READ_LOCK(_inp) do {					\
	mtx_lock(&(_inp)->inp_rdata_mtx);				\
} while (0)

#define SCTP_INP_READ_UNLOCK(_inp) do {					\
	mtx_unlock(&(_inp)->inp_rdata_mtx);				\
} while (0)

#define SCTP_INP_LOCK_INIT(_inp) do {					\
	mtx_init(&(_inp)->inp_mtx, "sctp-inp", "inp",			\
	         MTX_DEF | MTX_DUPOK);					\
} while (0)

#define SCTP_INP_LOCK_DESTROY(_inp) do {				\
	mtx_destroy(&(_inp)->inp_mtx);					\
} while (0)

#define SCTP_INP_LOCK_CONTENDED(_inp)					\
	((_inp)->inp_mtx.mtx_lock & MTX_CONTESTED)

#define SCTP_INP_READ_CONTENDED(_inp)					\
	((_inp)->inp_rdata_mtx.mtx_lock & MTX_CONTESTED)

#ifdef SCTP_LOCK_LOGGING
#define SCTP_INP_RLOCK(_inp) do {					\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);		\
	mtx_lock(&(_inp)->inp_mtx);					\
} while (0)

#define SCTP_INP_WLOCK(_inp) do {					\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP);		\
	mtx_lock(&(_inp)->inp_mtx);					\
} while (0)
#else
#define SCTP_INP_RLOCK(_inp) do {					\
	mtx_lock(&(_inp)->inp_mtx);					\
} while (0)

#define SCTP_INP_WLOCK(_inp) do {					\
	mtx_lock(&(_inp)->inp_mtx);					\
} while (0)
#endif

#define SCTP_INP_RUNLOCK(_inp) do {					\
	mtx_unlock(&(_inp)->inp_mtx);					\
} while (0)

#define SCTP_INP_WUNLOCK(_inp) do {					\
	mtx_unlock(&(_inp)->inp_mtx);					\
} while (0)

#define SCTP_INP_RLOCK_ASSERT(_inp) do {				\
	KASSERT(mtx_owned(&(_inp)->inp_mtx),				\
	        ("Don't own INP read lock"));				\
} while (0)

#define SCTP_INP_WLOCK_ASSERT(_inp) do {				\
	KASSERT(mtx_owned(&(_inp)->inp_mtx),				\
	        ("Don't own INP write lock"));				\
} while (0)

#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
#define SCTP_INP_DECR_REF(_inp) atomic_add_int(&((_inp)->refcount), -1)

#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) do {				\
	mtx_init(&(_inp)->inp_create_mtx, "sctp-create", "inp_create",	\
	         MTX_DEF | MTX_DUPOK);					\
} while (0)

#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) do {			\
	mtx_destroy(&(_inp)->inp_create_mtx);				\
} while (0)

#ifdef SCTP_LOCK_LOGGING
#define SCTP_ASOC_CREATE_LOCK(_inp) do {				\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE);	\
	mtx_lock(&(_inp)->inp_create_mtx);				\
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) do {				\
	mtx_lock(&(_inp)->inp_create_mtx);				\
} while (0)
#endif

#define SCTP_ASOC_CREATE_UNLOCK(_inp) do {				\
	mtx_unlock(&(_inp)->inp_create_mtx);				\
} while (0)

#define SCTP_ASOC_CREATE_LOCK_CONTENDED(_inp)				\
	((_inp)->inp_create_mtx.mtx_lock & MTX_CONTESTED)

#define SCTP_TCB_SEND_LOCK_INIT(_tcb) do {				\
	mtx_init(&(_tcb)->tcb_send_mtx, "sctp-send-tcb", "tcbs",	\
	         MTX_DEF | MTX_DUPOK);					\
} while (0)

#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb) do {				\
	mtx_destroy(&(_tcb)->tcb_send_mtx);				\
} while (0)

#define SCTP_TCB_SEND_LOCK(_tcb) do {					\
	mtx_lock(&(_tcb)->tcb_send_mtx);				\
} while (0)

#define SCTP_TCB_SEND_UNLOCK(_tcb) do {					\
	mtx_unlock(&(_tcb)->tcb_send_mtx);				\
} while (0)

/*
 * For the majority of things (once we have found the association) we will
 * lock the actual association mutex. This protects all the association
 * level queues and streams and such. We still need to lock the socket
 * layer when we stuff data up into the receiving sb_mb, i.e. we need an
 * extra SOCKBUF_LOCK(&so->so_rcv) even though the association is locked.
 */
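
/*
 * Illustrative sketch only (not actual stack code) of the locking order
 * when received data is handed to the socket buffer; the append step is a
 * simplified placeholder for the real delivery path:
 *
 *	SCTP_TCB_LOCK_ASSERT(stcb);		// association is locked
 *	SOCKBUF_LOCK(&so->so_rcv);		// extra socket-buffer lock
 *	// ... append the received mbuf chain to so->so_rcv (sb_mb) ...
 *	SOCKBUF_UNLOCK(&so->so_rcv);
 *	// (so is the endpoint's socket, obtained elsewhere)
 */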

#define SCTP_TCB_LOCK_INIT(_tcb) do {					\
	mtx_init(&(_tcb)->tcb_mtx, "sctp-tcb", "tcb",			\
	         MTX_DEF | MTX_DUPOK);					\
} while (0)

#define SCTP_TCB_LOCK_DESTROY(_tcb) do {				\
	mtx_destroy(&(_tcb)->tcb_mtx);					\
} while (0)

#ifdef SCTP_LOCK_LOGGING
#define SCTP_TCB_LOCK(_tcb) do {					\
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB);	\
	mtx_lock(&(_tcb)->tcb_mtx);					\
} while (0)
#else
#define SCTP_TCB_LOCK(_tcb) do {					\
	mtx_lock(&(_tcb)->tcb_mtx);					\
} while (0)
#endif

#define SCTP_TCB_TRYLOCK(_tcb)						\
	mtx_trylock(&(_tcb)->tcb_mtx)

#define SCTP_TCB_UNLOCK(_tcb) do {					\
	mtx_unlock(&(_tcb)->tcb_mtx);					\
} while (0)

#define SCTP_TCB_UNLOCK_IFOWNED(_tcb) do {				\
	if (mtx_owned(&(_tcb)->tcb_mtx))				\
		mtx_unlock(&(_tcb)->tcb_mtx);				\
} while (0)

#define SCTP_TCB_LOCK_ASSERT(_tcb) do {					\
	KASSERT(mtx_owned(&(_tcb)->tcb_mtx),				\
	        ("Don't own TCB lock"));				\
} while (0)

#define SCTP_ITERATOR_LOCK_INIT() do {					\
	mtx_init(&sctp_it_ctl.it_mtx, "sctp-it", "iterator", MTX_DEF);	\
} while (0)

#define SCTP_ITERATOR_LOCK_DESTROY() do {				\
	mtx_destroy(&sctp_it_ctl.it_mtx);				\
} while (0)

#define SCTP_ITERATOR_LOCK() do {					\
	KASSERT(!mtx_owned(&sctp_it_ctl.it_mtx),			\
	        ("Own the iterator lock"));				\
	mtx_lock(&sctp_it_ctl.it_mtx);					\
} while (0)

#define SCTP_ITERATOR_UNLOCK() do {					\
	mtx_unlock(&sctp_it_ctl.it_mtx);				\
} while (0)

#define SCTP_WQ_ADDR_INIT() do {					\
	mtx_init(&SCTP_BASE_INFO(wq_addr_mtx),				\
	         "sctp-addr-wq", "sctp_addr_wq", MTX_DEF);		\
} while (0)

#define SCTP_WQ_ADDR_DESTROY() do {					\
	if (mtx_owned(&SCTP_BASE_INFO(wq_addr_mtx))) {			\
		mtx_unlock(&SCTP_BASE_INFO(wq_addr_mtx));		\
	}								\
	mtx_destroy(&SCTP_BASE_INFO(wq_addr_mtx));			\
} while (0)

#define SCTP_WQ_ADDR_LOCK() do {					\
	mtx_lock(&SCTP_BASE_INFO(wq_addr_mtx));				\
} while (0)

#define SCTP_WQ_ADDR_UNLOCK() do {					\
	mtx_unlock(&SCTP_BASE_INFO(wq_addr_mtx));			\
} while (0)

#define SCTP_WQ_ADDR_LOCK_ASSERT() do {					\
	KASSERT(mtx_owned(&SCTP_BASE_INFO(wq_addr_mtx)),		\
	        ("Don't own the ADDR-WQ lock"));			\
} while (0)

#define SCTP_INCR_EP_COUNT() do {					\
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_ep), 1);		\
} while (0)

#define SCTP_DECR_EP_COUNT() do {					\
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ep), 1);		\
} while (0)

#define SCTP_INCR_ASOC_COUNT() do {					\
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_asoc), 1);		\
} while (0)

#define SCTP_DECR_ASOC_COUNT() do {					\
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_asoc), 1);	\
} while (0)

#define SCTP_INCR_LADDR_COUNT() do {					\
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_laddr), 1);		\
} while (0)

#define SCTP_DECR_LADDR_COUNT() do {					\
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_laddr), 1);	\
} while (0)

#define SCTP_INCR_RADDR_COUNT() do {					\
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_raddr), 1);		\
} while (0)

#define SCTP_DECR_RADDR_COUNT() do {					\
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_raddr), 1);	\
} while (0)

#define SCTP_INCR_CHK_COUNT() do {					\
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_chunk), 1);		\
} while (0)

#define SCTP_DECR_CHK_COUNT() do {					\
	KASSERT(SCTP_BASE_INFO(ipi_count_chunk) > 0,			\
	        ("ipi_count_chunk would become negative"));		\
	if (SCTP_BASE_INFO(ipi_count_chunk) != 0)			\
		atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_chunk),	\
		                    1);					\
} while (0)

#define SCTP_INCR_READQ_COUNT() do {					\
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_readq), 1);		\
} while (0)

#define SCTP_DECR_READQ_COUNT() do {					\
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_readq), 1);	\
} while (0)

#define SCTP_INCR_STRMOQ_COUNT() do {					\
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1);		\
} while (0)

#define SCTP_DECR_STRMOQ_COUNT() do {					\
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1);	\
} while (0)

#if defined(SCTP_SO_LOCK_TESTING)
#define SCTP_INP_SO(sctpinp)						\
	(sctpinp)->ip_inp.inp.inp_socket
#define SCTP_SOCKET_LOCK(so, refcnt)
#define SCTP_SOCKET_UNLOCK(so, refcnt)
#endif

#endif