/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * XXX: How do we in general know that objects referenced in events
 * have not been destroyed before we get around to handling the event?
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <geom/geom.h>
#include <geom/geom_int.h>

#include <machine/stdarg.h>
TAILQ_HEAD(event_tailq_head, g_event);

static struct event_tailq_head g_events = TAILQ_HEAD_INITIALIZER(g_events);
static u_int g_pending_events;
static TAILQ_HEAD(,g_provider) g_doorstep = TAILQ_HEAD_INITIALIZER(g_doorstep);
static struct mtx g_eventlock;
static int g_wither_work;

#define G_N_EVENTREFS		20

struct g_event {
	TAILQ_ENTRY(g_event)	events;
	g_event_t		*func;
	void			*arg;
	int			flag;
	void			*ref[G_N_EVENTREFS];
};

#define EV_DONE		0x80000	/* Event function has been executed. */
#define EV_WAKEUP	0x40000	/* A thread sleeps on this event. */
#define EV_CANCELED	0x20000	/* Event was cancelled, not executed. */
#define EV_INPROGRESS	0x10000	/* Event function is currently running. */

void
g_waitidle(void)
{

	g_topology_assert_not();

	mtx_lock(&g_eventlock);
	TSWAIT("GEOM events");
	while (!TAILQ_EMPTY(&g_events))
		msleep(&g_pending_events, &g_eventlock, PPAUSE,
		    "g_waitidle", hz/5);
	TSUNWAIT("GEOM events");
	mtx_unlock(&g_eventlock);
	curthread->td_pflags &= ~TDP_GEOM;
}

#if 0
void
g_waitidlelock(void)
{

	g_topology_assert();
	mtx_lock(&g_eventlock);
	while (!TAILQ_EMPTY(&g_events)) {
		g_topology_unlock();
		msleep(&g_pending_events, &g_eventlock, PPAUSE,
		    "g_waitidlel", hz/5);
		g_topology_lock();
	}
	mtx_unlock(&g_eventlock);
}
#endif

struct g_attrchanged_args {
	struct g_provider *pp;
	const char *attr;
};

static void
g_attr_changed_event(void *arg, int flag)
{
	struct g_attrchanged_args *args;
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_consumer *next_cp;

	args = arg;
	pp = args->pp;

	g_topology_assert();
	if (flag != EV_CANCEL && g_shutdown == 0) {
		/*
		 * Tell all consumers of the change.
		 */
		LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, next_cp) {
			if (cp->geom->attrchanged != NULL)
				cp->geom->attrchanged(cp, args->attr);
		}
	}
	g_free(args);
}

int
g_attr_changed(struct g_provider *pp, const char *attr, int flag)
{
	struct g_attrchanged_args *args;
	int error;

	args = g_malloc(sizeof *args, flag);
	if (args == NULL)
		return (ENOMEM);
	args->pp = pp;
	args->attr = attr;
	error = g_post_event(g_attr_changed_event, args, flag, pp, NULL);
	if (error != 0)
		g_free(args);
	return (error);
}
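
/*
 * Illustrative sketch, not part of this file: a class that wants to hear
 * about attribute changes sets the attrchanged method in its g_class; the
 * event above then calls it in the event thread with the topology lock
 * held.  The function name and the attribute string checked below are
 * hypothetical examples.
 */
#if 0
static void
g_example_attrchanged(struct g_consumer *cp, const char *attr)
{

	g_topology_assert();	/* Runs from the GEOM event thread. */
	if (strcmp(attr, "GEOM::physpath") == 0) {
		/* Re-query the attribute on the provider below us. */
	}
}
#endif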

void
g_orphan_provider(struct g_provider *pp, int error)
{

	/* G_VALID_PROVIDER(pp)  We likely lack topology lock */
	g_trace(G_T_TOPOLOGY, "g_orphan_provider(%p(%s), %d)",
	    pp, pp->name, error);
	KASSERT(error != 0,
	    ("g_orphan_provider(%p(%s), 0) error must be non-zero\n",
	     pp, pp->name));

	pp->error = error;
	mtx_lock(&g_eventlock);
	KASSERT(!(pp->flags & G_PF_ORPHAN),
	    ("g_orphan_provider(%p(%s)), already an orphan", pp, pp->name));
	pp->flags |= G_PF_ORPHAN;
	TAILQ_INSERT_TAIL(&g_doorstep, pp, orphan);
	mtx_unlock(&g_eventlock);
	wakeup(&g_wait_event);
}

/*
 * This function is called once on each provider which the event handler
 * finds on its g_doorstep.
 */

static void
g_orphan_register(struct g_provider *pp)
{
	struct g_consumer *cp, *cp2;
	int wf;

	g_topology_assert();
	G_VALID_PROVIDER(pp);
	g_trace(G_T_TOPOLOGY, "g_orphan_register(%s)", pp->name);

	g_cancel_event(pp);

	wf = pp->flags & G_PF_WITHER;
	pp->flags &= ~G_PF_WITHER;

	/*
	 * Tell all consumers the bad news.
	 * Don't be surprised if they self-destruct.
	 */
	LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) {
		KASSERT(cp->geom->orphan != NULL,
		    ("geom %s has no orphan, class %s",
		    cp->geom->name, cp->geom->class->name));
		/*
		 * XXX: The g_dev_orphan method does deferred destruction,
		 * and it is possible that another event has already called
		 * the orphan method.  Check the consumer's flags so we do
		 * not schedule it twice.
		 */
		if (cp->flags & G_CF_ORPHAN)
			continue;
		cp->flags |= G_CF_ORPHAN;
		cp->geom->orphan(cp);
	}
	if (LIST_EMPTY(&pp->consumers) && wf)
		g_destroy_provider(pp);
	else
		pp->flags |= wf;
#ifdef notyet
	cp = LIST_FIRST(&pp->consumers);
	if (cp != NULL)
		return;
	if (pp->geom->flags & G_GEOM_WITHER)
		g_destroy_provider(pp);
#endif
}
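
/*
 * Illustrative sketch, not part of this file: a minimal orphan method of
 * the kind invoked above.  It drops whatever access the consumer still
 * holds and then dismantles the consumer; the G_CF_ORPHAN check above
 * guarantees it runs at most once per consumer.  All names below are
 * hypothetical.
 */
#if 0
static void
g_example_orphan(struct g_consumer *cp)
{

	g_topology_assert();
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	g_detach(cp);
	g_destroy_consumer(cp);
}
#endif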

/*
 * Dispatch at most one orphaned provider or one queued event, called with
 * the topology lock held.  Returns 0, with the event lock still held, once
 * both queues are empty; otherwise returns 1 with the event lock released.
 */
static int
one_event(void)
{
	struct g_event *ep;
	struct g_provider *pp;

	g_topology_assert();
	mtx_lock(&g_eventlock);
	TAILQ_FOREACH(pp, &g_doorstep, orphan) {
		if (pp->nstart == pp->nend)
			break;
	}
	if (pp != NULL) {
		G_VALID_PROVIDER(pp);
		TAILQ_REMOVE(&g_doorstep, pp, orphan);
		mtx_unlock(&g_eventlock);
		g_orphan_register(pp);
		return (1);
	}

	ep = TAILQ_FIRST(&g_events);
	if (ep == NULL) {
		wakeup(&g_pending_events);
		return (0);
	}
	if (ep->flag & EV_INPROGRESS) {
		mtx_unlock(&g_eventlock);
		return (1);
	}
	ep->flag |= EV_INPROGRESS;
	mtx_unlock(&g_eventlock);
	g_topology_assert();
	ep->func(ep->arg, 0);
	g_topology_assert();
	mtx_lock(&g_eventlock);
	TSRELEASE("GEOM events");
	TAILQ_REMOVE(&g_events, ep, events);
	ep->flag &= ~EV_INPROGRESS;
	if (ep->flag & EV_WAKEUP) {
		ep->flag |= EV_DONE;
		mtx_unlock(&g_eventlock);
		wakeup(ep);
	} else {
		mtx_unlock(&g_eventlock);
		g_free(ep);
	}
	return (1);
}

void
g_run_events(void)
{

	for (;;) {
		g_topology_lock();
		while (one_event())
			;
		mtx_assert(&g_eventlock, MA_OWNED);
		if (g_wither_work) {
			g_wither_work = 0;
			mtx_unlock(&g_eventlock);
			g_wither_washer();
			g_topology_unlock();
		} else {
			g_topology_unlock();
			msleep(&g_wait_event, &g_eventlock, PRIBIO | PDROP,
			    "-", TAILQ_EMPTY(&g_doorstep) ? 0 : hz / 10);
		}
	}
	/* NOTREACHED */
}

void
g_cancel_event(void *ref)
{
	struct g_event *ep, *epn;
	struct g_provider *pp;
	u_int n;

	mtx_lock(&g_eventlock);
	TAILQ_FOREACH(pp, &g_doorstep, orphan) {
		if (pp != ref)
			continue;
		TAILQ_REMOVE(&g_doorstep, pp, orphan);
		break;
	}
	TAILQ_FOREACH_SAFE(ep, &g_events, events, epn) {
		if (ep->flag & EV_INPROGRESS)
			continue;
		for (n = 0; n < G_N_EVENTREFS; n++) {
			if (ep->ref[n] == NULL)
				break;
			if (ep->ref[n] != ref)
				continue;
			TSRELEASE("GEOM events");
			TAILQ_REMOVE(&g_events, ep, events);
			ep->func(ep->arg, EV_CANCEL);
			mtx_assert(&g_eventlock, MA_OWNED);
			if (ep->flag & EV_WAKEUP) {
				ep->flag |= (EV_DONE|EV_CANCELED);
				wakeup(ep);
			} else {
				g_free(ep);
			}
			break;
		}
	}
	if (TAILQ_EMPTY(&g_events))
		wakeup(&g_pending_events);
	mtx_unlock(&g_eventlock);
}
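
/*
 * Illustrative sketch, not part of this file: g_cancel_event() is one
 * answer to the XXX question at the top of the file.  Code about to free
 * an object that may still be referenced by queued events cancels those
 * events first, so no event function is ever handed a stale pointer.
 * "sc" is a hypothetical softc.
 */
#if 0
	g_cancel_event(sc);	/* Flush queued events referencing sc. */
	g_free(sc);
#endif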

static int
g_post_event_x(g_event_t *func, void *arg, int flag, int wuflag,
    struct g_event **epp, va_list ap)
{
	struct g_event *ep;
	void *p;
	u_int n;

	g_trace(G_T_TOPOLOGY, "g_post_event_x(%p, %p, %d, %d)",
	    func, arg, flag, wuflag);
	KASSERT(wuflag == 0 || wuflag == EV_WAKEUP,
	    ("Wrong wuflag in g_post_event_x(0x%x)", wuflag));
	ep = g_malloc(sizeof *ep, flag | M_ZERO);
	if (ep == NULL)
		return (ENOMEM);
	ep->flag = wuflag;
	for (n = 0; n < G_N_EVENTREFS; n++) {
		p = va_arg(ap, void *);
		if (p == NULL)
			break;
		g_trace(G_T_TOPOLOGY, "  ref %p", p);
		ep->ref[n] = p;
	}
	KASSERT(p == NULL, ("Too many references to event"));
	ep->func = func;
	ep->arg = arg;
	mtx_lock(&g_eventlock);
	TSHOLD("GEOM events");
	TAILQ_INSERT_TAIL(&g_events, ep, events);
	mtx_unlock(&g_eventlock);
	wakeup(&g_wait_event);
	if (epp != NULL)
		*epp = ep;
	curthread->td_pflags |= TDP_GEOM;
	return (0);
}

int
g_post_event(g_event_t *func, void *arg, int flag, ...)
{
	va_list ap;
	int i;

	KASSERT(flag == M_WAITOK || flag == M_NOWAIT,
	    ("Wrong flag to g_post_event"));
	va_start(ap, flag);
	i = g_post_event_x(func, arg, flag, 0, NULL, ap);
	va_end(ap);
	return (i);
}
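
/*
 * Illustrative sketch, not part of this file: a caller posts a function to
 * run asynchronously in the event thread, which holds the topology lock
 * when calling it.  The trailing arguments are cancellation references,
 * terminated by NULL.  The names "example_ev", "example_softc" and "sc"
 * are hypothetical.
 */
#if 0
static void
example_ev(void *arg, int flag)
{
	struct example_softc *sc = arg;

	if (flag == EV_CANCEL)		/* Cancelled before it could run. */
		return;
	g_topology_assert();
	/* ... operate on sc ... */
}

	/* At the posting site: */
	error = g_post_event(example_ev, sc, M_WAITOK, sc, NULL);
#endif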

void
g_do_wither(void)
{

	mtx_lock(&g_eventlock);
	g_wither_work = 1;
	mtx_unlock(&g_eventlock);
	wakeup(&g_wait_event);
}
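
/*
 * Illustrative sketch, not part of this file: classes do not normally call
 * g_do_wither() directly.  They mark a geom for withering, for example via
 * g_wither_geom(), and the washer run triggered above finishes the
 * dismantling once outstanding I/O has drained.  "gp" is hypothetical.
 */
#if 0
	g_wither_geom(gp, ENXIO);	/* Ends up scheduling wither work. */
#endif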

/*
 * XXX: It might actually be useful to call this function with topology held.
 * XXX: This would ensure that the event gets created before anything else
 * XXX: changes.  At present all users have a handle on things in some other
 * XXX: way, so this remains an XXX for now.
 */

int
g_waitfor_event(g_event_t *func, void *arg, int flag, ...)
{
	va_list ap;
	struct g_event *ep;
	int error;

	g_topology_assert_not();
	KASSERT(flag == M_WAITOK || flag == M_NOWAIT,
	    ("Wrong flag to g_waitfor_event"));
	va_start(ap, flag);
	error = g_post_event_x(func, arg, flag, EV_WAKEUP, &ep, ap);
	va_end(ap);
	if (error)
		return (error);

	mtx_lock(&g_eventlock);
	while (!(ep->flag & EV_DONE))
		msleep(ep, &g_eventlock, PRIBIO, "g_waitfor_event", hz);
	if (ep->flag & EV_CANCELED)
		error = EAGAIN;
	mtx_unlock(&g_eventlock);

	g_free(ep);
	return (error);
}
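
/*
 * Illustrative sketch, not part of this file: the synchronous variant
 * sleeps until the event has run and reports EAGAIN when the event was
 * cancelled instead, in which case the function was called with EV_CANCEL.
 * Names are the same hypothetical ones as in the g_post_event() sketch.
 */
#if 0
	error = g_waitfor_event(example_ev, sc, M_WAITOK, sc, NULL);
	if (error == EAGAIN) {
		/* The event was cancelled; sc was not processed. */
	}
#endif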

void
g_event_init(void)
{

	mtx_init(&g_eventlock, "GEOM orphanage", NULL, MTX_DEF);
}