/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Kernel framework functions for the fcode interpreter
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/debug.h>
#include <sys/kmem.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/esunddi.h>
#include <sys/ksynch.h>
#include <sys/modctl.h>
#include <sys/errno.h>
#include <sys/fcode.h>
#ifdef	DEBUG
int fcode_debug = 1;
#else
int fcode_debug = 0;
#endif

static kmutex_t fc_request_lock;
static kmutex_t fc_resource_lock;
static kmutex_t fc_hash_lock;
static kmutex_t fc_device_tree_lock;
static kmutex_t fc_phandle_lock;
static kcondvar_t fc_request_cv;
static struct fc_request *fc_request_head;
static int fc_initialized;

static void fcode_timer(void *);

int fcode_timeout = 300;	/* seconds */

int fcodem_unloadable;

extern int hz;

/*
 * Initialize the fcode interpreter framework.  This must be called
 * before any other part of the framework, including the fcode driver,
 * is used.
 */
static void
fcode_init(void)
{
	if (fc_initialized)
		return;

	mutex_init(&fc_request_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&fc_resource_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&fc_hash_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&fc_device_tree_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&fc_phandle_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&fc_request_cv, NULL, CV_DRIVER, NULL);
	++fc_initialized;
}

static void
fcode_fini(void)
{
	mutex_destroy(&fc_request_lock);
	mutex_destroy(&fc_resource_lock);
	mutex_destroy(&fc_hash_lock);
	mutex_destroy(&fc_device_tree_lock);
	mutex_destroy(&fc_phandle_lock);
	cv_destroy(&fc_request_cv);
	fc_initialized = 0;
}

/*
 * Module linkage information for the kernel.
 */
static struct modlmisc modlmisc = {
	&mod_miscops, "FCode framework 1.13"
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlmisc, NULL
};

int
_init(void)
{
	int error;

	fcode_init();
	if ((error = mod_install(&modlinkage)) != 0)
		fcode_fini();
	return (error);
}

int
_fini(void)
{
	int error = EBUSY;

	if (fcodem_unloadable)
		if ((error = mod_remove(&modlinkage)) == 0)
			fcode_fini();

	return (error);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Framework function to invoke the interpreter. Wait and return when the
 * interpreter is done. See fcode.h for details.
 */
int
fcode_interpreter(dev_info_t *ap, fc_ops_t *ops, fco_handle_t handle)
{
	struct fc_request *fp, *qp;
	int error;

	ASSERT(fc_initialized);
	ASSERT(ap);
	ASSERT(ops);
	ASSERT(handle);

	/*
	 * Create a request structure
	 */
	fp = kmem_zalloc(sizeof (struct fc_request), KM_SLEEP);

	fp->next = NULL;
	fp->busy = FC_R_INIT;
	fp->error = FC_SUCCESS;
	fp->ap_dip = ap;
	fp->ap_ops = ops;
	fp->handle = handle;

	/*
	 * Add the request to the end of the request list.
	 */
	mutex_enter(&fc_request_lock);

	if (fc_request_head == NULL)
		fc_request_head = fp;
	else {
		for (qp = fc_request_head; qp->next != NULL; qp = qp->next)
			/* empty */;
		qp->next = fp;
	}
	mutex_exit(&fc_request_lock);

	/*
	 * Log a message (i.e., i_ddi_log_event) indicating that a request
	 * has been queued to start the userland fcode interpreter.
	 * This call is the glue to the eventd and automates the process.
	 */

	/*
	 * Signal the driver if it's waiting for a request to be queued.
	 */
	cv_broadcast(&fc_request_cv);

	/*
	 * Wait for the request to be serviced
	 */
	mutex_enter(&fc_request_lock);
	fp->timeout = timeout(fcode_timer, fp, hz * fcode_timeout);
	while (fp->busy != FC_R_DONE)
		cv_wait(&fc_request_cv, &fc_request_lock);

	if (fp->timeout) {
		(void) untimeout(fp->timeout);
		fp->timeout = NULL;
	}

	/*
	 * Remove the request from the queue (while still holding the lock)
	 */
	if (fc_request_head == fp)
		fc_request_head = fp->next;
	else {
		for (qp = fc_request_head; qp->next != fp; qp = qp->next)
			/* empty */;
		qp->next = fp->next;
	}
	mutex_exit(&fc_request_lock);

	FC_DEBUG1(2, CE_CONT, "fcode_interpreter: request finished, fp %p\n",
	    fp);

	/*
	 * Free the request structure and return any errors.
	 */
	error = fp->error;
	kmem_free(fp, sizeof (struct fc_request));
	return (error);
}
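
/*
 * For illustration, an attachment-point (nexus) driver would queue a
 * request and wait for the userland interpreter roughly as sketched
 * below.  This is a hypothetical sketch only: xx_alloc_handle and
 * xx_ops are invented stand-ins for the bus-specific handle allocator
 * and ops function that live outside this file.
 *
 *	fco_handle_t handle;
 *	int error;
 *
 *	handle = xx_alloc_handle(ap, child, fcode, fcode_size);
 *	error = fcode_interpreter(ap, xx_ops, handle);
 *	if (error != FC_SUCCESS)
 *		cmn_err(CE_WARN, "fcode interpretation failed: %d", error);
 */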

/*
 * Timeout requests that don't get picked up by the interpreter.  This
 * would happen if the daemon is not running.  If the timer goes off
 * and the request's state is not FC_R_INIT, then the interpreter has
 * picked up the request.
 */
static void
fcode_timer(void *arg)
{
	struct fc_request *fp = arg;

	mutex_enter(&fc_request_lock);
	fp->timeout = 0;
	if (fp->busy == FC_R_INIT) {
		cmn_err(CE_WARN, "fcode_timer: Timeout waiting for "
		    "interpreter - Interpreter did not pick up request\n");
		fp->busy = FC_R_DONE;
		fp->error = FC_TIMEOUT;
		mutex_exit(&fc_request_lock);
		cv_broadcast(&fc_request_cv);
		return;
	} else if (fp->error != FC_SUCCESS) {
		/*
		 * An error was detected, but the driver was not closed.
		 * Complete the request now so the caller errors out with
		 * the interpreter's error code instead of FC_TIMEOUT.
		 */
		fp->busy = FC_R_DONE;
		cv_broadcast(&fc_request_cv);
		mutex_exit(&fc_request_lock);
		return;
	} else {
		cmn_err(CE_WARN, "fcode_timer: Timeout waiting for "
		    "interpreter - Interpreter is executing request\n");
	}
	mutex_exit(&fc_request_lock);
}

/*
 * This is the function the driver calls to wait for and get
 * a request.  The call should be interruptible since it's made
 * at read(2) time, so allow signals to interrupt us.
 *
 * Return NULL if the wait was interrupted, else return a pointer
 * to the fc_request structure (marked as busy).
 *
 * Note that we have to check for a request first, before waiting,
 * in case a request is already queued; its wakeup broadcast may
 * have already been delivered.
 */
struct fc_request *
fc_get_request(void)
{
	struct fc_request *fp;

	ASSERT(fc_initialized);

	mutex_enter(&fc_request_lock);

	/*CONSTANTCONDITION*/
	while (1) {
		for (fp = fc_request_head; fp != NULL; fp = fp->next) {
			if (fp->busy == FC_R_INIT) {
				fp->busy = FC_R_BUSY;
				mutex_exit(&fc_request_lock);
				return (fp);
			}
		}
		if (cv_wait_sig(&fc_request_cv, &fc_request_lock) == 0) {
			mutex_exit(&fc_request_lock);
			return (NULL);
		}
	}
	/*NOTREACHED*/
}

/*
 * This is the function the driver calls when it's finished with
 * a request.  Mark the request as done and signal the thread that
 * enqueued the request.
 */
void
fc_finish_request(struct fc_request *fp)
{
	ASSERT(fc_initialized);
	ASSERT(fp);
	ASSERT(fp->busy == FC_R_BUSY);

	mutex_enter(&fc_request_lock);
	fp->busy = FC_R_DONE;
	mutex_exit(&fc_request_lock);

	cv_broadcast(&fc_request_cv);
}
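
/*
 * For illustration, a read(9E)-side service loop would pair the two
 * interfaces above roughly as sketched here (hypothetical; the real
 * fcode driver also ships the request to the userland daemon between
 * the two calls):
 *
 *	struct fc_request *fp;
 *
 *	if ((fp = fc_get_request()) == NULL)
 *		return (EINTR);		... wait was signal-interrupted ...
 *
 *	... service fp: run fp->ap_ops against fp->handle ...
 *
 *	fp->error = FC_SUCCESS;		... or an FC_* error code ...
 *	fc_finish_request(fp);
 */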

/*
 * Generic resource list management subroutines
 */
void
fc_add_resource(fco_handle_t rp, struct fc_resource *ip)
{
	ASSERT(rp);
	ASSERT(ip);

	mutex_enter(&fc_resource_lock);
	ip->next = rp->head;
	rp->head = ip;
	mutex_exit(&fc_resource_lock);
}

void
fc_rem_resource(fco_handle_t rp, struct fc_resource *ip)
{
	struct fc_resource *fp;

	ASSERT(rp);
	ASSERT(ip);

	mutex_enter(&fc_resource_lock);
	if (rp->head == NULL)  {
		mutex_exit(&fc_resource_lock);
		cmn_err(CE_CONT, "fc_rem_resource: NULL list head!\n");
		return;
	}

	if (rp->head == ip) {
		rp->head = ip->next;
		mutex_exit(&fc_resource_lock);
		return;
	}

	for (fp = rp->head; fp && (fp->next != ip); fp = fp->next)
		/* empty */;

	if (fp == NULL)  {
		mutex_exit(&fc_resource_lock);
		cmn_err(CE_CONT, "fc_rem_resource: Item not on list!\n");
		return;
	}

	fp->next = ip->next;
	mutex_exit(&fc_resource_lock);
}

/*ARGSUSED*/
void
fc_lock_resource_list(fco_handle_t rp)
{
	mutex_enter(&fc_resource_lock);
}

/*ARGSUSED*/
void
fc_unlock_resource_list(fco_handle_t rp)
{
	mutex_exit(&fc_resource_lock);
}
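
/*
 * For illustration, a caller typically walks a handle's resource list
 * under the list lock, e.g. to find an item added earlier with
 * fc_add_resource (a sketch; what "matches" means depends on the
 * caller's fc_resource usage):
 *
 *	struct fc_resource *ip;
 *
 *	fc_lock_resource_list(rp);
 *	for (ip = rp->head; ip != NULL; ip = ip->next)
 *		if (... ip matches ...)
 *			break;
 *	fc_unlock_resource_list(rp);
 */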

/*
 * Common helper ops and subroutines
 */
/*ARGSUSED*/
int
fc_syntax_error(fc_ci_t *cp, char *msg)
{
	cp->error = fc_int2cell(-1);
	cp->nresults = fc_int2cell(0);
	return (0);
}

/*ARGSUSED*/
int
fc_priv_error(fc_ci_t *cp, char *msg)
{
	cp->priv_error = fc_int2cell(-1);
	cp->error = fc_int2cell(0);
	cp->nresults = fc_int2cell(0);
	return (0);
}

/*ARGSUSED*/
int
fc_success_op(dev_info_t *ap, fco_handle_t handle, fc_ci_t *cp)
{
	cp->priv_error = cp->error = fc_int2cell(0);
	return (0);
}

/*
 * fc_fail_op: This 'handles' a request by specifically failing it,
 * as opposed to not handling it and returning '-1' to indicate
 * 'service unknown' and allowing somebody else in the chain to
 * handle it.
 */
/*ARGSUSED*/
int
fc_fail_op(dev_info_t *ap, fco_handle_t handle, fc_ci_t *cp)
{
	cmn_err(CE_CONT, "fcode ops: fail service name <%s>\n",
	    (char *)fc_cell2ptr(cp->svc_name));

	cp->nresults = fc_int2cell(0);
	cp->error = fc_int2cell(-1);
	return (0);
}
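
/*
 * To make the chaining convention concrete: an ops function that
 * recognizes one service and defers everything else might look like
 * the hypothetical sketch below (the service name and helper are
 * invented for illustration):
 *
 *	static int
 *	xx_fcode_op(dev_info_t *ap, fco_handle_t handle, fc_ci_t *cp)
 *	{
 *		char *svc = (char *)fc_cell2ptr(cp->svc_name);
 *
 *		if (strcmp(svc, "xx-map-in") == 0)
 *			return (xx_map_in(ap, handle, cp));
 *
 *		return (-1);	... service unknown; next handler tries ...
 *	}
 */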

/*
 * Functions to manage the set of handles we give to the interpreter.
 * The handles are opaque and internally represent dev_info_t pointers.
 */
struct fc_phandle_entry **
fc_handle_to_phandle_head(fco_handle_t rp)
{
	while (rp->next_handle)
		rp = rp->next_handle;

	return (&rp->ptable);
}

/*ARGSUSED*/
void
fc_phandle_table_alloc(struct fc_phandle_entry **head)
{
}

void
fc_phandle_table_free(struct fc_phandle_entry **head)
{
	struct fc_phandle_entry *ip, *np;

	/*
	 * Free each entry in the table.
	 */
	for (ip = *head; ip; ip = np) {
		np = ip->next;
		kmem_free(ip, sizeof (struct fc_phandle_entry));
	}
	*head = NULL;
}

dev_info_t *
fc_phandle_to_dip(struct fc_phandle_entry **head, fc_phandle_t handle)
{
	struct fc_phandle_entry *ip;

	mutex_enter(&fc_hash_lock);

	for (ip = *head; ip; ip = ip->next)
		if (ip->h == handle)
			break;

	mutex_exit(&fc_hash_lock);

	return (ip ? ip->dip : NULL);
}

fc_phandle_t
fc_dip_to_phandle(struct fc_phandle_entry **head, dev_info_t *dip)
{
	struct fc_phandle_entry *hp, *np;
	fc_phandle_t h;

	ASSERT(dip);
	h = (fc_phandle_t)ddi_get_nodeid(dip);

	/*
	 * Just in case, allocate a new entry ...
	 */
	np = kmem_zalloc(sizeof (struct fc_phandle_entry), KM_SLEEP);

	mutex_enter(&fc_hash_lock);

	/*
	 * If we already have this dip in the table, just return the handle
	 */
	for (hp = *head; hp; hp = hp->next) {
		if (hp->dip == dip) {
			mutex_exit(&fc_hash_lock);
			kmem_free(np, sizeof (struct fc_phandle_entry));
			return (h);
		}
	}

	/*
	 * Insert this entry into the list of known entries
	 */
	np->next = *head;
	np->dip = dip;
	np->h = h;
	*head = np;
	mutex_exit(&fc_hash_lock);
	return (h);
}

/*
 * We won't need this function once the ddi is modified to handle
 * unique non-prom nodeids.  For now, this allows us to add a given
 * nodeid to the device tree without dereferencing the value in the
 * devinfo node, so we have a parallel mechanism.
 */
void
fc_add_dip_to_phandle(struct fc_phandle_entry **head, dev_info_t *dip,
    fc_phandle_t h)
{
	struct fc_phandle_entry *hp, *np;

	ASSERT(dip);

	/*
	 * Just in case, allocate a new entry ...
	 */
	np = kmem_zalloc(sizeof (struct fc_phandle_entry), KM_SLEEP);

	mutex_enter(&fc_hash_lock);

	/*
	 * If we already have this dip in the table, there's nothing to do
	 */
	for (hp = *head; hp; hp = hp->next) {
		if (hp->dip == dip) {
			mutex_exit(&fc_hash_lock);
			kmem_free(np, sizeof (struct fc_phandle_entry));
			return;
		}
	}

	/*
	 * Insert this entry into the list of known entries
	 */
	np->next = *head;
	np->dip = dip;
	np->h = h;
	*head = np;
	mutex_exit(&fc_hash_lock);
}
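
/*
 * Taken together, the phandle routines let a caller map between dips
 * and handles; e.g. (illustrative only):
 *
 *	struct fc_phandle_entry **head;
 *	fc_phandle_t h;
 *
 *	head = fc_handle_to_phandle_head(rp);
 *	h = fc_dip_to_phandle(head, dip);	... creates or looks up ...
 *	ASSERT(fc_phandle_to_dip(head, h) == dip);
 */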

/*
 * Functions to manage our copy of our subtree.
 *
 * The head of the device tree is always stored in the last 'handle'
 * in the handle chain.
 */
struct fc_device_tree **
fc_handle_to_dtree_head(fco_handle_t rp)
{
	while (rp->next_handle)
		rp = rp->next_handle;

	return (&rp->dtree);
}

struct fc_device_tree *
fc_handle_to_dtree(fco_handle_t rp)
{
	struct fc_device_tree **head = fc_handle_to_dtree_head(rp);

	return (*head);
}

/*
 * The root of the subtree is the attachment point ...
 * Thus, there is never an empty device tree.
 */
void
fc_create_device_tree(dev_info_t *ap, struct fc_device_tree **head)
{
	struct fc_device_tree *dp;

	dp = kmem_zalloc(sizeof (struct fc_device_tree), KM_SLEEP);
	dp->dip = ap;
	*head = dp;
}

#ifdef	notdef
static void
fc_remove_subtree(struct fc_device_tree *dp)
{
	struct fc_device_tree *np;

	if (dp->child) {
		fc_remove_subtree(dp->child);
		dp->child = NULL;
	}

	/*
	 * Remove each peer node, working our way backwards from the
	 * last peer node to the first peer node.
	 */
	if (dp->peer != NULL) {
		for (np = dp->peer; np->peer; np = dp->peer) {
			for (/* empty */; np->peer->peer; np = np->peer)
				/* empty */;
			fc_remove_subtree(np->peer);
			np->peer = NULL;
		}
		fc_remove_subtree(dp->peer);
		dp->peer = NULL;
	}

	ASSERT((dp->child == NULL) && (dp->peer == NULL));
	kmem_free(dp, sizeof (struct fc_device_tree));
}

void
fc_remove_device_tree(struct fc_device_tree **head)
{
	ASSERT(head && (*head != NULL));

	fc_remove_subtree(*head);
	*head = NULL;
}
#endif	/* notdef */

void
fc_remove_device_tree(struct fc_device_tree **head)
{
	struct fc_device_tree *dp;

	ASSERT(head && (*head != NULL));

	dp = *head;

	if (dp->child)
		fc_remove_device_tree(&dp->child);

	if (dp->peer)
		fc_remove_device_tree(&dp->peer);

	ASSERT((dp->child == NULL) && (dp->peer == NULL));

	kmem_free(dp, sizeof (struct fc_device_tree));
	*head = NULL;
}

struct fc_device_tree *
fc_find_node(dev_info_t *dip, struct fc_device_tree *hp)
{
	struct fc_device_tree *p;

	while (hp) {
		if (hp->dip == dip)
			return (hp);

		if (hp->child)
			if ((p = fc_find_node(dip, hp->child)) != NULL)
				return (p);

		hp = hp->peer;
	}
	return (NULL);
}

void
fc_add_child(dev_info_t *child, dev_info_t *parent, struct fc_device_tree *hp)
{
	struct fc_device_tree *p, *q;

	q = kmem_zalloc(sizeof (struct fc_device_tree), KM_SLEEP);
	q->dip = child;

	mutex_enter(&fc_device_tree_lock);

#ifdef	DEBUG
	/* XXX: Revisit ASSERT vs PANIC */
	p = fc_find_node(child, hp);
	ASSERT(p == NULL);
#endif

	p = fc_find_node(parent, hp);
	ASSERT(p != NULL);

	q->peer = p->child;
	p->child = q;

	mutex_exit(&fc_device_tree_lock);
}

void
fc_remove_child(dev_info_t *child, struct fc_device_tree *head)
{
	struct fc_device_tree *p, *c, *n;
	dev_info_t *parent = ddi_get_parent(child);

	mutex_enter(&fc_device_tree_lock);

	p = fc_find_node(parent, head);
	ASSERT(p != NULL);

	/*
	 * Find the child within the parent's subtree ...
	 */
	c = fc_find_node(child, p);
	ASSERT(c != NULL);
	ASSERT(c->child == NULL);

	/*
	 * If it's the first child, unlink it directly; otherwise
	 * unlink it from its predecessor in the peer list.
	 */
	if (p->child == c) {
		p->child = c->peer;
	} else {
		int found = 0;
		for (n = p->child; n->peer; n = n->peer) {
			if (n->peer == c) {
				n->peer = c->peer;
				found = 1;
				break;
			}
		}
		if (!found)
			cmn_err(CE_PANIC, "fc_remove_child: not found\n");
	}
	mutex_exit(&fc_device_tree_lock);

	kmem_free(c, sizeof (struct fc_device_tree));
}

dev_info_t *
fc_child_node(dev_info_t *parent, struct fc_device_tree *hp)
{
	struct fc_device_tree *p;
	dev_info_t *dip = NULL;

	mutex_enter(&fc_device_tree_lock);
	p = fc_find_node(parent, hp);
	if (p && p->child)
		dip = p->child->dip;
	mutex_exit(&fc_device_tree_lock);

	return (dip);
}

dev_info_t *
fc_peer_node(dev_info_t *devi, struct fc_device_tree *hp)
{
	struct fc_device_tree *p;
	dev_info_t *dip = NULL;

	mutex_enter(&fc_device_tree_lock);
	p = fc_find_node(devi, hp);
	if (p && p->peer)
		dip = p->peer->dip;
	mutex_exit(&fc_device_tree_lock);

	return (dip);
}

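/*
 * For illustration, a depth-first walk of the shadow tree in terms of
 * fc_child_node/fc_peer_node (a sketch; xx_visit is a hypothetical
 * callback):
 *
 *	static void
 *	xx_walk(dev_info_t *dip, struct fc_device_tree *hp)
 *	{
 *		dev_info_t *np;
 *
 *		xx_visit(dip);
 *		for (np = fc_child_node(dip, hp); np != NULL;
 *		    np = fc_peer_node(np, hp))
 *			xx_walk(np, hp);
 *	}
 */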