rmc_comm.c revision 7656:2621e50fdf4a
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * The "rmc_comm" driver provides access to the RMC so that its clients need
 * not be concerned with the details of the access mechanism, which in this
 * case is implemented as a packet-based protocol over a serial link through
 * a 16550-compatible serial port.
 */
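
/*
 * Rough layering sketch (inferred from the headers and function names used
 * below; illustrative only, not an authoritative diagram):
 *
 *	client leaf drivers  - rmc_comm_register()/rmc_comm_unregister()
 *		|
 *	rmc_comm_drvintf     - client-facing request interface
 *		|
 *	rmc_comm_dp	     - packet-based data protocol
 *		|
 *	serdev routines	     - 16550 (SIO) register access, in this file
 */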


/*
 *  Header files
 */
#include <sys/conf.h>
#include <sys/membar.h>
#include <sys/modctl.h>
#include <sys/strlog.h>
#include <sys/types.h>
#include <sys/sunddi.h>
#include <sys/ddi.h>
#include <sys/rmc_comm_dp_boot.h>
#include <sys/rmc_comm_dp.h>
#include <sys/rmc_comm_drvintf.h>
#include <sys/rmc_comm.h>
#include <sys/cpu_sgnblk_defs.h>

/*
 * Local definitions
 */

#define	ddi_driver_major(dip)	ddi_name_to_major(ddi_binding_name(dip))

#define	MYNAME			"rmc_comm"
#define	NOMAJOR			(~(major_t)0)
#define	DUMMY_VALUE		(~(int8_t)0)

/*
 * Local data
 */
static void *rmc_comm_statep;
static major_t rmc_comm_major = NOMAJOR;
static kmutex_t rmc_comm_attach_lock;
static ddi_device_acc_attr_t rmc_comm_dev_acc_attr[1] =
{
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};
static int watchdog_was_active;
extern int watchdog_activated;
extern int watchdog_enable;

/*
 * prototypes
 */

extern void dp_reset(struct rmc_comm_state *, uint8_t, boolean_t, boolean_t);
static void sio_put_reg(struct rmc_comm_state *, uint_t, uint8_t);
static uint8_t sio_get_reg(struct rmc_comm_state *, uint_t);
static void sio_check_fault_status(struct rmc_comm_state *);
static boolean_t sio_data_ready(struct rmc_comm_state *);
static void rmc_comm_set_irq(struct rmc_comm_state *, boolean_t);
static uint_t rmc_comm_hi_intr(caddr_t);
static uint_t rmc_comm_softint(caddr_t);
static void rmc_comm_cyclic(void *);
static void rmc_comm_hw_reset(struct rmc_comm_state *);
static void rmc_comm_offline(struct rmc_comm_state *);
static int rmc_comm_online(struct rmc_comm_state *, dev_info_t *);
static void rmc_comm_unattach(struct rmc_comm_state *, dev_info_t *, int,
    boolean_t, boolean_t, boolean_t);
static int rmc_comm_attach(dev_info_t *, ddi_attach_cmd_t);
static int rmc_comm_detach(dev_info_t *, ddi_detach_cmd_t);

/*
 * for client leaf drivers to register their desire for rmc_comm
 * to stay attached
 */
int
rmc_comm_register()
{
	struct rmc_comm_state *rcs;

	mutex_enter(&rmc_comm_attach_lock);
	rcs = ddi_get_soft_state(rmc_comm_statep, 0);
	if ((rcs == NULL) || (!rcs->is_attached)) {
		mutex_exit(&rmc_comm_attach_lock);
		return (DDI_FAILURE);
	}
	rcs->n_registrations++;
	mutex_exit(&rmc_comm_attach_lock);
	return (DDI_SUCCESS);
}

void
rmc_comm_unregister()
{
	struct rmc_comm_state *rcs;

	mutex_enter(&rmc_comm_attach_lock);
	rcs = ddi_get_soft_state(rmc_comm_statep, 0);
	ASSERT(rcs != NULL);
	ASSERT(rcs->n_registrations != 0);
	rcs->n_registrations--;
	mutex_exit(&rmc_comm_attach_lock);
}
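
/*
 * Usage sketch (illustrative only, not part of this driver): a client leaf
 * driver would typically take a registration in its attach(9E) routine, so
 * that rmc_comm stays attached underneath it, and release the registration
 * in detach(9E).  The "xx" names below are hypothetical placeholders for
 * the client's own code; only rmc_comm_register()/rmc_comm_unregister()
 * are real.
 *
 *	static int
 *	xx_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
 *	{
 *		if (cmd != DDI_ATTACH)
 *			return (DDI_FAILURE);
 *		if (rmc_comm_register() != DDI_SUCCESS)
 *			return (DDI_FAILURE);
 *		return (DDI_SUCCESS);
 *	}
 *
 *	static int
 *	xx_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
 *	{
 *		if (cmd != DDI_DETACH)
 *			return (DDI_FAILURE);
 *		rmc_comm_unregister();
 *		return (DDI_SUCCESS);
 *	}
 */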

/*
 * to get the soft state structure of a specific instance
 */
struct rmc_comm_state *
rmc_comm_getstate(dev_info_t *dip, int instance, const char *caller)
{
	struct rmc_comm_state *rcs = NULL;
	dev_info_t *sdip = NULL;
	major_t dmaj = NOMAJOR;

	if (dip != NULL) {
		/*
		 * Use the instance number from the <dip>; also,
		 * check that it really corresponds to this driver
		 */
		instance = ddi_get_instance(dip);
		dmaj = ddi_driver_major(dip);
		if (rmc_comm_major == NOMAJOR && dmaj != NOMAJOR)
			rmc_comm_major = dmaj;
		else if (dmaj != rmc_comm_major) {
			cmn_err(CE_WARN,
			    "%s: major number mismatch (%d vs. %d) in %s(), "
			    "probably due to child misconfiguration",
			    MYNAME, rmc_comm_major, dmaj, caller);
			instance = -1;
		}
	}
	if (instance >= 0)
		rcs = ddi_get_soft_state(rmc_comm_statep, instance);
	if (rcs != NULL) {
		sdip = rcs->dip;
		if (dip == NULL && sdip == NULL)
			rcs = NULL;
		else if (dip != NULL && sdip != NULL && sdip != dip) {
			cmn_err(CE_WARN,
			    "%s: devinfo mismatch (%p vs. %p) in %s(), "
			    "probably due to child misconfiguration", MYNAME,
			    (void *)dip, (void *)sdip, caller);
			rcs = NULL;
		}
	}

	return (rcs);
}


/*
 * Lowest-level serial I/O chip register read/write
 */
static void
sio_put_reg(struct rmc_comm_state *rcs, uint_t reg, uint8_t val)
{
	DPRINTF(rcs, DSER, (CE_CONT, "REG[%d]<-$%02x", reg, val));

	if (rcs->sd_state.sio_handle != NULL && !rcs->sd_state.sio_fault) {
		/*
		 * The chip is mapped as "I/O" (e.g. with the side-effect
		 * bit on SPARC), therefore accesses are required to be
		 * in-order, with no value caching.  However, there can
		 * still be write-behind buffering, so it is not guaranteed
		 * that a write actually reaches the chip in a given time.
		 *
		 * To force the access right through to the chip, we follow
		 * the write with another write (to the SCRATCH register)
		 * and a read (of the value just written to the SCRATCH
		 * register).  The SCRATCH register is specifically provided
		 * for temporary data and has no effect on the SIO's own
		 * operation, making it ideal as a synchronising mechanism.
		 *
		 * If we didn't do this, it would be possible that the new
		 * value wouldn't reach the chip (and have the *intended*
		 * side-effects, such as disabling interrupts), for such a
		 * long time that the processor could execute a *lot* of
		 * instructions - including exiting the interrupt service
		 * routine and re-enabling interrupts.  This effect was
		 * observed to lead to spurious (unclaimed) interrupts in
		 * some circumstances.
		 *
		 * This will no longer be needed once "synchronous" access
		 * handles are available (see PSARC/2000/269 and 2000/531).
		 */
		ddi_put8(rcs->sd_state.sio_handle,
		    rcs->sd_state.sio_regs + reg, val);
		ddi_put8(rcs->sd_state.sio_handle,
		    rcs->sd_state.sio_regs + SIO_SCR, val);
		membar_sync();
		(void) ddi_get8(rcs->sd_state.sio_handle,
		    rcs->sd_state.sio_regs + SIO_SCR);
	}
}

static uint8_t
sio_get_reg(struct rmc_comm_state *rcs, uint_t reg)
{
	uint8_t val;

	if (rcs->sd_state.sio_handle && !rcs->sd_state.sio_fault)
		val = ddi_get8(rcs->sd_state.sio_handle,
		    rcs->sd_state.sio_regs + reg);
	else
		val = DUMMY_VALUE;
	DPRINTF(rcs, DSER, (CE_CONT, "$%02x<-REG[%d]", val, reg));
	return (val);
}

static void
sio_check_fault_status(struct rmc_comm_state *rcs)
{
	rcs->sd_state.sio_fault =
	    ddi_check_acc_handle(rcs->sd_state.sio_handle) != DDI_SUCCESS;
}

boolean_t
rmc_comm_faulty(struct rmc_comm_state *rcs)
{
	if (!rcs->sd_state.sio_fault)
		sio_check_fault_status(rcs);
	return (rcs->sd_state.sio_fault);
}

/*
 * Check for data ready.
 */
static boolean_t
sio_data_ready(struct rmc_comm_state *rcs)
{
	uint8_t status;

	/*
	 * Data is available if the RXDA bit in the LSR is nonzero
	 * (if reading it didn't incur a fault).
	 */
	status = sio_get_reg(rcs, SIO_LSR);
	return ((status & SIO_LSR_RXDA) != 0 && !rmc_comm_faulty(rcs));
}

/*
 * Enable/disable interrupts
 */
static void
rmc_comm_set_irq(struct rmc_comm_state *rcs, boolean_t newstate)
{
	uint8_t val;

	val = newstate ? SIO_IER_RXHDL_IE : 0;
	sio_put_reg(rcs, SIO_IER, SIO_IER_STD | val);
	rcs->sd_state.hw_int_enabled = newstate;
}

/*
 * High-level interrupt handler:
 *	Checks whether initialisation is complete (to avoid a race
 *	with mutex_init()), and whether chip interrupts are enabled.
 *	If not, the interrupt's not for us, so just return UNCLAIMED.
 *	Otherwise, disable the interrupt, trigger a softint, and return
 *	CLAIMED.  The softint handler will then do all the real work.
 *
 *	NOTE: the chip interrupt capability is only re-enabled once the
 *	receive code has run, but that can be called from a poll loop
 *	or cyclic callback as well as from the softint.  So it's *not*
 *	guaranteed that there really is a chip interrupt pending here,
 *	because the work may already have been done and the reason for
 *	the interrupt may have gone away before we get here.
 *
 *	On the other hand, if we come through here twice without the
 *	receive code having run in between, that's definitely wrong.
 *	In such an event, we would notice that chip interrupts haven't
 *	yet been re-enabled and return UNCLAIMED, allowing the system's
 *	jabber protect code (if any) to do its job.
 */
static uint_t
rmc_comm_hi_intr(caddr_t arg)
{
	struct rmc_comm_state *rcs = (void *)arg;
	uint_t claim;

	claim = DDI_INTR_UNCLAIMED;
	if (rcs->sd_state.cycid != NULL) {
		/*
		 * Handle the case where this interrupt fires during
		 * panic processing.  If that occurs, then a thread
		 * in rmc_comm might have been idled while holding
		 * hw_mutex.  If so, that thread will never make
		 * progress, and so we do not want to unconditionally
		 * grab hw_mutex.
		 */
		if (ddi_in_panic() != 0) {
			if (mutex_tryenter(rcs->sd_state.hw_mutex) == 0) {
				return (claim);
			}
		} else {
			mutex_enter(rcs->sd_state.hw_mutex);
		}
		if (rcs->sd_state.hw_int_enabled) {
			rmc_comm_set_irq(rcs, B_FALSE);
			ddi_trigger_softintr(rcs->sd_state.softid);
			claim = DDI_INTR_CLAIMED;
		}
		mutex_exit(rcs->sd_state.hw_mutex);
	}
	return (claim);
}

/*
 * Packet receive handler
 *
 * This routine should be called from the low-level softint, or the
 * cyclic callback, or rmc_comm_cmd() (for polled operation), with the
 * low-level mutex already held.
 */
void
rmc_comm_serdev_receive(struct rmc_comm_state *rcs)
{
	uint8_t data;

	DPRINTF(rcs, DSER, (CE_CONT, "serdev_receive: soft int handler\n"));

	/*
	 * Check for access faults before starting the receive
	 * loop (we don't want to cause bus errors or suchlike
	 * unpleasantness in the event that the SIO has died).
	 */
	if (!rmc_comm_faulty(rcs)) {

		char *rx_buf = rcs->sd_state.serdev_rx_buf;
		uint16_t rx_buflen = 0;

		/*
		 * Read bytes from the FIFO until it's empty or our
		 * buffer fills up (which would indicate an error)
		 */

		/*
		 * At the moment, the receive buffer is overwritten each
		 * time data is received from the serial device.  This
		 * should not pose a problem, as the data protocol is
		 * half-duplex; otherwise, a circular buffer would have
		 * to be implemented.
		 */
		mutex_enter(rcs->sd_state.hw_mutex);
		while (sio_data_ready(rcs)) {
			data = sio_get_reg(rcs, SIO_RXD);
			rx_buf[rx_buflen++] = data;
			if (rx_buflen >= SIO_MAX_RXBUF_SIZE)
				break;
		}
		rcs->sd_state.serdev_rx_count = rx_buflen;

		DATASCOPE(rcs, 'R', rx_buf, rx_buflen)

		rmc_comm_set_irq(rcs, B_TRUE);
		mutex_exit(rcs->sd_state.hw_mutex);

		/*
		 * call up the data protocol receive handler
		 */
		rmc_comm_dp_drecv(rcs, (uint8_t *)rx_buf, rx_buflen);
	}
}

/*
 * Low-level softint handler
 *
 * This routine should be triggered whenever there's a byte to be read
 */
static uint_t
rmc_comm_softint(caddr_t arg)
{
	struct rmc_comm_state *rcs = (void *)arg;

	mutex_enter(rcs->dp_state.dp_mutex);
	rmc_comm_serdev_receive(rcs);
	mutex_exit(rcs->dp_state.dp_mutex);
	return (DDI_INTR_CLAIMED);
}

/*
 * Cyclic handler: just calls the receive routine, in case interrupts
 * are not being delivered and in order to handle command timeouts
 */
static void
rmc_comm_cyclic(void *arg)
{
	struct rmc_comm_state *rcs = (void *)arg;

	mutex_enter(rcs->dp_state.dp_mutex);
	rmc_comm_serdev_receive(rcs);
	mutex_exit(rcs->dp_state.dp_mutex);
}

/*
 * Serial protocol
 *
 * This routine sends a packet by writing it into the SIO's Tx FIFO.
 */
void
rmc_comm_serdev_send(struct rmc_comm_state *rcs, char *buf, int buflen)
{
	uint8_t *p;
	uint8_t status;

	/*
	 * Check and update the SIO h/w fault status before accessing
	 * the chip registers.  If there's a (new or previous) fault,
	 * we'll run through the protocol but won't really touch the
	 * hardware and all commands will time out.  If a previously
	 * discovered fault has now gone away (!), then we can (try to)
	 * proceed with the new command (probably a probe).
	 */
	sio_check_fault_status(rcs);

	/*
	 * Send the command now by stuffing the packet into the Tx FIFO.
	 */
	DATASCOPE(rcs, 'S', buf, buflen)

	mutex_enter(rcs->sd_state.hw_mutex);
	p = (uint8_t *)buf;
	while (p < (uint8_t *)&buf[buflen]) {

		/*
		 * Before writing to the Tx holding register, make sure
		 * that it is empty.  That way there is no chance of
		 * overflowing the serial device's FIFO (though it may
		 * introduce some latency).
		 */
		status = sio_get_reg(rcs, SIO_LSR);
		while ((status & SIO_LSR_XHRE) == 0) {
			drv_usecwait(100);
			status = sio_get_reg(rcs, SIO_LSR);
		}
		sio_put_reg(rcs, SIO_TXD, *p++);
	}
	mutex_exit(rcs->sd_state.hw_mutex);
}

/*
 * wait for the tx fifo to drain - used for urgent nowait requests
 */
void
rmc_comm_serdev_drain(struct rmc_comm_state *rcs)
{
	uint8_t status;

	mutex_enter(rcs->sd_state.hw_mutex);
	status = sio_get_reg(rcs, SIO_LSR);
	while ((status & SIO_LSR_XHRE) == 0) {
		drv_usecwait(100);
		status = sio_get_reg(rcs, SIO_LSR);
	}
	mutex_exit(rcs->sd_state.hw_mutex);
}

/*
 * Hardware setup - put the SIO chip in the required operational
 * state, with all our favourite parameters programmed correctly.
 * This routine leaves all SIO interrupts disabled.
 */

static void
rmc_comm_hw_reset(struct rmc_comm_state *rcs)
{
	uint16_t divisor;

	/*
	 * Disable interrupts, soft reset Tx and Rx circuitry,
	 * reselect standard modes (bits/char, parity, etc).
	 */
	rmc_comm_set_irq(rcs, B_FALSE);
	sio_put_reg(rcs, SIO_FCR, SIO_FCR_RXSR | SIO_FCR_TXSR);
	sio_put_reg(rcs, SIO_LCR, SIO_LCR_STD);

	/*
	 * Select the proper baud rate; if the value is invalid
	 * (presumably 0, i.e. not specified, but also if the
	 * "baud-rate" property is set to some silly value), we
	 * assume the default.
	 */
	if (rcs->baud < SIO_BAUD_MIN || rcs->baud > SIO_BAUD_MAX) {
		divisor = SIO_BAUD_TO_DIVISOR(SIO_BAUD_DEFAULT) *
		    rcs->baud_divisor_factor;
	} else {
		divisor = SIO_BAUD_TO_DIVISOR(rcs->baud) *
		    rcs->baud_divisor_factor;
	}

	/*
	 * According to the datasheet, it is forbidden for the divisor
	 * register to be zero.  So when loading the register in two
	 * steps, we have to make sure that the temporary value formed
	 * between loads is nonzero.  However, we can't rely on either
	 * half already having a nonzero value, as the datasheet also
	 * says that these registers are indeterminate after a reset!
	 * So, we explicitly set the low byte to a non-zero value first;
	 * then we can safely load the high byte, and then the correct
	 * value for the low byte, without the result ever being zero.
	 */
	sio_put_reg(rcs, SIO_BSR, SIO_BSR_BANK1);
	sio_put_reg(rcs, SIO_LBGDL, 0xff);
	sio_put_reg(rcs, SIO_LBGDH, divisor >> 8);
	sio_put_reg(rcs, SIO_LBGDL, divisor & 0xff);
	sio_put_reg(rcs, SIO_BSR, SIO_BSR_BANK0);

	/*
	 * Program the remaining device registers as required
	 */
	sio_put_reg(rcs, SIO_MCR, SIO_MCR_STD);
	sio_put_reg(rcs, SIO_FCR, SIO_FCR_STD);
}
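
/*
 * Background note on the divisor (illustrative, not taken from this file):
 * for a conventional 16550-style UART the baud-rate generator divisor is
 *
 *	divisor = uart_clock / (16 * baud)
 *
 * The exact mapping used here is whatever SIO_BAUD_TO_DIVISOR defines in
 * the rmc_comm headers; the "baud-divisor-factor" property (read in
 * rmc_comm_attach() below) simply scales that result for platforms whose
 * UART clock differs from the standard one.
 */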

/*
 * Higher-level setup & teardown
 */
static void
rmc_comm_offline(struct rmc_comm_state *rcs)
{
	if (rcs->sd_state.sio_handle != NULL)
		ddi_regs_map_free(&rcs->sd_state.sio_handle);
	rcs->sd_state.sio_handle = NULL;
	rcs->sd_state.sio_regs = NULL;
}

static int
rmc_comm_online(struct rmc_comm_state *rcs, dev_info_t *dip)
{
	ddi_acc_handle_t h;
	caddr_t p;
	int nregs;
	int err;

	if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS)
		nregs = 0;
	switch (nregs) {
	default:
	case 1:
		/*
		 *  regset 0 represents the SIO operating registers
		 */
		err = ddi_regs_map_setup(dip, 0, &p, 0, 0,
		    rmc_comm_dev_acc_attr, &h);
		if (err != DDI_SUCCESS)
			return (EIO);
		rcs->sd_state.sio_handle = h;
		rcs->sd_state.sio_regs = (void *)p;
		break;
	case 0:
		/*
		 *  If no registers are defined, succeed vacuously;
		 *  commands will be accepted, but we fake the accesses.
		 */
		break;
	}

	/*
	 * Now that the registers are mapped, we can initialise the SIO h/w
	 */
	rmc_comm_hw_reset(rcs);
	return (0);
}


/*
 * Initialization of the serial device (data structure, mutex, cv, hardware
 * and so on). It is called from the attach routine.
 */

int
rmc_comm_serdev_init(struct rmc_comm_state *rcs, dev_info_t *dip)
{
	int err = DDI_SUCCESS;

	rcs->sd_state.cycid = NULL;

	/*
	 *  Online the hardware ...
	 */
	err = rmc_comm_online(rcs, dip);
	if (err != 0)
		return (-1);

	/*
	 * call ddi_get_soft_iblock_cookie() to retrieve the interrupt
	 * block cookie so that the mutexes are initialized before adding
	 * the interrupt (to avoid a potential race condition).
	 */

	err = ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_LOW,
	    &rcs->dp_state.dp_iblk);
	if (err != DDI_SUCCESS)
		return (-1);

	err = ddi_get_iblock_cookie(dip, 0, &rcs->sd_state.hw_iblk);
	if (err != DDI_SUCCESS)
		return (-1);

	/*
	 * initialize mutexes here before adding hw/sw interrupt handlers
	 */
	mutex_init(rcs->dp_state.dp_mutex, NULL, MUTEX_DRIVER,
	    rcs->dp_state.dp_iblk);

	mutex_init(rcs->sd_state.hw_mutex, NULL, MUTEX_DRIVER,
	    rcs->sd_state.hw_iblk);

	/*
	 * Install soft and hard interrupt handler(s)
	 *
	 * the soft intr. handler will need the data protocol lock (dp_mutex),
	 * so the data protocol mutex and iblock cookie are created/initialized
	 * here
	 */

	err = ddi_add_softintr(dip, DDI_SOFTINT_LOW, &rcs->sd_state.softid,
	    &rcs->dp_state.dp_iblk, NULL, rmc_comm_softint, (caddr_t)rcs);
	if (err != DDI_SUCCESS) {
		mutex_destroy(rcs->dp_state.dp_mutex);
		mutex_destroy(rcs->sd_state.hw_mutex);
		return (-1);
	}

	/*
	 * hardware interrupt
	 */

	if (rcs->sd_state.sio_handle != NULL) {
		err = ddi_add_intr(dip, 0, &rcs->sd_state.hw_iblk, NULL,
		    rmc_comm_hi_intr, (caddr_t)rcs);

		/*
		 * did we successfully install the h/w interrupt handler?
		 */
		if (err != DDI_SUCCESS) {
			ddi_remove_softintr(rcs->sd_state.softid);
			mutex_destroy(rcs->dp_state.dp_mutex);
			mutex_destroy(rcs->sd_state.hw_mutex);
			return (-1);
		}
	}

	/*
	 * Start periodic callbacks
	 */
	rcs->sd_state.cycid = ddi_periodic_add(rmc_comm_cyclic, rcs,
	    5 * RMC_COMM_ONE_SEC, DDI_IPL_1);
	return (0);
}

/*
 * Termination of the serial device (data structure, mutex, cv, hardware
 * and so on). It is called from the detach routine.
 */

void
rmc_comm_serdev_fini(struct rmc_comm_state *rcs, dev_info_t *dip)
{
	rmc_comm_hw_reset(rcs);

	if (rcs->sd_state.cycid != NULL) {
		ddi_periodic_delete(rcs->sd_state.cycid);
		rcs->sd_state.cycid = NULL;

		if (rcs->sd_state.sio_handle != NULL)
			ddi_remove_intr(dip, 0, rcs->sd_state.hw_iblk);

		ddi_remove_softintr(rcs->sd_state.softid);

		mutex_destroy(rcs->sd_state.hw_mutex);

		mutex_destroy(rcs->dp_state.dp_mutex);
	}
	rmc_comm_offline(rcs);
}

/*
 * device driver entry routines (init/fini, attach/detach, ...)
 */

/*
 *  Clean up on detach or failure of attach
 */
static void
rmc_comm_unattach(struct rmc_comm_state *rcs, dev_info_t *dip, int instance,
    boolean_t drvi_init, boolean_t dp_init, boolean_t sd_init)
{
	if (rcs != NULL) {
		/*
		 * disable interrupts now
		 */
		rmc_comm_set_irq(rcs, B_FALSE);

		/*
		 * driver interface termination (if it has been initialized)
		 */
		if (drvi_init)
			rmc_comm_drvintf_fini(rcs);

		/*
		 * data protocol termination (if it has been initialized)
		 */
		if (dp_init)
			rmc_comm_dp_fini(rcs);

		/*
		 * serial device termination (if it has been initialized)
		 */
		if (sd_init)
			rmc_comm_serdev_fini(rcs, dip);

		ddi_set_driver_private(dip, NULL);
	}
	ddi_soft_state_free(rmc_comm_statep, instance);
}

/*
 *  Autoconfiguration routines
 */

static int
rmc_comm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct rmc_comm_state *rcs = NULL;
	sig_state_t *current_sgn_p;
	int instance;

	/*
	 * only allow one instance
	 */
	instance = ddi_get_instance(dip);
	if (instance != 0)
		return (DDI_FAILURE);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		if ((rcs = rmc_comm_getstate(dip, instance,
		    "rmc_comm_attach")) == NULL)
			return (DDI_FAILURE);	/* this "can't happen" */

		rmc_comm_hw_reset(rcs);
		rmc_comm_set_irq(rcs, B_TRUE);
		rcs->dip = dip;

		mutex_enter(&tod_lock);
		if (watchdog_enable && tod_ops.tod_set_watchdog_timer != NULL &&
		    watchdog_was_active) {
			(void) tod_ops.tod_set_watchdog_timer(0);
		}
		mutex_exit(&tod_lock);

		mutex_enter(rcs->dp_state.dp_mutex);
		dp_reset(rcs, INITIAL_SEQID, 1, 1);
		mutex_exit(rcs->dp_state.dp_mutex);

		current_sgn_p = (sig_state_t *)modgetsymvalue(
		    "current_sgn", 0);
		if ((current_sgn_p != NULL) &&
		    (current_sgn_p->state_t.sig != 0)) {
			CPU_SIGNATURE(current_sgn_p->state_t.sig,
			    current_sgn_p->state_t.state,
			    current_sgn_p->state_t.sub_state, -1);
		}
		return (DDI_SUCCESS);

	case DDI_ATTACH:
		break;
	}

	/*
	 *  Allocate the soft-state structure
	 */
	if (ddi_soft_state_zalloc(rmc_comm_statep, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	if ((rcs = rmc_comm_getstate(dip, instance, "rmc_comm_attach")) ==
	    NULL) {
		rmc_comm_unattach(rcs, dip, instance, 0, 0, 0);
		return (DDI_FAILURE);
	}
	ddi_set_driver_private(dip, rcs);

	rcs->dip = NULL;

	/*
	 *  Set various options from .conf properties
	 */
	rcs->baud = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "baud-rate", 0);
	rcs->debug = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "debug", 0);

	/*
	 * the baud divisor factor tells us how to scale the result of
	 * the SIO_BAUD_TO_DIVISOR macro for platforms that do not
	 * use the standard 24 MHz UART clock
	 */
	rcs->baud_divisor_factor = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "baud-divisor-factor", SIO_BAUD_DIVISOR_MIN);

	/*
	 * try to be reasonable if the scale factor contains a silly value
	 */
	if ((rcs->baud_divisor_factor < SIO_BAUD_DIVISOR_MIN) ||
	    (rcs->baud_divisor_factor > SIO_BAUD_DIVISOR_MAX))
		rcs->baud_divisor_factor = SIO_BAUD_DIVISOR_MIN;
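
	/*
	 * Illustrative only: these properties would normally come from the
	 * platform's rmc_comm.conf or the device node's own properties.  A
	 * hypothetical .conf fragment overriding the defaults read above
	 * might look like:
	 *
	 *	baud-rate=115200;
	 *	baud-divisor-factor=1;
	 *	debug=0;
	 *
	 * The values shown are placeholders; anything outside the
	 * SIO_BAUD_MIN..SIO_BAUD_MAX or SIO_BAUD_DIVISOR_MIN..
	 * SIO_BAUD_DIVISOR_MAX ranges is replaced with the defaults by the
	 * checks above and in rmc_comm_hw_reset().
	 */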

	/*
	 * initialize serial device
	 */
	if (rmc_comm_serdev_init(rcs, dip) != 0) {
		rmc_comm_unattach(rcs, dip, instance, 0, 0, 0);
		return (DDI_FAILURE);
	}

	/*
	 * initialize data protocol
	 */
	rmc_comm_dp_init(rcs);

	/*
	 * initialize driver interface
	 */
	if (rmc_comm_drvintf_init(rcs) != 0) {
		rmc_comm_unattach(rcs, dip, instance, 0, 1, 1);
		return (DDI_FAILURE);
	}

	/*
	 *  Initialise devinfo-related fields
	 */
	rcs->majornum = ddi_driver_major(dip);
	rcs->instance = instance;
	rcs->dip = dip;

	/*
	 * enable interrupts now
	 */
	rmc_comm_set_irq(rcs, B_TRUE);

	/*
	 *  All done, report success
	 */
	ddi_report_dev(dip);
	mutex_enter(&rmc_comm_attach_lock);
	rcs->is_attached = B_TRUE;
	mutex_exit(&rmc_comm_attach_lock);
	return (DDI_SUCCESS);
}

static int
rmc_comm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct rmc_comm_state *rcs;
	int instance;

	instance = ddi_get_instance(dip);
	if ((rcs = rmc_comm_getstate(dip, instance, "rmc_comm_detach")) == NULL)
		return (DDI_FAILURE);	/* this "can't happen" */

	switch (cmd) {
	case DDI_SUSPEND:
		mutex_enter(&tod_lock);
		if (watchdog_enable && watchdog_activated &&
		    tod_ops.tod_clear_watchdog_timer != NULL) {
			watchdog_was_active = 1;
			(void) tod_ops.tod_clear_watchdog_timer();
		} else {
			watchdog_was_active = 0;
		}
		mutex_exit(&tod_lock);

		rcs->dip = NULL;
		rmc_comm_hw_reset(rcs);

		return (DDI_SUCCESS);

	case DDI_DETACH:
		/*
		 * reject detach if any client(s) still registered
		 */
		mutex_enter(&rmc_comm_attach_lock);
		if (rcs->n_registrations != 0) {
			mutex_exit(&rmc_comm_attach_lock);
			return (DDI_FAILURE);
		}
		/*
		 * Committed to complete the detach;
		 * mark as no longer attached, to prevent new clients
		 * registering (as part of a coincident attach)
		 */
		rcs->is_attached = B_FALSE;
		mutex_exit(&rmc_comm_attach_lock);
		rmc_comm_unattach(rcs, dip, instance, 1, 1, 1);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}

/*ARGSUSED*/
static int
rmc_comm_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
{
	struct rmc_comm_state *rcs;

	if ((rcs = rmc_comm_getstate(dip, -1, "rmc_comm_reset")) == NULL)
		return (DDI_FAILURE);
	rmc_comm_hw_reset(rcs);
	return (DDI_SUCCESS);
}

/*
 * System interface structures
 */
static struct dev_ops rmc_comm_dev_ops =
{
	DEVO_REV,
	0,				/* refcount		*/
	nodev,				/* getinfo		*/
	nulldev,			/* identify		*/
	nulldev,			/* probe		*/
	rmc_comm_attach,		/* attach		*/
	rmc_comm_detach,		/* detach		*/
	rmc_comm_reset,			/* reset		*/
	(struct cb_ops *)NULL,		/* driver operations	*/
	(struct bus_ops *)NULL,		/* bus operations	*/
	nulldev,			/* power()		*/
	ddi_quiesce_not_supported,	/* devo_quiesce		*/
};

static struct modldrv modldrv =
{
	&mod_driverops,
	"rmc_comm driver",
	&rmc_comm_dev_ops
};

static struct modlinkage modlinkage =
{
	MODREV_1,
	{
		&modldrv,
		NULL
	}
};

/*
 *  Dynamic loader interface code
 */
int
_init(void)
{
	int err;

	mutex_init(&rmc_comm_attach_lock, NULL, MUTEX_DRIVER, NULL);
	err = ddi_soft_state_init(&rmc_comm_statep,
	    sizeof (struct rmc_comm_state), 0);
	if (err == DDI_SUCCESS)
		if ((err = mod_install(&modlinkage)) != 0) {
			ddi_soft_state_fini(&rmc_comm_statep);
		}
	if (err != DDI_SUCCESS)
		mutex_destroy(&rmc_comm_attach_lock);
	return (err);
}

int
_info(struct modinfo *mip)
{
	return (mod_info(&modlinkage, mip));
}

int
_fini(void)
{
	int err;

	if ((err = mod_remove(&modlinkage)) == 0) {
		ddi_soft_state_fini(&rmc_comm_statep);
		rmc_comm_major = NOMAJOR;
		mutex_destroy(&rmc_comm_attach_lock);
	}
	return (err);
}