/* pcicmu.c revision 7656:2621e50fdf4a */
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 */
25
26
27/*
28 * OPL CMU-CH PCI nexus driver.
29 *
30 */
31
32#include <sys/types.h>
33#include <sys/sysmacros.h>
34#include <sys/systm.h>
35#include <sys/intreg.h>
36#include <sys/intr.h>
37#include <sys/machsystm.h>
38#include <sys/conf.h>
39#include <sys/stat.h>
40#include <sys/kmem.h>
41#include <sys/async.h>
42#include <sys/ivintr.h>
43#include <sys/sunddi.h>
44#include <sys/sunndi.h>
45#include <sys/ndifm.h>
46#include <sys/ontrap.h>
47#include <sys/ddi_impldefs.h>
48#include <sys/ddi_subrdefs.h>
49#include <sys/epm.h>
50#include <sys/spl.h>
51#include <sys/fm/util.h>
52#include <sys/fm/util.h>
53#include <sys/fm/protocol.h>
54#include <sys/fm/io/pci.h>
55#include <sys/fm/io/sun4upci.h>
56#include <sys/pcicmu/pcicmu.h>
57
58#include <sys/cmn_err.h>
59#include <sys/time.h>
60#include <sys/pci.h>
61#include <sys/modctl.h>
62#include <sys/open.h>
63#include <sys/errno.h>
64#include <sys/file.h>
65
66
/*
 * NOTE(review): presumably the time (in microseconds) a blocked
 * "spurious" (jabbering) interrupt stays disabled before re-enable --
 * confirm against the pcmu_ib interrupt-block code.
 */
uint32_t pcmu_spurintr_duration = 60000000; /* One minute */

/*
 * The variable controls the default setting of the command register
 * for pci devices.  See pcmu_init_child() for details.
 *
 * This flags also controls the setting of bits in the bridge control
 * register pci to pci bridges.  See pcmu_init_child() for details.
 */
ushort_t pcmu_command_default = PCI_COMM_SERR_ENABLE |
				PCI_COMM_WAIT_CYC_ENAB |
				PCI_COMM_PARITY_DETECT |
				PCI_COMM_ME |
				PCI_COMM_MAE |
				PCI_COMM_IO;
/*
 * The following driver parameters are defined as variables to allow
 * patching for debugging and tuning.  Flags that can be set on a per
 * PBM basis are bit fields where the PBM device instance number maps
 * to the bit position.
 */
#ifdef DEBUG
uint64_t pcmu_debug_flags = 0;		/* PCMU_DBG_* trace mask */
#endif
uint_t ecc_error_intr_enable = 1;	/* patchable: ECC error interrupt */

uint_t pcmu_ecc_afsr_retries = 100;	/* XXX - what's a good value? */

uint_t pcmu_intr_retry_intv = 5;	/* for interrupt retry reg */
uint_t pcmu_panic_on_fatal_errors = 1;	/* should be 1 at beta */

hrtime_t pcmu_intrpend_timeout = 5ll * NANOSEC;	/* 5 seconds in nanoseconds */

/*
 * NOTE(review): error-trigger physical address; 0 appears to mean
 * "unused" -- confirm in the error-handling code that consumes it.
 */
uint64_t pcmu_errtrig_pa = 0x0;


/*
 * The following value is the number of consecutive unclaimed interrupts that
 * will be tolerated for a particular ino_p before the interrupt is deemed to
 * be jabbering and is blocked.
 */
uint_t pcmu_unclaimed_intr_max = 20;
109
/*
 * function prototypes for dev ops routines:
 */
static int pcmu_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int pcmu_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int pcmu_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
    void *arg, void **result);
/* cb_ops (character device) entry points for the devctl node */
static int pcmu_open(dev_t *devp, int flags, int otyp, cred_t *credp);
static int pcmu_close(dev_t dev, int flags, int otyp, cred_t *credp);
static int pcmu_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
						cred_t *credp, int *rvalp);
static int pcmu_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int flags, char *name, caddr_t valuep, int *lengthp);
/* DDI_CTLOPS_POKE / DDI_CTLOPS_PEEK helpers */
static int pcmu_ctlops_poke(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args);
static int pcmu_ctlops_peek(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args,
    void *result);

/* register mapping and PBM error helpers */
static int map_pcmu_registers(pcmu_t *, dev_info_t *);
static void unmap_pcmu_registers(pcmu_t *);
static void pcmu_pbm_clear_error(pcmu_pbm_t *);

/* bus_ops entry points */
static int pcmu_ctlops(dev_info_t *, dev_info_t *, ddi_ctl_enum_t,
    void *, void *);
static int pcmu_map(dev_info_t *, dev_info_t *, ddi_map_req_t *,
    off_t, off_t, caddr_t *);
static int pcmu_intr_ops(dev_info_t *, dev_info_t *, ddi_intr_op_t,
    ddi_intr_handle_impl_t *, void *);

/* attach/detach-time object setup and teardown */
static uint32_t pcmu_identity_init(pcmu_t *pcmu_p);
static int pcmu_intr_setup(pcmu_t *pcmu_p);
static void pcmu_pbm_errstate_get(pcmu_t *pcmu_p,
    pcmu_pbm_errstate_t *pbm_err_p);
static int pcmu_obj_setup(pcmu_t *pcmu_p);
static void pcmu_obj_destroy(pcmu_t *pcmu_p);
static void pcmu_obj_resume(pcmu_t *pcmu_p);
static void pcmu_obj_suspend(pcmu_t *pcmu_p);

/* U2U interrupt-translation (ittrans) management */
static void u2u_ittrans_init(pcmu_t *, u2u_ittrans_data_t **);
static void u2u_ittrans_resume(u2u_ittrans_data_t **);
static void u2u_ittrans_uninit(u2u_ittrans_data_t *);

/* shared "name" kstat info, created once in _init() */
static pcmu_ksinfo_t	*pcmu_name_kstat;
152
/*
 * bus ops and dev ops structures:
 *
 * Zero-filled slots are bus_ops entries this nexus does not implement
 * (NOTE(review): slot meanings follow struct bus_ops layout in
 * sunddi.h -- confirm against the header when modifying).
 */
static struct bus_ops pcmu_bus_ops = {
	BUSO_REV,
	pcmu_map,
	0,
	0,
	0,
	i_ddi_map_fault,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	pcmu_ctlops,
	ddi_bus_prop_op,
	ndi_busop_get_eventcookie,	/* (*bus_get_eventcookie)(); */
	ndi_busop_add_eventcall,	/* (*bus_add_eventcall)(); */
	ndi_busop_remove_eventcall,	/* (*bus_remove_eventcall)(); */
	ndi_post_event,			/* (*bus_post_event)(); */
	NULL,				/* (*bus_intr_ctl)(); */
	NULL,				/* (*bus_config)(); */
	NULL,				/* (*bus_unconfig)(); */
	NULL,				/* (*bus_fm_init)(); */
	NULL,				/* (*bus_fm_fini)(); */
	NULL,				/* (*bus_fm_access_enter)(); */
	NULL,				/* (*bus_fm_access_fini)(); */
	NULL,				/* (*bus_power)(); */
	pcmu_intr_ops			/* (*bus_intr_op)(); */
};

/* character-device entry points for the "devctl" minor node */
struct cb_ops pcmu_cb_ops = {
	pcmu_open,			/* open */
	pcmu_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	pcmu_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* poll */
	pcmu_prop_op,			/* cb_prop_op */
	NULL,				/* streamtab */
	D_NEW | D_MP | D_HOTPLUG,	/* Driver compatibility flag */
	CB_REV,				/* rev */
	nodev,				/* int (*cb_aread)() */
	nodev				/* int (*cb_awrite)() */
};

static struct dev_ops pcmu_ops = {
	DEVO_REV,
	0,
	pcmu_info,
	nulldev,
	0,
	pcmu_attach,
	pcmu_detach,
	nodev,
	&pcmu_cb_ops,
	&pcmu_bus_ops,
	0,
	ddi_quiesce_not_needed,		/* quiesce */

};

/*
 * module definitions:
 */
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,				/* Type of module - driver */
	"OPL CMU-CH PCI Nexus driver",	/* Name of module. */
	&pcmu_ops,				/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/*
 * driver global data:
 */
void *per_pcmu_state;			/* per-pbm soft state pointer */
kmutex_t pcmu_global_mutex;		/* attach/detach common struct lock */
errorq_t *pcmu_ecc_queue = NULL;	/* per-system ecc handling queue */

/* child config-space save/restore, implemented in a sibling file */
extern void pcmu_child_cfg_save(dev_info_t *dip);
extern void pcmu_child_cfg_restore(dev_info_t *dip);
249
250int
251_init(void)
252{
253	int e;
254
255	/*
256	 * Initialize per-pci bus soft state pointer.
257	 */
258	e = ddi_soft_state_init(&per_pcmu_state, sizeof (pcmu_t), 1);
259	if (e != 0)
260		return (e);
261
262	/*
263	 * Initialize global mutexes.
264	 */
265	mutex_init(&pcmu_global_mutex, NULL, MUTEX_DRIVER, NULL);
266
267	/*
268	 * Create the performance kstats.
269	 */
270	pcmu_kstat_init();
271
272	/*
273	 * Install the module.
274	 */
275	e = mod_install(&modlinkage);
276	if (e != 0) {
277		ddi_soft_state_fini(&per_pcmu_state);
278		mutex_destroy(&pcmu_global_mutex);
279	}
280	return (e);
281}
282
283int
284_fini(void)
285{
286	int e;
287
288	/*
289	 * Remove the module.
290	 */
291	e = mod_remove(&modlinkage);
292	if (e != 0) {
293		return (e);
294	}
295
296	/*
297	 * Destroy pcmu_ecc_queue, and set it to NULL.
298	 */
299	if (pcmu_ecc_queue) {
300		errorq_destroy(pcmu_ecc_queue);
301		pcmu_ecc_queue = NULL;
302	}
303
304	/*
305	 * Destroy the performance kstats.
306	 */
307	pcmu_kstat_fini();
308
309	/*
310	 * Free the per-pci and per-CMU-CH soft state info and destroy
311	 * mutex for per-CMU-CH soft state.
312	 */
313	ddi_soft_state_fini(&per_pcmu_state);
314	mutex_destroy(&pcmu_global_mutex);
315	return (e);
316}
317
318int
319_info(struct modinfo *modinfop)
320{
321	return (mod_info(&modlinkage, modinfop));
322}
323
324/*ARGSUSED*/
325static int
326pcmu_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
327{
328	int	instance = getminor((dev_t)arg) >> 8;
329	pcmu_t	*pcmu_p = get_pcmu_soft_state(instance);
330
331	switch (infocmd) {
332	case DDI_INFO_DEVT2INSTANCE:
333		*result = (void *)(uintptr_t)instance;
334		return (DDI_SUCCESS);
335
336	case DDI_INFO_DEVT2DEVINFO:
337		if (pcmu_p == NULL)
338			return (DDI_FAILURE);
339		*result = (void *)pcmu_p->pcmu_dip;
340		return (DDI_SUCCESS);
341
342	default:
343		return (DDI_FAILURE);
344	}
345}
346
347
348/* device driver entry points */
349/*
350 * attach entry point:
351 */
/*
 * attach(9E) handler.
 *
 * DDI_ATTACH: allocate soft state, read key properties, map the three
 * register sets, build the internal objects (ib/cb/ecc/pbm/err) and
 * create the "devctl" minor node.  The error labels unwind strictly in
 * reverse order of setup; fall-through between labels is intentional.
 *
 * DDI_RESUME: reprogram hardware state for a previously suspended
 * instance and restore child config space.
 */
static int
pcmu_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	pcmu_t *pcmu_p;
	int instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		PCMU_DBG0(PCMU_DBG_ATTACH, dip, "DDI_ATTACH\n");

		/*
		 * Allocate and get the per-pci soft state structure.
		 */
		if (alloc_pcmu_soft_state(instance) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: can't allocate pci state",
			    ddi_driver_name(dip), instance);
			goto err_bad_pcmu_softstate;
		}
		pcmu_p = get_pcmu_soft_state(instance);
		pcmu_p->pcmu_dip = dip;
		mutex_init(&pcmu_p->pcmu_mutex, NULL, MUTEX_DRIVER, NULL);
		pcmu_p->pcmu_soft_state = PCMU_SOFT_STATE_CLOSED;
		pcmu_p->pcmu_open_count = 0;

		/*
		 * Get key properties of the pci bridge node.
		 */
		if (get_pcmu_properties(pcmu_p, dip) == DDI_FAILURE) {
			goto err_bad_pcmu_prop;
		}

		/*
		 * Map in the registers.
		 */
		if (map_pcmu_registers(pcmu_p, dip) == DDI_FAILURE) {
			goto err_bad_reg_prop;
		}
		if (pcmu_obj_setup(pcmu_p) != DDI_SUCCESS) {
			goto err_bad_objs;
		}

		/*
		 * Minor number layout (instance << 8 | 0xff) matches the
		 * ">> 8" decode in pcmu_info() above.
		 */
		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
		    (uint_t)instance<<8 | 0xff,
		    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
			goto err_bad_devctl_node;
		}

		/*
		 * Due to unresolved hardware issues, disable PCIPM until
		 * the problem is fully understood.
		 *
		 * pcmu_pwr_setup(pcmu_p, dip);
		 */

		ddi_report_dev(dip);

		pcmu_p->pcmu_state = PCMU_ATTACHED;
		PCMU_DBG0(PCMU_DBG_ATTACH, dip, "attach success\n");
		break;

		/* error unwind: each label falls through to the next */
err_bad_objs:
		ddi_remove_minor_node(dip, "devctl");
err_bad_devctl_node:
		unmap_pcmu_registers(pcmu_p);
err_bad_reg_prop:
		free_pcmu_properties(pcmu_p);
err_bad_pcmu_prop:
		mutex_destroy(&pcmu_p->pcmu_mutex);
		free_pcmu_soft_state(instance);
err_bad_pcmu_softstate:
		return (DDI_FAILURE);

	case DDI_RESUME:
		PCMU_DBG0(PCMU_DBG_ATTACH, dip, "DDI_RESUME\n");

		/*
		 * Make sure the CMU-CH control registers
		 * are configured properly.
		 */
		pcmu_p = get_pcmu_soft_state(instance);
		mutex_enter(&pcmu_p->pcmu_mutex);

		/*
		 * Make sure this instance has been suspended.
		 */
		if (pcmu_p->pcmu_state != PCMU_SUSPENDED) {
			PCMU_DBG0(PCMU_DBG_ATTACH, dip,
			    "instance NOT suspended\n");
			mutex_exit(&pcmu_p->pcmu_mutex);
			return (DDI_FAILURE);
		}
		pcmu_obj_resume(pcmu_p);
		pcmu_p->pcmu_state = PCMU_ATTACHED;

		pcmu_child_cfg_restore(dip);

		mutex_exit(&pcmu_p->pcmu_mutex);
		break;

	default:
		PCMU_DBG0(PCMU_DBG_ATTACH, dip, "unsupported attach op\n");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
458
459/*
460 * detach entry point:
461 */
/*
 * detach(9E) handler.
 *
 * DDI_DETACH: destroy the internal objects, then release properties,
 * register mappings, the per-instance mutex and soft state, and remove
 * the "interrupt-priorities" property if this driver created one.
 * Note the per-instance mutex is exited and destroyed before the soft
 * state is freed; only dip is referenced afterwards.
 *
 * DDI_SUSPEND: save child config space and quiesce the objects.
 */
static int
pcmu_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance = ddi_get_instance(dip);
	pcmu_t *pcmu_p = get_pcmu_soft_state(instance);
	int len;	/* only used to probe for property existence */

	/*
	 * Make sure we are currently attached
	 */
	if (pcmu_p->pcmu_state != PCMU_ATTACHED) {
		PCMU_DBG0(PCMU_DBG_ATTACH, dip,
		    "failed - instance not attached\n");
		return (DDI_FAILURE);
	}

	mutex_enter(&pcmu_p->pcmu_mutex);

	switch (cmd) {
	case DDI_DETACH:
		PCMU_DBG0(PCMU_DBG_DETACH, dip, "DDI_DETACH\n");
		pcmu_obj_destroy(pcmu_p);

		/*
		 * Free the pci soft state structure and the rest of the
		 * resources it's using.
		 */
		free_pcmu_properties(pcmu_p);
		unmap_pcmu_registers(pcmu_p);
		mutex_exit(&pcmu_p->pcmu_mutex);
		mutex_destroy(&pcmu_p->pcmu_mutex);
		free_pcmu_soft_state(instance);

		/* Free the interrupt-priorities prop if we created it. */
		if (ddi_getproplen(DDI_DEV_T_ANY, dip,
		    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
		    "interrupt-priorities", &len) == DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
			    "interrupt-priorities");
		}
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		pcmu_child_cfg_save(dip);
		pcmu_obj_suspend(pcmu_p);
		pcmu_p->pcmu_state = PCMU_SUSPENDED;

		mutex_exit(&pcmu_p->pcmu_mutex);
		return (DDI_SUCCESS);

	default:
		PCMU_DBG0(PCMU_DBG_DETACH, dip, "unsupported detach op\n");
		mutex_exit(&pcmu_p->pcmu_mutex);
		return (DDI_FAILURE);
	}
}
518
519/* ARGSUSED3 */
520static int
521pcmu_open(dev_t *devp, int flags, int otyp, cred_t *credp)
522{
523	pcmu_t *pcmu_p;
524
525	if (otyp != OTYP_CHR) {
526		return (EINVAL);
527	}
528
529	/*
530	 * Get the soft state structure for the device.
531	 */
532	pcmu_p = DEV_TO_SOFTSTATE(*devp);
533	if (pcmu_p == NULL) {
534		return (ENXIO);
535	}
536
537	/*
538	 * Handle the open by tracking the device state.
539	 */
540	PCMU_DBG2(PCMU_DBG_OPEN, pcmu_p->pcmu_dip,
541	    "devp=%x: flags=%x\n", devp, flags);
542	mutex_enter(&pcmu_p->pcmu_mutex);
543	if (flags & FEXCL) {
544		if (pcmu_p->pcmu_soft_state != PCMU_SOFT_STATE_CLOSED) {
545			mutex_exit(&pcmu_p->pcmu_mutex);
546			PCMU_DBG0(PCMU_DBG_OPEN, pcmu_p->pcmu_dip, "busy\n");
547			return (EBUSY);
548		}
549		pcmu_p->pcmu_soft_state = PCMU_SOFT_STATE_OPEN_EXCL;
550	} else {
551		if (pcmu_p->pcmu_soft_state == PCMU_SOFT_STATE_OPEN_EXCL) {
552			mutex_exit(&pcmu_p->pcmu_mutex);
553			PCMU_DBG0(PCMU_DBG_OPEN, pcmu_p->pcmu_dip, "busy\n");
554			return (EBUSY);
555		}
556		pcmu_p->pcmu_soft_state = PCMU_SOFT_STATE_OPEN;
557	}
558	pcmu_p->pcmu_open_count++;
559	mutex_exit(&pcmu_p->pcmu_mutex);
560	return (0);
561}
562
563
564/* ARGSUSED */
565static int
566pcmu_close(dev_t dev, int flags, int otyp, cred_t *credp)
567{
568	pcmu_t *pcmu_p;
569
570	if (otyp != OTYP_CHR) {
571		return (EINVAL);
572	}
573
574	pcmu_p = DEV_TO_SOFTSTATE(dev);
575	if (pcmu_p == NULL) {
576		return (ENXIO);
577	}
578
579	PCMU_DBG2(PCMU_DBG_CLOSE, pcmu_p->pcmu_dip,
580	    "dev=%x: flags=%x\n", dev, flags);
581	mutex_enter(&pcmu_p->pcmu_mutex);
582	pcmu_p->pcmu_soft_state = PCMU_SOFT_STATE_CLOSED;
583	pcmu_p->pcmu_open_count = 0;
584	mutex_exit(&pcmu_p->pcmu_mutex);
585	return (0);
586}
587
588/* ARGSUSED */
/*
 * ioctl(9E) for the devctl minor node.
 *
 * Generic devctl state ioctls are forwarded to ndi_devctl_ioctl();
 * QUIESCE/UNQUIESCE only record bus state (no hardware action), and
 * the reset ioctls are unsupported on CMU-CH.
 */
static int
pcmu_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp)
{
	pcmu_t *pcmu_p;
	dev_info_t *dip;
	struct devctl_iocdata *dcp;
	uint_t bus_state;
	int rv = 0;

	pcmu_p = DEV_TO_SOFTSTATE(dev);
	if (pcmu_p == NULL) {
		return (ENXIO);
	}

	dip = pcmu_p->pcmu_dip;
	PCMU_DBG2(PCMU_DBG_IOCTL, dip, "dev=%x: cmd=%x\n", dev, cmd);

	/*
	 * We can use the generic implementation for these ioctls
	 */
	switch (cmd) {
	case DEVCTL_DEVICE_GETSTATE:
	case DEVCTL_DEVICE_ONLINE:
	case DEVCTL_DEVICE_OFFLINE:
	case DEVCTL_BUS_GETSTATE:
		return (ndi_devctl_ioctl(dip, cmd, arg, mode, 0));
	}

	/*
	 * read devctl ioctl data
	 */
	if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
		return (EFAULT);

	switch (cmd) {

	case DEVCTL_DEVICE_RESET:
		PCMU_DBG0(PCMU_DBG_IOCTL, dip, "DEVCTL_DEVICE_RESET\n");
		rv = ENOTSUP;
		break;


	case DEVCTL_BUS_QUIESCE:
		PCMU_DBG0(PCMU_DBG_IOCTL, dip, "DEVCTL_BUS_QUIESCE\n");
		/* already quiesced: succeed without touching state */
		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS) {
			if (bus_state == BUS_QUIESCED) {
				break;
			}
		}
		(void) ndi_set_bus_state(dip, BUS_QUIESCED);
		break;

	case DEVCTL_BUS_UNQUIESCE:
		PCMU_DBG0(PCMU_DBG_IOCTL, dip, "DEVCTL_BUS_UNQUIESCE\n");
		/* already active: succeed without touching state */
		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS) {
			if (bus_state == BUS_ACTIVE) {
				break;
			}
		}
		(void) ndi_set_bus_state(dip, BUS_ACTIVE);
		break;

	case DEVCTL_BUS_RESET:
		PCMU_DBG0(PCMU_DBG_IOCTL, dip, "DEVCTL_BUS_RESET\n");
		rv = ENOTSUP;
		break;

	case DEVCTL_BUS_RESETALL:
		PCMU_DBG0(PCMU_DBG_IOCTL, dip, "DEVCTL_BUS_RESETALL\n");
		rv = ENOTSUP;
		break;

	default:
		rv = ENOTTY;
	}

	ndi_dc_freehdl(dcp);
	return (rv);
}
669
670static int pcmu_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
671    int flags, char *name, caddr_t valuep, int *lengthp)
672{
673	return (ddi_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp));
674}
675/* bus driver entry points */
676
677/*
678 * bus map entry point:
679 *
680 *	if map request is for an rnumber
681 *		get the corresponding regspec from device node
682 *	build a new regspec in our parent's format
683 *	build a new map_req with the new regspec
684 *	call up the tree to complete the mapping
685 */
static int
pcmu_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
	off_t off, off_t len, caddr_t *addrp)
{
	pcmu_t *pcmu_p = get_pcmu_soft_state(ddi_get_instance(dip));
	struct regspec p_regspec;
	ddi_map_req_t p_mapreq;
	int reglen, rval, r_no;
	pci_regspec_t reloc_reg, *rp = &reloc_reg;

	PCMU_DBG2(PCMU_DBG_MAP, dip, "rdip=%s%d:",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (mp->map_flags & DDI_MF_USER_MAPPING) {
		return (DDI_ME_UNIMPLEMENTED);
	}

	switch (mp->map_type) {
	case DDI_MT_REGSPEC:
		reloc_reg = *(pci_regspec_t *)mp->map_obj.rp;	/* dup whole */
		break;

	case DDI_MT_RNUMBER:
		r_no = mp->map_obj.rnumber;
		PCMU_DBG1(PCMU_DBG_MAP | PCMU_DBG_CONT, dip, " r#=%x", r_no);

		/*
		 * Fetch the child's "reg" property; on success rp points
		 * at a kmem-allocated array that must be freed below.
		 */
		if (ddi_getlongprop(DDI_DEV_T_NONE, rdip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&rp, &reglen) != DDI_SUCCESS) {
			return (DDI_ME_RNUMBER_RANGE);
		}

		if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) {
			kmem_free(rp, reglen);
			return (DDI_ME_RNUMBER_RANGE);
		}
		/*
		 * Advance rp to the requested entry; the kmem_free() at
		 * "done" undoes this offset via (rp - r_no).
		 */
		rp += r_no;
		break;

	default:
		return (DDI_ME_INVAL);
	}
	PCMU_DBG0(PCMU_DBG_MAP | PCMU_DBG_CONT, dip, "\n");

	/* use "assigned-addresses" to relocate regspec within pci space */
	if (rval = pcmu_reloc_reg(dip, rdip, pcmu_p, rp)) {
		goto done;
	}

	/* adjust regspec according to mapping request */
	if (len) {
		rp->pci_size_low = (uint_t)len;
	}
	rp->pci_phys_low += off;

	/* use "ranges" to translate relocated pci regspec into parent space */
	if (rval = pcmu_xlate_reg(pcmu_p, rp, &p_regspec)) {
		goto done;
	}

	/* hand the translated regspec up the tree to finish the mapping */
	p_mapreq = *mp;		/* dup the whole structure */
	p_mapreq.map_type = DDI_MT_REGSPEC;
	p_mapreq.map_obj.rp = &p_regspec;
	rval = ddi_map(dip, &p_mapreq, 0, 0, addrp);

done:
	/* free the "reg" buffer iff this was an rnumber request */
	if (mp->map_type == DDI_MT_RNUMBER) {
		kmem_free(rp - r_no, reglen);
	}
	return (rval);
}
756
#ifdef  DEBUG
/* counts of protected peek/poke accesses that faulted (debug only) */
int	pcmu_peekfault_cnt = 0;
int	pcmu_pokefault_cnt = 0;
#endif  /* DEBUG */
761
/*
 * Cautious ("poke") write to PCI space for DDI_CTLOPS_POKE.
 *
 * The write runs under on_trap() protection so a master-abort does not
 * panic the system; afterwards the PBM async fault state is cleared and
 * checked.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
pcmu_do_poke(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args)
{
	pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	/*
	 * Serialize pokes on this PBM; pcbm_ontrap_data points at our
	 * stack-resident trap data for the duration of the access.
	 */
	mutex_enter(&pcbm_p->pcbm_pokeflt_mutex);
	pcbm_p->pcbm_ontrap_data = &otd;

	/* Set up protected environment. */
	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		uintptr_t tramp = otd.ot_trampoline;

		/* redirect data-access traps to the poke fault handler */
		otd.ot_trampoline = (uintptr_t)&poke_fault;
		err = do_poke(in_args->size, (void *)in_args->dev_addr,
		    (void *)in_args->host_addr);
		otd.ot_trampoline = tramp;
	} else {
		err = DDI_FAILURE;
	}

	/*
	 * Read the async fault register for the PBM to see it sees
	 * a master-abort.
	 */
	pcmu_pbm_clear_error(pcbm_p);

	/* a data-access trap during the poke also counts as failure */
	if (otd.ot_trap & OT_DATA_ACCESS) {
		err = DDI_FAILURE;
	}

	/* Take down protected environment. */
	no_trap();

	pcbm_p->pcbm_ontrap_data = NULL;
	mutex_exit(&pcbm_p->pcbm_pokeflt_mutex);

#ifdef  DEBUG
	if (err == DDI_FAILURE)
		pcmu_pokefault_cnt++;
#endif
	return (err);
}
806
807
808static int
809pcmu_ctlops_poke(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args)
810{
811	return (pcmu_do_poke(pcmu_p, in_args));
812}
813
/* ARGSUSED */
/*
 * Cautious ("peek") read from PCI space for DDI_CTLOPS_PEEK.
 *
 * Like pcmu_do_poke() the access runs under on_trap() protection, but
 * no PBM serialization or fault-register cleanup is needed for reads.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
pcmu_do_peek(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args)
{
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		uintptr_t tramp = otd.ot_trampoline;

		/* redirect data-access traps to the peek fault handler */
		otd.ot_trampoline = (uintptr_t)&peek_fault;
		err = do_peek(in_args->size, (void *)in_args->dev_addr,
		    (void *)in_args->host_addr);
		otd.ot_trampoline = tramp;
	} else
		err = DDI_FAILURE;

	no_trap();

#ifdef  DEBUG
	if (err == DDI_FAILURE)
		pcmu_peekfault_cnt++;
#endif
	return (err);
}
839
840
/*
 * DDI_CTLOPS_PEEK handler.
 *
 * NOTE(review): the assignment below stores into the pass-by-value
 * parameter "result" and therefore has no effect visible to the
 * caller -- it appears vestigial.  Confirm against the sun4u pci
 * nexus before relying on "result" being filled in here.
 */
static int
pcmu_ctlops_peek(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args, void *result)
{
	result = (void *)in_args->host_addr;
	return (pcmu_do_peek(pcmu_p, in_args));
}
847
848/*
849 * control ops entry point:
850 *
851 * Requests handled completely:
852 *	DDI_CTLOPS_INITCHILD	see pcmu_init_child() for details
853 *	DDI_CTLOPS_UNINITCHILD
854 *	DDI_CTLOPS_REPORTDEV	see report_dev() for details
855 *	DDI_CTLOPS_XLATE_INTRS	nothing to do
856 *	DDI_CTLOPS_IOMIN	cache line size if streaming otherwise 1
857 *	DDI_CTLOPS_REGSIZE
858 *	DDI_CTLOPS_NREGS
859 *	DDI_CTLOPS_NINTRS
860 *	DDI_CTLOPS_DVMAPAGESIZE
861 *	DDI_CTLOPS_POKE
862 *	DDI_CTLOPS_PEEK
863 *	DDI_CTLOPS_QUIESCE
864 *	DDI_CTLOPS_UNQUIESCE
865 *
866 * All others passed to parent.
867 */
static int
pcmu_ctlops(dev_info_t *dip, dev_info_t *rdip,
	ddi_ctl_enum_t op, void *arg, void *result)
{
	pcmu_t *pcmu_p = get_pcmu_soft_state(ddi_get_instance(dip));

	switch (op) {
	case DDI_CTLOPS_INITCHILD:
		return (pcmu_init_child(pcmu_p, (dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		return (pcmu_uninit_child(pcmu_p, (dev_info_t *)arg));

	case DDI_CTLOPS_REPORTDEV:
		return (pcmu_report_dev(rdip));

	case DDI_CTLOPS_IOMIN:
		/*
		 * If we are using the streaming cache, align at
		 * least on a cache line boundary. Otherwise use
		 * whatever alignment is passed in.
		 */
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
		/* *arg is the register set number to size */
		*((off_t *)result) = pcmu_get_reg_set_size(rdip, *((int *)arg));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_NREGS:
		*((uint_t *)result) = pcmu_get_nreg_set(rdip);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_DVMAPAGESIZE:
		/* CMU-CH performs no DVMA: report page size 0 */
		*((ulong_t *)result) = 0;
		return (DDI_SUCCESS);

	case DDI_CTLOPS_POKE:
		return (pcmu_ctlops_poke(pcmu_p, (peekpoke_ctlops_t *)arg));

	case DDI_CTLOPS_PEEK:
		return (pcmu_ctlops_peek(pcmu_p, (peekpoke_ctlops_t *)arg,
		    result));

	case DDI_CTLOPS_AFFINITY:
		/* fall through to the parent nexus */
		break;

	case DDI_CTLOPS_QUIESCE:
		return (DDI_FAILURE);

	case DDI_CTLOPS_UNQUIESCE:
		return (DDI_FAILURE);

	default:
		break;
	}

	/*
	 * Now pass the request up to our parent.
	 */
	PCMU_DBG2(PCMU_DBG_CTLOPS, dip,
	    "passing request to parent: rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
	return (ddi_ctlops(dip, rdip, op, arg, result));
}
932
933
/* ARGSUSED */
/*
 * bus_intr_op entry point: fixed (INTX-style) interrupt management for
 * children of this nexus.  Unsupported operations return DDI_ENOTSUP.
 */
static int
pcmu_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	pcmu_t		*pcmu_p = get_pcmu_soft_state(ddi_get_instance(dip));
	int		ret = DDI_SUCCESS;

	switch (intr_op) {
	case DDI_INTROP_GETCAP:
		/* GetCap will always fail for all non PCI devices */
		(void) pci_intx_get_cap(rdip, (int *)result);
		break;
	case DDI_INTROP_SETCAP:
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_ALLOC:
		/* grant exactly the number of interrupts requested */
		*(int *)result = hdlp->ih_scratch1;
		break;
	case DDI_INTROP_FREE:
		break;
	case DDI_INTROP_GETPRI:
		*(int *)result = hdlp->ih_pri ? hdlp->ih_pri : 0;
		break;
	case DDI_INTROP_SETPRI:
		break;
	case DDI_INTROP_ADDISR:
		ret = pcmu_add_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_REMISR:
		ret = pcmu_remove_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_ENABLE:
		ret = pcmu_ib_update_intr_state(pcmu_p, rdip, hdlp,
		    PCMU_INTR_STATE_ENABLE);
		break;
	case DDI_INTROP_DISABLE:
		ret = pcmu_ib_update_intr_state(pcmu_p, rdip, hdlp,
		    PCMU_INTR_STATE_DISABLE);
		break;
	case DDI_INTROP_SETMASK:
		ret = pci_intx_set_mask(rdip);
		break;
	case DDI_INTROP_CLRMASK:
		ret = pci_intx_clr_mask(rdip);
		break;
	case DDI_INTROP_GETPENDING:
		ret = pci_intx_get_pending(rdip, (int *)result);
		break;
	case DDI_INTROP_NINTRS:
	case DDI_INTROP_NAVAIL:
		*(int *)result = i_ddi_get_intx_nintrs(rdip);
		break;
	case DDI_INTROP_SUPPORTED_TYPES:
		/* PCI nexus driver supports only fixed interrupts */
		*(int *)result = i_ddi_get_intx_nintrs(rdip) ?
		    DDI_INTR_TYPE_FIXED : 0;
		break;
	default:
		ret = DDI_ENOTSUP;
		break;
	}

	return (ret);
}
999
1000/*
1001 * CMU-CH specifics implementation:
1002 *	interrupt mapping register
1003 *	PBM configuration
1004 *	ECC and PBM error handling
1005 */
1006
1007/* called by pcmu_attach() DDI_ATTACH to initialize pci objects */
static int
pcmu_obj_setup(pcmu_t *pcmu_p)
{
	int ret;	/* always assigned via the pcmu_intr_setup() call */

	mutex_enter(&pcmu_global_mutex);
	/* cache the hardware revision for the kstat workaround below */
	pcmu_p->pcmu_rev = ddi_prop_get_int(DDI_DEV_T_ANY, pcmu_p->pcmu_dip,
	    DDI_PROP_DONTPASS, "module-revision#", 0);

	/* build the sub-objects in dependency order */
	pcmu_ib_create(pcmu_p);
	pcmu_cb_create(pcmu_p);
	pcmu_ecc_create(pcmu_p);
	pcmu_pbm_create(pcmu_p);
	pcmu_err_create(pcmu_p);
	if ((ret = pcmu_intr_setup(pcmu_p)) != DDI_SUCCESS)
		goto done;

	/*
	 * Due to a hardware bug, do not create kstat for DC systems
	 * with PCI hw revision less than 5.
	 */
	if ((strncmp(ddi_binding_name(pcmu_p->pcmu_dip),
	    PCICMU_OPL_DC_BINDING_NAME, strlen(PCICMU_OPL_DC_BINDING_NAME))
	    != 0) || (pcmu_p->pcmu_rev > 4)) {
		pcmu_kstat_create(pcmu_p);
	}
done:
	mutex_exit(&pcmu_global_mutex);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "Interrupt register failure, returning 0x%x\n",
		    ret);
	}
	return (ret);
}
1042
1043/* called by pcmu_detach() DDI_DETACH to destroy pci objects */
static void
pcmu_obj_destroy(pcmu_t *pcmu_p)
{
	mutex_enter(&pcmu_global_mutex);

	/* tear down sub-objects in reverse order of pcmu_obj_setup() */
	pcmu_kstat_destroy(pcmu_p);
	pcmu_pbm_destroy(pcmu_p);
	pcmu_err_destroy(pcmu_p);
	pcmu_ecc_destroy(pcmu_p);
	pcmu_cb_destroy(pcmu_p);
	pcmu_ib_destroy(pcmu_p);
	pcmu_intr_teardown(pcmu_p);

	mutex_exit(&pcmu_global_mutex);
}
1059
1060/* called by pcmu_attach() DDI_RESUME to (re)initialize pci objects */
static void
pcmu_obj_resume(pcmu_t *pcmu_p)
{
	mutex_enter(&pcmu_global_mutex);

	/*
	 * Reprogram hardware state lost across the suspend; the
	 * sequence (configure, then resume each object) is
	 * order-sensitive -- do not reorder.
	 */
	pcmu_ib_configure(pcmu_p->pcmu_ib_p);
	pcmu_ecc_configure(pcmu_p);
	pcmu_ib_resume(pcmu_p->pcmu_ib_p);
	u2u_ittrans_resume((u2u_ittrans_data_t **)
	    &(pcmu_p->pcmu_cb_p->pcb_ittrans_cookie));

	pcmu_pbm_configure(pcmu_p->pcmu_pcbm_p);

	pcmu_cb_resume(pcmu_p->pcmu_cb_p);

	pcmu_pbm_resume(pcmu_p->pcmu_pcbm_p);

	mutex_exit(&pcmu_global_mutex);
}
1080
1081/* called by pcmu_detach() DDI_SUSPEND to suspend pci objects */
static void
pcmu_obj_suspend(pcmu_t *pcmu_p)
{
	mutex_enter(&pcmu_global_mutex);

	/* quiesce the PBM first, then the interrupt block and CB */
	pcmu_pbm_suspend(pcmu_p->pcmu_pcbm_p);
	pcmu_ib_suspend(pcmu_p->pcmu_ib_p);
	pcmu_cb_suspend(pcmu_p->pcmu_cb_p);

	mutex_exit(&pcmu_global_mutex);
}
1093
/*
 * Register and enable the ECC and PBM interrupts described by the
 * "interrupts" property.  Returns DDI_SUCCESS, or the first failing
 * registration's error code after unwinding via pcmu_intr_teardown().
 */
static int
pcmu_intr_setup(pcmu_t *pcmu_p)
{
	dev_info_t *dip = pcmu_p->pcmu_dip;
	pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;
	pcmu_cb_t *pcb_p = pcmu_p->pcmu_cb_p;
	int i, no_of_intrs;

	/*
	 * Get the interrupts property.
	 */
	if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
	    "interrupts", (caddr_t)&pcmu_p->pcmu_inos,
	    &pcmu_p->pcmu_inos_len) != DDI_SUCCESS) {
		cmn_err(CE_PANIC, "%s%d: no interrupts property\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
	}

	/*
	 * figure out number of interrupts in the "interrupts" property
	 * and convert them all into ino.
	 */
	i = ddi_getprop(DDI_DEV_T_ANY, dip, 0, "#interrupt-cells", 1);
	i = CELLS_1275_TO_BYTES(i);
	no_of_intrs = pcmu_p->pcmu_inos_len / i;
	for (i = 0; i < no_of_intrs; i++) {
		pcmu_p->pcmu_inos[i] =
		    PCMU_IB_MONDO_TO_INO(pcmu_p->pcmu_inos[i]);
	}

	pcb_p->pcb_no_of_inos = no_of_intrs;
	/* note: assignment in condition -- i receives the error code */
	if (i = pcmu_ecc_register_intr(pcmu_p)) {
		goto teardown;
	}

	intr_dist_add(pcmu_cb_intr_dist, pcb_p);
	pcmu_ecc_enable_intr(pcmu_p);

	if (i = pcmu_pbm_register_intr(pcbm_p)) {
		/* undo the distribution hook added for the ECC intr */
		intr_dist_rem(pcmu_cb_intr_dist, pcb_p);
		goto teardown;
	}
	intr_dist_add(pcmu_pbm_intr_dist, pcbm_p);
	pcmu_ib_intr_enable(pcmu_p, pcmu_p->pcmu_inos[CBNINTR_PBM]);

	intr_dist_add_weighted(pcmu_ib_intr_dist_all, pcmu_p->pcmu_ib_p);
	return (DDI_SUCCESS);
teardown:
	/* common unwind: release whatever interrupt state was set up */
	pcmu_intr_teardown(pcmu_p);
	return (i);
}
1145
1146/*
1147 * pcmu_fix_ranges - fixes the config space entry of the "ranges"
1148 *	property on CMU-CH platforms
1149 */
1150void
1151pcmu_fix_ranges(pcmu_ranges_t *rng_p, int rng_entries)
1152{
1153	int i;
1154	for (i = 0; i < rng_entries; i++, rng_p++) {
1155		if ((rng_p->child_high & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG)
1156			rng_p->parent_low |= rng_p->child_high;
1157	}
1158}
1159
1160/*
1161 * map_pcmu_registers
1162 *
1163 * This function is called from the attach routine to map the registers
1164 * accessed by this driver.
1165 *
1166 * used by: pcmu_attach()
1167 *
1168 * return value: DDI_FAILURE on failure
1169 */
1170static int
1171map_pcmu_registers(pcmu_t *pcmu_p, dev_info_t *dip)
1172{
1173	ddi_device_acc_attr_t attr;
1174
1175	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1176	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1177
1178	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
1179	if (ddi_regs_map_setup(dip, 0, &pcmu_p->pcmu_address[0], 0, 0,
1180	    &attr, &pcmu_p->pcmu_ac[0]) != DDI_SUCCESS) {
1181		cmn_err(CE_WARN, "%s%d: unable to map reg entry 0\n",
1182		    ddi_driver_name(dip), ddi_get_instance(dip));
1183		return (DDI_FAILURE);
1184	}
1185
1186	/*
1187	 * We still use pcmu_address[2]
1188	 */
1189	if (ddi_regs_map_setup(dip, 2, &pcmu_p->pcmu_address[2], 0, 0,
1190	    &attr, &pcmu_p->pcmu_ac[2]) != DDI_SUCCESS) {
1191		cmn_err(CE_WARN, "%s%d: unable to map reg entry 2\n",
1192		    ddi_driver_name(dip), ddi_get_instance(dip));
1193		ddi_regs_map_free(&pcmu_p->pcmu_ac[0]);
1194		return (DDI_FAILURE);
1195	}
1196
1197	/*
1198	 * The second register set contains the bridge's configuration
1199	 * header.  This header is at the very beginning of the bridge's
1200	 * configuration space.  This space has litte-endian byte order.
1201	 */
1202	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1203	if (ddi_regs_map_setup(dip, 1, &pcmu_p->pcmu_address[1], 0,
1204	    PCI_CONF_HDR_SIZE, &attr, &pcmu_p->pcmu_ac[1]) != DDI_SUCCESS) {
1205
1206		cmn_err(CE_WARN, "%s%d: unable to map reg entry 1\n",
1207		    ddi_driver_name(dip), ddi_get_instance(dip));
1208		ddi_regs_map_free(&pcmu_p->pcmu_ac[0]);
1209		return (DDI_FAILURE);
1210	}
1211	PCMU_DBG2(PCMU_DBG_ATTACH, dip, "address (%p,%p)\n",
1212	    pcmu_p->pcmu_address[0], pcmu_p->pcmu_address[1]);
1213	return (DDI_SUCCESS);
1214}
1215
1216/*
1217 * unmap_pcmu_registers:
1218 *
1219 * This routine unmap the registers mapped by map_pcmu_registers.
1220 *
1221 * used by: pcmu_detach()
1222 *
1223 * return value: none
1224 */
1225static void
1226unmap_pcmu_registers(pcmu_t *pcmu_p)
1227{
1228	ddi_regs_map_free(&pcmu_p->pcmu_ac[0]);
1229	ddi_regs_map_free(&pcmu_p->pcmu_ac[1]);
1230	ddi_regs_map_free(&pcmu_p->pcmu_ac[2]);
1231}
1232
1233/*
 * These convenience wrappers rely on map_pcmu_registers() to set up
1235 * pcmu_address[0-2] correctly at first.
1236 */
1237static uintptr_t
1238get_reg_base(pcmu_t *pcmu_p)
1239{
1240	return ((uintptr_t)pcmu_p->pcmu_address[2]);
1241}
1242
1243/* The CMU-CH config reg base is always the 2nd reg entry */
1244static uintptr_t
1245get_config_reg_base(pcmu_t *pcmu_p)
1246{
1247	return ((uintptr_t)(pcmu_p->pcmu_address[1]));
1248}
1249
1250uint64_t
1251ib_get_map_reg(pcmu_ib_mondo_t mondo, uint32_t cpu_id)
1252{
1253	return ((mondo) | (cpu_id << PCMU_INTR_MAP_REG_TID_SHIFT) |
1254	    PCMU_INTR_MAP_REG_VALID);
1255
1256}
1257
1258uint32_t
1259ib_map_reg_get_cpu(volatile uint64_t reg)
1260{
1261	return ((reg & PCMU_INTR_MAP_REG_TID) >>
1262	    PCMU_INTR_MAP_REG_TID_SHIFT);
1263}
1264
1265uint64_t *
1266ib_intr_map_reg_addr(pcmu_ib_t *pib_p, pcmu_ib_ino_t ino)
1267{
1268	uint64_t *addr;
1269
1270	ASSERT(ino & 0x20);
1271	addr = (uint64_t *)(pib_p->pib_obio_intr_map_regs +
1272	    (((uint_t)ino & 0x1f) << 3));
1273	return (addr);
1274}
1275
1276uint64_t *
1277ib_clear_intr_reg_addr(pcmu_ib_t *pib_p, pcmu_ib_ino_t ino)
1278{
1279	uint64_t *addr;
1280
1281	ASSERT(ino & 0x20);
1282	addr = (uint64_t *)(pib_p->pib_obio_clear_intr_regs +
1283	    (((uint_t)ino & 0x1f) << 3));
1284	return (addr);
1285}
1286
1287uintptr_t
1288pcmu_ib_setup(pcmu_ib_t *pib_p)
1289{
1290	pcmu_t *pcmu_p = pib_p->pib_pcmu_p;
1291	uintptr_t a = get_reg_base(pcmu_p);
1292
1293	pib_p->pib_ign = PCMU_ID_TO_IGN(pcmu_p->pcmu_id);
1294	pib_p->pib_max_ino = PCMU_MAX_INO;
1295	pib_p->pib_obio_intr_map_regs = a + PCMU_IB_OBIO_INTR_MAP_REG_OFFSET;
1296	pib_p->pib_obio_clear_intr_regs =
1297	    a + PCMU_IB_OBIO_CLEAR_INTR_REG_OFFSET;
1298	return (a);
1299}
1300
1301/*
 * Return the cpuid to be used for an ino.
1303 *
1304 * On multi-function pci devices, functions have separate devinfo nodes and
1305 * interrupts.
1306 *
1307 * This function determines if there is already an established slot-oriented
1308 * interrupt-to-cpu binding established, if there is then it returns that
1309 * cpu.  Otherwise a new cpu is selected by intr_dist_cpuid().
1310 *
1311 * The devinfo node we are trying to associate a cpu with is
1312 * ino_p->pino_ih_head->ih_dip.
1313 */
uint32_t
pcmu_intr_dist_cpuid(pcmu_ib_t *pib_p, pcmu_ib_ino_info_t *ino_p)
{
	dev_info_t	*rdip = ino_p->pino_ih_head->ih_dip;
	dev_info_t	*prdip = ddi_get_parent(rdip);
	pcmu_ib_ino_info_t	*sino_p;
	dev_info_t	*sdip;
	dev_info_t	*psdip;
	char		*buf1 = NULL, *buf2 = NULL;
	char		*s1, *s2, *s3;
	int		l2;
	int		cpu_id;

	/* must be CMU-CH driver parent (not ebus) */
	if (strcmp(ddi_driver_name(prdip), "pcicmu") != 0)
		goto newcpu;

	/*
	 * From PCI 1275 binding: 2.2.1.3 Unit Address representation:
	 *   Since the "unit-number" is the address that appears in on Open
	 *   Firmware 'device path', it follows that only the DD and DD,FF
	 *   forms of the text representation can appear in a 'device path'.
	 *
	 * The rdip unit address is of the form "DD[,FF]".  Define two
	 * unit address strings that represent same-slot use: "DD" and "DD,".
	 * The first compare uses strcmp, the second uses strncmp.
	 */
	s1 = ddi_get_name_addr(rdip);
	if (s1 == NULL) {
		goto newcpu;
	}

	/* Build the two match strings in scratch buffers buf1/buf2. */
	buf1 = kmem_alloc(MAXNAMELEN, KM_SLEEP);	/* strcmp */
	buf2 = kmem_alloc(MAXNAMELEN, KM_SLEEP);	/* strncmp */
	s1 = strcpy(buf1, s1);
	s2 = strcpy(buf2, s1);

	s1 = strrchr(s1, ',');
	if (s1) {
		*s1 = '\0';			/* have "DD,FF" */
		s1 = buf1;			/* search via strcmp "DD" */

		s2 = strrchr(s2, ',');
		*(s2 + 1) = '\0';
		s2 = buf2;
		l2 = strlen(s2);		/* search via strncmp "DD," */
	} else {
		(void) strcat(s2, ",");		/* have "DD" */
		l2 = strlen(s2);		/* search via strncmp "DD," */
	}

	/*
	 * Search the established ino list for devinfo nodes bound
	 * to an ino that matches one of the slot use strings.
	 */
	ASSERT(MUTEX_HELD(&pib_p->pib_ino_lst_mutex));
	for (sino_p = pib_p->pib_ino_lst; sino_p; sino_p = sino_p->pino_next) {
		/* skip self and non-established */
		if ((sino_p == ino_p) || (sino_p->pino_established == 0))
			continue;

		/* skip non-siblings */
		sdip = sino_p->pino_ih_head->ih_dip;
		psdip = ddi_get_parent(sdip);
		if (psdip != prdip)
			continue;

		/* must be CMU-CH driver parent (not ebus) */
		if (strcmp(ddi_driver_name(psdip), "pcicmu") != 0)
			continue;

		s3 = ddi_get_name_addr(sdip);
		if ((s1 && (strcmp(s1, s3) == 0)) ||
		    (strncmp(s2, s3, l2) == 0)) {
			extern int intr_dist_debug;

			if (intr_dist_debug) {
				/*
				 * NOTE: ddi_deviname() writes into buf1 and
				 * buf2 here, clobbering the match strings.
				 * That is safe only because we break out of
				 * the search immediately afterwards.
				 */
				cmn_err(CE_CONT, "intr_dist: "
				    "pcicmu`pcmu_intr_dist_cpuid "
				    "%s#%d %s: cpu %d established "
				    "by %s#%d %s\n", ddi_driver_name(rdip),
				    ddi_get_instance(rdip),
				    ddi_deviname(rdip, buf1),
				    sino_p->pino_cpuid,
				    ddi_driver_name(sdip),
				    ddi_get_instance(sdip),
				    ddi_deviname(sdip, buf2));
			}
			break;
		}
	}

	/* If a slot use match is found then use established cpu */
	if (sino_p) {
		cpu_id = sino_p->pino_cpuid;	/* target established cpu */
		goto out;
	}

newcpu:	cpu_id = intr_dist_cpuid();		/* target new cpu */

out:	if (buf1)
		kmem_free(buf1, MAXNAMELEN);
	if (buf2)
		kmem_free(buf2, MAXNAMELEN);
	return (cpu_id);
}
1420
1421void
1422pcmu_cb_teardown(pcmu_t *pcmu_p)
1423{
1424	pcmu_cb_t	*pcb_p = pcmu_p->pcmu_cb_p;
1425
1426	u2u_ittrans_uninit((u2u_ittrans_data_t *)pcb_p->pcb_ittrans_cookie);
1427}
1428
1429int
1430pcmu_ecc_add_intr(pcmu_t *pcmu_p, int inum, pcmu_ecc_intr_info_t *eii_p)
1431{
1432	uint32_t mondo;
1433
1434	mondo = ((pcmu_p->pcmu_cb_p->pcb_ign << PCMU_INO_BITS) |
1435	    pcmu_p->pcmu_inos[inum]);
1436
1437	VERIFY(add_ivintr(mondo, pcmu_pil[inum], (intrfunc)pcmu_ecc_intr,
1438	    (caddr_t)eii_p, NULL, NULL) == 0);
1439
1440	return (PCMU_ATTACH_RETCODE(PCMU_ECC_OBJ,
1441	    PCMU_OBJ_INTR_ADD, DDI_SUCCESS));
1442}
1443
1444/* ARGSUSED */
1445void
1446pcmu_ecc_rem_intr(pcmu_t *pcmu_p, int inum, pcmu_ecc_intr_info_t *eii_p)
1447{
1448	uint32_t mondo;
1449
1450	mondo = ((pcmu_p->pcmu_cb_p->pcb_ign << PCMU_INO_BITS) |
1451	    pcmu_p->pcmu_inos[inum]);
1452
1453	VERIFY(rem_ivintr(mondo, pcmu_pil[inum]) == 0);
1454}
1455
void
pcmu_pbm_configure(pcmu_pbm_t *pcbm_p)
{
	pcmu_t *pcmu_p = pcbm_p->pcbm_pcmu_p;
	dev_info_t *dip = pcmu_p->pcmu_dip;

/* All primary and secondary AFSR error bits. */
#define	pbm_err	((PCMU_PCI_AFSR_E_MASK << PCMU_PCI_AFSR_PE_SHIFT) |	\
		(PCMU_PCI_AFSR_E_MASK << PCMU_PCI_AFSR_SE_SHIFT))
/*
 * PCI config status error bits to clear.
 * NOTE(review): PCI_STAT_S_PERROR appears twice in this mask;
 * harmless (OR is idempotent) but likely one term was meant to
 * be a different bit — confirm against the original intent.
 */
#define	csr_err	(PCI_STAT_PERROR | PCI_STAT_S_PERROR |		\
		PCI_STAT_R_MAST_AB | PCI_STAT_R_TARG_AB |	\
		PCI_STAT_S_TARG_AB | PCI_STAT_S_PERROR)

	/*
	 * Clear any PBM errors.
	 */
	*pcbm_p->pcbm_async_flt_status_reg = pbm_err;

	/*
	 * Clear error bits in configuration status register.
	 * (Writing the bits back presumably clears them — W1C; confirm.)
	 */
	PCMU_DBG1(PCMU_DBG_ATTACH, dip,
	    "pcmu_pbm_configure: conf status reg=%x\n", csr_err);

	pcbm_p->pcbm_config_header->ch_status_reg = csr_err;

	PCMU_DBG1(PCMU_DBG_ATTACH, dip,
	    "pcmu_pbm_configure: conf status reg==%x\n",
	    pcbm_p->pcbm_config_header->ch_status_reg);

	/* Publish the bridge's latency timer as a devinfo property. */
	(void) ndi_prop_update_int(DDI_DEV_T_ANY, dip, "latency-timer",
	    (int)pcbm_p->pcbm_config_header->ch_latency_timer_reg);
#undef	pbm_err
#undef	csr_err
}
1490
1491uint_t
1492pcmu_pbm_disable_errors(pcmu_pbm_t *pcbm_p)
1493{
1494	pcmu_t *pcmu_p = pcbm_p->pcbm_pcmu_p;
1495	pcmu_ib_t *pib_p = pcmu_p->pcmu_ib_p;
1496
1497	/*
1498	 * Disable error and streaming byte hole interrupts via the
1499	 * PBM control register.
1500	 */
1501	*pcbm_p->pcbm_ctrl_reg &= ~PCMU_PCI_CTRL_ERR_INT_EN;
1502
1503	/*
1504	 * Disable error interrupts via the interrupt mapping register.
1505	 */
1506	pcmu_ib_intr_disable(pib_p,
1507	    pcmu_p->pcmu_inos[CBNINTR_PBM], PCMU_IB_INTR_NOWAIT);
1508	return (BF_NONE);
1509}
1510
void
pcmu_cb_setup(pcmu_t *pcmu_p)
{
	uint64_t csr, csr_pa, pa;
	pcmu_cb_t *pcb_p = pcmu_p->pcmu_cb_p;

	/* The interrupt group number is derived from the CMU's port id. */
	pcb_p->pcb_ign = PCMU_ID_TO_IGN(pcmu_p->pcmu_id);
	pa = (uint64_t)hat_getpfnum(kas.a_hat, pcmu_p->pcmu_address[0]);
	/*
	 * NOTE(review): this keeps only the pfn bits at or above
	 * (32 - MMU_PAGESHIFT) and repositions them at bit 32 —
	 * presumably reconstructing the register block's physical base
	 * address; confirm against the CMU-CH address map.
	 */
	pcb_p->pcb_base_pa  = pa = pa >> (32 - MMU_PAGESHIFT) << 32;
	pcb_p->pcb_map_pa = pa + PCMU_IB_OBIO_INTR_MAP_REG_OFFSET;
	pcb_p->pcb_clr_pa = pa + PCMU_IB_OBIO_CLEAR_INTR_REG_OFFSET;
	pcb_p->pcb_obsta_pa = pa + PCMU_IB_OBIO_INTR_STATE_DIAG_REG;

	/* Read the control/status register via its physical address. */
	csr_pa = pa + PCMU_CB_CONTROL_STATUS_REG_OFFSET;
	csr = lddphysio(csr_pa);

	/*
	 * Clear any pending address parity errors.
	 * (Writing the APERR bit back presumably clears it — confirm.)
	 */
	if (csr & PCMU_CB_CONTROL_STATUS_APERR) {
		csr |= PCMU_CB_CONTROL_STATUS_APERR;
		cmn_err(CE_WARN, "clearing UPA address parity error\n");
	}
	/* Set APCKEN and clear IAP before writing the register back. */
	csr |= PCMU_CB_CONTROL_STATUS_APCKEN;
	csr &= ~PCMU_CB_CONTROL_STATUS_IAP;
	stdphysio(csr_pa, csr);

	/* Allocate and initialize the U2U interrupt translation state. */
	u2u_ittrans_init(pcmu_p,
	    (u2u_ittrans_data_t **)&pcb_p->pcb_ittrans_cookie);
}
1541
1542void
1543pcmu_ecc_setup(pcmu_ecc_t *pecc_p)
1544{
1545	pecc_p->pecc_ue.pecc_errpndg_mask = 0;
1546	pecc_p->pecc_ue.pecc_offset_mask = PCMU_ECC_UE_AFSR_DW_OFFSET;
1547	pecc_p->pecc_ue.pecc_offset_shift = PCMU_ECC_UE_AFSR_DW_OFFSET_SHIFT;
1548	pecc_p->pecc_ue.pecc_size_log2 = 3;
1549}
1550
1551static uintptr_t
1552get_pbm_reg_base(pcmu_t *pcmu_p)
1553{
1554	return ((uintptr_t)(pcmu_p->pcmu_address[0]));
1555}
1556
1557void
1558pcmu_pbm_setup(pcmu_pbm_t *pcbm_p)
1559{
1560	pcmu_t *pcmu_p = pcbm_p->pcbm_pcmu_p;
1561
1562	/*
1563	 * Get the base virtual address for the PBM control block.
1564	 */
1565	uintptr_t a = get_pbm_reg_base(pcmu_p);
1566
1567	/*
1568	 * Get the virtual address of the PCI configuration header.
1569	 * This should be mapped little-endian.
1570	 */
1571	pcbm_p->pcbm_config_header =
1572	    (config_header_t *)get_config_reg_base(pcmu_p);
1573
1574	/*
1575	 * Get the virtual addresses for control, error and diag
1576	 * registers.
1577	 */
1578	pcbm_p->pcbm_ctrl_reg = (uint64_t *)(a + PCMU_PCI_CTRL_REG_OFFSET);
1579	pcbm_p->pcbm_diag_reg = (uint64_t *)(a + PCMU_PCI_DIAG_REG_OFFSET);
1580	pcbm_p->pcbm_async_flt_status_reg =
1581	    (uint64_t *)(a + PCMU_PCI_ASYNC_FLT_STATUS_REG_OFFSET);
1582	pcbm_p->pcbm_async_flt_addr_reg =
1583	    (uint64_t *)(a + PCMU_PCI_ASYNC_FLT_ADDR_REG_OFFSET);
1584}
1585
/*ARGSUSED*/
void
pcmu_pbm_teardown(pcmu_pbm_t *pcbm_p)
{
	/* Nothing to undo: pcmu_pbm_setup() only computes virtual addrs. */
}
1591
1592int
1593pcmu_get_numproxy(dev_info_t *dip)
1594{
1595	return (ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1596	    "#upa-interrupt-proxies", 1));
1597}
1598
1599int
1600pcmu_get_portid(dev_info_t *dip)
1601{
1602	return (ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1603	    "portid", -1));
1604}
1605
1606/*
1607 * CMU-CH Performance Events.
1608 */
static pcmu_kev_mask_t
pcicmu_pcmu_events[] = {
	/* kstat event name, performance-counter event select code */
	{"pio_cycles_b", 0xf},		{"interrupts", 0x11},
	{"upa_inter_nack", 0x12},	{"pio_reads", 0x13},
	{"pio_writes", 0x14},
	{"clear_pic", 0x1f}
};
1616
1617/*
1618 * Create the picN kstat's.
1619 */
1620void
1621pcmu_kstat_init()
1622{
1623	pcmu_name_kstat = (pcmu_ksinfo_t *)kmem_alloc(sizeof (pcmu_ksinfo_t),
1624	    KM_NOSLEEP);
1625
1626	if (pcmu_name_kstat == NULL) {
1627		cmn_err(CE_WARN, "pcicmu : no space for kstat\n");
1628	} else {
1629		pcmu_name_kstat->pic_no_evs =
1630		    sizeof (pcicmu_pcmu_events) / sizeof (pcmu_kev_mask_t);
1631		pcmu_name_kstat->pic_shift[0] = PCMU_SHIFT_PIC0;
1632		pcmu_name_kstat->pic_shift[1] = PCMU_SHIFT_PIC1;
1633		pcmu_create_name_kstat("pcmup",
1634		    pcmu_name_kstat, pcicmu_pcmu_events);
1635	}
1636}
1637
1638/*
1639 * Called from _fini()
1640 */
1641void
1642pcmu_kstat_fini()
1643{
1644	if (pcmu_name_kstat != NULL) {
1645		pcmu_delete_name_kstat(pcmu_name_kstat);
1646		kmem_free(pcmu_name_kstat, sizeof (pcmu_ksinfo_t));
1647		pcmu_name_kstat = NULL;
1648	}
1649}
1650
1651/*
1652 * Create the performance 'counters' kstat.
1653 */
1654void
1655pcmu_add_upstream_kstat(pcmu_t *pcmu_p)
1656{
1657	pcmu_cntr_pa_t	*cntr_pa_p = &pcmu_p->pcmu_uks_pa;
1658	uint64_t regbase = va_to_pa((void *)get_reg_base(pcmu_p));
1659
1660	cntr_pa_p->pcr_pa = regbase + PCMU_PERF_PCR_OFFSET;
1661	cntr_pa_p->pic_pa = regbase + PCMU_PERF_PIC_OFFSET;
1662	pcmu_p->pcmu_uksp = pcmu_create_cntr_kstat(pcmu_p, "pcmup",
1663	    NUM_OF_PICS, pcmu_cntr_kstat_pa_update, cntr_pa_p);
1664}
1665
1666/*
 * u2u_ittrans_init() is called from pcmu_cb_setup() in pcicmu.c, per CMU.
1668 * Second argument "ittrans_cookie" is address of pcb_ittrans_cookie in
1669 * pcb_p member. allocated interrupt block is returned in it.
1670 */
static void
u2u_ittrans_init(pcmu_t *pcmu_p, u2u_ittrans_data_t **ittrans_cookie)
{

	u2u_ittrans_data_t *u2u_trans_p;
	ddi_device_acc_attr_t attr;
	int ret;
	int board;

	/*
	 * Allocate the data structure to support U2U's
	 * interrupt target translations.
	 */
	u2u_trans_p = (u2u_ittrans_data_t *)
	    kmem_zalloc(sizeof (u2u_ittrans_data_t), KM_SLEEP);

	/*
	 * Get other properties, "board#"
	 */
	board = ddi_getprop(DDI_DEV_T_ANY, pcmu_p->pcmu_dip,
	    DDI_PROP_DONTPASS, "board#", -1);

	u2u_trans_p->u2u_board = board;

	if (board == -1) {
		/* this cannot happen on production systems */
		cmn_err(CE_PANIC, "u2u:Invalid property;board = %d", board);
	}

	/*
	 * Initialize interrupt target translations mutex.
	 */
	mutex_init(&(u2u_trans_p->u2u_ittrans_lock), "u2u_ittrans_lock",
	    MUTEX_DEFAULT, NULL);

	/*
	 * Map the U2U register space (strict-order, native-endian access).
	 */
	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;

	ret = ddi_regs_map_setup(pcmu_p->pcmu_dip,
	    REGS_INDEX_OF_U2U, (caddr_t *)(&(u2u_trans_p->u2u_regs_base)),
	    0, 0, &attr, &(u2u_trans_p->u2u_acc));

	/*
	 * A mapping failure is unrecoverable for this nexus.
	 */
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_PANIC, "u2u%d: registers map setup failed", board);
	}

	/*
	 * Read the U2U port id and cross-check it against the CMU id.
	 * NOTE(review): this performs a 32-bit read at the PID register
	 * offset although the original comment said "1 byte" — confirm
	 * the register width.
	 */
	u2u_trans_p->u2u_port_id = *(volatile int32_t *)
	    (u2u_trans_p->u2u_regs_base + U2U_PID_REGISTER_OFFSET);

	if (pcmu_p->pcmu_id != u2u_trans_p->u2u_port_id) {
		cmn_err(CE_PANIC, "u2u%d: Invalid Port-ID", board);
	}

	/* Hand the fully initialized block back through the cookie. */
	*ittrans_cookie = u2u_trans_p;
}
1736
1737/*
 * u2u_ittrans_resume() is called from pcmu_obj_resume() at DDI_RESUME entry.
1739 */
1740static void
1741u2u_ittrans_resume(u2u_ittrans_data_t **ittrans_cookie)
1742{
1743
1744	u2u_ittrans_data_t *u2u_trans_p;
1745	u2u_ittrans_id_t *ittrans_id_p;
1746	uintptr_t  data_reg_addr;
1747	int ix;
1748
1749	u2u_trans_p = *ittrans_cookie;
1750
1751	/*
1752	 * Set U2U Data Register
1753	 */
1754	for (ix = 0; ix < U2U_DATA_NUM; ix++) {
1755		ittrans_id_p = &(u2u_trans_p->u2u_ittrans_id[ix]);
1756		data_reg_addr = u2u_trans_p->u2u_regs_base +
1757		    U2U_DATA_REGISTER_OFFSET + (ix * sizeof (uint64_t));
1758		if (ittrans_id_p->u2u_ino_map_reg == NULL) {
1759			/* This index was not set */
1760			continue;
1761		}
1762		*(volatile uint32_t *) (data_reg_addr) =
1763		    (uint32_t)ittrans_id_p->u2u_tgt_cpu_id;
1764
1765	}
1766}
1767
1768/*
 * u2u_ittrans_uninit() is called from ib_destroy() at detach,
 * or when an error occurs during attach.
1771 */
1772static void
1773u2u_ittrans_uninit(u2u_ittrans_data_t *ittrans_cookie)
1774{
1775
1776	if (ittrans_cookie == NULL) {
1777		return;	/* not support */
1778	}
1779
1780	if (ittrans_cookie == (u2u_ittrans_data_t *)(-1)) {
1781		return;	 /* illeagal case */
1782	}
1783
1784	ddi_regs_map_free(&(ittrans_cookie->u2u_acc));
1785	mutex_destroy(&(ittrans_cookie->u2u_ittrans_lock));
1786	kmem_free((void *)ittrans_cookie, sizeof (u2u_ittrans_data_t));
1787}
1788
1789/*
1790 * This routine,u2u_translate_tgtid(, , cpu_id, pino_map_reg),
1791 * searches index having same value of pino_map_reg, or empty.
1792 * Then, stores cpu_id in a U2U Data Register as this index,
1793 * and return this index.
1794 */
1795int
1796u2u_translate_tgtid(pcmu_t *pcmu_p, uint_t cpu_id,
1797    volatile uint64_t *pino_map_reg)
1798{
1799
1800	int index = -1;
1801	int ix;
1802	int err_level;	/* severity level for cmn_err */
1803	u2u_ittrans_id_t *ittrans_id_p;
1804	uintptr_t  data_reg_addr;
1805	u2u_ittrans_data_t *ittrans_cookie;
1806
1807	ittrans_cookie =
1808	    (u2u_ittrans_data_t *)(pcmu_p->pcmu_cb_p->pcb_ittrans_cookie);
1809
1810	if (ittrans_cookie == NULL) {
1811		return (cpu_id);
1812	}
1813
1814	if (ittrans_cookie == (u2u_ittrans_data_t *)(-1)) {
1815		return (-1);	 /* illeagal case */
1816	}
1817
1818	mutex_enter(&(ittrans_cookie->u2u_ittrans_lock));
1819
1820	/*
1821	 * Decide index No. of U2U Data registers in either
1822	 * already used by same pino_map_reg, or empty.
1823	 */
1824	for (ix = 0; ix < U2U_DATA_NUM; ix++) {
1825		ittrans_id_p = &(ittrans_cookie->u2u_ittrans_id[ix]);
1826		if (ittrans_id_p->u2u_ino_map_reg == pino_map_reg) {
1827			/* already used this pino_map_reg */
1828			index = ix;
1829			break;
1830		}
1831		if (index == -1 &&
1832		    ittrans_id_p->u2u_ino_map_reg == NULL) {
1833			index = ix;
1834		}
1835	}
1836
1837	if (index == -1) {
1838		if (panicstr) {
1839			err_level = CE_WARN;
1840		} else {
1841			err_level = CE_PANIC;
1842		}
1843		cmn_err(err_level, "u2u%d:No more U2U-Data regs!!",
1844		    ittrans_cookie->u2u_board);
1845		return (cpu_id);
1846	}
1847
1848	/*
1849	 * For U2U
1850	 * set cpu_id into u2u_data_reg by index.
1851	 * ((uint64_t)(u2u_regs_base
1852	 *	+ U2U_DATA_REGISTER_OFFSET))[index] = cpu_id;
1853	 */
1854
1855	data_reg_addr = ittrans_cookie->u2u_regs_base
1856	    + U2U_DATA_REGISTER_OFFSET
1857	    + (index * sizeof (uint64_t));
1858
1859	/*
1860	 * Set cpu_id into U2U Data register[index]
1861	 */
1862	*(volatile uint32_t *) (data_reg_addr) = (uint32_t)cpu_id;
1863
1864	/*
1865	 * Setup for software, excepting at panicing.
1866	 * and rebooting, etc...?
1867	 */
1868	if (!panicstr) {
1869		ittrans_id_p = &(ittrans_cookie->u2u_ittrans_id[index]);
1870		ittrans_id_p->u2u_tgt_cpu_id = cpu_id;
1871		ittrans_id_p->u2u_ino_map_reg = pino_map_reg;
1872	}
1873
1874	mutex_exit(&(ittrans_cookie->u2u_ittrans_lock));
1875
1876	return (index);
1877}
1878
1879/*
1880 * u2u_ittrans_cleanup() is called from common_pcmu_ib_intr_disable()
1881 * after called intr_rem_cpu(mondo).
1882 */
1883void
1884u2u_ittrans_cleanup(u2u_ittrans_data_t *ittrans_cookie,
1885			volatile uint64_t *pino_map_reg)
1886{
1887
1888	int ix;
1889	u2u_ittrans_id_t *ittrans_id_p;
1890
1891	if (ittrans_cookie == NULL) {
1892		return;
1893	}
1894
1895	if (ittrans_cookie == (u2u_ittrans_data_t *)(-1)) {
1896		return;	 /* illeagal case */
1897	}
1898
1899	mutex_enter(&(ittrans_cookie->u2u_ittrans_lock));
1900
1901	for (ix = 0; ix < U2U_DATA_NUM; ix++) {
1902		ittrans_id_p = &(ittrans_cookie->u2u_ittrans_id[ix]);
1903		if (ittrans_id_p->u2u_ino_map_reg == pino_map_reg) {
1904			ittrans_id_p->u2u_ino_map_reg = NULL;
1905			break;
1906		}
1907	}
1908
1909	mutex_exit(&(ittrans_cookie->u2u_ittrans_lock));
1910}
1911
1912/*
1913 * pcmu_ecc_classify, called by ecc_handler to classify ecc errors
1914 * and determine if we should panic or not.
1915 */
1916void
1917pcmu_ecc_classify(uint64_t err, pcmu_ecc_errstate_t *ecc_err_p)
1918{
1919	struct async_flt *ecc = &ecc_err_p->ecc_aflt;
1920	/* LINTED */
1921	pcmu_t *pcmu_p = ecc_err_p->ecc_ii_p.pecc_p->pecc_pcmu_p;
1922
1923	ASSERT(MUTEX_HELD(&pcmu_p->pcmu_err_mutex));
1924
1925	ecc_err_p->ecc_bridge_type = PCI_OPLCMU;	/* RAGS */
1926	/*
1927	 * Get the parent bus id that caused the error.
1928	 */
1929	ecc_err_p->ecc_dev_id = (ecc_err_p->ecc_afsr & PCMU_ECC_UE_AFSR_ID)
1930	    >> PCMU_ECC_UE_AFSR_ID_SHIFT;
1931	/*
1932	 * Determine the doubleword offset of the error.
1933	 */
1934	ecc_err_p->ecc_dw_offset = (ecc_err_p->ecc_afsr &
1935	    PCMU_ECC_UE_AFSR_DW_OFFSET) >> PCMU_ECC_UE_AFSR_DW_OFFSET_SHIFT;
1936	/*
1937	 * Determine the primary error type.
1938	 */
1939	switch (err) {
1940	case PCMU_ECC_UE_AFSR_E_PIO:
1941		if (ecc_err_p->pecc_pri) {
1942			ecc->flt_erpt_class = PCI_ECC_PIO_UE;
1943		} else {
1944			ecc->flt_erpt_class = PCI_ECC_SEC_PIO_UE;
1945		}
1946		/* For CMU-CH, a UE is always fatal. */
1947		ecc->flt_panic = 1;
1948		break;
1949
1950	default:
1951		return;
1952	}
1953}
1954
1955/*
1956 * pcmu_pbm_classify, called by pcmu_pbm_afsr_report to classify piow afsr.
1957 */
1958int
1959pcmu_pbm_classify(pcmu_pbm_errstate_t *pbm_err_p)
1960{
1961	uint32_t e;
1962	int nerr = 0;
1963	char **tmp_class;
1964
1965	if (pbm_err_p->pcbm_pri) {
1966		tmp_class = &pbm_err_p->pcbm_pci.pcmu_err_class;
1967		e = PBM_AFSR_TO_PRIERR(pbm_err_p->pbm_afsr);
1968		pbm_err_p->pbm_log = FM_LOG_PCI;
1969	} else {
1970		tmp_class = &pbm_err_p->pbm_err_class;
1971		e = PBM_AFSR_TO_SECERR(pbm_err_p->pbm_afsr);
1972		pbm_err_p->pbm_log = FM_LOG_PBM;
1973	}
1974
1975	if (e & PCMU_PCI_AFSR_E_MA) {
1976		*tmp_class = pbm_err_p->pcbm_pri ? PCI_MA : PCI_SEC_MA;
1977		nerr++;
1978	}
1979	return (nerr);
1980}
1981
1982/*
1983 * Function used to clear PBM/PCI/IOMMU error state after error handling
1984 * is complete. Only clearing error bits which have been logged. Called by
1985 * pcmu_pbm_err_handler and pcmu_bus_exit.
1986 */
static void
pcmu_clear_error(pcmu_t *pcmu_p, pcmu_pbm_errstate_t *pbm_err_p)
{
	pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;

	ASSERT(MUTEX_HELD(&pcbm_p->pcbm_pcmu_p->pcmu_err_mutex));

	/*
	 * Write the previously captured values back to the control,
	 * AFSR and config status registers to clear the logged error
	 * bits (the status bits are presumably write-one-to-clear —
	 * confirm against the CMU-CH register spec).
	 */
	*pcbm_p->pcbm_ctrl_reg = pbm_err_p->pbm_ctl_stat;
	*pcbm_p->pcbm_async_flt_status_reg = pbm_err_p->pbm_afsr;
	pcbm_p->pcbm_config_header->ch_status_reg =
	    pbm_err_p->pcbm_pci.pcmu_cfg_stat;
}
1999
/*
 * Capture, classify and report PBM/PCI error state, then clear the
 * logged bits.  Returns DDI_FM_FATAL, DDI_FM_NONFATAL, DDI_FM_UNKNOWN
 * or DDI_FM_OK.  Caller must hold pcmu_err_mutex.
 */
/*ARGSUSED*/
int
pcmu_pbm_err_handler(dev_info_t *dip, ddi_fm_error_t *derr,
		const void *impl_data, int caller)
{
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	uint32_t prierr, secerr;
	pcmu_pbm_errstate_t pbm_err;
	pcmu_t *pcmu_p = (pcmu_t *)impl_data;
	int ret = 0;

	ASSERT(MUTEX_HELD(&pcmu_p->pcmu_err_mutex));
	pcmu_pbm_errstate_get(pcmu_p, &pbm_err);

	/* Ensure the error event carries a valid ENA. */
	derr->fme_ena = derr->fme_ena ? derr->fme_ena :
	    fm_ena_generate(0, FM_ENA_FMT1);

	prierr = PBM_AFSR_TO_PRIERR(pbm_err.pbm_afsr);
	secerr = PBM_AFSR_TO_SECERR(pbm_err.pbm_afsr);

	if (derr->fme_flag == DDI_FM_ERR_PEEK) {
		/*
		 * For ddi_peek treat all events as nonfatal. We only
		 * really call this function so that pcmu_clear_error()
		 * and ndi_fm_handler_dispatch() will get called.
		 */
		nonfatal++;
		goto done;
	} else if (derr->fme_flag == DDI_FM_ERR_POKE) {
		/*
		 * For ddi_poke we can treat as nonfatal if the
		 * following conditions are met :
		 * 1. Make sure only primary error is MA/TA
		 * 2. Make sure no secondary error
		 * 3. check pci config header stat reg to see MA/TA is
		 *    logged. We cannot verify only MA/TA is recorded
		 *    since it gets much more complicated when a
		 *    PCI-to-PCI bridge is present.
		 */
		if ((prierr == PCMU_PCI_AFSR_E_MA) && !secerr &&
		    (pbm_err.pcbm_pci.pcmu_cfg_stat & PCI_STAT_R_MAST_AB)) {
			nonfatal++;
			goto done;
		}
	}

	/* Report AFSR-logged errors, if any. */
	if (prierr || secerr) {
		ret = pcmu_pbm_afsr_report(dip, derr->fme_ena, &pbm_err);
		if (ret == DDI_FM_FATAL) {
			fatal++;
		} else {
			nonfatal++;
		}
	}

	ret = pcmu_cfg_report(dip, derr, &pbm_err.pcbm_pci, caller, prierr);
	if (ret == DDI_FM_FATAL) {
		fatal++;
	} else if (ret == DDI_FM_NONFATAL) {
		nonfatal++;
	}

	/*
	 * NOTE(review): when control falls through from above, ret is
	 * tested a second time here, so fatal/nonfatal may be counted
	 * twice.  Harmless because only non-zero-ness is used below,
	 * and this is the only path that counts DDI_FM_UNKNOWN.
	 */
done:
	if (ret == DDI_FM_FATAL) {
		fatal++;
	} else if (ret == DDI_FM_NONFATAL) {
		nonfatal++;
	} else if (ret == DDI_FM_UNKNOWN) {
		unknown++;
	}

	/* Cleanup and reset error bits */
	pcmu_clear_error(pcmu_p, &pbm_err);

	/* Severity order: fatal > nonfatal > unknown > ok. */
	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
2079
2080int
2081pcmu_check_error(pcmu_t *pcmu_p)
2082{
2083	pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;
2084	uint16_t pcmu_cfg_stat;
2085	uint64_t pbm_afsr;
2086
2087	ASSERT(MUTEX_HELD(&pcmu_p->pcmu_err_mutex));
2088
2089	pcmu_cfg_stat = pcbm_p->pcbm_config_header->ch_status_reg;
2090	pbm_afsr = *pcbm_p->pcbm_async_flt_status_reg;
2091
2092	if ((pcmu_cfg_stat & (PCI_STAT_S_PERROR | PCI_STAT_S_TARG_AB |
2093	    PCI_STAT_R_TARG_AB | PCI_STAT_R_MAST_AB |
2094	    PCI_STAT_S_SYSERR | PCI_STAT_PERROR)) ||
2095	    (PBM_AFSR_TO_PRIERR(pbm_afsr))) {
2096		return (1);
2097	}
2098	return (0);
2099
2100}
2101
2102/*
2103 * Function used to gather PBM/PCI error state for the
2104 * pcmu_pbm_err_handler. This function must be called while pcmu_err_mutex
2105 * is held.
2106 */
static void
pcmu_pbm_errstate_get(pcmu_t *pcmu_p, pcmu_pbm_errstate_t *pbm_err_p)
{
	pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;

	ASSERT(MUTEX_HELD(&pcmu_p->pcmu_err_mutex));
	bzero(pbm_err_p, sizeof (pcmu_pbm_errstate_t));

	/*
	 * Capture all pbm error state for later logging
	 */
	pbm_err_p->pbm_bridge_type = PCI_OPLCMU;	/* RAGS */
	pbm_err_p->pcbm_pci.pcmu_cfg_stat =
	    pcbm_p->pcbm_config_header->ch_status_reg;
	pbm_err_p->pbm_ctl_stat = *pcbm_p->pcbm_ctrl_reg;
	pbm_err_p->pcbm_pci.pcmu_cfg_comm =
	    pcbm_p->pcbm_config_header->ch_command_reg;
	pbm_err_p->pbm_afsr = *pcbm_p->pcbm_async_flt_status_reg;
	/*
	 * NOTE(review): the AFAR register is dereferenced twice below;
	 * these are two separate volatile reads and could in principle
	 * return different values.
	 */
	pbm_err_p->pbm_afar = *pcbm_p->pcbm_async_flt_addr_reg;
	pbm_err_p->pcbm_pci.pcmu_pa = *pcbm_p->pcbm_async_flt_addr_reg;
}
2128
static void
pcmu_pbm_clear_error(pcmu_pbm_t *pcbm_p)
{
	uint64_t pbm_afsr;

	/*
	 * for poke() support - called from POKE_FLUSH. Spin waiting
	 * for MA, TA or SERR to be cleared by a pcmu_pbm_error_intr().
	 * We have to wait for SERR too in case the device is beyond
	 * a pci-pci bridge.
	 *
	 * NOTE(review): this spin is unbounded; it relies on the PBM
	 * error interrupt eventually clearing the AFSR bits.
	 */
	pbm_afsr = *pcbm_p->pcbm_async_flt_status_reg;
	while (((pbm_afsr >> PCMU_PCI_AFSR_PE_SHIFT) &
	    (PCMU_PCI_AFSR_E_MA | PCMU_PCI_AFSR_E_TA))) {
		pbm_afsr = *pcbm_p->pcbm_async_flt_status_reg;
	}
}
2146
void
pcmu_err_create(pcmu_t *pcmu_p)
{
	/*
	 * PCI detected ECC errorq, to schedule async handling
	 * of ECC errors and logging.
	 * The errorq is created here but destroyed when _fini is called
	 * for the pci module.
	 *
	 * The queue is module-global and shared by all instances; it is
	 * only created by the first instance to attach.
	 */
	if (pcmu_ecc_queue == NULL) {
		pcmu_ecc_queue = errorq_create("pcmu_ecc_queue",
		    (errorq_func_t)pcmu_ecc_err_drain,
		    (void *)NULL,
		    ECC_MAX_ERRS, sizeof (pcmu_ecc_errstate_t),
		    PIL_2, ERRORQ_VITAL);
		if (pcmu_ecc_queue == NULL)
			panic("failed to create required system error queue");
	}

	/*
	 * Initialize error handling mutex.
	 */
	mutex_init(&pcmu_p->pcmu_err_mutex, NULL, MUTEX_DRIVER,
	    (void *)pcmu_p->pcmu_fm_ibc);
}
2172
/* Tear down the per-instance error mutex created by pcmu_err_create(). */
void
pcmu_err_destroy(pcmu_t *pcmu_p)
{
	mutex_destroy(&pcmu_p->pcmu_err_mutex);
}
2178
2179/*
2180 * Function used to post PCI block module specific ereports.
2181 */
2182void
2183pcmu_pbm_ereport_post(dev_info_t *dip, uint64_t ena,
2184    pcmu_pbm_errstate_t *pbm_err)
2185{
2186	char *aux_msg;
2187	uint32_t prierr, secerr;
2188	pcmu_t *pcmu_p;
2189	int instance = ddi_get_instance(dip);
2190
2191	ena = ena ? ena : fm_ena_generate(0, FM_ENA_FMT1);
2192
2193	pcmu_p = get_pcmu_soft_state(instance);
2194	prierr = PBM_AFSR_TO_PRIERR(pbm_err->pbm_afsr);
2195	secerr = PBM_AFSR_TO_SECERR(pbm_err->pbm_afsr);
2196	if (prierr)
2197		aux_msg = "PCI primary error: Master Abort";
2198	else if (secerr)
2199		aux_msg = "PCI secondary error: Master Abort";
2200	else
2201		aux_msg = "";
2202	cmn_err(CE_WARN, "%s %s: %s %s=0x%lx, %s=0x%lx, %s=0x%lx %s=0x%x",
2203	    (pcmu_p->pcmu_pcbm_p)->pcbm_nameinst_str,
2204	    (pcmu_p->pcmu_pcbm_p)->pcbm_nameaddr_str,
2205	    aux_msg,
2206	    PCI_PBM_AFAR, pbm_err->pbm_afar,
2207	    PCI_PBM_AFSR, pbm_err->pbm_afsr,
2208	    PCI_PBM_CSR, pbm_err->pbm_ctl_stat,
2209	    "portid", pcmu_p->pcmu_id);
2210}
2211