/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * PCI PBM implementation:
 *	initialization
 *	Bus error interrupt handler
 */

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/spl.h>
#include <sys/sysmacros.h>
#include <sys/sunddi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/machsystm.h>	/* ldphysio() */
#include <sys/async.h>
#include <sys/ddi_impldefs.h>
#include <sys/ontrap.h>
#include <sys/pci/pci_obj.h>
#include <sys/membar.h>
#include <sys/ivintr.h>

/*LINTLIBRARY*/

static uint_t pbm_error_intr(caddr_t a);

/* The nexus interrupt priority values */
int pci_pil[] = {14, 14, 14, 14, 14, 14};
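
/*
 * Create the PBM state structure for this pci node: allocate it and
 * cross-link it with the pci_t, derive the mem32/mem64 PFN range from
 * the "ranges" property, register the panic-time error-disable hook,
 * create the interrupt-priorities property if it does not already
 * exist and configure the PBM hardware.
 */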
void
pbm_create(pci_t *pci_p)
{
	pbm_t *pbm_p;
	int i, len;
	int nrange = pci_p->pci_ranges_length / sizeof (pci_ranges_t);
	dev_info_t *dip = pci_p->pci_dip;
	pci_ranges_t *rangep = pci_p->pci_ranges;
	uint64_t base_addr, last_addr;

#ifdef lint
	dip = dip;
#endif

	/*
	 * Allocate a state structure for the PBM and cross-link it
	 * to its per pci node state structure.
	 */
	pbm_p = (pbm_t *)kmem_zalloc(sizeof (pbm_t), KM_SLEEP);
	pci_p->pci_pbm_p = pbm_p;
	pbm_p->pbm_pci_p = pci_p;

	len = snprintf(pbm_p->pbm_nameinst_str,
		sizeof (pbm_p->pbm_nameinst_str),
		"%s%d", NAMEINST(dip));
	pbm_p->pbm_nameaddr_str = pbm_p->pbm_nameinst_str + ++len;
	(void) snprintf(pbm_p->pbm_nameaddr_str,
		sizeof (pbm_p->pbm_nameinst_str) - len,
		"%s@%s", NAMEADDR(dip));

	pci_pbm_setup(pbm_p);

	/*
	 * Get this pbm's mem32 and mem64 segments to determine whether
	 * a dma object originates from this pbm, i.e. dev to dev dma.
	 */
	/* Init all of our boundaries */
	base_addr = -1ull;
	last_addr = 0ull;

	for (i = 0; i < nrange; i++, rangep++) {
		uint32_t rng_type = rangep->child_high & PCI_ADDR_MASK;
		if (rng_type == PCI_ADDR_MEM32 || rng_type == PCI_ADDR_MEM64) {
			uint64_t rng_addr, rng_size;

			rng_addr = (uint64_t)rangep->parent_high << 32;
			rng_addr |= (uint64_t)rangep->parent_low;
			rng_size = (uint64_t)rangep->size_high << 32;
			rng_size |= (uint64_t)rangep->size_low;
			base_addr = MIN(rng_addr, base_addr);
			last_addr = MAX(rng_addr + rng_size, last_addr);
		}
	}
	pbm_p->pbm_base_pfn = mmu_btop(base_addr);
	pbm_p->pbm_last_pfn = mmu_btop(last_addr);

	DEBUG4(DBG_ATTACH, dip,
		"pbm_create: ctrl=%x, afsr=%x, afar=%x, diag=%x\n",
		pbm_p->pbm_ctrl_reg, pbm_p->pbm_async_flt_status_reg,
		pbm_p->pbm_async_flt_addr_reg, pbm_p->pbm_diag_reg);
	DEBUG1(DBG_ATTACH, dip, "pbm_create: conf=%x\n",
		pbm_p->pbm_config_header);

	/*
	 * Register a function to disable pbm error interrupts during a panic.
	 */
	bus_func_register(BF_TYPE_ERRDIS,
	    (busfunc_t)pbm_disable_pci_errors, pbm_p);

	/*
	 * create the interrupt-priorities property if it doesn't
	 * already exist to provide a hint as to the PIL level for
	 * our interrupt.
	 */
	if (ddi_getproplen(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "interrupt-priorities",
	    &len) != DDI_PROP_SUCCESS) {
		/* Create the interrupt-priorities property. */
		(void) ddi_prop_create(DDI_DEV_T_NONE, dip,
		    DDI_PROP_CANSLEEP, "interrupt-priorities",
		    (caddr_t)pci_pil, sizeof (pci_pil));
	}

	pbm_configure(pbm_p);

	/*
	 * Determine if we need to apply the Sun Fire 15k AXQ/PIO
	 * workaround.
	 */
	pci_axq_pio_limit(pbm_p);
}

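/*
 * Register the PBM error interrupt handler for this pci node and
 * create the pokefault mutex at the PIL just below the error
 * interrupt.  Returns a PCI_ATTACH_RETCODE value.
 */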
int
pbm_register_intr(pbm_t *pbm_p)
{
	pci_t		*pci_p = pbm_p->pbm_pci_p;
	uint32_t	mondo;
	int		r = DDI_SUCCESS;

	ib_nintr_clear(pci_p->pci_ib_p, pci_p->pci_inos[CBNINTR_PBM]);

	/*
	 * Install the PCI error interrupt handler.
	 */
	mondo = IB_INO_TO_MONDO(pci_p->pci_ib_p, pci_p->pci_inos[CBNINTR_PBM]);
	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);

	VERIFY(add_ivintr(mondo, pci_pil[CBNINTR_PBM], (intrfunc)pbm_error_intr,
	    (caddr_t)pci_p, NULL, NULL) == 0);

	pbm_p->pbm_iblock_cookie = (void *)(uintptr_t)pci_pil[CBNINTR_PBM];

	/*
	 * Create the pokefault mutex at the PIL below the error interrupt.
	 */
	mutex_init(&pbm_p->pbm_pokefault_mutex, NULL, MUTEX_DRIVER,
	    (void *)(uintptr_t)ipltospl(spltoipl(
	    (int)(uintptr_t)pbm_p->pbm_iblock_cookie) - 1));

	if (!r)
		r = pci_pbm_add_intr(pci_p);
	return (PCI_ATTACH_RETCODE(PCI_PBM_OBJ, PCI_OBJ_INTR_ADD, r));
}

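/*
 * Tear down the PBM: remove the error interrupt and the panic-time
 * error-disable hook, destroy the pokefault mutex and free the pbm
 * state structure.
 */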
void
pbm_destroy(pci_t *pci_p)
{
	pbm_t		*pbm_p = pci_p->pci_pbm_p;
	ib_t		*ib_p = pci_p->pci_ib_p;
	uint32_t	mondo;

	DEBUG0(DBG_DETACH, pci_p->pci_dip, "pbm_destroy:\n");

	mondo = IB_INO_TO_MONDO(pci_p->pci_ib_p, pci_p->pci_inos[CBNINTR_PBM]);
	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);

	/*
	 * Free the pokefault mutex.
	 */
	mutex_destroy(&pbm_p->pbm_pokefault_mutex);

	/*
	 * Remove the error interrupt and consistent dma sync handler.
	 */
	intr_dist_rem(pbm_intr_dist, pbm_p);
	pci_pbm_rem_intr(pci_p);
	ib_intr_disable(ib_p, pci_p->pci_inos[CBNINTR_PBM], IB_INTR_WAIT);
	VERIFY(rem_ivintr(mondo, pci_pil[CBNINTR_PBM]) == 0);

	/*
	 * Remove the error disable function.
	 */
	bus_func_unregister(BF_TYPE_ERRDIS,
	    (busfunc_t)pbm_disable_pci_errors, pbm_p);

	pci_pbm_teardown(pbm_p);

	/*
	 * Free the pbm state structure.
	 */
	kmem_free(pbm_p, sizeof (pbm_t));
	pci_p->pci_pbm_p = NULL;
}

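/*
 * PBM error interrupt handler.  Builds a ddi_fm_error_t based on the
 * type of protection currently in effect (cautious access, ddi_poke
 * or none) and passes it to pci_pbm_err_handler; panics on fatal
 * errors when pci_panic_on_fatal_errors is set.
 */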
static uint_t
pbm_error_intr(caddr_t a)
{
	pci_t *pci_p = (pci_t *)a;
	pbm_t *pbm_p = pci_p->pci_pbm_p;
	ddi_fm_error_t derr;
	int err = DDI_FM_OK;
	on_trap_data_t *otp = pbm_p->pbm_ontrap_data;

	bzero(&derr, sizeof (ddi_fm_error_t));
	derr.fme_version = DDI_FME_VERSION;
	mutex_enter(&pci_p->pci_common_p->pci_fm_mutex);
	if (pbm_p->pbm_excl_handle != NULL) {
		/*
		 * cautious write protection, protected from all errors.
		 */
		ASSERT(MUTEX_HELD(&pbm_p->pbm_pokefault_mutex));
		ddi_fm_acc_err_get(pbm_p->pbm_excl_handle, &derr,
				DDI_FME_VERSION);
		ASSERT(derr.fme_flag == DDI_FM_ERR_EXPECTED);
		derr.fme_acc_handle = pbm_p->pbm_excl_handle;
		err = pci_pbm_err_handler(pci_p->pci_dip, &derr, (void *)pci_p,
		    PCI_INTR_CALL);
	} else if ((otp != NULL) && (otp->ot_prot & OT_DATA_ACCESS)) {
		/*
		 * ddi_poke protection, check nexus and children for
		 * expected errors.
		 */
		otp->ot_trap |= OT_DATA_ACCESS;
		membar_sync();
		derr.fme_flag = DDI_FM_ERR_POKE;
		err = pci_pbm_err_handler(pci_p->pci_dip, &derr, (void *)pci_p,
				PCI_INTR_CALL);
	} else if (pci_check_error(pci_p) != 0) {
		/*
		 * unprotected error, check for all errors.
		 */
		if (pci_errtrig_pa)
			(void) ldphysio(pci_errtrig_pa);
		derr.fme_flag = DDI_FM_ERR_UNEXPECTED;
		err = pci_pbm_err_handler(pci_p->pci_dip, &derr, (void *)pci_p,
				PCI_INTR_CALL);
	}

	if (err == DDI_FM_FATAL) {
		if (pci_panic_on_fatal_errors) {
			mutex_exit(&pci_p->pci_common_p->pci_fm_mutex);
			fm_panic("%s-%d: Fatal PCI bus error(s)\n",
				ddi_driver_name(pci_p->pci_dip),
				ddi_get_instance(pci_p->pci_dip));
		}
	}

	mutex_exit(&pci_p->pci_common_p->pci_fm_mutex);
	ib_nintr_clear(pci_p->pci_ib_p, pci_p->pci_inos[CBNINTR_PBM]);
	return (DDI_INTR_CLAIMED);
}

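/*
 * Save the PBM error interrupt mapping register before suspend.
 */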
void
pbm_suspend(pbm_t *pbm_p)
{
	pci_t *pci_p = pbm_p->pbm_pci_p;
	ib_ino_t ino = pci_p->pci_inos[CBNINTR_PBM];

	pbm_p->pbm_imr_save = *ib_intr_map_reg_addr(pci_p->pci_ib_p, ino);

	pci_pbm_suspend(pci_p);
}

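/*
 * Clear any pending PBM error interrupt and restore the interrupt
 * mapping register saved by pbm_suspend.
 */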
void
pbm_resume(pbm_t *pbm_p)
{
	pci_t *pci_p = pbm_p->pbm_pci_p;
	ib_ino_t ino = pci_p->pci_inos[CBNINTR_PBM];

	ib_nintr_clear(pci_p->pci_ib_p, ino);
	*ib_intr_map_reg_addr(pci_p->pci_ib_p, ino) = pbm_p->pbm_imr_save;

	pci_pbm_resume(pci_p);
}

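/*
 * Interrupt distribution callback: redistribute the PBM nexus error
 * interrupt while holding the interrupt block lock.
 */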
void
pbm_intr_dist(void *arg)
{
	pbm_t *pbm_p = (pbm_t *)arg;
	pci_t *pci_p = pbm_p->pbm_pci_p;
	ib_t *ib_p = pci_p->pci_ib_p;
	ib_ino_t ino = IB_MONDO_TO_INO(pci_p->pci_inos[CBNINTR_PBM]);

	mutex_enter(&ib_p->ib_intr_lock);
	ib_intr_dist_nintr(ib_p, ino, ib_intr_map_reg_addr(ib_p, ino));
	pci_pbm_intr_dist(pbm_p);
	mutex_exit(&ib_p->ib_intr_lock);
}

/*
 * Function used to log the PBM AFSR register bits and to look up and
 * fault the access handle associated with the PBM AFAR register.
 * Called by pci_pbm_err_handler with pci_fm_mutex held.
 */
int
pbm_afsr_report(dev_info_t *dip, uint64_t fme_ena, pbm_errstate_t *pbm_err_p)
{
	int fatal = 0;
	int ret = 0;
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	pci_common_t *cmn_p = pci_p->pci_common_p;

	ASSERT(MUTEX_HELD(&cmn_p->pci_fm_mutex));

	pbm_err_p->pbm_pri = PBM_PRIMARY;
	(void) pci_pbm_classify(pbm_err_p);

	pci_format_addr(dip, &pbm_err_p->pbm_pci.pci_pa, pbm_err_p->pbm_afsr);

	if (pbm_err_p->pbm_log == FM_LOG_PBM)
		pbm_ereport_post(dip, fme_ena, pbm_err_p);

	/*
	 * Lookup and fault errant handle
	 */
	if (((ret = ndi_fmc_error(dip, NULL, ACC_HANDLE, fme_ena,
	    (void *)&pbm_err_p->pbm_pci.pci_pa)) == DDI_FM_FATAL) ||
	    (ret == DDI_FM_UNKNOWN))
		fatal++;

	/*
	 * queue target ereport if appropriate
	 */
	if (pbm_err_p->pbm_terr_class)
		pci_target_enqueue(fme_ena, pbm_err_p->pbm_terr_class,
		    (pbm_err_p->pbm_log == FM_LOG_PCI) ? "pci" :
		    pbm_err_p->pbm_bridge_type, pbm_err_p->pbm_pci.pci_pa);

	/*
	 * We are currently not dealing with the multiple-error
	 * case; for any secondary errors we will panic.
	 */
	pbm_err_p->pbm_pri = PBM_SECONDARY;
	if (pci_pbm_classify(pbm_err_p)) {
		fatal++;
		if (pbm_err_p->pbm_log == FM_LOG_PBM)
			pbm_ereport_post(dip, fme_ena, pbm_err_p);
	}

	if (fatal)
		return (DDI_FM_FATAL);

	return (DDI_FM_NONFATAL);
}
