/*
 * Misc utility routines for accessing chip-specific features
 * of the SiliconBackplane-based Broadcom chips.
 *
 * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $Id: sbutils.c 401759 2013-05-13 16:08:08Z $
 */

#include <bcm_cfg.h>
#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
#include <bcmdevs.h>
#include <hndsoc.h>
#include <sbchipc.h>
#include <pci_core.h>
#include <pcicfg.h>
#include <sbpcmcia.h>

#include "siutils_priv.h"


/* local prototypes */
static uint _sb_coreidx(si_info_t *sii, uint32 sba);
static uint _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba,
                     uint ncores);
static uint32 _sb_coresba(si_info_t *sii);
static void *_sb_setcoreidx(si_info_t *sii, uint coreidx);

#define	SET_SBREG(sii, r, mask, val)	\
		W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
#define	REGS2SB(va)	(sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)

/* sonicsrev */
#define	SONICS_2_2	(SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
#define	SONICS_2_3	(SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)

#define	R_SBREG(sii, sbr)	sb_read_sbreg((sii), (sbr))
#define	W_SBREG(sii, sbr, v)	sb_write_sbreg((sii), (sbr), (v))
#define	AND_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
#define	OR_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
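
/* Illustrative sketch (not part of the original driver, compiled out): the
 * accessors above implement a read-modify-write on SB config registers, e.g.
 * setting the target-reject bit while preserving the rest of sbtmstatelow.
 * If enabled, this would need to live below the sb_read_sbreg/sb_write_sbreg
 * definitions.
 */
#if 0
static void
example_set_reject(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	sbconfig_t *sb = REGS2SB(sii->curmap);

	/* clear the reject field and set SBTML_REJ, leaving all other bits alone */
	SET_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ_MASK, SBTML_REJ);
}
#endif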

static uint32
sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
{
	uint8 tmp;
	uint32 val, intr_val = 0;


	/*
	 * Compact flash only has an 11-bit address, while we need a 12-bit address.
	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
	 * so we program MEM_SEG with the 12th bit when necessary (i.e., when accessing
	 * SB registers). For a normal PCMCIA bus (CFTable_regwinsz > 2KB), do nothing special.
	 */
	if (PCMCIA(sii)) {
		INTR_OFF(sii, intr_val);
		tmp = 1;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
	}

	val = R_REG(sii->osh, sbr);

	if (PCMCIA(sii)) {
		tmp = 0;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		INTR_RESTORE(sii, intr_val);
	}

	return (val);
}

static void
sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
{
	uint8 tmp;
	volatile uint32 dummy;
	uint32 intr_val = 0;


	/*
	 * Compact flash only has an 11-bit address, while we need a 12-bit address.
	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
	 * so we program MEM_SEG with the 12th bit when necessary (i.e., when accessing
	 * SB registers). For a normal PCMCIA bus (CFTable_regwinsz > 2KB), do nothing special.
	 */
	if (PCMCIA(sii)) {
		INTR_OFF(sii, intr_val);
		tmp = 1;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
	}

	if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
#ifdef IL_BIGENDIAN
		dummy = R_REG(sii->osh, sbr);
		BCM_REFERENCE(dummy);
		W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
		dummy = R_REG(sii->osh, sbr);
		BCM_REFERENCE(dummy);
		W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
#else
		dummy = R_REG(sii->osh, sbr);
		BCM_REFERENCE(dummy);
		W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
		dummy = R_REG(sii->osh, sbr);
		BCM_REFERENCE(dummy);
		W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
#endif	/* IL_BIGENDIAN */
	} else
		W_REG(sii->osh, sbr, v);

	if (PCMCIA(sii)) {
		tmp = 0;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		INTR_RESTORE(sii, intr_val);
	}
}
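
/* Illustrative sketch (not part of the original driver, compiled out): how the
 * 12-bit SB register offset is split for compact flash access above, assuming
 * a 12-bit offset (< 0x1000) with bit 11 set. The low 11 bits go out on the CF
 * address lines while bit 11 travels via the MEM_SEG attribute and is OR'd
 * back in by the hardware.
 */
#if 0
static void
example_memseg_split(uint32 regoff)
{
	uint32 low11 = regoff & ~(1 << 11);	/* driven on the 11 CF address lines */
	uint32 bit11 = (regoff >> 11) & 1;	/* programmed into MEM_SEG */

	/* the hardware reconstructs the full 12-bit offset */
	ASSERT(((bit11 << 11) | low11) == (regoff & 0xfff));
}
#endif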

uint
sb_coreid(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
}

uint
sb_intflag(si_t *sih)
{
	si_info_t *sii;
	void *corereg;
	sbconfig_t *sb;
	uint origidx, intflag, intr_val = 0;

	sii = SI_INFO(sih);

	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(sih);
	corereg = si_setcore(sih, CC_CORE_ID, 0);
	ASSERT(corereg != NULL);
	sb = REGS2SB(corereg);
	intflag = R_SBREG(sii, &sb->sbflagst);
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);

	return intflag;
}

uint
sb_flag(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
}

void
sb_setint(si_t *sih, int siflag)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint32 vec;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	if (siflag == -1)
		vec = 0;
	else
		vec = 1 << siflag;
	W_SBREG(sii, &sb->sbintvec, vec);
}

/* return core index of the core with address 'sba' */
static uint
BCMATTACHFN(_sb_coreidx)(si_info_t *sii, uint32 sba)
{
	uint i;

	for (i = 0; i < sii->numcores; i ++)
		if (sba == sii->coresba[i])
			return i;
	return BADIDX;
}

/* return core address of the current core */
static uint32
BCMATTACHFN(_sb_coresba)(si_info_t *sii)
{
	uint32 sbaddr;


	switch (BUSTYPE(sii->pub.bustype)) {
	case SI_BUS: {
		sbconfig_t *sb = REGS2SB(sii->curmap);
		sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
		break;
	}

	case PCI_BUS:
		sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
		break;

	case PCMCIA_BUS: {
		uint8 tmp = 0;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
		sbaddr  = (uint32)tmp << 12;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
		sbaddr |= (uint32)tmp << 16;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
		sbaddr |= (uint32)tmp << 24;
		break;
	}


#ifdef BCMJTAG
	case JTAG_BUS:
		sbaddr = (uint32)(uintptr)sii->curmap;
		break;
#endif

	default:
		sbaddr = BADCOREADDR;
		break;
	}

	return sbaddr;
}

uint
sb_corevendor(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
}

uint
sb_corerev(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint sbidh;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);
	sbidh = R_SBREG(sii, &sb->sbidhigh);

	return (SBCOREREV(sbidh));
}

/* set core-specific control flags */
void
sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint32 w;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	ASSERT((val & ~mask) == 0);

	/* mask and set */
	w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
	        (val << SBTML_SICF_SHIFT);
	W_SBREG(sii, &sb->sbtmstatelow, w);
}

/* set/clear core-specific control flags */
uint32
sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint32 w;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	ASSERT((val & ~mask) == 0);

	/* mask and set */
	if (mask || val) {
		w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
		        (val << SBTML_SICF_SHIFT);
		W_SBREG(sii, &sb->sbtmstatelow, w);
	}

	/* return the new value
	 * for a write operation, the following readback ensures completion of the write.
	 */
	return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
}
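
/* Illustrative usage sketch (not part of the original driver, compiled out):
 * force the current core's gated clocks on around a register access and then
 * release them again, using the control-flag accessor above.
 */
#if 0
static void
example_force_gated_clock(si_t *sih)
{
	(void)sb_core_cflags(sih, SICF_FGC, SICF_FGC);	/* set force-gated-clock */
	/* ... access registers that require the forced clock here ... */
	(void)sb_core_cflags(sih, SICF_FGC, 0);		/* clear it again */
}
#endif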

/* set/clear core-specific status flags */
uint32
sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint32 w;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	ASSERT((val & ~mask) == 0);
	ASSERT((mask & ~SISF_CORE_BITS) == 0);

	/* mask and set */
	if (mask || val) {
		w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
		        (val << SBTMH_SISF_SHIFT);
		W_SBREG(sii, &sb->sbtmstatehigh, w);
	}

	/* return the new value */
	return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
}

bool
sb_iscoreup(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	return ((R_SBREG(sii, &sb->sbtmstatelow) &
	         (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
	        (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
}

/*
 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
 * switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
 * and (on newer pci cores) chipcommon registers.
 */
uint
sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	uint32 *r = NULL;
	uint w;
	uint intr_val = 0;
	bool fast = FALSE;
	si_info_t *sii;

	sii = SI_INFO(sih);

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);
	ASSERT((val & ~mask) == 0);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if it does not exist */
		if (!sii->regs[coreidx]) {
			sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
			                            SI_CORE_SIZE);
			ASSERT(GOODREGS(sii->regs[coreidx]));
		}
		r = (uint32 *)((uchar *)sii->regs[coreidx] + regoff);
	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and, on newer cores, at chipc */

		if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13, at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (uint32 *)((char *)sii->curmap +
				               PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (uint32 *)((char *)sii->curmap +
				               ((regoff >= SBCONFIGOFF) ?
				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
				               regoff);
		}
	}

	if (!fast) {
		INTR_OFF(sii, intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (uint32*) ((uchar*)sb_setcoreidx(&sii->pub, coreidx) + regoff);
	}
	ASSERT(r != NULL);

	/* mask and set */
	if (mask || val) {
		if (regoff >= SBCONFIGOFF) {
			w = (R_SBREG(sii, r) & ~mask) | val;
			W_SBREG(sii, r, w);
		} else {
			w = (R_REG(sii->osh, r) & ~mask) | val;
			W_REG(sii->osh, r, w);
		}
	}

	/* readback */
	if (regoff >= SBCONFIGOFF)
		w = R_SBREG(sii, r);
	else {
		if ((CHIPID(sii->pub.chip) == BCM5354_CHIP_ID) &&
		    (coreidx == SI_CC_IDX) &&
		    (regoff == OFFSETOF(chipcregs_t, watchdog))) {
			w = val;
		} else
			w = R_REG(sii->osh, r);
	}

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			sb_setcoreidx(&sii->pub, origidx);

		INTR_RESTORE(sii, intr_val);
	}

	return (w);
}
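
/* Illustrative usage sketch (not part of the original driver, compiled out):
 * read and write chipcommon registers through sb_corereg() without an explicit
 * core switch; mask == 0 and val == 0 make it a pure read, mask == ~0 a full write.
 */
#if 0
static void
example_corereg(si_t *sih)
{
	uint chipid;

	/* read chipcommon's chipid register */
	chipid = sb_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipid), 0, 0);
	BCM_REFERENCE(chipid);

	/* write the chipcommon watchdog register; note the BCM5354 special case
	 * in sb_corereg() above avoids the readback for this particular offset
	 */
	sb_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, 1);
}
#endif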

/* Scan the enumeration space to find all cores starting from the given
 * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
 * is the default core address at chip POR time and 'regs' is the virtual
 * address that the default core is mapped at. 'ncores' is the number of
 * cores expected on bus 'sbba'. It returns the total number of cores
 * starting from bus 'sbba', inclusive.
 */
#define SB_MAXBUSES	2
static uint
BCMATTACHFN(_sb_scan)(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores)
{
	uint next;
	uint ncc = 0;
	uint i;

	if (bus >= SB_MAXBUSES) {
		SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
		return 0;
	}
	SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));

	/* Scan all cores on the bus starting from core 0.
	 * Core addresses must be contiguous on each bus.
	 */
	for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
		sii->coresba[next] = sbba + (i * SI_CORE_SIZE);

		/* keep and reuse the initial register mapping */
		if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (sii->coresba[next] == sba)) {
			SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
			sii->regs[next] = regs;
		}

		/* change core to 'next' and read its coreid */
		sii->curmap = _sb_setcoreidx(sii, next);
		sii->curidx = next;

		sii->coreid[next] = sb_coreid(&sii->pub);

		/* core specific processing... */
		/* chipc provides # cores */
		if (sii->coreid[next] == CC_CORE_ID) {
			chipcregs_t *cc = (chipcregs_t *)sii->curmap;
			uint32 ccrev = sb_corerev(&sii->pub);

			/* determine numcores - this is the total # cores in the chip */
			if (((ccrev == 4) || (ccrev >= 6)))
				numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >>
				        CID_CC_SHIFT;
			else {
				/* Older chips */
				uint chip = CHIPID(sii->pub.chip);

				if (chip == BCM4306_CHIP_ID)	/* < 4306c0 */
					numcores = 6;
				else if (chip == BCM4704_CHIP_ID)
					numcores = 9;
				else if (chip == BCM5365_CHIP_ID)
					numcores = 7;
				else {
					SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
					          chip));
					ASSERT(0);
					numcores = 1;
				}
			}
			SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
				sii->pub.issim ? "QT" : ""));
		}
		/* scan bridged SB(s) and add results to the end of the list */
		else if (sii->coreid[next] == OCP_CORE_ID) {
			sbconfig_t *sb = REGS2SB(sii->curmap);
			uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
			uint nsbcc;

			sii->numcores = next + 1;

			if ((nsbba & 0xfff00000) != SI_ENUM_BASE)
				continue;
			nsbba &= 0xfffff000;
			if (_sb_coreidx(sii, nsbba) != BADIDX)
				continue;

			nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
			nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc);
			if (sbba == SI_ENUM_BASE)
				numcores -= nsbcc;
			ncc += nsbcc;
		}
	}

	SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));

	sii->numcores = i + ncc;
	return sii->numcores;
}

/* scan the sb enumerated space to identify all cores */
void
BCMATTACHFN(sb_scan)(si_t *sih, void *regs, uint devid)
{
	si_info_t *sii;
	uint32 origsba;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;

	/* Save the current core info; we validate it later once we know
	 * for sure what is good and what is bad.
	 */
	origsba = _sb_coresba(sii);

	/* scan all SB(s) starting from SI_ENUM_BASE */
	sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1);
}

/*
 * This function changes the logical "focus" to the indicated core;
 * it must be called with interrupts off.
 * Moreover, callers should keep interrupts off while switching out of and back to the d11 core.
 */
void *
sb_setcoreidx(si_t *sih, uint coreidx)
{
	si_info_t *sii;

	sii = SI_INFO(sih);

	if (coreidx >= sii->numcores)
		return (NULL);

	/*
	 * If the user has provided an interrupt mask enabled function,
	 * then assert interrupts are disabled before switching the core.
	 */
	ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));

	sii->curmap = _sb_setcoreidx(sii, coreidx);
	sii->curidx = coreidx;

	return (sii->curmap);
}
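
/* Illustrative usage sketch (not part of the original driver, compiled out):
 * the save/switch/restore idiom used throughout this file, with interrupts
 * held off across the core switch as required above.
 */
#if 0
static void
example_switch_core(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	sbconfig_t *sb;
	void *corereg;
	uint origidx, intr_val = 0;

	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(sih);			/* remember the current core */
	corereg = si_setcore(sih, CC_CORE_ID, 0);	/* focus on chipcommon */
	ASSERT(corereg != NULL);
	sb = REGS2SB(corereg);
	(void)R_SBREG(sii, &sb->sbidhigh);		/* ... per-core work goes here ... */
	sb_setcoreidx(sih, origidx);			/* restore the original core */
	INTR_RESTORE(sii, intr_val);
}
#endif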

/* This function changes the logical "focus" to the indicated core.
 * Return the current core's virtual address.
 */
static void *
_sb_setcoreidx(si_info_t *sii, uint coreidx)
{
	uint32 sbaddr = sii->coresba[coreidx];
	void *regs;

	switch (BUSTYPE(sii->pub.bustype)) {
	case SI_BUS:
		/* map new one */
		if (!sii->regs[coreidx]) {
			sii->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
			ASSERT(GOODREGS(sii->regs[coreidx]));
		}
		regs = sii->regs[coreidx];
		break;

	case PCI_BUS:
		/* point bar0 window */
		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
		regs = sii->curmap;
		break;

	case PCMCIA_BUS: {
		uint8 tmp = (sbaddr >> 12) & 0x0f;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
		tmp = (sbaddr >> 16) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
		tmp = (sbaddr >> 24) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
		regs = sii->curmap;
		break;
	}

#ifdef BCMJTAG
	case JTAG_BUS:
		/* map new one */
		if (!sii->regs[coreidx]) {
			sii->regs[coreidx] = (void *)(uintptr)sbaddr;
			ASSERT(GOODREGS(sii->regs[coreidx]));
		}
		regs = sii->regs[coreidx];
		break;
#endif	/* BCMJTAG */

	default:
		ASSERT(0);
		regs = NULL;
		break;
	}

	return regs;
}

/* Return the address of sbadmatch0/1/2/3 register */
static volatile uint32 *
sb_admatch(si_info_t *sii, uint asidx)
{
	sbconfig_t *sb;
	volatile uint32 *addrm;

	sb = REGS2SB(sii->curmap);

	switch (asidx) {
	case 0:
		addrm =  &sb->sbadmatch0;
		break;

	case 1:
		addrm =  &sb->sbadmatch1;
		break;

	case 2:
		addrm =  &sb->sbadmatch2;
		break;

	case 3:
		addrm =  &sb->sbadmatch3;
		break;

	default:
		SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx));
		return 0;
	}

	return (addrm);
}

/* Return the number of address spaces in current core */
int
sb_numaddrspaces(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	/* + 1 because of enumeration space */
	return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
}

/* Return the address of the nth address space in the current core */
uint32
sb_addrspace(si_t *sih, uint asidx)
{
	si_info_t *sii;

	sii = SI_INFO(sih);

	return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
}

/* Return the size of the nth address space in the current core */
uint32
sb_addrspacesize(si_t *sih, uint asidx)
{
	si_info_t *sii;

	sii = SI_INFO(sih);

	return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
}
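
/* Illustrative usage sketch (not part of the original driver, compiled out):
 * walk the current core's address spaces with the accessors above (the count
 * includes the enumeration space).
 */
#if 0
static void
example_dump_addrspaces(si_t *sih)
{
	int i, n = sb_numaddrspaces(sih);

	for (i = 0; i < n; i++)
		SI_MSG(("addrspace %d: base 0x%08x size 0x%08x\n",
		        i, sb_addrspace(sih, (uint)i), sb_addrspacesize(sih, (uint)i)));
}
#endif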

#if defined(BCMASSERT_SUPPORT) || defined(BCMDBG_DUMP)
/* traverse all cores to find and clear the source of an serror */
static void
sb_serr_clear(si_info_t *sii)
{
	sbconfig_t *sb;
	uint origidx;
	uint i, intr_val = 0;
	void *corereg = NULL;

	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(&sii->pub);

	for (i = 0; i < sii->numcores; i++) {
		corereg = sb_setcoreidx(&sii->pub, i);
		if (NULL != corereg) {
			sb = REGS2SB(corereg);
			if ((R_SBREG(sii, &sb->sbtmstatehigh)) & SBTMH_SERR) {
				AND_SBREG(sii, &sb->sbtmstatehigh, ~SBTMH_SERR);
				SI_ERROR(("sb_serr_clear: SError at core 0x%x\n",
				          sb_coreid(&sii->pub)));
			}
		}
	}

	sb_setcoreidx(&sii->pub, origidx);
	INTR_RESTORE(sii, intr_val);
}

/*
 * Check if any inband, outband or timeout errors have occurred and clear them.
 * Must be called with the chip clock on!
 */
bool
sb_taclear(si_t *sih, bool details)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint origidx;
	uint intr_val = 0;
	bool rc = FALSE;
	uint32 inband = 0, serror = 0, timeout = 0;
	void *corereg = NULL;
	volatile uint32 imstate, tmstate;

	sii = SI_INFO(sih);

	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		volatile uint32 stcmd;

		/* inband error is Target abort for PCI */
		stcmd = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_CMD, sizeof(uint32));
		inband = stcmd & PCI_STAT_TA;
		if (inband) {
			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_CFG_CMD, sizeof(uint32), stcmd);
		}

		/* serror */
		stcmd = OSL_PCI_READ_CONFIG(sii->osh, PCI_INT_STATUS, sizeof(uint32));
		serror = stcmd & PCI_SBIM_STATUS_SERR;
		if (serror) {
			sb_serr_clear(sii);
			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_INT_STATUS, sizeof(uint32), stcmd);
		}

		/* timeout */
		imstate = sb_corereg(sih, sii->pub.buscoreidx,
		                     SBCONFIGOFF + OFFSETOF(sbconfig_t, sbimstate), 0, 0);
		if ((imstate != 0xffffffff) && (imstate & (SBIM_IBE | SBIM_TO))) {
			sb_corereg(sih, sii->pub.buscoreidx,
			           SBCONFIGOFF + OFFSETOF(sbconfig_t, sbimstate), ~0,
			           (imstate & ~(SBIM_IBE | SBIM_TO)));
			/* inband = imstate & SBIM_IBE; same as TA above */
			timeout = imstate & SBIM_TO;
			if (timeout) {
			}
		}

		if (inband) {
			/* dump errlog for sonics >= 2.3 */
			if (sii->pub.socirev == SONICS_2_2)
				;
			else {
				uint32 imerrlog, imerrloga;
				imerrlog = sb_corereg(sih, sii->pub.buscoreidx, SBIMERRLOG, 0, 0);
				if (imerrlog & SBTMEL_EC) {
					imerrloga = sb_corereg(sih, sii->pub.buscoreidx,
					                       SBIMERRLOGA, 0, 0);
					BCM_REFERENCE(imerrloga);
					/* clear errlog */
					sb_corereg(sih, sii->pub.buscoreidx, SBIMERRLOG, ~0, 0);
					SI_ERROR(("sb_taclear: ImErrLog 0x%x, ImErrLogA 0x%x\n",
						imerrlog, imerrloga));
				}
			}
		}


	} else if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {

		INTR_OFF(sii, intr_val);
		origidx = si_coreidx(sih);

		corereg = si_setcore(sih, PCMCIA_CORE_ID, 0);
		if (NULL != corereg) {
			sb = REGS2SB(corereg);

			imstate = R_SBREG(sii, &sb->sbimstate);
			/* handle surprise removal */
			if ((imstate != 0xffffffff) && (imstate & (SBIM_IBE | SBIM_TO))) {
				AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
				inband = imstate & SBIM_IBE;
				timeout = imstate & SBIM_TO;
			}
			tmstate = R_SBREG(sii, &sb->sbtmstatehigh);
			if ((tmstate != 0xffffffff) && (tmstate & SBTMH_INT_STATUS)) {
				if (!inband) {
					serror = 1;
					sb_serr_clear(sii);
				}
				OR_SBREG(sii, &sb->sbtmstatelow, SBTML_INT_ACK);
				AND_SBREG(sii, &sb->sbtmstatelow, ~SBTML_INT_ACK);
			}
		}
		sb_setcoreidx(sih, origidx);
		INTR_RESTORE(sii, intr_val);

	}


	if (inband | timeout | serror) {
		rc = TRUE;
		SI_ERROR(("sb_taclear: inband 0x%x, serror 0x%x, timeout 0x%x!\n",
		          inband, serror, timeout));
	}

	return (rc);
}
#endif
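
/* Illustrative usage sketch (not part of the original driver, compiled out):
 * poll for and clear backplane target-abort/serror/timeout conditions, e.g.
 * from a periodic watchdog path. Per the comment on sb_taclear(), the chip
 * clock must be on, and that routine is only built when BCMASSERT_SUPPORT or
 * BCMDBG_DUMP is defined.
 */
#if 0
static void
example_poll_taclear(si_t *sih)
{
	if (sb_taclear(sih, FALSE))
		SI_ERROR(("backplane error detected and cleared\n"));
}
#endif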

/* do buffered registers update */
void
sb_commit(si_t *sih)
{
	si_info_t *sii;
	uint origidx;
	uint intr_val = 0;

	sii = SI_INFO(sih);

	origidx = sii->curidx;
	ASSERT(GOODIDX(origidx));

	INTR_OFF(sii, intr_val);

	/* switch over to chipcommon core if there is one, else use pci */
	if (sii->pub.ccrev != NOREV) {
		chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
		ASSERT(ccregs != NULL);

		/* do the buffered registers update */
		W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
		W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
	} else if (PCI(sii)) {
		sbpciregs_t *pciregs = (sbpciregs_t *)si_setcore(sih, PCI_CORE_ID, 0);

		/* do the buffered registers update */
		W_REG(sii->osh, &pciregs->bcastaddr, SB_COMMIT);
		W_REG(sii->osh, &pciregs->bcastdata, 0x0);
	} else
		ASSERT(0);

	/* restore core index */
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
}

void
sb_core_disable(si_t *sih, uint32 bits)
{
	si_info_t *sii;
	volatile uint32 dummy;
	sbconfig_t *sb;

	sii = SI_INFO(sih);

	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/* if core is already in reset, just return */
	if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
		return;

	/* if clocks are not enabled, put into reset and return */
	if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
		goto disable;

	/* set target reject and spin until busy is clear (preserve core-specific bits) */
	OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
	SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
		SI_ERROR(("%s: target state still busy\n", __FUNCTION__));

	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
		OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
		dummy = R_SBREG(sii, &sb->sbimstate);
		BCM_REFERENCE(dummy);
		OSL_DELAY(1);
		SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
	}

	/* set reset and reject while enabling the clocks */
	W_SBREG(sii, &sb->sbtmstatelow,
	        (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
	         SBTML_REJ | SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(10);

	/* don't forget to clear the initiator reject bit */
	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
		AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);

disable:
	/* leave reset and reject asserted */
	W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
	OSL_DELAY(1);
}

/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 */
void
sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
	si_info_t *sii;
	sbconfig_t *sb;
	volatile uint32 dummy;

	sii = SI_INFO(sih);
	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/*
	 * Must do the disable sequence first to work for arbitrary current core state.
	 */
	sb_core_disable(sih, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */

	/* set reset while enabling the clock and forcing them on throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
	        (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
	         SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
		W_SBREG(sii, &sb->sbtmstatehigh, 0);
	}
	if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
		AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
	}

	/* clear reset and allow it to propagate throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
	        ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	/* leave clock enabled */
	W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
}
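
/* Illustrative usage sketch (not part of the original driver, compiled out):
 * the typical bring-up pattern for the current core using the routines above;
 * no core-specific flags are passed in this minimal case.
 */
#if 0
static void
example_reset_current_core(si_t *sih)
{
	sb_core_reset(sih, 0, 0);	/* disable, then reset with clocks forced on */
	ASSERT(sb_iscoreup(sih));	/* core should now be out of reset and clocked */
}
#endif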

/*
 * Set the initiator timeout for the "master core".
 * The master core is defined to be the core in control
 * of the chip and so it issues accesses to non-memory
 * locations (because of DMA, *any* core can access memory).
 *
 * The routine uses the bus to decide who is the master:
 *	SI_BUS => mips
 *	JTAG_BUS => chipc
 *	PCI_BUS => pci or pcie
 *	PCMCIA_BUS => pcmcia
 *	SDIO_BUS => pcmcia
 *
 * This routine exists so callers can disable initiator
 * timeouts so accesses to very slow devices like otp
 * won't cause an abort. The routine allows arbitrary
 * settings of the service and request timeouts, though.
 *
 * Returns the timeout state before changing it or -1
 * on error.
 */

#define	TO_MASK	(SBIMCL_RTO_MASK | SBIMCL_STO_MASK)

uint32
sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
{
	si_info_t *sii;
	uint origidx;
	uint intr_val = 0;
	uint32 tmp, ret = 0xffffffff;
	sbconfig_t *sb;

	sii = SI_INFO(sih);

	if ((to & ~TO_MASK) != 0)
		return ret;

	/* Figure out the master core */
	if (idx == BADIDX) {
		switch (BUSTYPE(sii->pub.bustype)) {
		case PCI_BUS:
			idx = sii->pub.buscoreidx;
			break;
		case JTAG_BUS:
			idx = SI_CC_IDX;
			break;
		case PCMCIA_BUS:
			idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
			break;
		case SI_BUS:
			idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0);
			break;
		default:
			ASSERT(0);
		}
		if (idx == BADIDX)
			return ret;
	}

	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(sih);

	sb = REGS2SB(sb_setcoreidx(sih, idx));

	tmp = R_SBREG(sii, &sb->sbimconfiglow);
	ret = tmp & TO_MASK;
	W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);

	sb_commit(sih);
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
	return ret;
}
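
/* Illustrative usage sketch (not part of the original driver, compiled out):
 * turn initiator timeouts off around a slow access and restore the previous
 * setting afterwards, assuming a timeout value of 0 disables the timeouts.
 * Passing BADIDX lets the routine pick the master core for the current bus.
 */
#if 0
static void
example_slow_access(si_t *sih)
{
	uint32 saved_to;

	saved_to = sb_set_initiator_to(sih, 0, BADIDX);		/* timeouts off */
	/* ... perform the slow access (e.g. otp) here ... */
	(void)sb_set_initiator_to(sih, saved_to, BADIDX);	/* restore */
}
#endif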

uint32
sb_base(uint32 admatch)
{
	uint32 base;
	uint type;

	type = admatch & SBAM_TYPE_MASK;
	ASSERT(type < 3);

	base = 0;

	if (type == 0) {
		base = admatch & SBAM_BASE0_MASK;
	} else if (type == 1) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		base = admatch & SBAM_BASE1_MASK;
	} else if (type == 2) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		base = admatch & SBAM_BASE2_MASK;
	}

	return (base);
}

uint32
sb_size(uint32 admatch)
{
	uint32 size;
	uint type;

	type = admatch & SBAM_TYPE_MASK;
	ASSERT(type < 3);

	size = 0;

	if (type == 0) {
		size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
	} else if (type == 1) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
	} else if (type == 2) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
	}

	return (size);
}
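
/* Illustrative usage sketch (not part of the original driver, compiled out):
 * decode a raw admatch descriptor, read through the config-space accessor,
 * into a base/size pair using the helpers above.
 */
#if 0
static void
example_decode_admatch(si_t *sih)
{
	uint32 admatch, base, size;

	/* read the current core's sbadmatch0 register */
	admatch = sb_corereg(sih, si_coreidx(sih),
	                     SBCONFIGOFF + OFFSETOF(sbconfig_t, sbadmatch0), 0, 0);
	base = sb_base(admatch);
	size = sb_size(admatch);
	SI_MSG(("admatch0 0x%08x -> base 0x%08x, size 0x%x\n", admatch, base, size));
}
#endif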

#if defined(BCMDBG_DUMP)
/* print interesting sbconfig registers */
void
sb_dumpregs(si_t *sih, struct bcmstrbuf *b)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint origidx, i, intr_val = 0;

	sii = SI_INFO(sih);
	origidx = sii->curidx;

	INTR_OFF(sii, intr_val);

	for (i = 0; i < sii->numcores; i++) {
		sb = REGS2SB(sb_setcoreidx(sih, i));

		bcm_bprintf(b, "core 0x%x: \n", sii->coreid[i]);

		if (sii->pub.socirev > SONICS_2_2)
			bcm_bprintf(b, "sbimerrlog 0x%x sbimerrloga 0x%x\n",
			          sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOG, 0, 0),
			          sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOGA, 0, 0));

		bcm_bprintf(b, "sbtmstatelow 0x%x sbtmstatehigh 0x%x sbidhigh 0x%x "
		            "sbimstate 0x%x\n sbimconfiglow 0x%x sbimconfighigh 0x%x\n",
		            R_SBREG(sii, &sb->sbtmstatelow), R_SBREG(sii, &sb->sbtmstatehigh),
		            R_SBREG(sii, &sb->sbidhigh), R_SBREG(sii, &sb->sbimstate),
		            R_SBREG(sii, &sb->sbimconfiglow), R_SBREG(sii, &sb->sbimconfighigh));
	}

	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
}
#endif