1/*
2 * Misc utility routines for accessing chip-specific features
3 * of the SiliconBackplane-based Broadcom chips.
4 *
5 * Copyright 2007, Broadcom Corporation
6 * All Rights Reserved.
7 *
8 * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
9 * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
10 * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
11 * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
12 * $Id: sbutils.c,v 1.1.1.1 2008/10/15 03:31:34 james26_jang Exp $
13 */
14
15#include <typedefs.h>
16#include <bcmdefs.h>
17#include <osl.h>
18#include <bcmutils.h>
19#include <sbutils.h>
20#include <bcmdevs.h>
21#include <sbconfig.h>
22#include <sbchipc.h>
23#include <sbpci.h>
24#include <sbpcie.h>
25#include <pcicfg.h>
26#include <sbpcmcia.h>
27#include <sbsocram.h>
28#include <bcmnvram.h>
29#include <bcmsrom.h>
30#include <hndpmu.h>
31
32/* debug/trace */
33#define	SB_ERROR(args)
34
35#define	SB_MSG(args)
36
37typedef uint32 (*sb_intrsoff_t)(void *intr_arg);
38typedef void (*sb_intrsrestore_t)(void *intr_arg, uint32 arg);
39typedef bool (*sb_intrsenabled_t)(void *intr_arg);
40
41typedef struct gpioh_item {
42	void			*arg;
43	bool			level;
44	gpio_handler_t		handler;
45	uint32			event;
46	struct gpioh_item	*next;
47} gpioh_item_t;
48
49/* misc sb info needed by some of the routines */
50typedef struct sb_info {
51
52	struct sb_pub  	sb;		/* back plane public state (must be first field) */
53
54	void	*osh;			/* osl os handle */
55	void	*sdh;			/* bcmsdh handle */
56
57	void	*curmap;		/* current regs va */
58	void	*regs[SB_MAXCORES];	/* other regs va */
59
60	uint	curidx;			/* current core index */
61	uint	dev_coreid;		/* the core provides driver functions */
62
63	bool	memseg;			/* flag to toggle MEM_SEG register */
64
65	uint	numcores;		/* # discovered cores */
66	uint	coreid[SB_MAXCORES];	/* id of each core */
67	uint32	coresba[SB_MAXCORES];	/* backplane address of each core */
68
69	void	*intr_arg;		/* interrupt callback function arg */
70	sb_intrsoff_t intrsoff_fn;	/* turns chip interrupts off */
71	sb_intrsrestore_t intrsrestore_fn; /* restore chip interrupts */
72	sb_intrsenabled_t intrsenabled_fn; /* check if interrupts are enabled */
73
74	uint8	pciecap_lcreg_offset; /* PCIE capability LCreg offset in the config space */
75	bool	pr42767_war;
76	uint8	pcie_polarity;
77	bool pcie_war_ovr; /* Override ASPM/Clkreq settings */
78
79	uint8 pmecap_offset;	/* PM Capability offset in the config space */
80	bool pmecap;		/* Capable of generating PME */
81
82	gpioh_item_t *gpioh_head; 	/* GPIO event handlers list */
83
84	char *vars;
85	uint varsz;
86} sb_info_t;
87
88/* local prototypes */
89static sb_info_t * sb_doattach(sb_info_t *si, uint devid, osl_t *osh, void *regs,
90                               uint bustype, void *sdh, char **vars, uint *varsz);
91static void sb_scan(sb_info_t *si, void *regs, uint devid);
92static uint _sb_coreidx(sb_info_t *si, uint32 sba);
93static uint _sb_scan(sb_info_t *si, uint32 sba, void *regs, uint bus, uint32 sbba,
94                     uint ncores);
95static uint32 _sb_coresba(sb_info_t *si);
96static void *_sb_setcoreidx(sb_info_t *si, uint coreidx);
97static uint sb_chip2numcores(uint chip);
98static bool sb_ispcie(sb_info_t *si);
99static uint8 sb_find_pci_capability(sb_info_t *si, uint8 req_cap_id,
100                                    uchar *buf, uint32 *buflen);
101static int sb_pci_fixcfg(sb_info_t *si);
102/* routines to access mdio slave device registers */
103static int sb_pcie_mdiowrite(sb_info_t *si,  uint physmedia, uint readdr, uint val);
104static int sb_pcie_mdioread(sb_info_t *si,  uint physmedia, uint readdr, uint *ret_val);
105
106/* dev path concatenation util */
107static char *sb_devpathvar(sb_t *sbh, char *var, int len, const char *name);
108
109/* WARs */
110static void sb_war43448(sb_t *sbh);
111static void sb_war43448_aspm(sb_t *sbh);
112static void sb_war32414_forceHT(sb_t *sbh, bool forceHT);
113static void sb_war30841(sb_info_t *si);
114static void sb_war42767(sb_t *sbh);
115static void sb_war42767_clkreq(sb_t *sbh);
116
117/* delay needed between the mdio control/ mdiodata register data access */
118#define PR28829_DELAY() OSL_DELAY(10)
119
120/* size that can take bitfielddump */
121#define BITFIELD_DUMP_SIZE  32
122
123/* global variable to indicate reservation/release of gpio's */
124static uint32 sb_gpioreservation = 0;
125
126/* global flag to prevent shared resources from being initialized multiple times in sb_attach() */
127static bool sb_onetimeinit = FALSE;
128
129#define	SB_INFO(sbh)	(sb_info_t*)(uintptr)sbh
130#define	SET_SBREG(si, r, mask, val)	\
131		W_SBREG((si), (r), ((R_SBREG((si), (r)) & ~(mask)) | (val)))
132#define	GOODCOREADDR(x, b) (((x) >= (b)) && ((x) < ((b) + SB_MAXCORES * SB_CORE_SIZE)) && \
133		ISALIGNED((x), SB_CORE_SIZE))
134#define	GOODREGS(regs)	((regs) && ISALIGNED((uintptr)(regs), SB_CORE_SIZE))
135#define	REGS2SB(va)	(sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
136#define BADCOREADDR	0
137#define	GOODIDX(idx)	(((uint)idx) < SB_MAXCORES)
138#define	BADIDX		(SB_MAXCORES+1)
139#define	NOREV		-1		/* Invalid rev */
140
141#define PCI(si)		((BUSTYPE(si->sb.bustype) == PCI_BUS) && (si->sb.buscoretype == SB_PCI))
142#define PCIE(si)	((BUSTYPE(si->sb.bustype) == PCI_BUS) && (si->sb.buscoretype == SB_PCIE))
143#define PCMCIA(si)	((BUSTYPE(si->sb.bustype) == PCMCIA_BUS) && (si->memseg == TRUE))
144
145/* sonicsrev */
146#define	SONICS_2_2	(SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
147#define	SONICS_2_3	(SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
148
149#define	R_SBREG(si, sbr)	sb_read_sbreg((si), (sbr))
150#define	W_SBREG(si, sbr, v)	sb_write_sbreg((si), (sbr), (v))
151#define	AND_SBREG(si, sbr, v)	W_SBREG((si), (sbr), (R_SBREG((si), (sbr)) & (v)))
152#define	OR_SBREG(si, sbr, v)	W_SBREG((si), (sbr), (R_SBREG((si), (sbr)) | (v)))
153
154/*
155 * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts before/
156 * after core switching to avoid invalid register accesss inside ISR.
157 */
158#define INTR_OFF(si, intr_val) \
159	if ((si)->intrsoff_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) {	\
160		intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); }
161#define INTR_RESTORE(si, intr_val) \
162	if ((si)->intrsrestore_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) {	\
163		(*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); }
164
165/* dynamic clock control defines */
166#define	LPOMINFREQ		25000		/* low power oscillator min */
167#define	LPOMAXFREQ		43000		/* low power oscillator max */
168#define	XTALMINFREQ		19800000	/* 20 MHz - 1% */
169#define	XTALMAXFREQ		20200000	/* 20 MHz + 1% */
170#define	PCIMINFREQ		25000000	/* 25 MHz */
171#define	PCIMAXFREQ		34000000	/* 33 MHz + fudge */
172
173#define	ILP_DIV_5MHZ		0		/* ILP = 5 MHz */
174#define	ILP_DIV_1MHZ		4		/* ILP = 1 MHz */
175
/* Force HT WAR check on non-MIPS platforms.
   This WAR seems to introduce a significant slowdown on
   4704 MIPS routers, where the problem itself never shows.
*/
180
181#ifndef __mips__
182#define FORCEHT_WAR32414(si)	\
183	(((PCIE(si)) && (si->sb.chip == BCM4311_CHIP_ID) && ((si->sb.chiprev <= 1))) || \
184	((PCI(si) || PCIE(si)) && (si->sb.chip == BCM4321_CHIP_ID) && (si->sb.chiprev <= 3)))
185#else
186#define FORCEHT_WAR32414(si)   0
187#endif /* __mips__ */
188
189
190#define PCIE_ASPMWARS(si)	\
191	((PCIE(si)) && ((si->sb.buscorerev >= 3) && (si->sb.buscorerev <= 5)))
192
193/* GPIO Based LED powersave defines */
194#define DEFAULT_GPIO_ONTIME	10		/* Default: 10% on */
#define DEFAULT_GPIO_OFFTIME	90		/* Default: 90% off */
196
197#define DEFAULT_GPIOTIMERVAL  ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
198
/* Read a 32-bit Sonics backplane (sbconfig) register.
 * On PCMCIA compact flash only 11 address bits are available, so the 12th
 * bit is supplied through the MEM_SEG attribute register, which is set
 * around the access and cleared afterwards with interrupts disabled so an
 * ISR cannot observe the address window mid-switch.
 */
static uint32
sb_read_sbreg(sb_info_t *si, volatile uint32 *sbr)
{
	uint8 tmp;
	uint32 val, intr_val = 0;


	/*
	 * Compact flash only has an 11-bit address, while we need a 12-bit address.
	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
	 * so we program MEM_SEG with the 12th bit when necessary (sb register access).
	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
	 */
	if (PCMCIA(si)) {
		INTR_OFF(si, intr_val);
		tmp = 1;
		OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
	}

	val = R_REG(si->osh, sbr);

	/* restore MEM_SEG and re-enable interrupts */
	if (PCMCIA(si)) {
		tmp = 0;
		OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
		INTR_RESTORE(si, intr_val);
	}

	return (val);
}
229
/* Write a 32-bit Sonics backplane (sbconfig) register.
 * Handles the same PCMCIA MEM_SEG 12th-address-bit dance as sb_read_sbreg().
 * On the PCMCIA bus the 32-bit write is performed as two 16-bit halves,
 * each preceded by a dummy read; the half ordering depends on endianness.
 * NOTE(review): the dummy reads look like a hardware sequencing requirement
 * for the 16-bit PCMCIA data path -- confirm against the bus documentation.
 */
static void
sb_write_sbreg(sb_info_t *si, volatile uint32 *sbr, uint32 v)
{
	uint8 tmp;
	volatile uint32 dummy;
	uint32 intr_val = 0;


	/*
	 * Compact flash only has an 11-bit address, while we need a 12-bit address.
	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
	 * so we program MEM_SEG with the 12th bit when necessary (sb register access).
	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
	 */
	if (PCMCIA(si)) {
		INTR_OFF(si, intr_val);
		tmp = 1;
		OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
	}

	if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
#ifdef IL_BIGENDIAN
		/* big-endian: write the high half first */
		dummy = R_REG(si->osh, sbr);
		W_REG(si->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
		dummy = R_REG(si->osh, sbr);
		W_REG(si->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
#else
		/* little-endian: write the low half first */
		dummy = R_REG(si->osh, sbr);
		W_REG(si->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
		dummy = R_REG(si->osh, sbr);
		W_REG(si->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
#endif	/* IL_BIGENDIAN */
	} else
		W_REG(si->osh, sbr, v);

	/* restore MEM_SEG and re-enable interrupts */
	if (PCMCIA(si)) {
		tmp = 0;
		OSL_PCMCIA_WRITE_ATTR(si->osh, MEM_SEG, &tmp, 1);
		INTR_RESTORE(si, intr_val);
	}
}
272
273/*
274 * Allocate a sb handle.
275 * devid - pci device id (used to determine chip#)
276 * osh - opaque OS handle
277 * regs - virtual address of initial core registers
278 * bustype - pci/pcmcia/sb/sdio/etc
279 * vars - pointer to a pointer area for "environment" variables
280 * varsz - pointer to int to return the size of the vars
281 */
282sb_t *
283BCMINITFN(sb_attach)(uint devid, osl_t *osh, void *regs,
284                     uint bustype, void *sdh, char **vars, uint *varsz)
285{
286	sb_info_t *si;
287
288	/* alloc sb_info_t */
289	if ((si = MALLOC(osh, sizeof (sb_info_t))) == NULL) {
290		SB_ERROR(("sb_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
291		return (NULL);
292	}
293
294	if (sb_doattach(si, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) {
295		MFREE(osh, si, sizeof(sb_info_t));
296		return (NULL);
297	}
298	si->vars = vars ? *vars : NULL;
299	si->varsz = varsz ? *varsz : 0;
300
301	return (sb_t *)si;
302}
303
/* Using sb_kattach depends on SB_BUS support, either implicit */
/* (no limiting BCMBUSTYPE value) or explicit (value is SB_BUS). */
306#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SB_BUS)
307
308/* global kernel resource */
309static sb_info_t ksi;
310
311/* generic kernel variant of sb_attach() */
312sb_t *
313BCMINITFN(sb_kattach)(osl_t *osh)
314{
315	static bool ksi_attached = FALSE;
316
317	if (!ksi_attached) {
318		void *regs = (void *)REG_MAP(SB_ENUM_BASE, SB_CORE_SIZE);
319
320		if (sb_doattach(&ksi, BCM4710_DEVICE_ID, osh, regs,
321		                SB_BUS, NULL,
322		                osh != SB_OSH ? &ksi.vars : NULL,
323		                osh != SB_OSH ? &ksi.varsz : NULL) == NULL) {
324			SB_ERROR(("sb_kattach: sb_doattach failed\n"));
325			return NULL;
326		}
327
328		ksi_attached = TRUE;
329	}
330
331	return &ksi.sb;
332}
333#endif	/* !BCMBUSTYPE || (BCMBUSTYPE == SB_BUS) */
334
/* Common attach path shared by sb_attach() and sb_kattach().
 * Discovers the chip's cores, initializes nvram/srom variables, applies
 * chip- and bus-specific fixups/workarounds, and fills in *si.
 * Returns si on success, NULL on failure (caller frees si as needed).
 */
static sb_info_t *
BCMINITFN(sb_doattach)(sb_info_t *si, uint devid, osl_t *osh, void *regs,
                       uint bustype, void *sdh, char **vars, uint *varsz)
{
	uint origidx;
	chipcregs_t *cc;
	sbconfig_t *sb;
	uint32 w;
	char *pvars;

	ASSERT(GOODREGS(regs));

	bzero((uchar*)si, sizeof(sb_info_t));

	/* no bus core discovered yet */
	si->sb.buscoreidx = BADIDX;

	si->curmap = regs;
	si->sdh = sdh;
	si->osh = osh;

	/* check to see if we are a sb core mimic'ing a pci core */
	if (bustype == PCI_BUS) {
		/* all-ones config read means there is no real PCI config space */
		if (OSL_PCI_READ_CONFIG(si->osh, PCI_SPROM_CONTROL, sizeof(uint32)) == 0xffffffff) {
			SB_ERROR(("%s: incoming bus is PCI but it's a lie, switching to SB "
			          "devid:0x%x\n", __FUNCTION__, devid));
			bustype = SB_BUS;
		}
	}
	si->sb.bustype = bustype;
	if (si->sb.bustype != BUSTYPE(si->sb.bustype)) {
		SB_ERROR(("sb_doattach: bus type %d does not match configured bus type %d\n",
		          si->sb.bustype, BUSTYPE(si->sb.bustype)));
		return NULL;
	}

	/* need to set memseg flag for CF card first before any sb registers access */
	if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS)
		si->memseg = TRUE;

	/* kludge to enable the clock on the 4306 which lacks a slowclock */
	if (BUSTYPE(si->sb.bustype) == PCI_BUS && !sb_ispcie(si))
		sb_clkctl_xtal(&si->sb, XTAL|PLL, ON);

	/* make sure BAR0 points at the enumeration space before scanning */
	if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
		w = OSL_PCI_READ_CONFIG(si->osh, PCI_BAR0_WIN, sizeof(uint32));
		if (!GOODCOREADDR(w, SB_ENUM_BASE))
			OSL_PCI_WRITE_CONFIG(si->osh, PCI_BAR0_WIN, sizeof(uint32), SB_ENUM_BASE);
	}


	/* get sonics backplane revision */
	sb = REGS2SB(regs);
	si->sb.sonicsrev = (R_SBREG(si, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;

	/* scan for cores */
	sb_scan(si, regs, devid);

	/* no cores found, bail out */
	if (si->numcores == 0) {
		SB_ERROR(("sb_doattach: could not find any cores\n"));
		return NULL;
	}

	/* save the current core index */
	origidx = si->curidx;

	/* don't go beyond if there is no chipc core in the chip */
	if (!(cc = sb_setcore(&si->sb, SB_CC, 0)))
		return si;

	/* 4712 small-package rev <= 3: force the slow clock source to xtal */
	if (BUSTYPE(si->sb.bustype) == SB_BUS &&
	    (si->sb.chip == BCM4712_CHIP_ID) &&
	    (si->sb.chippkg != BCM4712LARGE_PKG_ID) &&
	    (si->sb.chiprev <= 3))
		OR_REG(si->osh, &cc->slow_clk_ctl, SCC_SS_XTAL);

	/* fixup necessary chip/core configurations */
	if (BUSTYPE(si->sb.bustype) == PCI_BUS && sb_pci_fixcfg(si)) {
		SB_ERROR(("sb_doattach: sb_pci_fixcfg failed\n"));
		return NULL;
	}


	/* Switch back to the original core, nvram/srom init needs it */
	sb_setcoreidx(&si->sb, origidx);

	/* Init nvram from flash if it exists */
	nvram_init((void *)&si->sb);

	/* Init nvram from sprom/otp if they exist */
	if (srom_var_init(&si->sb, BUSTYPE(si->sb.bustype), regs, si->osh, vars, varsz)) {
		SB_ERROR(("sb_doattach: srom_var_init failed: bad srom\n"));
		return (NULL);
	}
	pvars = vars ? *vars : NULL;

	/* PMU specific initializations (once only -- see sb_onetimeinit) */
	if ((si->sb.cccaps & CC_CAP_PMU) && !sb_onetimeinit) {
		sb_pmu_init(&si->sb, si->osh);
		/* Find out Crystal frequency and init PLL */
		sb_pmu_pll_init(&si->sb, si->osh, getintvar(pvars, "xtalfreq"));
		/* Initialize PMU resources (up/dn timers, dep masks, etc.) */
		sb_pmu_res_init(&si->sb, si->osh);
	}

	/* re-evaluate memseg now that the register window size is known */
	if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
		w = getintvar(pvars, "regwindowsz");
		si->memseg = (w <= CFTABLE_REGWIN_2K) ? TRUE : FALSE;
	}

	/* get boardtype and boardrev */
	switch (BUSTYPE(si->sb.bustype)) {
	case PCI_BUS:
		/* do a pci config read to get subsystem id and subvendor id */
		w = OSL_PCI_READ_CONFIG(si->osh, PCI_CFG_SVID, sizeof(uint32));
		/* Let nvram variables override subsystem Vend/ID */
		if ((si->sb.boardvendor = (uint16)sb_getdevpathintvar(&si->sb, "boardvendor")) == 0)
			si->sb.boardvendor = w & 0xffff;
		else
			SB_ERROR(("Overriding boardvendor: 0x%x instead of 0x%x\n",
			          si->sb.boardvendor, w & 0xffff));
		if ((si->sb.boardtype = (uint16)sb_getdevpathintvar(&si->sb, "boardtype")) == 0)
			si->sb.boardtype = (w >> 16) & 0xffff;
		else
			SB_ERROR(("Overriding boardtype: 0x%x instead of 0x%x\n",
			          si->sb.boardtype, (w >> 16) & 0xffff));
		break;

	case PCMCIA_BUS:
		si->sb.boardvendor = getintvar(pvars, "manfid");
		si->sb.boardtype = getintvar(pvars, "prodid");
		break;

	case SB_BUS:
	case JTAG_BUS:
		/* fall back: nvram "prodid", then "boardtype", then 0xffff */
		si->sb.boardvendor = VENDOR_BROADCOM;
		if (pvars == NULL || ((si->sb.boardtype = getintvar(pvars, "prodid")) == 0))
			if ((si->sb.boardtype = getintvar(NULL, "boardtype")) == 0)
				si->sb.boardtype = 0xffff;
		break;
	}

	if (si->sb.boardtype == 0) {
		SB_ERROR(("sb_doattach: unknown board type\n"));
		ASSERT(si->sb.boardtype);
	}

	si->sb.boardflags = getintvar(pvars, "boardflags");

	/* setup the GPIO based LED powersave register */
	if (si->sb.ccrev >= 16) {
		if ((pvars == NULL) || ((w = getintvar(pvars, "leddc")) == 0))
			w = DEFAULT_GPIOTIMERVAL;
		sb_corereg(&si->sb, SB_CC_IDX, OFFSETOF(chipcregs_t, gpiotimerval), ~0, w);
	}

	/* Determine if this board needs the PCIE ASPM/Clkreq WAR override */
	if (PCIE(si) && (si->sb.chip == BCM4321_CHIP_ID))
		si->pcie_war_ovr = ((si->sb.boardvendor == VENDOR_APPLE) &&
		                    ((uint8)getintvar(pvars, "sromrev") == 4) &&
		                    ((uint8)getintvar(pvars, "boardrev") <= 0x71)) ||
		        ((uint32)getintvar(pvars, "boardflags2") & BFL2_PCIEWAR_OVR);

	/* apply the ASPM workarounds for PCIE rev 3-5 cores */
	if (PCIE_ASPMWARS(si)) {
		sb_war43448_aspm((void *)si);
		sb_war42767_clkreq((void *)si);
	}

	/* force HT clock on affected 4311/4321 parts (PR32414) */
	if (FORCEHT_WAR32414(si)) {
		si->sb.pr32414 = TRUE;
		sb_clkctl_init(&si->sb);
		sb_war32414_forceHT(&si->sb, 1);
	}

	/* flag PCIE rev 6/7 cores for the PR42780 clkreq handling */
	if (PCIE(si) && ((si->sb.buscorerev == 6) || (si->sb.buscorerev == 7)))
		si->sb.pr42780 = TRUE;

	if (PCIE_ASPMWARS(si))
		sb_pcieclkreq(&si->sb, 1, 0);

	/* bump the D11 initiator timeout on affected 4311/4312 revs */
	if (PCIE(si) &&
	    (((si->sb.chip == BCM4311_CHIP_ID) && (si->sb.chiprev == 2)) ||
	     ((si->sb.chip == BCM4312_CHIP_ID) && (si->sb.chiprev == 0))))
		sb_set_initiator_to(&si->sb, 0x3, sb_findcoreidx(&si->sb, SB_D11, 0));

	/* Disable gpiopullup and gpiopulldown */
	if (!sb_onetimeinit && si->sb.ccrev >= 20) {
		cc = (chipcregs_t *)sb_setcore(&si->sb, SB_CC, 0);
		W_REG(osh, &cc->gpiopullup, 0);
		W_REG(osh, &cc->gpiopulldown, 0);
		sb_setcoreidx(&si->sb, origidx);
	}


#ifdef HNDRTE
	/* on HNDRTE the shared one-time init is latched after the first attach */
	sb_onetimeinit = TRUE;
#endif

	return (si);
}
535
536/* Enable/Disable clkreq for PCIE (4311B0/4321B1) */
537void
538BCMINITFN(sb_war42780_clkreq)(sb_t *sbh, bool clkreq)
539{
540	sb_info_t *si;
541
542	si = SB_INFO(sbh);
543
544	/* Don't change clkreq value if serdespll war has not yet been applied */
545	if (!si->pr42767_war && PCIE_ASPMWARS(si))
546		return;
547
548	sb_pcieclkreq(sbh, 1, (int32)clkreq);
549}
550
551static void
552BCMINITFN(sb_war43448)(sb_t *sbh)
553{
554	sb_info_t *si;
555
556	si = SB_INFO(sbh);
557
558	/* if not pcie bus, we're done */
559	if (!PCIE(si) || !PCIE_ASPMWARS(si))
560		return;
561
562	/* Restore the polarity */
563	if (si->pcie_polarity != 0)
564		sb_pcie_mdiowrite((void *)(uintptr)&si->sb, MDIODATA_DEV_RX,
565		                  SERDES_RX_CTRL, si->pcie_polarity);
566}
567
/* WAR for PCIE rev 3-5 cores: program the ASPM enable bit consistently in
 * both the PCIE core's shadow SROM area and the PCI config Link Control
 * register, honoring the per-board override flag (pcie_war_ovr).
 */
static void
BCMINITFN(sb_war43448_aspm)(sb_t *sbh)
{
	uint32 w;
	uint16 val16, *reg16;
	sbpcieregs_t *pcieregs;
	sb_info_t *si;

	si = SB_INFO(sbh);

	/* if not pcie bus, we're done */
	if (!PCIE(si) || !PCIE_ASPMWARS(si))
		return;

	/* no ASPM stuff on QT or VSIM */
	if (si->sb.chippkg == HDLSIM_PKG_ID || si->sb.chippkg == HWSIM_PKG_ID)
		return;

	/* NOTE(review): this switches to the bus core and does not restore the
	 * original core index -- presumably callers handle that; confirm.
	 */
	pcieregs = (sbpcieregs_t*) sb_setcoreidx(sbh, si->sb.buscoreidx);

	/* Enable ASPM in the shadow SROM and Link control */
	reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET];
	val16 = R_REG(si->osh, reg16);
	if (!si->pcie_war_ovr)
		val16 |= SRSH_ASPM_ENB;
	else
		val16 &= ~SRSH_ASPM_ENB;
	W_REG(si->osh, reg16, val16);

	/* mirror the same setting into the PCIE capability Link Control reg */
	w = OSL_PCI_READ_CONFIG(si->osh, si->pciecap_lcreg_offset, sizeof(uint32));
	if (!si->pcie_war_ovr)
		w |= PCIE_ASPM_ENAB;
	else
		w &= ~PCIE_ASPM_ENAB;
	OSL_PCI_WRITE_CONFIG(si->osh, si->pciecap_lcreg_offset, sizeof(uint32), w);
}
604
605static void
606BCMINITFN(sb_war32414_forceHT)(sb_t *sbh, bool forceHT)
607{
608	sb_info_t *si;
609	uint32 val = 0;
610
611	si = SB_INFO(sbh);
612
613	ASSERT(FORCEHT_WAR32414(si));
614
615
616	if (forceHT)
617		val = SYCC_HR;
618	sb_corereg(sbh, SB_CC_IDX, OFFSETOF(chipcregs_t, system_clk_ctl),
619	           SYCC_HR, val);
620}
621
622uint
623sb_coreid(sb_t *sbh)
624{
625	sb_info_t *si;
626	sbconfig_t *sb;
627
628	si = SB_INFO(sbh);
629	sb = REGS2SB(si->curmap);
630
631	return ((R_SBREG(si, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
632}
633
634uint
635sb_flag(sb_t *sbh)
636{
637	sb_info_t *si;
638	sbconfig_t *sb;
639
640	si = SB_INFO(sbh);
641	sb = REGS2SB(si->curmap);
642
643	return R_SBREG(si, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
644}
645
646uint
647sb_coreidx(sb_t *sbh)
648{
649	sb_info_t *si;
650
651	si = SB_INFO(sbh);
652	return (si->curidx);
653}
654
655/* return core index of the core with address 'sba' */
656static uint
657BCMINITFN(_sb_coreidx)(sb_info_t *si, uint32 sba)
658{
659	uint i;
660
661	for (i = 0; i < si->numcores; i ++)
662		if (sba == si->coresba[i])
663			return i;
664	return BADIDX;
665}
666
/* Return the backplane address of the current core, using whichever
 * mechanism the bus type provides; BADCOREADDR on unknown bus types.
 */
static uint32
BCMINITFN(_sb_coresba)(sb_info_t *si)
{
	uint32 sbaddr;

	switch (BUSTYPE(si->sb.bustype)) {
	case SB_BUS: {
		/* direct backplane: read the core's own address-match register */
		sbconfig_t *sb = REGS2SB(si->curmap);
		sbaddr = sb_base(R_SBREG(si, &sb->sbadmatch0));
		break;
	}

	case PCI_BUS:
		/* PCI: BAR0 window points at the current core */
		sbaddr = OSL_PCI_READ_CONFIG(si->osh, PCI_BAR0_WIN, sizeof(uint32));
		break;

	case PCMCIA_BUS: {
		/* PCMCIA: assemble the address from three attribute registers
		 * (bits 12-15, 16-23, 24-31)
		 */
		uint8 tmp = 0;
		OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR0, &tmp, 1);
		sbaddr  = (uint32)tmp << 12;
		OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR1, &tmp, 1);
		sbaddr |= (uint32)tmp << 16;
		OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_ADDR2, &tmp, 1);
		sbaddr |= (uint32)tmp << 24;
		break;
	}


#ifdef BCMJTAG
	case JTAG_BUS:
		/* JTAG: the mapped virtual address doubles as the core address */
		sbaddr = (uint32)(uintptr)si->curmap;
		break;
#endif	/* BCMJTAG */

	default:
		sbaddr = BADCOREADDR;
		break;
	}

	SB_MSG(("_sb_coresba: current core is 0x%08x\n", sbaddr));
	return sbaddr;
}
710
711uint
712sb_corevendor(sb_t *sbh)
713{
714	sb_info_t *si;
715	sbconfig_t *sb;
716
717	si = SB_INFO(sbh);
718	sb = REGS2SB(si->curmap);
719
720	return ((R_SBREG(si, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
721}
722
723uint
724sb_corerev(sb_t *sbh)
725{
726	sb_info_t *si;
727	sbconfig_t *sb;
728	uint sbidh;
729
730	si = SB_INFO(sbh);
731	sb = REGS2SB(si->curmap);
732	sbidh = R_SBREG(si, &sb->sbidhigh);
733
734	return (SBCOREREV(sbidh));
735}
736
737void *
738sb_osh(sb_t *sbh)
739{
740	sb_info_t *si;
741
742	si = SB_INFO(sbh);
743	return si->osh;
744}
745
746void
747sb_setosh(sb_t *sbh, osl_t *osh)
748{
749	sb_info_t *si;
750
751	si = SB_INFO(sbh);
752	if (si->osh != NULL) {
753		SB_ERROR(("osh is already set....\n"));
754		ASSERT(!si->osh);
755	}
756	si->osh = osh;
757}
758
759/* set sbtmstatelow core-specific flags */
760void
761sb_coreflags_wo(sb_t *sbh, uint32 mask, uint32 val)
762{
763	sb_info_t *si;
764	sbconfig_t *sb;
765	uint32 w;
766
767	si = SB_INFO(sbh);
768	sb = REGS2SB(si->curmap);
769
770	ASSERT((val & ~mask) == 0);
771
772	/* mask and set */
773	w = (R_SBREG(si, &sb->sbtmstatelow) & ~mask) | val;
774	W_SBREG(si, &sb->sbtmstatelow, w);
775}
776
777/* set/clear sbtmstatelow core-specific flags */
778uint32
779sb_coreflags(sb_t *sbh, uint32 mask, uint32 val)
780{
781	sb_info_t *si;
782	sbconfig_t *sb;
783	uint32 w;
784
785	si = SB_INFO(sbh);
786	sb = REGS2SB(si->curmap);
787
788	ASSERT((val & ~mask) == 0);
789
790	/* mask and set */
791	if (mask || val) {
792		w = (R_SBREG(si, &sb->sbtmstatelow) & ~mask) | val;
793		W_SBREG(si, &sb->sbtmstatelow, w);
794	}
795
796	/* return the new value
797	 * for write operation, the following readback ensures the completion of write opration.
798	 */
799	return (R_SBREG(si, &sb->sbtmstatelow));
800}
801
802/* set/clear sbtmstatehigh core-specific flags */
803uint32
804sb_coreflagshi(sb_t *sbh, uint32 mask, uint32 val)
805{
806	sb_info_t *si;
807	sbconfig_t *sb;
808	uint32 w;
809
810	si = SB_INFO(sbh);
811	sb = REGS2SB(si->curmap);
812
813	ASSERT((val & ~mask) == 0);
814	ASSERT((mask & ~SBTMH_FL_MASK) == 0);
815
816	/* mask and set */
817	if (mask || val) {
818		w = (R_SBREG(si, &sb->sbtmstatehigh) & ~mask) | val;
819		W_SBREG(si, &sb->sbtmstatehigh, w);
820	}
821
822	/* return the new value */
823	return (R_SBREG(si, &sb->sbtmstatehigh));
824}
825
/* Run bist on current core. Caller needs to take care of core-specific bist hazards.
 * Returns 0 on pass, BCME_ERROR on BIST failure.
 */
int
sb_corebist(sb_t *sbh)
{
	uint32 sblo;
	sb_info_t *si;
	sbconfig_t *sb;
	int result = 0;

	si = SB_INFO(sbh);
	sb = REGS2SB(si->curmap);

	/* save sbtmstatelow, then set force-gated-clocks and BIST-enable */
	sblo = R_SBREG(si, &sb->sbtmstatelow);
	W_SBREG(si, &sb->sbtmstatelow, (sblo | SBTML_FGC | SBTML_BE));

	/* spin (bound 100000 -- presumably microseconds) until BIST-done */
	SPINWAIT(((R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BISTD) == 0), 100000);

	/* BIST-fail bit reports the outcome */
	if (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BISTF)
		result = BCME_ERROR;

	/* restore the original sbtmstatelow */
	W_SBREG(si, &sb->sbtmstatelow, sblo);

	return result;
}
850
851bool
852sb_iscoreup(sb_t *sbh)
853{
854	sb_info_t *si;
855	sbconfig_t *sb;
856
857	si = SB_INFO(sbh);
858	sb = REGS2SB(si->curmap);
859
860	return ((R_SBREG(si, &sb->sbtmstatelow) &
861	         (SBTML_RESET | SBTML_REJ_MASK | SBTML_CLK)) == SBTML_CLK);
862}
863
864/*
865 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
866 * switch back to the original core, and return the new value.
867 *
868 * When using the silicon backplane, no fidleing with interrupts or core switches are needed.
869 *
870 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
871 * and (on newer pci cores) chipcommon registers.
872 */
873uint
874sb_corereg(sb_t *sbh, uint coreidx, uint regoff, uint mask, uint val)
875{
876	uint origidx = 0;
877	uint32 *r = NULL;
878	uint w;
879	uint intr_val = 0;
880	bool fast = FALSE;
881	sb_info_t *si;
882
883	si = SB_INFO(sbh);
884
885	ASSERT(GOODIDX(coreidx));
886	ASSERT(regoff < SB_CORE_SIZE);
887	ASSERT((val & ~mask) == 0);
888
889	if (BUSTYPE(si->sb.bustype) == SB_BUS) {
890		/* If internal bus, we can always get at everything */
891		fast = TRUE;
892		/* map if does not exist */
893		if (!si->regs[coreidx]) {
894			si->regs[coreidx] = (void*)REG_MAP(si->coresba[coreidx],
895			                                   SB_CORE_SIZE);
896			ASSERT(GOODREGS(si->regs[coreidx]));
897		}
898		r = (uint32 *)((uchar *)si->regs[coreidx] + regoff);
899	} else if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
900		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
901
902		if ((si->coreid[coreidx] == SB_CC) &&
903		    ((si->sb.buscoretype == SB_PCIE) ||
904		     (si->sb.buscorerev >= 13))) {
905			/* Chipc registers are mapped at 12KB */
906
907			fast = TRUE;
908			r = (uint32 *)((char *)si->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
909		} else if (si->sb.buscoreidx == coreidx) {
910			/* pci registers are at either in the last 2KB of an 8KB window
911			 * or, in pcie and pci rev 13 at 8KB
912			 */
913			fast = TRUE;
914			if ((si->sb.buscoretype == SB_PCIE) ||
915			    (si->sb.buscorerev >= 13))
916				r = (uint32 *)((char *)si->curmap +
917				               PCI_16KB0_PCIREGS_OFFSET + regoff);
918			else
919				r = (uint32 *)((char *)si->curmap +
920				               ((regoff >= SBCONFIGOFF) ?
921				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
922				               regoff);
923		}
924	}
925
926	if (!fast) {
927		INTR_OFF(si, intr_val);
928
929		/* save current core index */
930		origidx = sb_coreidx(&si->sb);
931
932		/* switch core */
933		r = (uint32*) ((uchar*) sb_setcoreidx(&si->sb, coreidx) + regoff);
934	}
935	ASSERT(r);
936
937	/* mask and set */
938	if (mask || val) {
939		if (regoff >= SBCONFIGOFF) {
940			w = (R_SBREG(si, r) & ~mask) | val;
941			W_SBREG(si, r, w);
942		} else {
943			w = (R_REG(si->osh, r) & ~mask) | val;
944			W_REG(si->osh, r, w);
945		}
946	}
947
948	/* readback */
949	if (regoff >= SBCONFIGOFF)
950		w = R_SBREG(si, r);
951	else {
952		if ((si->sb.chip == BCM5354_CHIP_ID) &&
953		    (coreidx == SB_CC_IDX) &&
954		    (regoff == OFFSETOF(chipcregs_t, watchdog))) {
955			w = val;
956		} else
957			w = R_REG(si->osh, r);
958	}
959
960	if (!fast) {
961		/* restore core index */
962		if (origidx != coreidx)
963			sb_setcoreidx(&si->sb, origidx);
964
965		INTR_RESTORE(si, intr_val);
966	}
967
968	return (w);
969}
970
971#define DWORD_ALIGN(x)  (x & ~(0x03))
972#define BYTE_POS(x) (x & 0x3)
973#define WORD_POS(x) (x & 0x1)
974
975#define BYTE_SHIFT(x)  (8 * BYTE_POS(x))
976#define WORD_SHIFT(x)  (16 * WORD_POS(x))
977
978#define BYTE_VAL(a, x) ((a >> BYTE_SHIFT(x)) & 0xFF)
979#define WORD_VAL(a, x) ((a >> WORD_SHIFT(x)) & 0xFFFF)
980
981#define read_pci_cfg_byte(a) \
982	(BYTE_VAL(OSL_PCI_READ_CONFIG(si->osh, DWORD_ALIGN(a), 4), a) & 0xff)
983
984#define read_pci_cfg_word(a) \
985	(WORD_VAL(OSL_PCI_READ_CONFIG(si->osh, DWORD_ALIGN(a), 4), a) & 0xffff)
986
987
988/* return cap_offset if requested capability exists in the PCI config space */
989static uint8
990sb_find_pci_capability(sb_info_t *si, uint8 req_cap_id, uchar *buf, uint32 *buflen)
991{
992	uint8 cap_id;
993	uint8 cap_ptr = 0;
994	uint32 	bufsize;
995	uint8 byte_val;
996
997	if (BUSTYPE(si->sb.bustype) != PCI_BUS)
998	       goto end;
999
1000	/* check for Header type 0 */
1001	byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
1002	if ((byte_val & 0x7f) != PCI_HEADER_NORMAL)
1003		goto end;
1004
1005	/* check if the capability pointer field exists */
1006	byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
1007	if (!(byte_val & PCI_CAPPTR_PRESENT))
1008		goto end;
1009
1010	cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
1011	/* check if the capability pointer is 0x00 */
1012	if (cap_ptr == 0x00)
1013		goto end;
1014
1015	/* loop thr'u the capability list and see if the pcie capabilty exists */
1016
1017	cap_id = read_pci_cfg_byte(cap_ptr);
1018
1019	while (cap_id != req_cap_id) {
1020		cap_ptr = read_pci_cfg_byte((cap_ptr+1));
1021		if (cap_ptr == 0x00) break;
1022		cap_id = read_pci_cfg_byte(cap_ptr);
1023	}
1024	if (cap_id != req_cap_id) {
1025		goto end;
1026	}
1027	/* found the caller requested capability */
1028	if ((buf != NULL) && (buflen != NULL)) {
1029		uint8 cap_data;
1030
1031		bufsize = *buflen;
1032		if (!bufsize) goto end;
1033		*buflen = 0;
1034		/* copy the cpability data excluding cap ID and next ptr */
1035		cap_data = cap_ptr + 2;
1036		if ((bufsize + cap_data)  > SZPCR)
1037			bufsize = SZPCR - cap_data;
1038		*buflen = bufsize;
1039		while (bufsize--) {
1040			*buf = read_pci_cfg_byte(cap_data);
1041			cap_data++;
1042			buf++;
1043		}
1044	}
1045end:
1046	return cap_ptr;
1047}
1048
1049uint8
1050sb_pcieclkreq(sb_t *sbh, uint32 mask, uint32 val)
1051{
1052	sb_info_t *si;
1053	uint32 reg_val;
1054	uint8 offset;
1055
1056	si = SB_INFO(sbh);
1057
1058	offset = si->pciecap_lcreg_offset;
1059	if (!offset)
1060		return 0;
1061
1062	reg_val = OSL_PCI_READ_CONFIG(si->osh, offset, sizeof(uint32));
1063	/* set operation */
1064	if (mask) {
1065		if (val)
1066			reg_val |= PCIE_CLKREQ_ENAB;
1067		else
1068			reg_val &= ~PCIE_CLKREQ_ENAB;
1069		OSL_PCI_WRITE_CONFIG(si->osh, offset, sizeof(uint32), reg_val);
1070		reg_val = OSL_PCI_READ_CONFIG(si->osh, offset, sizeof(uint32));
1071	}
1072	if (reg_val & PCIE_CLKREQ_ENAB)
1073		return 1;
1074	else
1075		return 0;
1076}
1077
1078
1079
1080/* return TRUE if PCIE capability exists in the pci config space */
1081static bool
1082sb_ispcie(sb_info_t *si)
1083{
1084	uint8 cap_ptr;
1085
1086	cap_ptr = sb_find_pci_capability(si, PCI_CAP_PCIECAP_ID, NULL, NULL);
1087	if (!cap_ptr)
1088	    return FALSE;
1089
1090	si->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
1091
1092	return TRUE;
1093}
1094
1095/* Wake-on-wireless-LAN (WOWL) support functions */
1096/* return TRUE if PM capability exists in the pci config space */
1097bool
1098sb_pci_pmecap(sb_t *sbh)
1099{
1100	uint8 cap_ptr;
1101	uint32 pmecap;
1102	sb_info_t *si;
1103
1104	si = SB_INFO(sbh);
1105
1106	if (si == NULL || !(PCI(si) || PCIE(si)))
1107		return FALSE;
1108
1109	if (!si->pmecap_offset) {
1110		cap_ptr = sb_find_pci_capability(si, PCI_CAP_POWERMGMTCAP_ID, NULL, NULL);
1111		if (!cap_ptr)
1112			return FALSE;
1113
1114		si->pmecap_offset = cap_ptr;
1115
1116		pmecap = OSL_PCI_READ_CONFIG(si->osh, si->pmecap_offset, sizeof(uint32));
1117
1118		/* At least one state can generate PME */
1119		si->pmecap = (pmecap & PME_CAP_PM_STATES) != 0;
1120	}
1121
1122	return (si->pmecap);
1123}
1124
/* Enable PME generation and disable clkreq */
void
sb_pci_pmeen(sb_t *sbh)
{
	sb_info_t *si;
	uint32 w;
	si = SB_INFO(sbh);

	/* if not pmecapable return */
	if (!sb_pci_pmecap(sbh))
		return;

	/* set the PME enable bit in the PM capability control/status reg */
	w = OSL_PCI_READ_CONFIG(si->osh, si->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32));
	w |= (PME_CSR_PME_EN);
	OSL_PCI_WRITE_CONFIG(si->osh, si->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32), w);

	/* Disable clkreq */
	if (si->pr42767_war) {
		/* PR42767 WAR active: force CLKREQ off and retire the WAR flag */
		sb_pcieclkreq(sbh, 1, 0);
		si->pr42767_war = FALSE;
	} else if (si->sb.pr42780) {
		/* NOTE(review): this path ENABLES clkreq despite the
		 * "Disable clkreq" comment above -- presumably intentional
		 * for the pr42780 chips, but confirm against the WAR notes
		 */
		sb_pcieclkreq(sbh, 1, 1);
	}
}
1149
/* Disable PME generation, clear the PME status bit if set and
 * return TRUE if PME status set
 */
bool
sb_pci_pmeclr(sb_t *sbh)
{
	sb_info_t *si;
	uint32 w;
	bool ret = FALSE;

	si = SB_INFO(sbh);

	/* nothing to do when no PME-capable PM capability is present */
	if (!sb_pci_pmecap(sbh))
		return ret;

	w = OSL_PCI_READ_CONFIG(si->osh, si->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32));

	SB_ERROR(("sb_pci_pmeclr PMECSR : 0x%x\n", w));
	ret = (w & PME_CSR_PME_STAT) == PME_CSR_PME_STAT;

	/* PMESTAT is cleared by writing 1 to it */
	/* only the enable bit is masked off here; if PME_CSR_PME_STAT was
	 * set in the value read above it is written back as 1, which clears
	 * it (write-one-to-clear semantics)
	 */
	w &= ~(PME_CSR_PME_EN);

	OSL_PCI_WRITE_CONFIG(si->osh, si->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32), w);

	return ret;
}
1177
/* Scan the enumeration space to find all cores starting from the given
 * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
 * is the default core address at chip POR time and 'regs' is the virtual
 * address that the default core is mapped at. 'ncores' is the number of
 * cores expected on bus 'sbba'. It returns the total number of cores
 * starting from bus 'sbba', inclusive.
 */
#define SB_MAXBUSES	2
static uint
BCMINITFN(_sb_scan)(sb_info_t *si, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores)
{
	uint next;
	uint ncc = 0;
	uint i;

	/* guard against runaway recursion through SB-to-SB bridges */
	if (bus >= SB_MAXBUSES) {
		SB_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
		return 0;
	}
	SB_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));

	/* Scan all cores on the bus starting from core 0.
	 * Core addresses must be contiguous on each bus.
	 */
	for (i = 0, next = si->numcores; i < numcores && next < SB_MAXCORES; i++, next++) {
		si->coresba[next] = sbba + i * SB_CORE_SIZE;

		/* keep and reuse the initial register mapping */
		if (BUSTYPE(si->sb.bustype) == SB_BUS && si->coresba[next] == sba) {
			SB_MSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
			si->regs[next] = regs;
		}

		/* change core to 'next' and read its coreid */
		si->curmap = _sb_setcoreidx(si, next);
		si->curidx = next;

		si->coreid[next] = sb_coreid(&si->sb);

		/* core specific processing... */
		/* chipc on bus SB_ENUM_BASE provides # cores in the chip and lots of
		 * other stuff.
		 */
		if (sbba == SB_ENUM_BASE && si->coreid[next] == SB_CC) {
			chipcregs_t *cc = (chipcregs_t *)si->curmap;

			/* get chip id and rev */
			si->sb.chip = R_REG(si->osh, &cc->chipid) & CID_ID_MASK;
			si->sb.chiprev = (R_REG(si->osh, &cc->chipid) & CID_REV_MASK) >>
			        CID_REV_SHIFT;
			si->sb.chippkg = (R_REG(si->osh, &cc->chipid) & CID_PKG_MASK) >>
			        CID_PKG_SHIFT;

			/* get chipcommon rev */
			si->sb.ccrev = (int)sb_corerev(&si->sb);

			/* get chipcommon chipstatus */
			/* chipstatus register only read for chipc rev >= 11 */
			if (si->sb.ccrev >= 11)
				si->sb.chipst = R_REG(si->osh, &cc->chipstatus);

			/* get chipcommon capabilites */
			si->sb.cccaps = R_REG(si->osh, &cc->capabilities);

			/* get pmu rev and caps */
			if ((si->sb.cccaps & CC_CAP_PMU)) {
				si->sb.pmucaps = R_REG(si->osh, &cc->pmucapabilities);
				si->sb.pmurev = si->sb.pmucaps & PCAP_REV_MASK;
			}

			/* determine numcores - this is the total # cores in the chip */
			if (((si->sb.ccrev == 4) || (si->sb.ccrev >= 6)))
				numcores = (R_REG(si->osh, &cc->chipid) & CID_CC_MASK) >>
				        CID_CC_SHIFT;
			else
				/* chipc revs without a core-count field: derive the
				 * count from the chip id instead
				 */
				numcores = sb_chip2numcores(si->sb.chip);
			SB_MSG(("_sb_scan: there are %u cores in the chip\n", numcores));
		}
		/* scan bridged SB(s) and add results to the end of the list */
		else if (si->coreid[next] == SB_OCP) {
			sbconfig_t *sb = REGS2SB(si->curmap);
			uint32 nsbba = R_SBREG(si, &sb->sbadmatch1);
			uint nsbcc;

			si->numcores = next + 1;

			/* bridged bus must sit in the enumeration space */
			if ((nsbba & 0xfff00000) != SB_ENUM_BASE)
				continue;
			nsbba &= 0xfffff000;
			/* skip buses already recorded by an earlier scan */
			if (_sb_coreidx(si, nsbba) != BADIDX)
				continue;

			/* expected core count behind the bridge comes from the
			 * bridge's sbtmstatehigh; recurse one bus level deeper
			 */
			nsbcc = (R_SBREG(si, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
			nsbcc = _sb_scan(si, sba, regs, bus + 1, nsbba, nsbcc);
			/* bridged cores were included in the chip-wide total */
			if (sbba == SB_ENUM_BASE)
				numcores -= nsbcc;
			ncc += nsbcc;
		}
	}

	SB_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));

	/* total = cores found on this bus plus all cores behind bridges */
	si->numcores = i + ncc;
	return si->numcores;
}
1282
/* scan the sb enumerated space to identify all cores */
static void
BCMINITFN(sb_scan)(sb_info_t *si, void *regs, uint devid)
{
	uint origidx;
	uint32 origsba;
	uint i;
	bool pci;
	bool pcie;
	uint pciidx;
	uint pcieidx;
	uint pcirev;
	uint pcierev;
	uint numcores;

	/* Save the current core info and validate it later till we know
	 * for sure what is good and what is bad.
	 */
	origsba = _sb_coresba(si);
	origidx = BADIDX;

	/* Use devid as initial chipid and we'll update it later in _sb_scan */
	si->sb.chip = devid;

	/* Support chipcommon-less chips for a little while longer so the old
	 * sdio host fpga continues to work until we can get the new one working
	 * reliably. This particular chip has 2 cores - codec/sdio and pci.
	 */
	if (devid == SDIOH_FPGA_ID)
		numcores = 2;
	/* Expect at least one core on 0x18000000 and it must be chipcommon where
	 * the core count for the whole chip is kept.
	 */
	else
		numcores = 1;

	/* scan all SB(s) starting from SB_ENUM_BASE */
	si->numcores = _sb_scan(si, origsba, regs, 0, SB_ENUM_BASE, numcores);
	if (si->numcores == 0)
		return;

	/* figure out bus/orignal core idx */
	si->sb.buscorerev = NOREV;
	si->sb.buscoreidx = BADIDX;

	pci = pcie = FALSE;
	pcirev = pcierev = NOREV;
	pciidx = pcieidx = BADIDX;

	/* walk the discovered cores: note any PCI/PCIE/PCMCIA bridge core
	 * and re-locate the core that was selected on entry
	 */
	for (i = 0; i < si->numcores; i++) {
		sb_setcoreidx(&si->sb, i);

		if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
			if (si->coreid[i] == SB_PCI) {
				pciidx = i;
				pcirev = sb_corerev(&si->sb);
				pci = TRUE;
			} else if (si->coreid[i] == SB_PCIE) {
				pcieidx = i;
				pcierev = sb_corerev(&si->sb);
				pcie = TRUE;
			}
		} else if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
			if (si->coreid[i] == SB_PCMCIA) {
				si->sb.buscorerev = sb_corerev(&si->sb);
				si->sb.buscoretype = si->coreid[i];
				si->sb.buscoreidx = i;
			}
		}

		/* find the core idx before entering this func. */
		if (origsba == si->coresba[i])
			origidx = i;
	}

	/* when both PCI and PCIE cores exist, the PCI config space
	 * capability list decides which one is actually wired to the bus
	 */
	if (pci && pcie) {
		if (sb_ispcie(si))
			pci = FALSE;
		else
			pcie = FALSE;
	}
	if (pci) {
		si->sb.buscoretype = SB_PCI;
		si->sb.buscorerev = pcirev;
		si->sb.buscoreidx = pciidx;
	} else if (pcie) {
		si->sb.buscoretype = SB_PCIE;
		si->sb.buscorerev = pcierev;
		si->sb.buscoreidx = pcieidx;
	}

	/* return to the original core */
	if (origidx != BADIDX)
		sb_setcoreidx(&si->sb, origidx);
	ASSERT(origidx != BADIDX);
}
1379
/* may be called with core in reset */
void
sb_detach(sb_t *sbh)
{
	sb_info_t *si;
	uint idx;

	si = SB_INFO(sbh);

	if (si == NULL)
		return;

	/* on a native SB bus each core got its own lazy mapping from
	 * _sb_setcoreidx(); unmap them all before freeing the state
	 */
	if (BUSTYPE(si->sb.bustype) == SB_BUS)
		for (idx = 0; idx < SB_MAXCORES; idx++)
			if (si->regs[idx]) {
				REG_UNMAP(si->regs[idx]);
				si->regs[idx] = NULL;
			}
#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SB_BUS)
	/* the static kernel instance 'ksi' is not heap allocated; only
	 * free sb_info_t instances that were MALLOCed by attach
	 */
	if (si != &ksi)
#endif	/* !BCMBUSTYPE || (BCMBUSTYPE == SB_BUS) */
		MFREE(si->osh, si, sizeof(sb_info_t));
}
1403
1404/* convert chip number to number of i/o cores */
1405static uint
1406BCMINITFN(sb_chip2numcores)(uint chip)
1407{
1408	if (chip == BCM4306_CHIP_ID)	/* < 4306c0 */
1409		return (6);
1410	if (chip == BCM4704_CHIP_ID)
1411		return (9);
1412	if (chip == BCM5365_CHIP_ID)
1413		return (7);
1414
1415	SB_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n", chip));
1416	ASSERT(0);
1417	return (1);
1418}
1419
1420/* return index of coreid or BADIDX if not found */
1421uint
1422sb_findcoreidx(sb_t *sbh, uint coreid, uint coreunit)
1423{
1424	sb_info_t *si;
1425	uint found;
1426	uint i;
1427
1428	si = SB_INFO(sbh);
1429
1430	found = 0;
1431
1432	for (i = 0; i < si->numcores; i++)
1433		if (si->coreid[i] == coreid) {
1434			if (found == coreunit)
1435				return (i);
1436			found++;
1437		}
1438
1439	return (BADIDX);
1440}
1441
1442/*
1443 * this function changes logical "focus" to the indiciated core,
1444 * must be called with interrupt off.
1445 * Moreover, callers should keep interrupts off during switching out of and back to d11 core
1446 */
1447void*
1448sb_setcoreidx(sb_t *sbh, uint coreidx)
1449{
1450	sb_info_t *si;
1451
1452	si = SB_INFO(sbh);
1453
1454	if (coreidx >= si->numcores)
1455		return (NULL);
1456
1457	/*
1458	 * If the user has provided an interrupt mask enabled function,
1459	 * then assert interrupts are disabled before switching the core.
1460	 */
1461	ASSERT((si->intrsenabled_fn == NULL) || !(*(si)->intrsenabled_fn)((si)->intr_arg));
1462
1463	si->curmap = _sb_setcoreidx(si, coreidx);
1464	si->curidx = coreidx;
1465
1466	return (si->curmap);
1467}
1468
/* This function changes the logical "focus" to the indiciated core.
 * Return the current core's virtual address.
 */
static void *
_sb_setcoreidx(sb_info_t *si, uint coreidx)
{
	uint32 sbaddr = si->coresba[coreidx];
	void *regs;

	switch (BUSTYPE(si->sb.bustype)) {
	case SB_BUS:
		/* map new one */
		/* mappings are created lazily and cached in si->regs[] until
		 * sb_detach() unmaps them
		 */
		if (!si->regs[coreidx]) {
			si->regs[coreidx] = (void*)REG_MAP(sbaddr, SB_CORE_SIZE);
			ASSERT(GOODREGS(si->regs[coreidx]));
		}
		regs = si->regs[coreidx];
		break;

	case PCI_BUS:
		/* point bar0 window */
		/* the single BAR0 window is re-aimed at the core's backplane
		 * address; the host-side mapping (curmap) never changes
		 */
		OSL_PCI_WRITE_CONFIG(si->osh, PCI_BAR0_WIN, 4, sbaddr);
		regs = si->curmap;
		break;

	case PCMCIA_BUS: {
		/* program the three window-address attribute registers with
		 * successive byte slices of the backplane address
		 */
		uint8 tmp = (sbaddr >> 12) & 0x0f;
		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR0, &tmp, 1);
		tmp = (sbaddr >> 16) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR1, &tmp, 1);
		tmp = (sbaddr >> 24) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_ADDR2, &tmp, 1);
		regs = si->curmap;
		break;
	}

#ifdef BCMJTAG
	case JTAG_BUS:
		/* map new one */
		/* over jtag the raw backplane address doubles as the "mapping" */
		if (!si->regs[coreidx]) {
			si->regs[coreidx] = (void *)(uintptr)sbaddr;
			ASSERT(GOODREGS(si->regs[coreidx]));
		}
		regs = si->regs[coreidx];
		break;
#endif	/* BCMJTAG */

	default:
		ASSERT(0);
		regs = NULL;
		break;
	}

	return regs;
}
1524
1525/*
1526 * this function changes logical "focus" to the indiciated core,
1527 * must be called with interrupt off.
1528 * Moreover, callers should keep interrupts off during switching out of and back to d11 core
1529 */
1530void*
1531sb_setcore(sb_t *sbh, uint coreid, uint coreunit)
1532{
1533	uint idx;
1534
1535	idx = sb_findcoreidx(sbh, coreid, coreunit);
1536	if (!GOODIDX(idx))
1537		return (NULL);
1538
1539	return (sb_setcoreidx(sbh, idx));
1540}
1541
1542/* return chip number */
1543uint
1544BCMINITFN(sb_chip)(sb_t *sbh)
1545{
1546	sb_info_t *si;
1547
1548	si = SB_INFO(sbh);
1549	return (si->sb.chip);
1550}
1551
1552/* return chip revision number */
1553uint
1554BCMINITFN(sb_chiprev)(sb_t *sbh)
1555{
1556	sb_info_t *si;
1557
1558	si = SB_INFO(sbh);
1559	return (si->sb.chiprev);
1560}
1561
1562/* return chip common revision number */
1563uint
1564BCMINITFN(sb_chipcrev)(sb_t *sbh)
1565{
1566	sb_info_t *si;
1567
1568	si = SB_INFO(sbh);
1569	return (si->sb.ccrev);
1570}
1571
1572/* return chip package option */
1573uint
1574BCMINITFN(sb_chippkg)(sb_t *sbh)
1575{
1576	sb_info_t *si;
1577
1578	si = SB_INFO(sbh);
1579	return (si->sb.chippkg);
1580}
1581
1582/* return PCI core rev. */
1583uint
1584BCMINITFN(sb_pcirev)(sb_t *sbh)
1585{
1586	sb_info_t *si;
1587
1588	si = SB_INFO(sbh);
1589	return (si->sb.buscorerev);
1590}
1591
1592bool
1593BCMINITFN(sb_war16165)(sb_t *sbh)
1594{
1595	sb_info_t *si;
1596
1597	si = SB_INFO(sbh);
1598
1599	return (PCI(si) && (si->sb.buscorerev <= 10));
1600}
1601
/* WAR for hw bug 30841: retune the PCIE serdes receiver path.
 * The literals are chip-specific serdes register settings -- presumably
 * timer/CDR/CDR-bandwidth tuning values from the hw team; do not alter
 * without consulting the original WAR description.
 */
static void
BCMINITFN(sb_war30841)(sb_info_t *si)
{
	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
	sb_pcie_mdiowrite(si, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);
}
1609
1610/* return PCMCIA core rev. */
1611uint
1612BCMINITFN(sb_pcmciarev)(sb_t *sbh)
1613{
1614	sb_info_t *si;
1615
1616	si = SB_INFO(sbh);
1617	return (si->sb.buscorerev);
1618}
1619
1620/* return board vendor id */
1621uint
1622BCMINITFN(sb_boardvendor)(sb_t *sbh)
1623{
1624	sb_info_t *si;
1625
1626	si = SB_INFO(sbh);
1627	return (si->sb.boardvendor);
1628}
1629
1630/* return boardtype */
1631uint
1632BCMINITFN(sb_boardtype)(sb_t *sbh)
1633{
1634	sb_info_t *si;
1635	char *var;
1636
1637	si = SB_INFO(sbh);
1638
1639	if (BUSTYPE(si->sb.bustype) == SB_BUS && si->sb.boardtype == 0xffff) {
1640		/* boardtype format is a hex string */
1641		si->sb.boardtype = getintvar(NULL, "boardtype");
1642
1643		/* backward compatibility for older boardtype string format */
1644		if ((si->sb.boardtype == 0) && (var = getvar(NULL, "boardtype"))) {
1645			if (!strcmp(var, "bcm94710dev"))
1646				si->sb.boardtype = BCM94710D_BOARD;
1647			else if (!strcmp(var, "bcm94710ap"))
1648				si->sb.boardtype = BCM94710AP_BOARD;
1649			else if (!strcmp(var, "bu4710"))
1650				si->sb.boardtype = BU4710_BOARD;
1651			else if (!strcmp(var, "bcm94702mn"))
1652				si->sb.boardtype = BCM94702MN_BOARD;
1653			else if (!strcmp(var, "bcm94710r1"))
1654				si->sb.boardtype = BCM94710R1_BOARD;
1655			else if (!strcmp(var, "bcm94710r4"))
1656				si->sb.boardtype = BCM94710R4_BOARD;
1657			else if (!strcmp(var, "bcm94702cpci"))
1658				si->sb.boardtype = BCM94702CPCI_BOARD;
1659			else if (!strcmp(var, "bcm95380_rr"))
1660				si->sb.boardtype = BCM95380RR_BOARD;
1661		}
1662	}
1663
1664	return (si->sb.boardtype);
1665}
1666
1667/* return bus type of sbh device */
1668uint
1669sb_bus(sb_t *sbh)
1670{
1671	sb_info_t *si;
1672
1673	si = SB_INFO(sbh);
1674	return (si->sb.bustype);
1675}
1676
1677/* return bus core type */
1678uint
1679sb_buscoretype(sb_t *sbh)
1680{
1681	sb_info_t *si;
1682
1683	si = SB_INFO(sbh);
1684
1685	return (si->sb.buscoretype);
1686}
1687
1688/* return bus core revision */
1689uint
1690sb_buscorerev(sb_t *sbh)
1691{
1692	sb_info_t *si;
1693	si = SB_INFO(sbh);
1694
1695	return (si->sb.buscorerev);
1696}
1697
1698/* return list of found cores */
1699uint
1700sb_corelist(sb_t *sbh, uint coreid[])
1701{
1702	sb_info_t *si;
1703
1704	si = SB_INFO(sbh);
1705
1706	bcopy((uchar*)si->coreid, (uchar*)coreid, (si->numcores * sizeof(uint)));
1707	return (si->numcores);
1708}
1709
1710/* return current register mapping */
1711void *
1712sb_coreregs(sb_t *sbh)
1713{
1714	sb_info_t *si;
1715
1716	si = SB_INFO(sbh);
1717	ASSERT(GOODREGS(si->curmap));
1718
1719	return (si->curmap);
1720}
1721
1722#if defined(BCMDBG_ASSERT)
/* traverse all cores to find and clear source of serror */
static void
sb_serr_clear(sb_info_t *si)
{
	sbconfig_t *sb;
	uint origidx;
	uint i, intr_val = 0;
	void * corereg = NULL;

	/* mask interrupts and remember the focused core so both can be
	 * restored after the sweep
	 */
	INTR_OFF(si, intr_val);
	origidx = sb_coreidx(&si->sb);

	for (i = 0; i < si->numcores; i++) {
		corereg = sb_setcoreidx(&si->sb, i);
		if (NULL != corereg) {
			sb = REGS2SB(corereg);
			/* clear the SError bit in any core reporting one */
			if ((R_SBREG(si, &sb->sbtmstatehigh)) & SBTMH_SERR) {
				AND_SBREG(si, &sb->sbtmstatehigh, ~SBTMH_SERR);
				SB_ERROR(("sb_serr_clear: SError at core 0x%x\n",
				          sb_coreid(&si->sb)));
			}
		}
	}

	sb_setcoreidx(&si->sb, origidx);
	INTR_RESTORE(si, intr_val);
}
1750
1751/*
1752 * Check if any inband, outband or timeout errors has happened and clear them.
1753 * Must be called with chip clk on !
1754 */
1755bool
1756sb_taclear(sb_t *sbh)
1757{
1758	sb_info_t *si;
1759	sbconfig_t *sb;
1760	uint origidx;
1761	uint intr_val = 0;
1762	bool rc = FALSE;
1763	uint32 inband = 0, serror = 0, timeout = 0;
1764	void *corereg = NULL;
1765	volatile uint32 imstate, tmstate;
1766
1767	si = SB_INFO(sbh);
1768
1769	if (BUSTYPE(si->sb.bustype) == PCI_BUS) {
1770		volatile uint32 stcmd;
1771
1772		/* inband error is Target abort for PCI */
1773		stcmd = OSL_PCI_READ_CONFIG(si->osh, PCI_CFG_CMD, sizeof(uint32));
1774		inband = stcmd & PCI_CFG_CMD_STAT_TA;
1775		if (inband) {
1776			OSL_PCI_WRITE_CONFIG(si->osh, PCI_CFG_CMD, sizeof(uint32), stcmd);
1777		}
1778
1779		/* serror */
1780		stcmd = OSL_PCI_READ_CONFIG(si->osh, PCI_INT_STATUS, sizeof(uint32));
1781		serror = stcmd & PCI_SBIM_STATUS_SERR;
1782		if (serror) {
1783			sb_serr_clear(si);
1784			OSL_PCI_WRITE_CONFIG(si->osh, PCI_INT_STATUS, sizeof(uint32), stcmd);
1785		}
1786
1787		/* timeout */
1788		imstate = sb_corereg(sbh, si->sb.buscoreidx,
1789		                     SBCONFIGOFF + OFFSETOF(sbconfig_t, sbimstate), 0, 0);
1790		if ((imstate != 0xffffffff) && (imstate & (SBIM_IBE | SBIM_TO))) {
1791			sb_corereg(sbh, si->sb.buscoreidx,
1792			           SBCONFIGOFF + OFFSETOF(sbconfig_t, sbimstate), ~0,
1793			           (imstate & ~(SBIM_IBE | SBIM_TO)));
1794			/* inband = imstate & SBIM_IBE; same as TA above */
1795			timeout = imstate & SBIM_TO;
1796			if (timeout) {
1797			}
1798		}
1799
1800		if (inband) {
1801			/* dump errlog for sonics >= 2.3 */
1802			if (si->sb.sonicsrev == SONICS_2_2)
1803				;
1804			else {
1805				uint32 imerrlog, imerrloga;
1806				imerrlog = sb_corereg(sbh, si->sb.buscoreidx, SBIMERRLOG, 0, 0);
1807				if (imerrlog & SBTMEL_EC) {
1808					imerrloga = sb_corereg(sbh, si->sb.buscoreidx, SBIMERRLOGA,
1809						0, 0);
1810					/* clear errlog */
1811					sb_corereg(sbh, si->sb.buscoreidx, SBIMERRLOG, ~0, 0);
1812					SB_ERROR(("sb_taclear: ImErrLog 0x%x, ImErrLogA 0x%x\n",
1813						imerrlog, imerrloga));
1814				}
1815			}
1816		}
1817
1818
1819	} else if (BUSTYPE(si->sb.bustype) == PCMCIA_BUS) {
1820
1821		INTR_OFF(si, intr_val);
1822		origidx = sb_coreidx(sbh);
1823
1824		corereg = sb_setcore(sbh, SB_PCMCIA, 0);
1825		if (NULL != corereg) {
1826			sb = REGS2SB(corereg);
1827
1828			imstate = R_SBREG(si, &sb->sbimstate);
1829			/* handle surprise removal */
1830			if ((imstate != 0xffffffff) && (imstate & (SBIM_IBE | SBIM_TO))) {
1831				AND_SBREG(si, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
1832				inband = imstate & SBIM_IBE;
1833				timeout = imstate & SBIM_TO;
1834			}
1835			tmstate = R_SBREG(si, &sb->sbtmstatehigh);
1836			if ((tmstate != 0xffffffff) && (tmstate & SBTMH_INT_STATUS)) {
1837				if (!inband) {
1838					serror = 1;
1839					sb_serr_clear(si);
1840				}
1841				OR_SBREG(si, &sb->sbtmstatelow, SBTML_INT_ACK);
1842				AND_SBREG(si, &sb->sbtmstatelow, ~SBTML_INT_ACK);
1843			}
1844		}
1845		sb_setcoreidx(sbh, origidx);
1846		INTR_RESTORE(si, intr_val);
1847
1848	}
1849
1850
1851	if (inband | timeout | serror) {
1852		rc = TRUE;
1853		SB_ERROR(("sb_taclear: inband 0x%x, serror 0x%x, timeout 0x%x!\n",
1854		          inband, serror, timeout));
1855	}
1856
1857	return (rc);
1858}
1859#endif
1860
/* do buffered registers update */
void
sb_commit(sb_t *sbh)
{
	sb_info_t *si;
	uint origidx;
	uint intr_val = 0;

	si = SB_INFO(sbh);

	origidx = si->curidx;
	ASSERT(GOODIDX(origidx));

	INTR_OFF(si, intr_val);

	/* switch over to chipcommon core if there is one, else use pci */
	if (si->sb.ccrev != NOREV) {
		/* NOTE(review): return of sb_setcore() is dereferenced without
		 * a NULL check -- relies on chipcommon existing when ccrev is
		 * valid; same for the pci branch below
		 */
		chipcregs_t *ccregs = (chipcregs_t *)sb_setcore(sbh, SB_CC, 0);

		/* do the buffer registers update */
		W_REG(si->osh, &ccregs->broadcastaddress, SB_COMMIT);
		W_REG(si->osh, &ccregs->broadcastdata, 0x0);
	} else if (PCI(si)) {
		sbpciregs_t *pciregs = (sbpciregs_t *)sb_setcore(sbh, SB_PCI, 0);

		/* do the buffer registers update */
		W_REG(si->osh, &pciregs->bcastaddr, SB_COMMIT);
		W_REG(si->osh, &pciregs->bcastdata, 0x0);
	} else
		ASSERT(0);

	/* restore core index */
	sb_setcoreidx(sbh, origidx);
	INTR_RESTORE(si, intr_val);
}
1896
/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 *
 * The dummy reads after each tmstatelow write flush the posted write so
 * the subsequent delay really follows the hardware update.
 */
void
sb_core_reset(sb_t *sbh, uint32 bits, uint32 resetbits)
{
	sb_info_t *si;
	sbconfig_t *sb;
	volatile uint32 dummy;

	si = SB_INFO(sbh);
	ASSERT(GOODREGS(si->curmap));
	sb = REGS2SB(si->curmap);

	/*
	 * Must do the disable sequence first to work for arbitrary current core state.
	 */
	sb_core_disable(sbh, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */

	/* set reset while enabling the clock and forcing them on throughout the core */
	W_SBREG(si, &sb->sbtmstatelow, (SBTML_FGC | SBTML_CLK | SBTML_RESET | bits | resetbits));
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(1);

	/* clear any pending serror and inband/timeout error state */
	if (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_SERR) {
		W_SBREG(si, &sb->sbtmstatehigh, 0);
	}
	if ((dummy = R_SBREG(si, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
		AND_SBREG(si, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
	}

	/* clear reset and allow it to propagate throughout the core */
	W_SBREG(si, &sb->sbtmstatelow, (SBTML_FGC | SBTML_CLK | bits));
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(1);

	/* leave clock enabled */
	W_SBREG(si, &sb->sbtmstatelow, (SBTML_CLK | bits));
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(1);
}
1944
/* Fix up the current core's initiator request/service timeouts.
 * Only applies on a PCI bus with a PCI (not PCIE) bridge core of rev < 5;
 * the timeout values chosen depend on whether the focused core is the PCI
 * core itself.
 */
void
sb_core_tofixup(sb_t *sbh)
{
	sb_info_t *si;
	sbconfig_t *sb;

	si = SB_INFO(sbh);

	/* skip: not PCI bus, or PCIE bridge, or new enough PCI bridge */
	if ((BUSTYPE(si->sb.bustype) != PCI_BUS) || PCIE(si) ||
	    (PCI(si) && (si->sb.buscorerev >= 5)))
		return;

	ASSERT(GOODREGS(si->curmap));
	sb = REGS2SB(si->curmap);

	if (BUSTYPE(si->sb.bustype) == SB_BUS) {
		SET_SBREG(si, &sb->sbimconfiglow,
		          SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
		          (0x5 << SBIMCL_RTO_SHIFT) | 0x3);
	} else {
		if (sb_coreid(sbh) == SB_PCI) {
			SET_SBREG(si, &sb->sbimconfiglow,
			          SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
			          (0x3 << SBIMCL_RTO_SHIFT) | 0x2);
		} else {
			/* non-PCI cores get their initiator timeouts disabled */
			SET_SBREG(si, &sb->sbimconfiglow, (SBIMCL_RTO_MASK | SBIMCL_STO_MASK), 0);
		}
	}

	/* imconfig changes are buffered; broadcast them to take effect */
	sb_commit(sbh);
}
1976
1977/*
1978 * Set the initiator timeout for the "master core".
1979 * The master core is defined to be the core in control
1980 * of the chip and so it issues accesses to non-memory
1981 * locations (Because of dma *any* core can access memeory).
1982 *
1983 * The routine uses the bus to decide who is the master:
1984 *	SB_BUS => mips
1985 *	JTAG_BUS => chipc
1986 *	PCI_BUS => pci or pcie
1987 *	PCMCIA_BUS => pcmcia
1988 *	SDIO_BUS => pcmcia
1989 *
1990 * This routine exists so callers can disable initiator
1991 * timeouts so accesses to very slow devices like otp
1992 * won't cause an abort. The routine allows arbitrary
1993 * settings of the service and request timeouts, though.
1994 *
1995 * Returns the timeout state before changing it or -1
1996 * on error.
1997 */
1998
1999#define	TO_MASK	(SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
2000
/* Set the initiator timeout bits of core 'idx' (or of the bus master core
 * when idx == BADIDX) to 'to' and return the previous timeout bits, or
 * 0xffffffff on error. See the block comment above for the master-core
 * selection rules.
 */
uint32
sb_set_initiator_to(sb_t *sbh, uint32 to, uint idx)
{
	sb_info_t *si;
	uint origidx;
	uint intr_val = 0;
	uint32 tmp, ret = 0xffffffff;
	sbconfig_t *sb;

	si = SB_INFO(sbh);

	/* reject values outside the timeout field masks */
	if ((to & ~TO_MASK) != 0)
		return ret;

	/* Figure out the master core */
	if (idx == BADIDX) {
		switch (BUSTYPE(si->sb.bustype)) {
		case PCI_BUS:
			idx = si->sb.buscoreidx;
			break;
		case JTAG_BUS:
			idx = SB_CC_IDX;
			break;
		case PCMCIA_BUS:
			idx = sb_findcoreidx(sbh, SB_PCMCIA, 0);
			break;
		case SB_BUS:
			idx = sb_findcoreidx(sbh, SB_MIPS33, 0);
			break;
		default:
			ASSERT(0);
		}
		if (idx == BADIDX)
			return ret;
	}

	/* switch to the target core with interrupts masked, swap the
	 * timeout field, and restore the previous focus
	 */
	INTR_OFF(si, intr_val);
	origidx = sb_coreidx(sbh);

	sb = REGS2SB(sb_setcoreidx(sbh, idx));

	tmp = R_SBREG(si, &sb->sbimconfiglow);
	ret = tmp & TO_MASK;
	W_SBREG(si, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);

	/* imconfig changes are buffered; broadcast them to take effect */
	sb_commit(sbh);
	sb_setcoreidx(sbh, origidx);
	INTR_RESTORE(si, intr_val);
	return ret;
}
2051
/* Put the current core into reset, first rejecting and draining any
 * in-flight backplane transactions. 'bits' are core-specific tmstatelow
 * bits to leave set. Safe to call regardless of the core's current state.
 */
void
sb_core_disable(sb_t *sbh, uint32 bits)
{
	sb_info_t *si;
	volatile uint32 dummy;
	uint32 rej;
	sbconfig_t *sb;

	si = SB_INFO(sbh);

	ASSERT(GOODREGS(si->curmap));
	sb = REGS2SB(si->curmap);

	/* if core is already in reset, just return */
	if (R_SBREG(si, &sb->sbtmstatelow) & SBTML_RESET)
		return;

	/* reject value changed between sonics 2.2 and 2.3 */
	if (si->sb.sonicsrev == SONICS_2_2)
		rej = (1 << SBTML_REJ_SHIFT);
	else
		rej = (2 << SBTML_REJ_SHIFT);

	/* if clocks are not enabled, put into reset and return */
	if ((R_SBREG(si, &sb->sbtmstatelow) & SBTML_CLK) == 0)
		goto disable;

	/* set target reject and spin until busy is clear (preserve core-specific bits) */
	OR_SBREG(si, &sb->sbtmstatelow, rej);
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(1);
	SPINWAIT((R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
	if (R_SBREG(si, &sb->sbtmstatehigh) & SBTMH_BUSY)
		SB_ERROR(("%s: target state still busy\n", __FUNCTION__));

	/* if the core is an initiator, also reject outgoing transactions
	 * and wait for them to drain
	 */
	if (R_SBREG(si, &sb->sbidlow) & SBIDL_INIT) {
		OR_SBREG(si, &sb->sbimstate, SBIM_RJ);
		dummy = R_SBREG(si, &sb->sbimstate);
		OSL_DELAY(1);
		SPINWAIT((R_SBREG(si, &sb->sbimstate) & SBIM_BY), 100000);
	}

	/* set reset and reject while enabling the clocks */
	W_SBREG(si, &sb->sbtmstatelow, (bits | SBTML_FGC | SBTML_CLK | rej | SBTML_RESET));
	dummy = R_SBREG(si, &sb->sbtmstatelow);
	OSL_DELAY(10);

	/* don't forget to clear the initiator reject bit */
	if (R_SBREG(si, &sb->sbidlow) & SBIDL_INIT)
		AND_SBREG(si, &sb->sbimstate, ~SBIM_RJ);

disable:
	/* leave reset and reject asserted */
	W_SBREG(si, &sb->sbtmstatelow, (bits | rej | SBTML_RESET));
	OSL_DELAY(1);
}
2108
/* set chip watchdog reset timer to fire in 'ticks' backplane cycles;
 * 'ticks' == 0 cancels a pending watchdog
 */
void
sb_watchdog(sb_t *sbh, uint ticks)
{
	/* make sure we come up in fast clock mode; or if clearing, clear clock */
	if (ticks)
		sb_clkctl_clk(sbh, CLK_FAST);
	else
		sb_clkctl_clk(sbh, CLK_DYNAMIC);

#if defined(BCM4328)
	/* 4328: keep the ROM powered across the watchdog reset so the chip
	 * can reboot from it
	 */
	if (sbh->chip == BCM4328_CHIP_ID && ticks != 0)
		sb_corereg(sbh, SB_CC_IDX, OFFSETOF(chipcregs_t, min_res_mask),
		           PMURES_BIT(RES4328_ROM_SWITCH),
		           PMURES_BIT(RES4328_ROM_SWITCH));
#endif

	/* instant NMI */
	sb_corereg(sbh, SB_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
}
2129
2130/* initialize the pcmcia core */
2131void
2132sb_pcmcia_init(sb_t *sbh)
2133{
2134	sb_info_t *si;
2135	uint8 cor = 0;
2136
2137	si = SB_INFO(sbh);
2138
2139	/* enable d11 mac interrupts */
2140	OSL_PCMCIA_READ_ATTR(si->osh, PCMCIA_FCR0 + PCMCIA_COR, &cor, 1);
2141	cor |= COR_IRQEN | COR_FUNEN;
2142	OSL_PCMCIA_WRITE_ATTR(si->osh, PCMCIA_FCR0 + PCMCIA_COR, &cor, 1);
2143
2144}
2145
2146
/* Apply the PCI/PCIE bring-up workarounds when the device comes up */
void
BCMINITFN(sb_pci_up)(sb_t *sbh)
{
	sb_info_t *si;

	si = SB_INFO(sbh);

	/* if not pci bus, we're done */
	if (BUSTYPE(si->sb.bustype) != PCI_BUS)
		return;

	/* WAR for hw bug 32414: force HT clock while up */
	if (FORCEHT_WAR32414(si))
		sb_war32414_forceHT(sbh, 1);

	/* disable CLKREQ while the device is active (ASPM WARs / pr42780) */
	if (PCIE_ASPMWARS(si) || si->sb.pr42780)
		sb_pcieclkreq(sbh, 1, 0);

	/* 4311 rev 2 / 4312 rev 0: set the d11 core initiator timeout */
	if (PCIE(si) &&
	    (((si->sb.chip == BCM4311_CHIP_ID) && (si->sb.chiprev == 2)) ||
	     ((si->sb.chip == BCM4312_CHIP_ID) && (si->sb.chiprev == 0))))
		sb_set_initiator_to((void *)si, 0x3, sb_findcoreidx((void *)si, SB_D11, 0));

}
2170
/* Unconfigure and/or apply various WARs when system is going to sleep mode */
void
BCMUNINITFN(sb_pci_sleep)(sb_t *sbh)
{
	sb_info_t *si;
	uint32 w;
	si = SB_INFO(sbh);

	/* only needed on PCIE devices that carry the ASPM workarounds */
	if (!PCIE(si) || !PCIE_ASPMWARS(si))
		return;

	/* clear the ASPM L1 enable bit in the Link Control register */
	w = OSL_PCI_READ_CONFIG(si->osh, si->pciecap_lcreg_offset, sizeof(uint32));
	w &= ~PCIE_CAP_LCREG_ASPML1;
	OSL_PCI_WRITE_CONFIG(si->osh, si->pciecap_lcreg_offset, sizeof(uint32), w);
}
2187
/* Unconfigure and/or apply various WARs when going down */
void
BCMINITFN(sb_pci_down)(sb_t *sbh)
{
	sb_info_t *si;

	si = SB_INFO(sbh);

	/* if not pci bus, we're done */
	if (BUSTYPE(si->sb.bustype) != PCI_BUS)
		return;

	/* release the force-HT WAR applied in sb_pci_up() */
	if (FORCEHT_WAR32414(si))
		sb_war32414_forceHT(sbh, 0);

	/* re-enable CLKREQ for power saving while down; the pr42767 path
	 * additionally retires its one-shot WAR flag
	 */
	if (si->pr42767_war) {
		sb_pcieclkreq(sbh, 1, 1);
		si->pr42767_war = FALSE;
	} else if (si->sb.pr42780) {
		sb_pcieclkreq(sbh, 1, 1);
	}
}
2210
/* WAR for hw bug 42767: toggle CLKREQ advertisement in the sprom shadow
 * registers of the PCIE core. Unless the WAR has been overridden
 * (pcie_war_ovr), CLKREQ support is advertised and the pr42767/pr42780
 * flags are set so sb_pci_up()/sb_pci_down() manage the CLKREQ state.
 */
static void
BCMINITFN(sb_war42767_clkreq)(sb_t *sbh)
{
	sbpcieregs_t *pcieregs;
	uint16 val16, *reg16;
	sb_info_t *si;

	si = SB_INFO(sbh);

	/* if not pcie bus, we're done */
	if (!PCIE(si) || !PCIE_ASPMWARS(si))
		return;

	/* NOTE(review): leaves the focus on the bus core; callers appear to
	 * be responsible for restoring the core index -- confirm
	 */
	pcieregs = (sbpcieregs_t*) sb_setcoreidx(sbh, si->sb.buscoreidx);
	reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET];
	val16 = R_REG(si->osh, reg16);
	/* if clockreq is not advertized advertize it */
	if (!si->pcie_war_ovr) {
		val16 |= SRSH_CLKREQ_ENB;
		si->pr42767_war = TRUE;

		si->sb.pr42780 = TRUE;
	} else
		val16 &= ~SRSH_CLKREQ_ENB;
	W_REG(si->osh, reg16, val16);
}
2237
/* WAR for hw bug 42767: turn off the serdes PLL frequency-detect enable
 * bit if it is set. Only applies to PCIE devices with the ASPM WARs.
 */
static void
BCMINITFN(sb_war42767)(sb_t *sbh)
{
	uint32 w = 0;
	sb_info_t *si;

	si = SB_INFO(sbh);

	/* if not pcie bus, we're done */
	if (!PCIE(si) || !PCIE_ASPMWARS(si))
		return;

	/* read-modify-write the serdes PLL control over mdio */
	sb_pcie_mdioread(si, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, &w);
	if (w & PLL_CTRL_FREQDET_EN) {
		w &= ~PLL_CTRL_FREQDET_EN;
		sb_pcie_mdiowrite(si, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, w);
	}
}
2256
2257/*
2258 * Configure the pci core for pci client (NIC) action
2259 * coremask is the bitvec of cores by index to be enabled.
2260 */
2261void
2262BCMINITFN(sb_pci_setup)(sb_t *sbh, uint coremask)
2263{
2264	sb_info_t *si;
2265	sbconfig_t *sb;
2266	sbpciregs_t *pciregs;
2267	uint32 sbflag;
2268	uint32 w;
2269	uint idx;
2270
2271	si = SB_INFO(sbh);
2272
2273	/* if not pci bus, we're done */
2274	if (BUSTYPE(si->sb.bustype) != PCI_BUS)
2275		return;
2276
2277	ASSERT(PCI(si) || PCIE(si));
2278	ASSERT(si->sb.buscoreidx != BADIDX);
2279
2280	/* get current core index */
2281	idx = si->curidx;
2282
2283	/* we interrupt on this backplane flag number */
2284	ASSERT(GOODREGS(si->curmap));
2285	sb = REGS2SB(si->curmap);
2286	sbflag = R_SBREG(si, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
2287
2288	/* switch over to pci core */
2289	pciregs = (sbpciregs_t*) sb_setcoreidx(sbh, si->sb.buscoreidx);
2290	sb = REGS2SB(pciregs);
2291
2292	/*
2293	 * Enable sb->pci interrupts.  Assume
2294	 * PCI rev 2.3 support was added in pci core rev 6 and things changed..
2295	 */
2296	if (PCIE(si) || (PCI(si) && ((si->sb.buscorerev) >= 6))) {
2297		/* pci config write to set this core bit in PCIIntMask */
2298		w = OSL_PCI_READ_CONFIG(si->osh, PCI_INT_MASK, sizeof(uint32));
2299		w |= (coremask << PCI_SBIM_SHIFT);
2300		OSL_PCI_WRITE_CONFIG(si->osh, PCI_INT_MASK, sizeof(uint32), w);
2301	} else {
2302		/* set sbintvec bit for our flag number */
2303		OR_SBREG(si, &sb->sbintvec, (1 << sbflag));
2304	}
2305
2306	if (PCI(si)) {
2307		OR_REG(si->osh, &pciregs->sbtopci2, (SBTOPCI_PREF|SBTOPCI_BURST));
2308		if (si->sb.buscorerev >= 11)
2309			OR_REG(si->osh, &pciregs->sbtopci2, SBTOPCI_RC_READMULTI);
2310		if (si->sb.buscorerev < 5) {
2311			SET_SBREG(si, &sb->sbimconfiglow, SBIMCL_RTO_MASK | SBIMCL_STO_MASK,
2312				(0x3 << SBIMCL_RTO_SHIFT) | 0x2);
2313			sb_commit(sbh);
2314		}
2315	}
2316
2317	/* PCIE workarounds */
2318	if (PCIE(si)) {
2319		if ((si->sb.buscorerev == 0) || (si->sb.buscorerev == 1)) {
2320			w = sb_pcie_readreg((void *)(uintptr)sbh,
2321				(void *)(uintptr)PCIE_PCIEREGS,
2322				PCIE_TLP_WORKAROUNDSREG);
2323			w |= 0x8;
2324			sb_pcie_writereg((void *)(uintptr)sbh,
2325				(void *)(uintptr)PCIE_PCIEREGS,
2326				PCIE_TLP_WORKAROUNDSREG, w);
2327		}
2328
2329		if (si->sb.buscorerev == 1) {
2330			w = sb_pcie_readreg((void *)(uintptr)sbh,
2331				(void *)(uintptr)PCIE_PCIEREGS,
2332				PCIE_DLLP_LCREG);
2333			w |= (0x40);
2334			sb_pcie_writereg((void *)(uintptr)sbh,
2335				(void *)(uintptr)PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
2336		}
2337
2338		if (si->sb.buscorerev == 0)
2339			sb_war30841(si);
2340
2341		if ((si->sb.buscorerev >= 3) && (si->sb.buscorerev <= 5)) {
2342			w = sb_pcie_readreg((void *)(uintptr)sbh,
2343				(void *)(uintptr)PCIE_PCIEREGS,
2344				PCIE_DLLP_PMTHRESHREG);
2345			w &= ~(PCIE_L1THRESHOLDTIME_MASK);
2346			w |= (PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT);
2347			sb_pcie_writereg((void *)(uintptr)sbh, (void *)(uintptr)PCIE_PCIEREGS,
2348				PCIE_DLLP_PMTHRESHREG, w);
2349
2350			sb_war43448(sbh);
2351
2352			sb_war42767(sbh);
2353
2354			sb_war43448_aspm(sbh);
2355			sb_war42767_clkreq(sbh);
2356		}
2357	}
2358
2359	/* switch back to previous core */
2360	sb_setcoreidx(sbh, idx);
2361}
2362
2363uint32
2364sb_base(uint32 admatch)
2365{
2366	uint32 base;
2367	uint type;
2368
2369	type = admatch & SBAM_TYPE_MASK;
2370	ASSERT(type < 3);
2371
2372	base = 0;
2373
2374	if (type == 0) {
2375		base = admatch & SBAM_BASE0_MASK;
2376	} else if (type == 1) {
2377		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
2378		base = admatch & SBAM_BASE1_MASK;
2379	} else if (type == 2) {
2380		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
2381		base = admatch & SBAM_BASE2_MASK;
2382	}
2383
2384	return (base);
2385}
2386
2387uint32
2388sb_size(uint32 admatch)
2389{
2390	uint32 size;
2391	uint type;
2392
2393	type = admatch & SBAM_TYPE_MASK;
2394	ASSERT(type < 3);
2395
2396	size = 0;
2397
2398	if (type == 0) {
2399		size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
2400	} else if (type == 1) {
2401		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
2402		size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
2403	} else if (type == 2) {
2404		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
2405		size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
2406	}
2407
2408	return (size);
2409}
2410
2411/* return the core-type instantiation # of the current core */
2412uint
2413sb_coreunit(sb_t *sbh)
2414{
2415	sb_info_t *si;
2416	uint idx;
2417	uint coreid;
2418	uint coreunit;
2419	uint i;
2420
2421	si = SB_INFO(sbh);
2422	coreunit = 0;
2423
2424	idx = si->curidx;
2425
2426	ASSERT(GOODREGS(si->curmap));
2427	coreid = sb_coreid(sbh);
2428
2429	/* count the cores of our type */
2430	for (i = 0; i < idx; i++)
2431		if (si->coreid[i] == coreid)
2432			coreunit++;
2433
2434	return (coreunit);
2435}
2436
2437static uint32
2438BCMINITFN(factor6)(uint32 x)
2439{
2440	switch (x) {
2441	case CC_F6_2:	return 2;
2442	case CC_F6_3:	return 3;
2443	case CC_F6_4:	return 4;
2444	case CC_F6_5:	return 5;
2445	case CC_F6_6:	return 6;
2446	case CC_F6_7:	return 7;
2447	default:	return 0;
2448	}
2449}
2450
/* calculate the speed the SB would run at given a set of clockcontrol values */
uint32
BCMINITFN(sb_clock_rate)(uint32 pll_type, uint32 n, uint32 m)
{
	uint32 n1, n2, clock, m1, m2, m3, mc;

	/* split the N register into its two divider fields */
	n1 = n & CN_N1_MASK;
	n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;

	if (pll_type == PLL_TYPE6) {
		/* type 6 has only two fixed rates selected by one M bit */
		if (m & CC_T6_MMASK)
			return CC_T6_M1;
		else
			return CC_T6_M0;
	} else if ((pll_type == PLL_TYPE1) ||
	           (pll_type == PLL_TYPE3) ||
	           (pll_type == PLL_TYPE4) ||
	           (pll_type == PLL_TYPE7)) {
		n1 = factor6(n1);
		n2 += CC_F5_BIAS;
	} else if (pll_type == PLL_TYPE2) {
		n1 += CC_T2_BIAS;
		n2 += CC_T2_BIAS;
		ASSERT((n1 >= 2) && (n1 <= 7));
		ASSERT((n2 >= 5) && (n2 <= 23));
	} else if (pll_type == PLL_TYPE5) {
		/* type 5 is fixed at 100 MHz */
		return (100000000);
	} else
		ASSERT(0);
	/* PLL types 3 and 7 use BASE2 (25Mhz) */
	if ((pll_type == PLL_TYPE3) ||
	    (pll_type == PLL_TYPE7)) {
		clock =  CC_CLOCK_BASE2 * n1 * n2;
	} else
		clock = CC_CLOCK_BASE1 * n1 * n2;

	/* guard the divides below (also catches factor6() decode failure) */
	if (clock == 0)
		return 0;

	/* split the M register into its divider fields and mode-control bits */
	m1 = m & CC_M1_MASK;
	m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
	m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
	mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;

	if ((pll_type == PLL_TYPE1) ||
	    (pll_type == PLL_TYPE3) ||
	    (pll_type == PLL_TYPE4) ||
	    (pll_type == PLL_TYPE7)) {
		m1 = factor6(m1);
		if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
			m2 += CC_F5_BIAS;
		else
			m2 = factor6(m2);
		m3 = factor6(m3);

		/* mc selects which divider chain applies to the VCO clock */
		switch (mc) {
		case CC_MC_BYPASS:	return (clock);
		case CC_MC_M1:		return (clock / m1);
		case CC_MC_M1M2:	return (clock / (m1 * m2));
		case CC_MC_M1M2M3:	return (clock / (m1 * m2 * m3));
		case CC_MC_M1M3:	return (clock / (m1 * m3));
		default:		return (0);
		}
	} else {
		ASSERT(pll_type == PLL_TYPE2);

		m1 += CC_T2_BIAS;
		m2 += CC_T2M2_BIAS;
		m3 += CC_T2_BIAS;
		ASSERT((m1 >= 2) && (m1 <= 7));
		ASSERT((m2 >= 3) && (m2 <= 10));
		ASSERT((m3 >= 2) && (m3 <= 7));

		/* type 2: each divider is applied unless its bypass bit is set */
		if ((mc & CC_T2MC_M1BYP) == 0)
			clock /= m1;
		if ((mc & CC_T2MC_M2BYP) == 0)
			clock /= m2;
		if ((mc & CC_T2MC_M3BYP) == 0)
			clock /= m3;

		return (clock);
	}
}
2534
/* returns the current speed the SB is running at */
uint32
BCMINITFN(sb_clock)(sb_t *sbh)
{
	sb_info_t *si;
	chipcregs_t *cc;
	uint32 n, m;
	uint idx;
	uint32 pll_type, rate;
	uint intr_val = 0;

	si = SB_INFO(sbh);
	idx = si->curidx;
	pll_type = PLL_TYPE1;

	/* block driver interrupts while we temporarily switch cores */
	INTR_OFF(si, intr_val);

	cc = (chipcregs_t *)sb_setcore(sbh, SB_CC, 0);
	ASSERT(cc);

	/* PMU chips: the PMU knows the CPU clock directly */
	if (sbh->cccaps & CC_CAP_PMU) {
		rate = sb_pmu_cpu_clock(sbh, si->osh);
		goto exit;
	}

	/* otherwise derive the rate from the chipcommon clockcontrol registers */
	pll_type = sbh->cccaps & CC_CAP_PLL_MASK;
	n = R_REG(si->osh, &cc->clockcontrol_n);
	/* the relevant M register depends on the PLL type */
	if (pll_type == PLL_TYPE6)
		m = R_REG(si->osh, &cc->clockcontrol_m3);
	else if (pll_type == PLL_TYPE3)
		m = R_REG(si->osh, &cc->clockcontrol_m2);
	else
		m = R_REG(si->osh, &cc->clockcontrol_sb);

	/* calculate rate */
	rate = sb_clock_rate(pll_type, n, m);

	if (pll_type == PLL_TYPE3)
		rate = rate / 2;

exit:
	/* switch back to previous core */
	sb_setcoreidx(sbh, idx);

	INTR_RESTORE(si, intr_val);

	return rate;
}
2583
2584uint32
2585BCMINITFN(sb_alp_clock)(sb_t *sbh)
2586{
2587	uint32 clock = ALP_CLOCK;
2588
2589	if (sbh->cccaps & CC_CAP_PMU)
2590		clock = sb_pmu_alp_clock(sbh, sb_osh(sbh));
2591
2592	return clock;
2593}
2594
2595/* change logical "focus" to the gpio core for optimized access */
2596void*
2597sb_gpiosetcore(sb_t *sbh)
2598{
2599	sb_info_t *si;
2600
2601	si = SB_INFO(sbh);
2602
2603	return (sb_setcoreidx(sbh, SB_CC_IDX));
2604}
2605
2606/* mask&set gpiocontrol bits */
2607uint32
2608sb_gpiocontrol(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
2609{
2610	sb_info_t *si;
2611	uint regoff;
2612
2613	si = SB_INFO(sbh);
2614	regoff = 0;
2615
2616	/* gpios could be shared on router platforms
2617	 * ignore reservation if it's high priority (e.g., test apps)
2618	 */
2619	if ((priority != GPIO_HI_PRIORITY) &&
2620	    (BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2621		mask = priority ? (sb_gpioreservation & mask) :
2622			((sb_gpioreservation | mask) & ~(sb_gpioreservation));
2623		val &= mask;
2624	}
2625
2626	regoff = OFFSETOF(chipcregs_t, gpiocontrol);
2627	return (sb_corereg(sbh, SB_CC_IDX, regoff, mask, val));
2628}
2629
2630/* mask&set gpio output enable bits */
2631uint32
2632sb_gpioouten(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
2633{
2634	sb_info_t *si;
2635	uint regoff;
2636
2637	si = SB_INFO(sbh);
2638	regoff = 0;
2639
2640	/* gpios could be shared on router platforms
2641	 * ignore reservation if it's high priority (e.g., test apps)
2642	 */
2643	if ((priority != GPIO_HI_PRIORITY) &&
2644	    (BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2645		mask = priority ? (sb_gpioreservation & mask) :
2646			((sb_gpioreservation | mask) & ~(sb_gpioreservation));
2647		val &= mask;
2648	}
2649
2650	regoff = OFFSETOF(chipcregs_t, gpioouten);
2651	return (sb_corereg(sbh, SB_CC_IDX, regoff, mask, val));
2652}
2653
2654/* mask&set gpio output bits */
2655uint32
2656sb_gpioout(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
2657{
2658	sb_info_t *si;
2659	uint regoff;
2660
2661	si = SB_INFO(sbh);
2662	regoff = 0;
2663
2664	/* gpios could be shared on router platforms
2665	 * ignore reservation if it's high priority (e.g., test apps)
2666	 */
2667	if ((priority != GPIO_HI_PRIORITY) &&
2668	    (BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2669		mask = priority ? (sb_gpioreservation & mask) :
2670			((sb_gpioreservation | mask) & ~(sb_gpioreservation));
2671		val &= mask;
2672	}
2673
2674	regoff = OFFSETOF(chipcregs_t, gpioout);
2675	return (sb_corereg(sbh, SB_CC_IDX, regoff, mask, val));
2676}
2677
2678/* reserve one gpio */
2679uint32
2680sb_gpioreserve(sb_t *sbh, uint32 gpio_bitmask, uint8 priority)
2681{
2682	sb_info_t *si;
2683
2684	si = SB_INFO(sbh);
2685
2686	/* only cores on SB_BUS share GPIO's and only applcation users need to
2687	 * reserve/release GPIO
2688	 */
2689	if ((BUSTYPE(si->sb.bustype) != SB_BUS) || (!priority))  {
2690		ASSERT((BUSTYPE(si->sb.bustype) == SB_BUS) && (priority));
2691		return -1;
2692	}
2693	/* make sure only one bit is set */
2694	if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
2695		ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
2696		return -1;
2697	}
2698
2699	/* already reserved */
2700	if (sb_gpioreservation & gpio_bitmask)
2701		return -1;
2702	/* set reservation */
2703	sb_gpioreservation |= gpio_bitmask;
2704
2705	return sb_gpioreservation;
2706}
2707
2708/* release one gpio */
2709/*
2710 * releasing the gpio doesn't change the current value on the GPIO last write value
2711 * persists till some one overwrites it
2712*/
2713
2714uint32
2715sb_gpiorelease(sb_t *sbh, uint32 gpio_bitmask, uint8 priority)
2716{
2717	sb_info_t *si;
2718
2719	si = SB_INFO(sbh);
2720
2721	/* only cores on SB_BUS share GPIO's and only applcation users need to
2722	 * reserve/release GPIO
2723	 */
2724	if ((BUSTYPE(si->sb.bustype) != SB_BUS) || (!priority))  {
2725		ASSERT((BUSTYPE(si->sb.bustype) == SB_BUS) && (priority));
2726		return -1;
2727	}
2728	/* make sure only one bit is set */
2729	if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
2730		ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
2731		return -1;
2732	}
2733
2734	/* already released */
2735	if (!(sb_gpioreservation & gpio_bitmask))
2736		return -1;
2737
2738	/* clear reservation */
2739	sb_gpioreservation &= ~gpio_bitmask;
2740
2741	return sb_gpioreservation;
2742}
2743
2744/* return the current gpioin register value */
2745uint32
2746sb_gpioin(sb_t *sbh)
2747{
2748	sb_info_t *si;
2749	uint regoff;
2750
2751	si = SB_INFO(sbh);
2752	regoff = 0;
2753
2754	regoff = OFFSETOF(chipcregs_t, gpioin);
2755	return (sb_corereg(sbh, SB_CC_IDX, regoff, 0, 0));
2756}
2757
2758/* mask&set gpio interrupt polarity bits */
2759uint32
2760sb_gpiointpolarity(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
2761{
2762	sb_info_t *si;
2763	uint regoff;
2764
2765	si = SB_INFO(sbh);
2766	regoff = 0;
2767
2768	/* gpios could be shared on router platforms */
2769	if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2770		mask = priority ? (sb_gpioreservation & mask) :
2771			((sb_gpioreservation | mask) & ~(sb_gpioreservation));
2772		val &= mask;
2773	}
2774
2775	regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
2776	return (sb_corereg(sbh, SB_CC_IDX, regoff, mask, val));
2777}
2778
2779/* mask&set gpio interrupt mask bits */
2780uint32
2781sb_gpiointmask(sb_t *sbh, uint32 mask, uint32 val, uint8 priority)
2782{
2783	sb_info_t *si;
2784	uint regoff;
2785
2786	si = SB_INFO(sbh);
2787	regoff = 0;
2788
2789	/* gpios could be shared on router platforms */
2790	if ((BUSTYPE(si->sb.bustype) == SB_BUS) && (val || mask)) {
2791		mask = priority ? (sb_gpioreservation & mask) :
2792			((sb_gpioreservation | mask) & ~(sb_gpioreservation));
2793		val &= mask;
2794	}
2795
2796	regoff = OFFSETOF(chipcregs_t, gpiointmask);
2797	return (sb_corereg(sbh, SB_CC_IDX, regoff, mask, val));
2798}
2799
2800/* assign the gpio to an led */
2801uint32
2802sb_gpioled(sb_t *sbh, uint32 mask, uint32 val)
2803{
2804	sb_info_t *si;
2805
2806	si = SB_INFO(sbh);
2807	if (si->sb.ccrev < 16)
2808		return -1;
2809
2810	/* gpio led powersave reg */
2811	return (sb_corereg(sbh, SB_CC_IDX, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val));
2812}
2813
2814/* mask&set gpio timer val */
2815uint32
2816sb_gpiotimerval(sb_t *sbh, uint32 mask, uint32 gpiotimerval)
2817{
2818	sb_info_t *si;
2819	si = SB_INFO(sbh);
2820
2821	if (si->sb.ccrev < 16)
2822		return -1;
2823
2824	return (sb_corereg(sbh, SB_CC_IDX,
2825		OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval));
2826}
2827
2828uint32
2829sb_gpiopull(sb_t *sbh, bool updown, uint32 mask, uint32 val)
2830{
2831	sb_info_t *si;
2832	uint offs;
2833
2834	si = SB_INFO(sbh);
2835	if (si->sb.ccrev < 20)
2836		return -1;
2837
2838	offs = (updown ? OFFSETOF(chipcregs_t, gpiopulldown) : OFFSETOF(chipcregs_t, gpiopullup));
2839	return (sb_corereg(sbh, SB_CC_IDX, offs, mask, val));
2840}
2841
2842uint32
2843sb_gpioevent(sb_t *sbh, uint regtype, uint32 mask, uint32 val)
2844{
2845	sb_info_t *si;
2846	uint offs;
2847
2848	si = SB_INFO(sbh);
2849	if (si->sb.ccrev < 11)
2850		return -1;
2851
2852	if (regtype == GPIO_REGEVT)
2853		offs = OFFSETOF(chipcregs_t, gpioevent);
2854	else if (regtype == GPIO_REGEVT_INTMSK)
2855		offs = OFFSETOF(chipcregs_t, gpioeventintmask);
2856	else if (regtype == GPIO_REGEVT_INTPOL)
2857		offs = OFFSETOF(chipcregs_t, gpioeventintpolarity);
2858	else
2859		return -1;
2860
2861	return (sb_corereg(sbh, SB_CC_IDX, offs, mask, val));
2862}
2863
/* Register a gpio event/level handler.  Allocates a list node, pushes it on
 * the head of si->gpioh_head, and returns it as an opaque handle for
 * sb_gpio_handler_unregister().  Returns NULL on old chips (ccrev < 11)
 * or allocation failure.
 */
void*
BCMINITFN(sb_gpio_handler_register)(sb_t *sbh, uint32 event,
	bool level, gpio_handler_t cb, void *arg)
{
	sb_info_t *si;
	gpioh_item_t *gi;

	ASSERT(event);
	ASSERT(cb);

	si = SB_INFO(sbh);
	/* gpio event support requires chipcommon rev >= 11 */
	if (si->sb.ccrev < 11)
		return NULL;

	if ((gi = MALLOC(si->osh, sizeof(gpioh_item_t))) == NULL)
		return NULL;

	bzero(gi, sizeof(gpioh_item_t));
	gi->event = event;
	gi->handler = cb;
	gi->arg = arg;
	gi->level = level;	/* TRUE: level-triggered, FALSE: edge (see handler_process) */

	/* push onto the singly-linked handler list */
	gi->next = si->gpioh_head;
	si->gpioh_head = gi;

	return (void*)(gi);
}
2892
/* Unregister a gpio handler previously returned by
 * sb_gpio_handler_register(); unlinks it from si->gpioh_head and frees it.
 * Asserts (debug builds) if the handle is not found in the list.
 */
void
BCMINITFN(sb_gpio_handler_unregister)(sb_t *sbh, void* gpioh)
{
	sb_info_t *si;
	gpioh_item_t *p, *n;

	si = SB_INFO(sbh);
	if (si->sb.ccrev < 11)
		return;

	ASSERT(si->gpioh_head);
	/* head-of-list removal */
	if ((void*)si->gpioh_head == gpioh) {
		si->gpioh_head = si->gpioh_head->next;
		MFREE(si->osh, gpioh, sizeof(gpioh_item_t));
		return;
	}
	else {
		/* walk with a trailing pointer so we can splice the node out */
		p = si->gpioh_head;
		n = p->next;
		while (n) {
			if ((void*)n == gpioh) {
				p->next = n->next;
				MFREE(si->osh, gpioh, sizeof(gpioh_item_t));
				return;
			}
			p = n;
			n = n->next;
		}
	}

	ASSERT(0); /* Not found in list */
}
2925
/* Dispatch gpio events: snapshot the level (gpioin) and edge (gpioevent)
 * status, invoke every registered handler whose event mask matches, then
 * write back the edge bits to clear the edge-triggered status.
 */
void
sb_gpio_handler_process(sb_t *sbh)
{
	sb_info_t *si;
	gpioh_item_t *h;
	uint32 status;
	uint32 level = sb_gpioin(sbh);
	uint32 edge = sb_gpioevent(sbh, GPIO_REGEVT, 0, 0);

	si = SB_INFO(sbh);
	for (h = si->gpioh_head; h != NULL; h = h->next) {
		if (h->handler) {
			/* each handler sees level or edge status per its registration */
			status = (h->level ? level : edge);

			if (status & h->event)
				h->handler(status, h->arg);
		}
	}

	sb_gpioevent(sbh, GPIO_REGEVT, edge, edge); /* clear edge-trigger status */
}
2947
2948uint32
2949sb_gpio_int_enable(sb_t *sbh, bool enable)
2950{
2951	sb_info_t *si;
2952	uint offs;
2953
2954	si = SB_INFO(sbh);
2955	if (si->sb.ccrev < 11)
2956		return -1;
2957
2958	offs = OFFSETOF(chipcregs_t, intmask);
2959	return (sb_corereg(sbh, SB_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0)));
2960}
2961
2962
/* return the slow clock source - LPO, XTAL, or PCI
 * Caller must already have the chipcommon core selected.
 */
static uint
sb_slowclk_src(sb_info_t *si)
{
	chipcregs_t *cc;


	ASSERT(sb_coreid(&si->sb) == SB_CC);

	if (si->sb.ccrev < 6) {
		/* pre-rev6: the source is indicated by a PCI config gpio bit */
		if ((BUSTYPE(si->sb.bustype) == PCI_BUS) &&
		    (OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUT, sizeof(uint32)) &
		     PCI_CFG_GPIO_SCS))
			return (SCC_SS_PCI);
		else
			return (SCC_SS_XTAL);
	} else if (si->sb.ccrev < 10) {
		/* rev 6-9: read the source field from slow_clk_ctl */
		cc = (chipcregs_t*) sb_setcoreidx(&si->sb, si->curidx);
		return (R_REG(si->osh, &cc->slow_clk_ctl) & SCC_SS_MASK);
	} else	/* Insta-clock */
		return (SCC_SS_XTAL);
}
2985
/* return the ILP (slowclock) min or max frequency
 * max_freq selects the upper (TRUE) or lower (FALSE) bound of the
 * source's frequency range.  Caller must have chipcommon selected.
 */
static uint
sb_slowclk_freq(sb_info_t *si, bool max_freq)
{
	chipcregs_t *cc;
	uint32 slowclk;
	uint div;


	ASSERT(sb_coreid(&si->sb) == SB_CC);

	cc = (chipcregs_t*) sb_setcoreidx(&si->sb, si->curidx);

	/* shouldn't be here unless we've established the chip has dynamic clk control */
	ASSERT(R_REG(si->osh, &cc->capabilities) & CC_CAP_PWR_CTL);

	slowclk = sb_slowclk_src(si);
	if (si->sb.ccrev < 6) {
		/* pre-rev6: fixed dividers (PCI/64, XTAL/32) */
		if (slowclk == SCC_SS_PCI)
			return (max_freq ? (PCIMAXFREQ / 64) : (PCIMINFREQ / 64));
		else
			return (max_freq ? (XTALMAXFREQ / 32) : (XTALMINFREQ / 32));
	} else if (si->sb.ccrev < 10) {
		/* rev 6-9: divider is programmable in slow_clk_ctl */
		div = 4 * (((R_REG(si->osh, &cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
		if (slowclk == SCC_SS_LPO)
			return (max_freq ? LPOMAXFREQ : LPOMINFREQ);
		else if (slowclk == SCC_SS_XTAL)
			return (max_freq ? (XTALMAXFREQ / div) : (XTALMINFREQ / div));
		else if (slowclk == SCC_SS_PCI)
			return (max_freq ? (PCIMAXFREQ / div) : (PCIMINFREQ / div));
		else
			ASSERT(0);
	} else {
		/* Chipc rev 10 is InstaClock */
		div = R_REG(si->osh, &cc->system_clk_ctl) >> SYCC_CD_SHIFT;
		div = 4 * (div + 1);
		return (max_freq ? XTALMAXFREQ : (XTALMINFREQ / div));
	}
	return (0);
}
3026
/* Program the chipcommon pll_on_delay and fref_sel_delay registers in
 * units of slow-clock ticks, derived from the PLL_DELAY/FREF_DELAY
 * microsecond constants and the slow clock frequency.
 */
static void
BCMINITFN(sb_clkctl_setdelay)(sb_info_t *si, void *chipcregs)
{
	chipcregs_t * cc;
	uint slowmaxfreq, pll_delay, slowclk;
	uint pll_on_delay, fref_sel_delay;

	pll_delay = PLL_DELAY;

	/* If the slow clock is not sourced by the xtal then add the xtal_on_delay
	 * since the xtal will also be powered down by dynamic clk control logic.
	 */

	slowclk = sb_slowclk_src(si);
	if (slowclk != SCC_SS_XTAL)
		pll_delay += XTAL_ON_DELAY;

	/* Starting with 4318 it is ILP that is used for the delays */
	slowmaxfreq = sb_slowclk_freq(si, (si->sb.ccrev >= 10) ? FALSE : TRUE);

	/* convert microseconds to slow-clock ticks, rounding up */
	pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
	fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;

	cc = (chipcregs_t *)chipcregs;
	W_REG(si->osh, &cc->pll_on_delay, pll_on_delay);
	W_REG(si->osh, &cc->fref_sel_delay, fref_sel_delay);
}
3054
/* initialize power control delay registers */
void
BCMINITFN(sb_clkctl_init)(sb_t *sbh)
{
	sb_info_t *si;
	uint origidx;
	chipcregs_t *cc;

	si = SB_INFO(sbh);

	/* remember the current core so we can restore it on exit */
	origidx = si->curidx;

	if ((cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0)) == NULL)
		return;

	/* 4321 A0/A1 need a chip-control default written first */
	if ((si->sb.chip == BCM4321_CHIP_ID) && (si->sb.chiprev < 2))
		W_REG(si->osh, &cc->chipcontrol,
		      (si->sb.chiprev == 0) ? CHIPCTRL_4321A0_DEFAULT : CHIPCTRL_4321A1_DEFAULT);

	/* nothing else to do without power (dynamic clock) control */
	if (!(R_REG(si->osh, &cc->capabilities) & CC_CAP_PWR_CTL))
		goto done;

	/* set all Instaclk chip ILP to 1 MHz */
	if (si->sb.ccrev >= 10)
		SET_REG(si->osh, &cc->system_clk_ctl, SYCC_CD_MASK,
		        (ILP_DIV_1MHZ << SYCC_CD_SHIFT));

	sb_clkctl_setdelay(si, (void *)(uintptr)cc);

done:
	sb_setcoreidx(sbh, origidx);
}
3087
/* return the value suitable for writing to the dot11 core FAST_PWRUP_DELAY register */
uint16
BCMINITFN(sb_clkctl_fast_pwrup_delay)(sb_t *sbh)
{
	sb_info_t *si;
	uint origidx;
	chipcregs_t *cc;
	uint slowminfreq;
	uint16 fpdelay;
	uint intr_val = 0;

	si = SB_INFO(sbh);
	fpdelay = 0;
	origidx = si->curidx;

	/* block driver interrupts while we temporarily switch cores */
	INTR_OFF(si, intr_val);

	if ((cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0)) == NULL)
		goto done;

	/* PMU chips: the PMU provides the delay directly */
	if (sbh->cccaps & CC_CAP_PMU) {
		fpdelay = sb_pmu_fast_pwrup_delay(sbh, si->osh);
		goto done;
	}

	/* returns 0 when the chip has no power (dynamic clock) control */
	if (!(sbh->cccaps & CC_CAP_PWR_CTL))
		goto done;

	/* convert (pll_on_delay + 2) slow-clock ticks to microseconds, rounding up */
	slowminfreq = sb_slowclk_freq(si, FALSE);
	fpdelay = (((R_REG(si->osh, &cc->pll_on_delay) + 2) * 1000000) +
	           (slowminfreq - 1)) / slowminfreq;

done:
	sb_setcoreidx(sbh, origidx);
	INTR_RESTORE(si, intr_val);
	return (fpdelay);
}
3125
3126/* turn primary xtal and/or pll off/on */
3127int
3128sb_clkctl_xtal(sb_t *sbh, uint what, bool on)
3129{
3130	sb_info_t *si;
3131	uint32 in, out, outen;
3132
3133	si = SB_INFO(sbh);
3134
3135	switch (BUSTYPE(si->sb.bustype)) {
3136
3137
3138		case PCMCIA_BUS:
3139			return (0);
3140
3141
3142		case PCI_BUS:
3143
3144			/* pcie core doesn't have any mapping to control the xtal pu */
3145			if (PCIE(si))
3146				return -1;
3147
3148			in = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_IN, sizeof(uint32));
3149			out = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUT, sizeof(uint32));
3150			outen = OSL_PCI_READ_CONFIG(si->osh, PCI_GPIO_OUTEN, sizeof(uint32));
3151
3152			/*
3153			 * Avoid glitching the clock if GPRS is already using it.
3154			 * We can't actually read the state of the PLLPD so we infer it
3155			 * by the value of XTAL_PU which *is* readable via gpioin.
3156			 */
3157			if (on && (in & PCI_CFG_GPIO_XTAL))
3158				return (0);
3159
3160			if (what & XTAL)
3161				outen |= PCI_CFG_GPIO_XTAL;
3162			if (what & PLL)
3163				outen |= PCI_CFG_GPIO_PLL;
3164
3165			if (on) {
3166				/* turn primary xtal on */
3167				if (what & XTAL) {
3168					out |= PCI_CFG_GPIO_XTAL;
3169					if (what & PLL)
3170						out |= PCI_CFG_GPIO_PLL;
3171					OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT,
3172					                     sizeof(uint32), out);
3173					OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUTEN,
3174					                     sizeof(uint32), outen);
3175					OSL_DELAY(XTAL_ON_DELAY);
3176				}
3177
3178				/* turn pll on */
3179				if (what & PLL) {
3180					out &= ~PCI_CFG_GPIO_PLL;
3181					OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT,
3182					                     sizeof(uint32), out);
3183					OSL_DELAY(2000);
3184				}
3185			} else {
3186				if (what & XTAL)
3187					out &= ~PCI_CFG_GPIO_XTAL;
3188				if (what & PLL)
3189					out |= PCI_CFG_GPIO_PLL;
3190				OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUT, sizeof(uint32), out);
3191				OSL_PCI_WRITE_CONFIG(si->osh, PCI_GPIO_OUTEN, sizeof(uint32),
3192				                     outen);
3193			}
3194
3195		default:
3196			return (-1);
3197	}
3198
3199	return (0);
3200}
3201
/* set dynamic clk control mode (forceslow, forcefast, dynamic) */
/*   returns true if we are forcing fast clock */
bool
sb_clkctl_clk(sb_t *sbh, uint mode)
{
	sb_info_t *si;
	uint origidx;
	chipcregs_t *cc;
	uint32 scc;
	uint intr_val = 0;

	si = SB_INFO(sbh);

	/* chipcommon cores prior to rev6 don't support dynamic clock control */
	if (si->sb.ccrev < 6)
		return (FALSE);


	/* Chips with ccrev 10 are EOL and they don't have SYCC_HR which we use below */
	ASSERT(si->sb.ccrev != 10);

	INTR_OFF(si, intr_val);

	origidx = si->curidx;

	/* on MIPS SoCs (core rev <= 7) the cpu clock handling is elsewhere */
	if (sb_setcore(sbh, SB_MIPS33, 0) && (sb_corerev(&si->sb) <= 7) &&
	    (BUSTYPE(si->sb.bustype) == SB_BUS) && (si->sb.ccrev >= 10))
		goto done;

	/* the PR32414 WAR already forces HT; don't fight it */
	if (FORCEHT_WAR32414(si))
		goto done;

	cc = (chipcregs_t*) sb_setcore(sbh, SB_CC, 0);
	ASSERT(cc != NULL);

	if (!(R_REG(si->osh, &cc->capabilities) & CC_CAP_PWR_CTL) && (si->sb.ccrev < 20))
		goto done;

	/* register used to force fast clock varies by chipcommon revision:
	 * <10 slow_clk_ctl, 10..19 system_clk_ctl (SYCC_HR), >=20 clk_ctl_st (CCS_FORCEHT)
	 */
	switch (mode) {
	case CLK_FAST:	/* force fast (pll) clock */
		if (si->sb.ccrev < 10) {
			/* don't forget to force xtal back on before we clear SCC_DYN_XTAL.. */
			sb_clkctl_xtal(&si->sb, XTAL, ON);

			SET_REG(si->osh, &cc->slow_clk_ctl, (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
		} else if (si->sb.ccrev < 20) {
			OR_REG(si->osh, &cc->system_clk_ctl, SYCC_HR);
		} else {
			OR_REG(si->osh, &cc->clk_ctl_st, CCS_FORCEHT);
		}

		/* wait for the PLL */
		if (R_REG(si->osh, &cc->capabilities) & CC_CAP_PMU) {
			/* PMU chips report HT availability; poll for it */
			SPINWAIT(((R_REG(si->osh, &cc->clk_ctl_st) & CCS_HTAVAIL) == 0),
			         PMU_MAX_TRANSITION_DLY);
			ASSERT(R_REG(si->osh, &cc->clk_ctl_st) & CCS_HTAVAIL);
		} else {
			OSL_DELAY(PLL_DELAY);
		}
		break;

	case CLK_DYNAMIC:	/* enable dynamic clock control */
		if (si->sb.ccrev < 10) {
			scc = R_REG(si->osh, &cc->slow_clk_ctl);
			scc &= ~(SCC_FS | SCC_IP | SCC_XC);
			if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
				scc |= SCC_XC;
			W_REG(si->osh, &cc->slow_clk_ctl, scc);

			/* for dynamic control, we have to release our xtal_pu "force on" */
			if (scc & SCC_XC)
				sb_clkctl_xtal(&si->sb, XTAL, OFF);
		} else if (si->sb.ccrev < 20) {
			/* Instaclock */
			AND_REG(si->osh, &cc->system_clk_ctl, ~SYCC_HR);
		} else {
			AND_REG(si->osh, &cc->clk_ctl_st, ~CCS_FORCEHT);
		}
		break;

	default:
		ASSERT(0);
	}

done:
	sb_setcoreidx(sbh, origidx);
	INTR_RESTORE(si, intr_val);
	return (mode == CLK_FAST);
}
3291
/* register driver interrupt disabling and restoring callback functions
 * The callbacks are stored as-is; intr_arg is passed to each of them.
 */
void
sb_register_intr_callback(sb_t *sbh, void *intrsoff_fn, void *intrsrestore_fn,
                          void *intrsenabled_fn, void *intr_arg)
{
	sb_info_t *si;

	si = SB_INFO(sbh);
	si->intr_arg = intr_arg;
	si->intrsoff_fn = (sb_intrsoff_t)intrsoff_fn;
	si->intrsrestore_fn = (sb_intrsrestore_t)intrsrestore_fn;
	si->intrsenabled_fn = (sb_intrsenabled_t)intrsenabled_fn;
	/* save current core id.  when this function called, the current core
	 * must be the core which provides driver functions(il, et, wl, etc.)
	 */
	si->dev_coreid = si->coreid[si->curidx];
}
3309
3310void
3311sb_deregister_intr_callback(sb_t *sbh)
3312{
3313	sb_info_t *si;
3314
3315	si = SB_INFO(sbh);
3316	si->intrsoff_fn = NULL;
3317}
3318
3319
/* Determine the PCI device id to report for the d11 (802.11) core.
 * Resolution order: BCM4328 dual-band fixup (when compiled in), per-devpath
 * nvram "devid", OTP/SPROM "devid", legacy "wl0id", 4712 chip-specific
 * mapping, else 0xffff.
 */
uint16
BCMINITFN(sb_d11_devid)(sb_t *sbh)
{
	sb_info_t *si = SB_INFO(sbh);
	uint16 device;

#if defined(BCM4328)
	/* Fix device id for dual band BCM4328 */
	if (sbh->chip == BCM4328_CHIP_ID &&
	    (sbh->chippkg == BCM4328USBDUAL_PKG_ID || sbh->chippkg == BCM4328SDIODUAL_PKG_ID))
		device = BCM4328_D11DUAL_ID;
	else
#endif	/* BCM4328 */
	/* Let an nvram variable with devpath override devid */
	if ((device = (uint16)sb_getdevpathintvar(sbh, "devid")) != 0)
		;
	/* Get devid from OTP/SPROM depending on where the SROM is read */
	else if ((device = (uint16)getintvar(si->vars, "devid")) != 0)
		;
	/*
	 * no longer support wl0id, but keep the code
	 * here for backward compatibility.
	 */
	else if ((device = (uint16)getintvar(si->vars, "wl0id")) != 0)
		;
	/* Chip specific conversion */
	else if (sbh->chip == BCM4712_CHIP_ID) {
		if (sbh->chippkg == BCM4712SMALL_PKG_ID)
			device = BCM4306_D11G_ID;
		else
			device = BCM4306_D11DUAL_ID;
	}
	/* ignore it */
	else
		device = 0xffff;

	return device;
}
3358
3359int
3360BCMINITFN(sb_corepciid)(sb_t *sbh, uint func, uint16 *pcivendor, uint16 *pcidevice,
3361                        uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif,
3362                        uint8 *pciheader)
3363{
3364	uint16 vendor = 0xffff, device = 0xffff;
3365	uint8 class, subclass, progif = 0;
3366	uint8 header = PCI_HEADER_NORMAL;
3367	uint32 core = sb_coreid(sbh);
3368
3369	/* Verify whether the function exists for the core */
3370	if (func >= (uint)(core == SB_USB20H ? 2 : 1))
3371		return BCME_ERROR;
3372
3373	/* Known vendor translations */
3374	switch (sb_corevendor(sbh)) {
3375	case SB_VEND_BCM:
3376		vendor = VENDOR_BROADCOM;
3377		break;
3378	default:
3379		return BCME_ERROR;
3380	}
3381
3382	/* Determine class based on known core codes */
3383	switch (core) {
3384	case SB_ILINE20:
3385		class = PCI_CLASS_NET;
3386		subclass = PCI_NET_ETHER;
3387		device = BCM47XX_ILINE_ID;
3388		break;
3389	case SB_ENET:
3390		class = PCI_CLASS_NET;
3391		subclass = PCI_NET_ETHER;
3392		device = BCM47XX_ENET_ID;
3393		break;
3394	case SB_GIGETH:
3395		class = PCI_CLASS_NET;
3396		subclass = PCI_NET_ETHER;
3397		device = BCM47XX_GIGETH_ID;
3398		break;
3399	case SB_SDRAM:
3400	case SB_MEMC:
3401		class = PCI_CLASS_MEMORY;
3402		subclass = PCI_MEMORY_RAM;
3403		device = (uint16)core;
3404		break;
3405	case SB_PCI:
3406	case SB_PCIE:
3407		class = PCI_CLASS_BRIDGE;
3408		subclass = PCI_BRIDGE_PCI;
3409		device = (uint16)core;
3410		header = PCI_HEADER_BRIDGE;
3411		break;
3412	case SB_MIPS33:
3413		class = PCI_CLASS_CPU;
3414		subclass = PCI_CPU_MIPS;
3415		device = (uint16)core;
3416		break;
3417	case SB_CODEC:
3418		class = PCI_CLASS_COMM;
3419		subclass = PCI_COMM_MODEM;
3420		device = BCM47XX_V90_ID;
3421		break;
3422	case SB_USB:
3423		class = PCI_CLASS_SERIAL;
3424		subclass = PCI_SERIAL_USB;
3425		progif = 0x10; /* OHCI */
3426		device = BCM47XX_USB_ID;
3427		break;
3428	case SB_USB11H:
3429		class = PCI_CLASS_SERIAL;
3430		subclass = PCI_SERIAL_USB;
3431		progif = 0x10; /* OHCI */
3432		device = BCM47XX_USBH_ID;
3433		break;
3434	case SB_USB20H:
3435		class = PCI_CLASS_SERIAL;
3436		subclass = PCI_SERIAL_USB;
3437		progif = func == 0 ? 0x10 : 0x20; /* OHCI/EHCI */
3438		device = BCM47XX_USB20H_ID;
3439		header = 0x80; /* multifunction */
3440		break;
3441	case SB_IPSEC:
3442		class = PCI_CLASS_CRYPT;
3443		subclass = PCI_CRYPT_NETWORK;
3444		device = BCM47XX_IPSEC_ID;
3445		break;
3446	case SB_ROBO:
3447		class = PCI_CLASS_NET;
3448		subclass = PCI_NET_OTHER;
3449		device = BCM47XX_ROBO_ID;
3450		break;
3451	case SB_CC:
3452		class = PCI_CLASS_MEMORY;
3453		subclass = PCI_MEMORY_FLASH;
3454		device = (uint16)core;
3455		break;
3456	case SB_SATAXOR:
3457		class = PCI_CLASS_XOR;
3458		subclass = PCI_XOR_QDMA;
3459		device = BCM47XX_SATAXOR_ID;
3460		break;
3461	case SB_ATA100:
3462		class = PCI_CLASS_DASDI;
3463		subclass = PCI_DASDI_IDE;
3464		device = BCM47XX_ATA100_ID;
3465		break;
3466	case SB_USB11D:
3467		class = PCI_CLASS_SERIAL;
3468		subclass = PCI_SERIAL_USB;
3469		device = BCM47XX_USBD_ID;
3470		break;
3471	case SB_USB20D:
3472		class = PCI_CLASS_SERIAL;
3473		subclass = PCI_SERIAL_USB;
3474		device = BCM47XX_USB20D_ID;
3475		break;
3476	case SB_D11:
3477		class = PCI_CLASS_NET;
3478		subclass = PCI_NET_OTHER;
3479		device = sb_d11_devid(sbh);
3480		break;
3481
3482	default:
3483		class = subclass = progif = 0xff;
3484		device = (uint16)core;
3485		break;
3486	}
3487
3488	*pcivendor = vendor;
3489	*pcidevice = device;
3490	*pciclass = class;
3491	*pcisubclass = subclass;
3492	*pciprogif = progif;
3493	*pciheader = header;
3494
3495	return 0;
3496}
3497
3498/* use the mdio interface to read from mdio slaves */
3499static int
3500sb_pcie_mdioread(sb_info_t *si,  uint physmedia, uint regaddr, uint *regval)
3501{
3502	uint mdiodata;
3503	uint i = 0;
3504	sbpcieregs_t *pcieregs;
3505
3506	pcieregs = (sbpcieregs_t*) sb_setcoreidx(&si->sb, si->sb.buscoreidx);
3507	ASSERT(pcieregs);
3508
3509	/* enable mdio access to SERDES */
3510	W_REG(si->osh, (&pcieregs->mdiocontrol), MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
3511
3512	mdiodata = MDIODATA_START | MDIODATA_READ |
3513	        (physmedia << MDIODATA_DEVADDR_SHF) |
3514		(regaddr << MDIODATA_REGADDR_SHF) | MDIODATA_TA;
3515
3516	W_REG(si->osh, &pcieregs->mdiodata, mdiodata);
3517
3518	PR28829_DELAY();
3519
3520	/* retry till the transaction is complete */
3521	while (i < 10) {
3522		if (R_REG(si->osh, &(pcieregs->mdiocontrol)) & MDIOCTL_ACCESS_DONE) {
3523			PR28829_DELAY();
3524			*regval = (R_REG(si->osh, &(pcieregs->mdiodata)) & MDIODATA_MASK);
3525			/* Disable mdio access to SERDES */
3526			W_REG(si->osh, (&pcieregs->mdiocontrol), 0);
3527			return 0;
3528		}
3529		OSL_DELAY(1000);
3530		i++;
3531	}
3532
3533	SB_ERROR(("sb_pcie_mdioread: timed out\n"));
3534	/* Disable mdio access to SERDES */
3535	W_REG(si->osh, (&pcieregs->mdiocontrol), 0);
3536	return 1;
3537}
3538
3539
3540/* use the mdio interface to write to mdio slaves */
3541static int
3542sb_pcie_mdiowrite(sb_info_t *si,  uint physmedia, uint regaddr, uint val)
3543{
3544	uint mdiodata;
3545	uint i = 0;
3546	sbpcieregs_t *pcieregs;
3547
3548	pcieregs = (sbpcieregs_t*) sb_setcoreidx(&si->sb, si->sb.buscoreidx);
3549	ASSERT(pcieregs);
3550
3551	/* enable mdio access to SERDES */
3552	W_REG(si->osh, (&pcieregs->mdiocontrol), MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
3553
3554	mdiodata = MDIODATA_START | MDIODATA_WRITE |
3555		(physmedia << MDIODATA_DEVADDR_SHF) |
3556		(regaddr << MDIODATA_REGADDR_SHF) | MDIODATA_TA | val;
3557
3558	W_REG(si->osh, (&pcieregs->mdiodata), mdiodata);
3559
3560	PR28829_DELAY();
3561
3562	/* retry till the transaction is complete */
3563	while (i < 10) {
3564		if (R_REG(si->osh, &(pcieregs->mdiocontrol)) & MDIOCTL_ACCESS_DONE) {
3565			/* Disable mdio access to SERDES */
3566			W_REG(si->osh, (&pcieregs->mdiocontrol), 0);
3567			return 0;
3568		}
3569		OSL_DELAY(1000);
3570		i++;
3571	}
3572
3573	SB_ERROR(("sb_pcie_mdiowrite: timed out\n"));
3574	/* Disable mdio access to SERDES */
3575	W_REG(si->osh, (&pcieregs->mdiocontrol), 0);
3576	return 1;
3577
3578}
3579
3580/* indirect way to read pcie config regs */
3581uint
3582sb_pcie_readreg(void *sb, void* arg1, uint offset)
3583{
3584	sb_info_t *si;
3585	sb_t   *sbh;
3586	uint retval = 0xFFFFFFFF;
3587	sbpcieregs_t *pcieregs;
3588	uint addrtype;
3589
3590	sbh = (sb_t *)sb;
3591	si = SB_INFO(sbh);
3592	ASSERT(PCIE(si));
3593
3594	pcieregs = (sbpcieregs_t *)sb_setcore(sbh, SB_PCIE, 0);
3595	ASSERT(pcieregs);
3596
3597	addrtype = (uint)((uintptr)arg1);
3598	switch (addrtype) {
3599		case PCIE_CONFIGREGS:
3600			W_REG(si->osh, (&pcieregs->configaddr), offset);
3601			retval = R_REG(si->osh, &(pcieregs->configdata));
3602			break;
3603		case PCIE_PCIEREGS:
3604			W_REG(si->osh, &(pcieregs->pcieindaddr), offset);
3605			retval = R_REG(si->osh, &(pcieregs->pcieinddata));
3606			break;
3607		default:
3608			ASSERT(0);
3609			break;
3610	}
3611	return retval;
3612}
3613
3614/* indirect way to write pcie config/mdio/pciecore regs */
3615uint
3616sb_pcie_writereg(sb_t *sbh, void *arg1,  uint offset, uint val)
3617{
3618	sb_info_t *si;
3619	sbpcieregs_t *pcieregs;
3620	uint addrtype;
3621
3622	si = SB_INFO(sbh);
3623	ASSERT(PCIE(si));
3624
3625	pcieregs = (sbpcieregs_t *)sb_setcore(sbh, SB_PCIE, 0);
3626	ASSERT(pcieregs);
3627
3628	addrtype = (uint)((uintptr)arg1);
3629
3630	switch (addrtype) {
3631		case PCIE_CONFIGREGS:
3632			W_REG(si->osh, (&pcieregs->configaddr), offset);
3633			W_REG(si->osh, (&pcieregs->configdata), val);
3634			break;
3635		case PCIE_PCIEREGS:
3636			W_REG(si->osh, (&pcieregs->pcieindaddr), offset);
3637			W_REG(si->osh, (&pcieregs->pcieinddata), val);
3638			break;
3639		default:
3640			ASSERT(0);
3641			break;
3642	}
3643	return 0;
3644}
3645
3646
3647/* Build device path. Support SB, PCI, and JTAG for now. */
3648int
3649BCMINITFN(sb_devpath)(sb_t *sbh, char *path, int size)
3650{
3651	int slen;
3652	ASSERT(path);
3653	ASSERT(size >= SB_DEVPATH_BUFSZ);
3654
3655	if (!path || size <= 0)
3656		return -1;
3657
3658	switch (BUSTYPE((SB_INFO(sbh))->sb.bustype)) {
3659	case SB_BUS:
3660	case JTAG_BUS:
3661		slen = snprintf(path, (size_t)size, "sb/%u/", sb_coreidx(sbh));
3662		break;
3663	case PCI_BUS:
3664		ASSERT((SB_INFO(sbh))->osh);
3665		slen = snprintf(path, (size_t)size, "pci/%u/%u/",
3666		                OSL_PCI_BUS((SB_INFO(sbh))->osh),
3667		                OSL_PCI_SLOT((SB_INFO(sbh))->osh));
3668		break;
3669	case PCMCIA_BUS:
3670		SB_ERROR(("sb_devpath: OSL_PCMCIA_BUS() not implemented, bus 1 assumed\n"));
3671		SB_ERROR(("sb_devpath: OSL_PCMCIA_SLOT() not implemented, slot 1 assumed\n"));
3672		slen = snprintf(path, (size_t)size, "pc/1/1/");
3673		break;
3674	default:
3675		slen = -1;
3676		ASSERT(0);
3677		break;
3678	}
3679
3680	if (slen < 0 || slen >= size) {
3681		path[0] = '\0';
3682		return -1;
3683	}
3684
3685	return 0;
3686}
3687
3688/* Get a variable, but only if it has a devpath prefix */
3689char *
3690BCMINITFN(sb_getdevpathvar)(sb_t *sbh, const char *name)
3691{
3692	char varname[SB_DEVPATH_BUFSZ + 32];
3693
3694	sb_devpathvar(sbh, varname, sizeof(varname), name);
3695
3696	return (getvar(NULL, varname));
3697}
3698
3699/* Get a variable, but only if it has a devpath prefix */
3700int
3701BCMINITFN(sb_getdevpathintvar)(sb_t *sbh, const char *name)
3702{
3703	char varname[SB_DEVPATH_BUFSZ + 32];
3704
3705	sb_devpathvar(sbh, varname, sizeof(varname), name);
3706
3707	return (getintvar(NULL, varname));
3708}
3709
3710/* Concatenate the dev path with a varname into the given 'var' buffer
3711 * and return the 'var' pointer.
3712 * Nothing is done to the arguments if len == 0 or var is NULL, var is still returned.
3713 * On overflow, the first char will be set to '\0'.
3714 */
3715static char *
3716BCMINITFN(sb_devpathvar)(sb_t *sbh, char *var, int len, const char *name)
3717{
3718	uint path_len;
3719
3720	if (!var || len <= 0)
3721		return var;
3722
3723	if (sb_devpath(sbh, var, len) == 0) {
3724		path_len = strlen(var);
3725
3726		if (strlen(name) + 1 > (uint)(len - path_len))
3727			var[0] = '\0';
3728		else
3729			strncpy(var + path_len, name, len - path_len - 1);
3730	}
3731
3732	return var;
3733}
3734
3735
3736/*
3737 * Fixup SROMless PCI device's configuration.
3738 * The current core may be changed upon return.
3739 */
3740static int
3741sb_pci_fixcfg(sb_info_t *si)
3742{
3743	uint origidx, pciidx;
3744	sbpciregs_t *pciregs;
3745	sbpcieregs_t *pcieregs = NULL;
3746	uint16 val16, *reg16;
3747	uint32 w;
3748
3749	ASSERT(BUSTYPE(si->sb.bustype) == PCI_BUS);
3750
3751	/* Fixup PI in SROM shadow area to enable the correct PCI core access */
3752	/* save the current index */
3753	origidx = sb_coreidx(&si->sb);
3754
3755	/* check 'pi' is correct and fix it if not */
3756	if (si->sb.buscoretype == SB_PCIE) {
3757		pcieregs = (sbpcieregs_t *)sb_setcore(&si->sb, SB_PCIE, 0);
3758		ASSERT(pcieregs);
3759		reg16 = &pcieregs->sprom[SRSH_PI_OFFSET];
3760	} else if (si->sb.buscoretype == SB_PCI) {
3761		pciregs = (sbpciregs_t *)sb_setcore(&si->sb, SB_PCI, 0);
3762		ASSERT(pciregs);
3763		reg16 = &pciregs->sprom[SRSH_PI_OFFSET];
3764	} else {
3765		ASSERT(0);
3766		return -1;
3767	}
3768	pciidx = sb_coreidx(&si->sb);
3769	val16 = R_REG(si->osh, reg16);
3770	if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (uint16)pciidx) {
3771		val16 = (uint16)(pciidx << SRSH_PI_SHIFT) | (val16 & ~SRSH_PI_MASK);
3772		W_REG(si->osh, reg16, val16);
3773	}
3774
3775	if (PCIE_ASPMWARS(si)) {
3776		w = sb_pcie_readreg((void *)(uintptr)&si->sb, (void *)PCIE_PCIEREGS,
3777		                    PCIE_PLP_STATUSREG);
3778
3779		/* Detect the current polarity at attach and force that polarity and
3780		 * disable changing the polarity
3781		 */
3782		if ((w & PCIE_PLP_POLARITYINV_STAT) == 0) {
3783			si->pcie_polarity = (SERDES_RX_CTRL_FORCE);
3784		} else {
3785			si->pcie_polarity = (SERDES_RX_CTRL_FORCE |
3786			                     SERDES_RX_CTRL_POLARITY);
3787		}
3788
3789		w = OSL_PCI_READ_CONFIG(si->osh, si->pciecap_lcreg_offset, sizeof(uint32));
3790		if (w & PCIE_CLKREQ_ENAB) {
3791			reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET];
3792			val16 = R_REG(si->osh, reg16);
3793			/* if clockreq is not advertized clkreq should not be enabled */
3794			if (!(val16 & SRSH_CLKREQ_ENB))
3795				SB_ERROR(("WARNING: CLK REQ enabled already  0x%x\n", w));
3796		}
3797
3798		sb_war43448(&si->sb);
3799
3800		sb_war42767(&si->sb);
3801
3802	}
3803
3804	/* restore the original index */
3805	sb_setcoreidx(&si->sb, origidx);
3806
3807	return 0;
3808}
3809
3810/* Return ADDR64 capability of the backplane */
3811bool
3812sb_backplane64(sb_t *sbh)
3813{
3814	sb_info_t *si;
3815
3816	si = SB_INFO(sbh);
3817	return ((si->sb.cccaps & CC_CAP_BKPLN64) != 0);
3818}
3819
3820void
3821sb_btcgpiowar(sb_t *sbh)
3822{
3823	sb_info_t *si;
3824	uint origidx;
3825	uint intr_val = 0;
3826	chipcregs_t *cc;
3827	si = SB_INFO(sbh);
3828
3829	/* Make sure that there is ChipCommon core present &&
3830	 * UART_TX is strapped to 1
3831	 */
3832	if (!(si->sb.cccaps & CC_CAP_UARTGPIO))
3833		return;
3834
3835	/* sb_corereg cannot be used as we have to guarantee 8-bit read/writes */
3836	INTR_OFF(si, intr_val);
3837
3838	origidx = sb_coreidx(sbh);
3839
3840	cc = (chipcregs_t *)sb_setcore(sbh, SB_CC, 0);
3841	ASSERT(cc);
3842
3843	W_REG(si->osh, &cc->uart0mcr, R_REG(si->osh, &cc->uart0mcr) | 0x04);
3844
3845	/* restore the original index */
3846	sb_setcoreidx(sbh, origidx);
3847
3848	INTR_RESTORE(si, intr_val);
3849}
3850
3851/* check if the device is removed */
3852bool
3853sb_deviceremoved(sb_t *sbh)
3854{
3855	uint32 w;
3856	sb_info_t *si;
3857
3858	si = SB_INFO(sbh);
3859
3860	switch (BUSTYPE(si->sb.bustype)) {
3861	case PCI_BUS:
3862		ASSERT(si->osh);
3863		w = OSL_PCI_READ_CONFIG(si->osh, PCI_CFG_VID, sizeof(uint32));
3864		if ((w & 0xFFFF) != VENDOR_BROADCOM)
3865			return TRUE;
3866		else
3867			return FALSE;
3868	default:
3869		return FALSE;
3870	}
3871	return FALSE;
3872}
3873
3874/* Return the RAM size of the SOCRAM core */
3875uint32
3876BCMINITFN(sb_socram_size)(sb_t *sbh)
3877{
3878	sb_info_t *si;
3879	uint origidx;
3880	uint intr_val = 0;
3881
3882	sbsocramregs_t *regs;
3883	bool wasup;
3884	uint corerev;
3885	uint32 coreinfo;
3886	uint memsize = 0;
3887
3888	si = SB_INFO(sbh);
3889	ASSERT(si);
3890
3891	/* Block ints and save current core */
3892	INTR_OFF(si, intr_val);
3893	origidx = sb_coreidx(sbh);
3894
3895	/* Switch to SOCRAM core */
3896	if (!(regs = sb_setcore(sbh, SB_SOCRAM, 0)))
3897		goto done;
3898
3899	/* Get info for determining size */
3900	if (!(wasup = sb_iscoreup(sbh)))
3901		sb_core_reset(sbh, 0, 0);
3902	corerev = sb_corerev(sbh);
3903	coreinfo = R_REG(si->osh, &regs->coreinfo);
3904
3905	/* Calculate size from coreinfo based on rev */
3906	if (corerev == 0)
3907		memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK));
3908	else if (corerev < 3) {
3909		memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK));
3910		memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
3911	}
3912	else {
3913		uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
3914		uint bsz = (coreinfo & SRCI_SRBSZ_MASK);
3915		uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
3916		if (lss != 0)
3917			nb --;
3918		memsize = nb * (1 << (bsz + SR_BSZ_BASE));
3919		if (lss != 0)
3920			memsize += (1 << ((lss - 1) + SR_BSZ_BASE));
3921	}
3922	/* Return to previous state and core */
3923	if (!wasup)
3924		sb_core_disable(sbh, 0);
3925	sb_setcoreidx(sbh, origidx);
3926
3927done:
3928	INTR_RESTORE(si, intr_val);
3929	return memsize;
3930}
3931