1/*
2 * Code to operate on PCI/E core, in NIC mode
3 * Implements pci_api.h
4 * Copyright (C) 2015, Broadcom Corporation. All Rights Reserved.
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
13 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
15 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
16 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 *
18 * $Id: nicpci.c 419467 2013-08-21 09:19:48Z $
19 */
20
21#include <bcm_cfg.h>
22#include <typedefs.h>
23#include <bcmdefs.h>
24#include <osl.h>
25#include <bcmutils.h>
26#include <siutils.h>
27#include <hndsoc.h>
28#include <bcmdevs.h>
29#include <sbchipc.h>
30#include <pci_core.h>
31#include <pcie_core.h>
32#include <nicpci.h>
33#include <pcicfg.h>
34
35typedef struct {
36	union {
37		sbpcieregs_t *pcieregs;
38		sbpciregs_t *pciregs;
39	} regs;                         /* Memory mapped register to the core */
40
41	si_t 	*sih;					/* System interconnect handle */
42	osl_t 	*osh;					/* OSL handle */
43	uint8	pciecap_lcreg_offset;	/* PCIE capability LCreg offset in the config space */
44	uint8	pciecap_devctrl_offset;	/* PCIE DevControl reg offset in the config space */
45	bool	pcie_pr42767;
46	uint8	pcie_polarity;
47	uint8	pcie_war_aspm_ovr;	/* Override ASPM/Clkreq settings */
48	uint8	pmecap_offset;		/* PM Capability offset in the config space */
49	bool 	pmecap;				/* Capable of generating PME */
50	bool	pcie_power_save;
51	uint16	pmebits;
52	uint16	pcie_reqsize;
53	uint16	pcie_mps;
54	uint8	pciecap_devctrl2_offset; /* PCIE DevControl2 reg offset in the config space */
55	uint32	pciecap_ltr0_reg_offset; /* PCIE LTR0 reg offset in the config space */
56	uint32	pciecap_ltr1_reg_offset; /* PCIE LTR1 reg offset in the config space */
57	uint32	pciecap_ltr2_reg_offset; /* PCIE LTR2 reg offset in the config space */
58	uint8	pcie_configspace[PCI_CONFIG_SPACE_SIZE];
59} pcicore_info_t;
60
61/* debug/trace */
62#ifdef BCMDBG_ERR
63#define	PCI_ERROR(args)	printf args
64#else
65#define	PCI_ERROR(args)
66#endif	/* BCMDBG_ERR */
67
68/* routines to access mdio slave device registers */
69static bool pcie_mdiosetblock(pcicore_info_t *pi,  uint blk);
70static int pcie_mdioop(pcicore_info_t *pi, uint physmedia, uint regaddr, bool write, uint *val);
71static int pciegen1_mdioop(pcicore_info_t *pi, uint physmedia, uint regaddr, bool write,
72	uint *val);
73static int pciegen2_mdioop(pcicore_info_t *pi, uint physmedia, uint regaddr, bool write,
74	uint *val, bool slave_bypass);
75static int pcie_mdiowrite(pcicore_info_t *pi, uint physmedia, uint readdr, uint val);
76static int pcie_mdioread(pcicore_info_t *pi, uint physmedia, uint readdr, uint *ret_val);
77
78static void pcie_extendL1timer(pcicore_info_t *pi, bool extend);
79static void pcie_clkreq_upd(pcicore_info_t *pi, uint state);
80
81static void pcie_war_aspm_clkreq(pcicore_info_t *pi);
82static void pcie_war_serdes(pcicore_info_t *pi);
83static void pcie_war_noplldown(pcicore_info_t *pi);
84static void pcie_war_polarity(pcicore_info_t *pi);
85static void pcie_war_pci_setup(pcicore_info_t *pi);
86static void pcie_power_save_upd(pcicore_info_t *pi, bool up);
87
88static bool pcicore_pmecap(pcicore_info_t *pi);
89static void pcicore_fixlatencytimer(pcicore_info_t* pch, uint8 timer_val);
90
91#define PCIE_GEN1(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && \
92	((sih)->buscoretype == PCIE_CORE_ID))
93#define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) &&	\
94	((sih)->buscoretype == PCIE2_CORE_ID))
95#define PCIE(sih) (PCIE_GEN1(sih) || PCIE_GEN2(sih))
96
97#define PCIEGEN1_ASPM(sih)	((PCIE_GEN1(sih)) &&	\
98	(((sih)->buscorerev >= 3) && ((sih)->buscorerev <= 5)))
99
#define DWORD_ALIGN(x)  ((x) & ~(0x03))
#define BYTE_POS(x) ((x) & 0x3)
#define WORD_POS(x) (((x) & 0x2) >> 1)

#define BYTE_SHIFT(x)  (8 * BYTE_POS(x))
#define WORD_SHIFT(x)  (16 * WORD_POS(x))

#define BYTE_VAL(a, x) (((a) >> BYTE_SHIFT(x)) & 0xFF)
#define WORD_VAL(a, x) (((a) >> WORD_SHIFT(x)) & 0xFFFF)
109
110#define read_pci_cfg_byte(a) \
111	(BYTE_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xff)
112
113#define read_pci_cfg_word(a) \
114	(WORD_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xffff)
115
#define write_pci_cfg_byte(a, val) do { \
	uint32 tmpval; \
	tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~((uint32)0xFF << BYTE_SHIFT(a))) | \
	        (((uint32)(val) & 0xFF) << BYTE_SHIFT(a)); \
	OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \
	} while (0)

#define write_pci_cfg_word(a, val) do { \
	uint32 tmpval; \
	tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~((uint32)0xFFFF << WORD_SHIFT(a))) | \
	        (((uint32)(val) & 0xFFFF) << WORD_SHIFT(a)); \
	OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \
	} while (0)
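/*
 * Worked example (illustrative only): read_pci_cfg_byte(0x6) issues a 32-bit config read
 * at DWORD_ALIGN(0x6) == 0x4 and extracts byte BYTE_POS(0x6) == 2 by shifting the dword
 * right by BYTE_SHIFT(0x6) == 16 bits and masking with 0xFF. The write macros perform the
 * matching read-modify-write on the same dword-aligned location.
 */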
129
/* delay needed between mdiocontrol / mdiodata register accesses */
131#define PR28829_DELAY() OSL_DELAY(10)
132
/**
 * Initialize the PCI core. It is the caller's responsibility to ensure that this is done
 * only once.
 */
137void *
138pcicore_init(si_t *sih, osl_t *osh, void *regs)
139{
140	pcicore_info_t *pi;
141	uint8 cap_ptr;
142
143	ASSERT(sih->bustype == PCI_BUS);
144
145	/* alloc pcicore_info_t */
146	if ((pi = MALLOC(osh, sizeof(pcicore_info_t))) == NULL) {
		PCI_ERROR(("pcicore_init: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
148		return (NULL);
149	}
150
151	bzero(pi, sizeof(pcicore_info_t));
152
153	pi->sih = sih;
154	pi->osh = osh;
155
156	if (sih->buscoretype == PCIE2_CORE_ID) {
157		pi->regs.pcieregs = (sbpcieregs_t*)regs;
158		cap_ptr = pcicore_find_pci_capability(pi->osh, PCI_CAP_PCIECAP_ID, NULL, NULL);
159		ASSERT(cap_ptr);
160		pi->pciecap_devctrl_offset = cap_ptr + PCIE_CAP_DEVCTRL_OFFSET;
161		pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
162		pi->pciecap_devctrl2_offset = cap_ptr + PCIE_CAP_DEVCTRL2_OFFSET;
163		pi->pciecap_ltr0_reg_offset = cap_ptr + PCIE_CAP_LTR0_REG_OFFSET;
164		pi->pciecap_ltr1_reg_offset = cap_ptr + PCIE_CAP_LTR1_REG_OFFSET;
165		pi->pciecap_ltr2_reg_offset = cap_ptr + PCIE_CAP_LTR2_REG_OFFSET;
166	} else if (sih->buscoretype == PCIE_CORE_ID) {
167		pi->regs.pcieregs = (sbpcieregs_t*)regs;
168		cap_ptr = pcicore_find_pci_capability(pi->osh, PCI_CAP_PCIECAP_ID, NULL, NULL);
169		ASSERT(cap_ptr);
170		pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
171		pi->pciecap_devctrl_offset = cap_ptr + PCIE_CAP_DEVCTRL_OFFSET;
172		pi->pciecap_devctrl2_offset = cap_ptr + PCIE_CAP_DEVCTRL2_OFFSET;
173		pi->pciecap_ltr0_reg_offset = cap_ptr + PCIE_CAP_LTR0_REG_OFFSET;
174		pi->pciecap_ltr1_reg_offset = cap_ptr + PCIE_CAP_LTR1_REG_OFFSET;
175		pi->pciecap_ltr2_reg_offset = cap_ptr + PCIE_CAP_LTR2_REG_OFFSET;
176		pi->pcie_power_save = TRUE; /* Enable pcie_power_save by default */
177	} else
178		pi->regs.pciregs = (sbpciregs_t*)regs;
179
180	return pi;
181}
182
183void
184pcicore_deinit(void *pch)
185{
186	pcicore_info_t *pi = (pcicore_info_t *)pch;
187
188
189	if (pi == NULL)
190		return;
191	MFREE(pi->osh, pi, sizeof(pcicore_info_t));
192}
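/*
 * Hypothetical attach-time usage sketch; the caller-side names (sih, osh, regs) are
 * assumed to come from the usual si/osl attach path and are not defined in this file.
 *
 *	void *pch = pcicore_init(sih, osh, regs);
 *	if (pch == NULL)
 *		return BCME_NOMEM;
 *	...
 *	pcicore_deinit(pch);
 */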
193
/** return cap_offset if the requested capability exists in the PCI config space */
/* Note that it is the caller's responsibility to make sure this is a PCI bus */
196uint8
197pcicore_find_pci_capability(osl_t *osh, uint8 req_cap_id, uchar *buf, uint32 *buflen)
198{
199	uint8 cap_id;
200	uint8 cap_ptr = 0;
201	uint32 bufsize;
202	uint8 byte_val;
203
204	/* check for Header type 0 */
205	byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
206	if ((byte_val & 0x7f) != PCI_HEADER_NORMAL)
207		goto end;
208
209	/* check if the capability pointer field exists */
210	byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
211	if (!(byte_val & PCI_CAPPTR_PRESENT))
212		goto end;
213
214	cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
215	/* check if the capability pointer is 0x00 */
216	if (cap_ptr == 0x00)
217		goto end;
218
	/* loop through the capability list and see if the pcie capability exists */
220
221	cap_id = read_pci_cfg_byte(cap_ptr);
222
223	while (cap_id != req_cap_id) {
224		cap_ptr = read_pci_cfg_byte((cap_ptr+1));
225		if (cap_ptr == 0x00) break;
226		cap_id = read_pci_cfg_byte(cap_ptr);
227	}
228	if (cap_id != req_cap_id) {
229		goto end;
230	}
231	/* found the caller requested capability */
232	if ((buf != NULL) && (buflen != NULL)) {
233		uint8 cap_data;
234
235		bufsize = *buflen;
236		if (!bufsize) goto end;
237		*buflen = 0;
		/* copy the capability data excluding cap ID and next ptr */
239		cap_data = cap_ptr + 2;
240		if ((bufsize + cap_data)  > SZPCR)
241			bufsize = SZPCR - cap_data;
242		*buflen = bufsize;
243		while (bufsize--) {
244			*buf = read_pci_cfg_byte(cap_data);
245			cap_data++;
246			buf++;
247		}
248	}
249end:
250	return cap_ptr;
251}
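/*
 * Usage sketch (illustrative): locate the PCIe capability without copying its body, as
 * pcicore_init() does above.
 *
 *	uint8 cap_ptr = pcicore_find_pci_capability(osh, PCI_CAP_PCIECAP_ID, NULL, NULL);
 *	if (cap_ptr == 0)
 *		return;		// capability not present in config space
 */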
252
253/** Register Access API */
254uint
255pcie_readreg(si_t *sih, sbpcieregs_t *pcieregs, uint addrtype, uint offset)
256{
257	uint retval = 0xFFFFFFFF;
258	osl_t   *osh = si_osh(sih);
259
260	ASSERT(pcieregs != NULL);
261	BCM_REFERENCE(osh);
262
263	if ((BUSTYPE(sih->bustype) == SI_BUS) || PCIE_GEN1(sih)) {
264		switch (addrtype) {
265			case PCIE_CONFIGREGS:
266				W_REG(osh, (&pcieregs->configaddr), offset);
267				(void)R_REG(osh, (&pcieregs->configaddr));
268				retval = R_REG(osh, &(pcieregs->configdata));
269				break;
270			case PCIE_PCIEREGS:
271				W_REG(osh, &(pcieregs->u.pcie1.pcieindaddr), offset);
272				(void)R_REG(osh, (&pcieregs->u.pcie1.pcieindaddr));
273				retval = R_REG(osh, &(pcieregs->u.pcie1.pcieinddata));
274				break;
275			default:
276				ASSERT(0);
277				break;
278		}
279	}
280	else if (PCIE_GEN2(sih)) {
281		W_REG(osh, (&pcieregs->configaddr), offset);
282		(void)R_REG(osh, (&pcieregs->configaddr));
283		retval = R_REG(osh, &(pcieregs->configdata));
284	}
285
286	return retval;
287}
288
289uint
290pcie_writereg(si_t *sih, sbpcieregs_t *pcieregs, uint addrtype, uint offset, uint val)
291{
292	osl_t   *osh = si_osh(sih);
293
294	ASSERT(pcieregs != NULL);
295	BCM_REFERENCE(osh);
296
297	if ((BUSTYPE(sih->bustype) == SI_BUS) || PCIE_GEN1(sih)) {
298		switch (addrtype) {
299			case PCIE_CONFIGREGS:
300				W_REG(osh, (&pcieregs->configaddr), offset);
301				W_REG(osh, (&pcieregs->configdata), val);
302				break;
303			case PCIE_PCIEREGS:
304				W_REG(osh, (&pcieregs->u.pcie1.pcieindaddr), offset);
305				W_REG(osh, (&pcieregs->u.pcie1.pcieinddata), val);
306				break;
307			default:
308				ASSERT(0);
309				break;
310		}
311	}
312	else if (PCIE_GEN2(sih)) {
313		W_REG(osh, (&pcieregs->configaddr), offset);
314		W_REG(osh, (&pcieregs->configdata), val);
315	}
316	return 0;
317}
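/*
 * Usage sketch (illustrative, GEN1 core): the indirect address/data window is used to
 * access protocol-layer registers, mirroring what pcie_extendL1timer() does below.
 *
 *	uint32 w = pcie_readreg(sih, pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
 *	pcie_writereg(sih, pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG,
 *		w | PCIE_ASPMTIMER_EXTEND);
 */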
318
319static bool
320pcie_mdiosetblock(pcicore_info_t *pi, uint blk)
321{
322	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
323	uint mdiodata, i = 0;
324	uint pcie_serdes_spinwait = 200;
325
326	mdiodata = MDIODATA_START | MDIODATA_WRITE | (MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
327	        (MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) | MDIODATA_TA | (blk << 4);
328	W_REG(pi->osh, &pcieregs->u.pcie1.mdiodata, mdiodata);
329
330	PR28829_DELAY();
331	/* retry till the transaction is complete */
332	while (i < pcie_serdes_spinwait) {
333		if (R_REG(pi->osh, &(pcieregs->u.pcie1.mdiocontrol)) & MDIOCTL_ACCESS_DONE) {
334			break;
335		}
336		OSL_DELAY(1000);
337		i++;
338	}
339
340	if (i >= pcie_serdes_spinwait) {
341		PCI_ERROR(("pcie_mdiosetblock: timed out\n"));
342		return FALSE;
343	}
344
345	return TRUE;
346}
347
348static bool
349pcie2_mdiosetblock(pcicore_info_t *pi, uint blk)
350{
351	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
352	uint mdiodata, mdioctrl, i = 0;
353	uint pcie_serdes_spinwait = 200;
354
355	mdioctrl = MDIOCTL2_DIVISOR_VAL | (0x1F << MDIOCTL2_REGADDR_SHF);
356	W_REG(pi->osh, &pcieregs->u.pcie2.mdiocontrol, mdioctrl);
357
358	mdiodata = (blk << MDIODATA2_DEVADDR_SHF) | MDIODATA2_DONE;
359	W_REG(pi->osh, &pcieregs->u.pcie2.mdiowrdata, mdiodata);
360
361	PR28829_DELAY();
362	/* retry till the transaction is complete */
363	while (i < pcie_serdes_spinwait) {
364		if (!(R_REG(pi->osh, &(pcieregs->u.pcie2.mdiowrdata)) & MDIODATA2_DONE)) {
365			break;
366		}
367		OSL_DELAY(1000);
368		i++;
369	}
370
371	if (i >= pcie_serdes_spinwait) {
372		PCI_ERROR(("pcie_mdiosetblock: timed out\n"));
373		return FALSE;
374	}
375
376	return TRUE;
377}
378
379static int
380pcie_mdioop(pcicore_info_t *pi, uint physmedia, uint regaddr, bool write, uint *val)
381{
382	if (PCIE_GEN1(pi->sih))
383		return (pciegen1_mdioop(pi, physmedia, regaddr, write, val));
384	else if (PCIE_GEN2(pi->sih))
385		return (pciegen2_mdioop(pi, physmedia, regaddr, write, val, 0));
386	else
387		return 0xFFFFFFFF;
388}
389
390static int
391pciegen2_mdioop(pcicore_info_t *pi, uint physmedia, uint regaddr, bool write, uint *val,
392	bool slave_bypass)
393{
394	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
395	uint pcie_serdes_spinwait = 200, i = 0, mdio_ctrl;
396	uint32 *reg32;
397
398	if (!PCIE_GEN2(pi->sih))
399		ASSERT(0);
400
401	pcie2_mdiosetblock(pi, physmedia);
402
403	/* enable mdio access to SERDES */
404	mdio_ctrl = MDIOCTL2_DIVISOR_VAL;
405	mdio_ctrl |= (regaddr << MDIOCTL2_REGADDR_SHF);
406
407	if (slave_bypass)
408		mdio_ctrl |= MDIOCTL2_SLAVE_BYPASS;
409
410	if (!write)
411		mdio_ctrl |= MDIOCTL2_READ;
412
413	W_REG(pi->osh, (&pcieregs->u.pcie2.mdiocontrol), mdio_ctrl);
414	if (write) {
415		reg32 =  (uint32 *)&(pcieregs->u.pcie2.mdiowrdata);
416		W_REG(pi->osh, reg32, *val | MDIODATA2_DONE);
417	}
418	else
419		reg32 =  (uint32 *)&(pcieregs->u.pcie2.mdiorddata);
420
421	/* retry till the transaction is complete */
422	while (i < pcie_serdes_spinwait) {
423		if (!(R_REG(pi->osh, reg32) & MDIODATA2_DONE)) {
424			if (!write)
425				*val = (R_REG(pi->osh, reg32) & MDIODATA2_MASK);
426			return 0;
427		}
428		OSL_DELAY(1000);
429		i++;
430	}
431	return 0;
432}
433
434static int
435pciegen1_mdioop(pcicore_info_t *pi, uint physmedia, uint regaddr, bool write, uint *val)
436{
437	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
438	uint mdiodata;
439	uint i = 0;
440	uint pcie_serdes_spinwait = 10;
441
442	if (!PCIE_GEN1(pi->sih))
443		ASSERT(0);
444
445	/* enable mdio access to SERDES */
446	W_REG(pi->osh, (&pcieregs->u.pcie1.mdiocontrol), MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
447
448	if (pi->sih->buscorerev >= 10) {
449		/* new serdes is slower in rw, using two layers of reg address mapping */
450		if (!pcie_mdiosetblock(pi, physmedia))
451			return 1;
452		mdiodata = (MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
453			(regaddr << MDIODATA_REGADDR_SHF);
454		pcie_serdes_spinwait *= 20;
455	} else {
456		mdiodata = (physmedia << MDIODATA_DEVADDR_SHF_OLD) |
457			(regaddr << MDIODATA_REGADDR_SHF_OLD);
458	}
459
460	if (!write)
461		mdiodata |= (MDIODATA_START | MDIODATA_READ | MDIODATA_TA);
462	else
463		mdiodata |= (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA | *val);
464
465	W_REG(pi->osh, &pcieregs->u.pcie1.mdiodata, mdiodata);
466
467	PR28829_DELAY();
468
469	/* retry till the transaction is complete */
470	while (i < pcie_serdes_spinwait) {
471		if (R_REG(pi->osh, &(pcieregs->u.pcie1.mdiocontrol)) & MDIOCTL_ACCESS_DONE) {
472			if (!write) {
473				PR28829_DELAY();
474				*val = (R_REG(pi->osh, &(pcieregs->u.pcie1.mdiodata)) &
475					MDIODATA_MASK);
476			}
477			/* Disable mdio access to SERDES */
478			W_REG(pi->osh, (&pcieregs->u.pcie1.mdiocontrol), 0);
479			return 0;
480		}
481		OSL_DELAY(1000);
482		i++;
483	}
484
485	PCI_ERROR(("pcie_mdioop: timed out op: %d\n", write));
486	/* Disable mdio access to SERDES */
487	W_REG(pi->osh, (&pcieregs->u.pcie1.mdiocontrol), 0);
488	return 1;
489}
490
491/** use the mdio interface to read from mdio slaves */
492static int
493pcie_mdioread(pcicore_info_t *pi, uint physmedia, uint regaddr, uint *regval)
494{
495	return pcie_mdioop(pi, physmedia, regaddr, FALSE, regval);
496}
497
498/** use the mdio interface to write to mdio slaves */
499static int
500pcie_mdiowrite(pcicore_info_t *pi, uint physmedia, uint regaddr, uint val)
501{
502	return pcie_mdioop(pi, physmedia, regaddr, TRUE, &val);
503}
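/*
 * Note (illustrative): the MDIO helpers above are static; external callers reach the
 * serdes through pcicore_pcieserdesreg() (defined later in this file), where a zero mask
 * performs a plain read-back, e.g.
 *
 *	uint32 pwr = pcicore_pcieserdesreg(pch, MDIO_DEV_BLK1, BLK1_PWR_MGMT1, 0, 0);
 */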
504
505/* ***** Support functions ***** */
506
/**
 * By default, PCIe devices are not allowed to create payloads greater than 128 bytes.
 * Maximum Read Request Size is a PCIe parameter that is advertised to the host, so the host can
 * choose a balance between high throughput and low 'chunkiness' on the bus. Regardless of the
 * setting of this (hardware) field, the core does not initiate read requests larger than 512 bytes.
 */
513static uint32
514pcie_devcontrol_mrrs(void *pch, uint32 mask, uint32 val)
515{
516	pcicore_info_t *pi = (pcicore_info_t *)pch;
517	uint32 reg_val;
518	uint8 offset;
519
520	offset = pi->pciecap_devctrl_offset;
521	if (!offset)
522		return 0;
523
524	reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
525	/* set operation */
526	if (mask) {
527		if (val > PCIE_CAP_DEVCTRL_MRRS_128B) {
528			if (PCIE_GEN1(pi->sih) && (pi->sih->buscorerev < 18)) {
				PCI_ERROR(("%s pcie corerev %d doesn't support >128B MRRS\n",
					__FUNCTION__, pi->sih->buscorerev));
531				val = PCIE_CAP_DEVCTRL_MRRS_128B;
532			}
533		}
534
535		reg_val &= ~PCIE_CAP_DEVCTRL_MRRS_MASK;
536		reg_val |= (val << PCIE_CAP_DEVCTRL_MRRS_SHIFT) & PCIE_CAP_DEVCTRL_MRRS_MASK;
537
538		OSL_PCI_WRITE_CONFIG(pi->osh, offset, sizeof(uint32), reg_val);
539		reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
540	}
541	return reg_val;
542}
543
544static uint32
545pcie_devcontrol_mps(void *pch, uint32 mask, uint32 val)
546{
547	pcicore_info_t *pi = (pcicore_info_t *)pch;
548	uint32 reg_val;
549	uint8 offset;
550
551	offset = pi->pciecap_devctrl_offset;
552	if (!offset)
553		return 0;
554
555	reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
556	/* set operation */
557	if (mask) {
558		reg_val &= ~PCIE_CAP_DEVCTRL_MPS_MASK;
559		reg_val |= (val << PCIE_CAP_DEVCTRL_MPS_SHIFT) & PCIE_CAP_DEVCTRL_MPS_MASK;
560
561		OSL_PCI_WRITE_CONFIG(pi->osh, offset, sizeof(uint32), reg_val);
562		reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
563	}
564	return reg_val;
565}
566
567uint8
568pcie_clkreq(void *pch, uint32 mask, uint32 val)
569{
570	pcicore_info_t *pi = (pcicore_info_t *)pch;
571	uint32 reg_val;
572	uint8 offset;
573
574	offset = pi->pciecap_lcreg_offset;
575	if (!offset)
576		return 0;
577
578	reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
579	/* set operation */
580	if (mask) {
581		if (val)
582			reg_val |= PCIE_CLKREQ_ENAB;
583		else
584			reg_val &= ~PCIE_CLKREQ_ENAB;
585		OSL_PCI_WRITE_CONFIG(pi->osh, offset, sizeof(uint32), reg_val);
586		reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
587	}
588	if (reg_val & PCIE_CLKREQ_ENAB)
589		return 1;
590	else
591		return 0;
592}
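/*
 * Usage sketch (illustrative): like most accessors in this file, pcie_clkreq() follows a
 * mask/val convention, where a zero mask only queries the current state.
 *
 *	uint8 clkreq_on = pcie_clkreq(pch, 0, 0);	// query only
 *	pcie_clkreq(pch, 1, 0);				// clear the CLKREQ enable bit
 */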
593
594uint8
595pcie_ltrenable(void *pch, uint32 mask, uint32 val)
596{
597	pcicore_info_t *pi = (pcicore_info_t *)pch;
598	uint32 reg_val;
599	uint8 offset;
600
601	offset = pi->pciecap_devctrl2_offset;
602	if (!offset)
603		return 0;
604
605	reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
606
607	/* set operation */
608	if (mask) {
609		if (val)
610			reg_val |= PCIE_CAP_DEVCTRL2_LTR_ENAB_MASK;
611		else
612			reg_val &= ~PCIE_CAP_DEVCTRL2_LTR_ENAB_MASK;
613		OSL_PCI_WRITE_CONFIG(pi->osh, offset, sizeof(uint32), reg_val);
614		reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
615	}
616	if (reg_val & PCIE_CAP_DEVCTRL2_LTR_ENAB_MASK)
617		return 1;
618	else
619		return 0;
620}
621
622/* JIRA:SWWLAN-28745
623    val and return value:
624	0  Disabled
625	1  Enable using Message signaling[Var A]
626	2  Enable using Message signaling[Var B]
627	3  Enable using WAKE# signaling
628*/
629uint8
630pcie_obffenable(void *pch, uint32 mask, uint32 val)
631{
632	pcicore_info_t *pi = (pcicore_info_t *)pch;
633	uint32 reg_val;
634	uint8 offset;
635
636	offset = pi->pciecap_devctrl2_offset;
637	if (!offset)
638		return 0;
639
640	reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
641
642	/* set operation */
643	if (mask) {
644		reg_val = (reg_val & ~PCIE_CAP_DEVCTRL2_OBFF_ENAB_MASK) |
645			((val << PCIE_CAP_DEVCTRL2_OBFF_ENAB_SHIFT) &
646			PCIE_CAP_DEVCTRL2_OBFF_ENAB_MASK);
647		OSL_PCI_WRITE_CONFIG(pi->osh, offset, sizeof(uint32), reg_val);
648		reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
649	}
650
651	return  (reg_val & PCIE_CAP_DEVCTRL2_OBFF_ENAB_MASK) >> PCIE_CAP_DEVCTRL2_OBFF_ENAB_SHIFT;
652}
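/*
 * Usage sketch (illustrative, names hypothetical), using the encoding documented above:
 *
 *	pcie_obffenable(pch, 1, 3);		// request WAKE# signaling
 *	cur = pcie_obffenable(pch, 0, 0);	// query: 0 means OBFF stayed disabled
 */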
653
654uint32
655pcie_ltr_reg(void *pch, uint32 reg, uint32 mask, uint32 val)
656{
657	pcicore_info_t *pi = (pcicore_info_t *)pch;
658	uint32 reg_val;
659	uint32 offset;
660
661	if (PCIE_GEN1(pi->sih))
662		return 0;
663
664	if (reg == PCIE_CAP_LTR0_REG)
665		offset = pi->pciecap_ltr0_reg_offset;
666	else if (reg == PCIE_CAP_LTR1_REG)
667		offset = pi->pciecap_ltr1_reg_offset;
668	else if (reg == PCIE_CAP_LTR2_REG)
669		offset = pi->pciecap_ltr2_reg_offset;
670	else {
671		PCI_ERROR(("pcie_ltr_reg: unsupported LTR register offset %d\n",
672			reg));
673		return 0;
674	}
675
676	if (!offset)
677		return 0;
678
679	if (mask) { /* set operation */
680		reg_val = val;
681		pcie_writereg(pi->sih, pi->regs.pcieregs, PCIE_CONFIGREGS, offset, reg_val);
682	}
683	else { /* get operation */
684		reg_val = pcie_readreg(pi->sih, pi->regs.pcieregs, PCIE_CONFIGREGS, offset);
685	}
686
687	return reg_val;
688}
689
690uint32
691pcieltrspacing_reg(void *pch, uint32 mask, uint32 val)
692{
693	pcicore_info_t *pi = (pcicore_info_t *)pch;
694	si_t *sih = pi->sih;
695	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
696	uint32 retval;
697
698	if (PCIE_GEN1(sih))
699		return 0;
700
701	ASSERT(pcieregs != NULL);
702
703	if (mask) { /* set operation */
704		retval = val;
705		W_REG(pi->osh, &(pcieregs->ltrspacing), val);
706	}
707	else { /* get operation */
708		retval = R_REG(pi->osh, &(pcieregs->ltrspacing));
709	}
710
711	return retval;
712}
713
714uint32
715pcieltrhysteresiscnt_reg(void *pch, uint32 mask, uint32 val)
716{
717	pcicore_info_t *pi = (pcicore_info_t *)pch;
718	si_t *sih = pi->sih;
719	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
720	uint32 retval;
721
722	if (PCIE_GEN1(sih))
723		return 0;
724
725	ASSERT(pcieregs != NULL);
726
727	if (mask) { /* set operation */
728		retval = val;
729		W_REG(pi->osh, &(pcieregs->ltrhysteresiscnt), val);
730	}
731	else { /* get operation */
732		retval = R_REG(pi->osh, &(pcieregs->ltrhysteresiscnt));
733	}
734
735	return retval;
736}
737
738static void
739pcie_extendL1timer(pcicore_info_t *pi, bool extend)
740{
741	uint32 w;
742	si_t *sih = pi->sih;
743	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
744
745	if (!PCIE_GEN1(sih))
746		return;
747
748	w = pcie_readreg(sih, pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
749
750	if (extend && sih->buscorerev >= 7)
751		w |= PCIE_ASPMTIMER_EXTEND;
752	else
753		w &= ~PCIE_ASPMTIMER_EXTEND;
754	pcie_writereg(sih, pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w);
755	w = pcie_readreg(sih, pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
756}
757
758/** centralized clkreq control policy */
759static void
760pcie_clkreq_upd(pcicore_info_t *pi, uint state)
761{
762	si_t *sih = pi->sih;
763	ASSERT(PCIE(sih));
764
765	if (!PCIE_GEN1(sih))
766		return;
767
768	switch (state) {
769	case SI_DOATTACH:
770		if (PCIEGEN1_ASPM(sih))
771			pcie_clkreq((void *)pi, 1, 0);
772		break;
773	case SI_PCIDOWN:
774		if (sih->buscorerev == 6) {	/* turn on serdes PLL down */
775			si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipcontrol_addr),
776			           ~0, 0);
777			si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipcontrol_data),
778			           ~0x40, 0);
779		} else if (pi->pcie_pr42767) {
780			pcie_clkreq((void *)pi, 1, 1);
781		}
782		break;
783	case SI_PCIUP:
784		if (sih->buscorerev == 6) {	/* turn off serdes PLL down */
785			si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipcontrol_addr),
786			           ~0, 0);
787			si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipcontrol_data),
788			           ~0x40, 0x40);
789		} else if (PCIEGEN1_ASPM(sih)) {		/* disable clkreq */
790			pcie_clkreq((void *)pi, 1, 0);
791		}
792		break;
793	default:
794		ASSERT(0);
795		break;
796	}
797}
798
799/* ***** PCI core WARs ***** */
800/* Done only once at attach time */
801static void
802pcie_war_polarity(pcicore_info_t *pi)
803{
804	uint32 w;
805
806	if (pi->pcie_polarity != 0)
807		return;
808
809	w = pcie_readreg(pi->sih, pi->regs.pcieregs, PCIE_PCIEREGS, PCIE_PLP_STATUSREG);
810
811	/* Detect the current polarity at attach and force that polarity and
812	 * disable changing the polarity
813	 */
814	if ((w & PCIE_PLP_POLARITYINV_STAT) == 0)
815		pi->pcie_polarity = (SERDES_RX_CTRL_FORCE);
816	else
817		pi->pcie_polarity = (SERDES_RX_CTRL_FORCE | SERDES_RX_CTRL_POLARITY);
818}
819
820/**
821 * enable ASPM and CLKREQ if srom doesn't have it.
822 * Needs to happen when update to shadow SROM is needed
823 *   : Coming out of 'standby'/'hibernate'
824 *   : If pcie_war_aspm_ovr state changed
825 */
826static void
827pcie_war_aspm_clkreq(pcicore_info_t *pi)
828{
829	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
830	si_t *sih = pi->sih;
831	uint16 val16, *reg16;
832	uint32 w;
833
834	if (!PCIEGEN1_ASPM(sih))
835		return;
836
837	/* bypass this on QT or VSIM */
838	if (!ISSIM_ENAB(sih)) {
839
840		reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET];
841		val16 = R_REG(pi->osh, reg16);
842
843		val16 &= ~SRSH_ASPM_ENB;
844		if (pi->pcie_war_aspm_ovr == PCIE_ASPM_ENAB)
845			val16 |= SRSH_ASPM_ENB;
846		else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L1_ENAB)
847			val16 |= SRSH_ASPM_L1_ENB;
848		else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L0s_ENAB)
849			val16 |= SRSH_ASPM_L0s_ENB;
850
851		W_REG(pi->osh, reg16, val16);
852
853		w = OSL_PCI_READ_CONFIG(pi->osh, pi->pciecap_lcreg_offset, sizeof(uint32));
854		w &= ~PCIE_ASPM_ENAB;
855		w |= pi->pcie_war_aspm_ovr;
856		OSL_PCI_WRITE_CONFIG(pi->osh, pi->pciecap_lcreg_offset, sizeof(uint32), w);
857	}
858
859	reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV5];
860	val16 = R_REG(pi->osh, reg16);
861
862	if (pi->pcie_war_aspm_ovr != PCIE_ASPM_DISAB) {
863		val16 |= SRSH_CLKREQ_ENB;
864		pi->pcie_pr42767 = TRUE;
865	} else
866		val16 &= ~SRSH_CLKREQ_ENB;
867
868	W_REG(pi->osh, reg16, val16);
869}
870
871static void
872pcie_war_pmebits(pcicore_info_t *pi)
873{
874	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
875	uint16 val16, *reg16;
876
877	if (pi->sih->buscorerev != 18 && pi->sih->buscorerev != 19)
878		return;
879
880	reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV8];
881	val16 = R_REG(pi->osh, reg16);
882	if (val16 != pi->pmebits) {
883		PCI_ERROR(("pcie_war_pmebits: pmebits mismatch 0x%x (was 0x%x)\n",
884			val16, pi->pmebits));
885		pi->pmebits = 0x1f30;
886		W_REG(pi->osh, reg16, pi->pmebits);
887		val16 = R_REG(pi->osh, reg16);
888		PCI_ERROR(("pcie_war_pmebits: update pmebits to 0x%x\n", val16));
889	}
890}
891
892/** Apply the polarity determined at the start */
893/* Needs to happen when coming out of 'standby'/'hibernate' */
894static void
895pcie_war_serdes(pcicore_info_t *pi)
896{
897	uint32 w = 0;
898
899	if (pi->pcie_polarity != 0)
900		pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CTRL, pi->pcie_polarity);
901
902	pcie_mdioread(pi, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, &w);
903	if (w & PLL_CTRL_FREQDET_EN) {
904		w &= ~PLL_CTRL_FREQDET_EN;
905		pcie_mdiowrite(pi, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, w);
906	}
907}
908
909/** Fix MISC config to allow coming out of L2/L3-Ready state w/o PRST */
910/* Needs to happen when coming out of 'standby'/'hibernate' */
911static void
912BCMINITFN(pcie_misc_config_fixup)(pcicore_info_t *pi)
913{
914	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
915	uint16 val16, *reg16;
916
917	reg16 = &pcieregs->sprom[SRSH_PCIE_MISC_CONFIG];
918	val16 = R_REG(pi->osh, reg16);
919
920	if ((val16 & SRSH_L23READY_EXIT_NOPERST) == 0) {
921		val16 |= SRSH_L23READY_EXIT_NOPERST;
922		W_REG(pi->osh, reg16, val16);
923	}
924}
925
926/* Needs to happen when coming out of 'standby'/'hibernate' */
927static void
928pcie_war_noplldown(pcicore_info_t *pi)
929{
930	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
931	uint16 *reg16;
932
933	ASSERT(pi->sih->buscorerev == 7);
934
935	/* turn off serdes PLL down */
936	si_corereg(pi->sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipcontrol),
937	           CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);
938
939	/*  clear srom shadow backdoor */
940	reg16 = &pcieregs->sprom[SRSH_BD_OFFSET];
941	W_REG(pi->osh, reg16, 0);
942}
943
944/** Needs to happen when coming out of 'standby'/'hibernate' */
945static void
946pcie_war_pci_setup(pcicore_info_t *pi)
947{
948	si_t *sih = pi->sih;
949	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
950	uint32 w;
951
952	if ((sih->buscorerev == 0) || (sih->buscorerev == 1)) {
953		w = pcie_readreg(sih, pcieregs, PCIE_PCIEREGS, PCIE_TLP_WORKAROUNDSREG);
954		w |= 0x8;
955		pcie_writereg(sih, pcieregs, PCIE_PCIEREGS, PCIE_TLP_WORKAROUNDSREG, w);
956	}
957
958	if (sih->buscorerev == 1) {
959		w = pcie_readreg(sih, pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
960		w |= (0x40);
961		pcie_writereg(sih, pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
962	}
963
964	if (sih->buscorerev == 0) {
965		pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
966		pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
967		pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);
968	} else if (PCIEGEN1_ASPM(sih)) {
969		/* Change the L1 threshold for better performance */
970		w = pcie_readreg(sih, pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
971		w &= ~(PCIE_L1THRESHOLDTIME_MASK);
972		w |= (PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT);
973		pcie_writereg(sih, pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w);
974
975		pcie_war_serdes(pi);
976
977		pcie_war_aspm_clkreq(pi);
978	} else if (pi->sih->buscorerev == 7)
979		pcie_war_noplldown(pi);
980
981	/* Note that the fix is actually in the SROM, that's why this is open-ended */
982	if (pi->sih->buscorerev >= 6)
983		pcie_misc_config_fixup(pi);
984}
985
986void
987pcie_war_ovr_aspm_update(void *pch, uint8 aspm)
988{
989	pcicore_info_t *pi = (pcicore_info_t *)pch;
990
991	if (!PCIE_GEN1(pi->sih))
992		return;
993
994	if (!PCIEGEN1_ASPM(pi->sih))
995		return;
996
997	/* Validate */
998	if (aspm > PCIE_ASPM_ENAB)
999		return;
1000
1001	pi->pcie_war_aspm_ovr = aspm;
1002
1003	/* Update the current state */
1004	pcie_war_aspm_clkreq(pi);
1005}
1006
1007
1008void
1009pcie_power_save_enable(void *pch, bool enable)
1010{
1011	pcicore_info_t *pi = (pcicore_info_t *)pch;
1012
1013
1014	if (!pi)
1015		return;
1016
1017	pi->pcie_power_save = enable;
1018}
1019
1020static void
1021pcie_power_save_upd(pcicore_info_t *pi, bool up)
1022{
1023	si_t *sih = pi->sih;
1024
1025	if (!pi->pcie_power_save)
1026		return;
1027
1028
1029	if ((sih->buscorerev >= 15) && (sih->buscorerev <= 20)) {
1030
1031		pcicore_pcieserdesreg(pi, MDIO_DEV_BLK1, BLK1_PWR_MGMT1, 1, 0x7F64);
1032
1033		if (up)
1034			pcicore_pcieserdesreg(pi, MDIO_DEV_BLK1, BLK1_PWR_MGMT3, 1, 0x74);
1035		else
1036			pcicore_pcieserdesreg(pi, MDIO_DEV_BLK1, BLK1_PWR_MGMT3, 1, 0x7C);
1037
1038	} else if ((sih->buscorerev >= 21) && (sih->buscorerev <= 22)) {
1039
1040		pcicore_pcieserdesreg(pi, MDIO_DEV_BLK1, BLK1_PWR_MGMT1, 1, 0x7E65);
1041
1042		if (up)
1043			pcicore_pcieserdesreg(pi, MDIO_DEV_BLK1, BLK1_PWR_MGMT3, 1, 0x175);
1044		else
1045			pcicore_pcieserdesreg(pi, MDIO_DEV_BLK1, BLK1_PWR_MGMT3, 1, 0x17D);
1046	}
1047}
1048
1049void
1050pcie_set_request_size(void *pch, uint16 size)
1051{
1052	pcicore_info_t *pi = (pcicore_info_t *)pch;
1053	si_t *sih;
1054
1055	if (!pi)
1056		return;
1057
1058	sih = pi->sih;
1059
1060	if (size == 128)
1061		pi->pcie_reqsize = PCIE_CAP_DEVCTRL_MRRS_128B;
1062	else if (size == 256)
1063		pi->pcie_reqsize = PCIE_CAP_DEVCTRL_MRRS_256B;
1064	else if (size == 512)
1065		pi->pcie_reqsize = PCIE_CAP_DEVCTRL_MRRS_512B;
1066	else if (size == 1024)
1067		pi->pcie_reqsize = PCIE_CAP_DEVCTRL_MRRS_1024B;
1068	else
1069		return;
1070
1071	if (PCIE_GEN1(sih)) {
1072		if (pi->sih->buscorerev == 18 || pi->sih->buscorerev == 19)
1073			pcie_devcontrol_mrrs(pi, PCIE_CAP_DEVCTRL_MRRS_MASK,
1074				(uint32)pi->pcie_reqsize);
1075	}
1076	else if (PCIE_GEN2(sih)) {
1077		pcie_devcontrol_mrrs(pi, PCIE_CAP_DEVCTRL_MRRS_MASK, (uint32)pi->pcie_reqsize);
1078	}
1079	else
1080		ASSERT(0);
1081}
1082
1083uint16
1084pcie_get_request_size(void *pch)
1085{
1086	pcicore_info_t *pi = (pcicore_info_t *)pch;
1087
1088	if (!pi)
1089		return (0);
1090
1091	if (pi->pcie_reqsize == PCIE_CAP_DEVCTRL_MRRS_128B)
1092		return (128);
1093	else if (pi->pcie_reqsize == PCIE_CAP_DEVCTRL_MRRS_256B)
1094		return (256);
	else if (pi->pcie_reqsize == PCIE_CAP_DEVCTRL_MRRS_512B)
		return (512);
	else if (pi->pcie_reqsize == PCIE_CAP_DEVCTRL_MRRS_1024B)
		return (1024);
	return (0);
1098}
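/*
 * Usage sketch (illustrative): the requested size is cached in pcicore_info_t and written
 * to the DevControl register when the core comes up (see pcicore_up()); sizes other than
 * 128/256/512/1024 bytes are ignored.
 *
 *	pcie_set_request_size(pch, 512);
 *	ASSERT(pcie_get_request_size(pch) == 512);
 */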
1099
1100void
1101pcie_set_maxpayload_size(void *pch, uint16 size)
1102{
1103	pcicore_info_t *pi = (pcicore_info_t *)pch;
1104
1105	if (!pi)
1106		return;
1107
1108	if (size == 128)
1109		pi->pcie_mps = PCIE_CAP_DEVCTRL_MPS_128B;
1110	else if (size == 256)
1111		pi->pcie_mps = PCIE_CAP_DEVCTRL_MPS_256B;
1112	else if (size == 512)
1113		pi->pcie_mps = PCIE_CAP_DEVCTRL_MPS_512B;
1114	else if (size == 1024)
1115		pi->pcie_mps = PCIE_CAP_DEVCTRL_MPS_1024B;
1116	else
1117		return;
1118
1119	pcie_devcontrol_mps(pi, PCIE_CAP_DEVCTRL_MPS_MASK, (uint32)pi->pcie_mps);
1120}
1121
1122uint16
1123pcie_get_maxpayload_size(void *pch)
1124{
1125	pcicore_info_t *pi = (pcicore_info_t *)pch;
1126
1127	if (!pi)
1128		return (0);
1129
1130	if (pi->pcie_mps == PCIE_CAP_DEVCTRL_MPS_128B)
1131		return (128);
1132	else if (pi->pcie_mps == PCIE_CAP_DEVCTRL_MPS_256B)
1133		return (256);
1134	else if (pi->pcie_mps == PCIE_CAP_DEVCTRL_MPS_512B)
1135		return (512);
1136	else if (pi->pcie_mps == PCIE_CAP_DEVCTRL_MPS_1024B)
1137		return (1024);
1138	return (0);
1139}
1140
1141void
1142pcie_disable_TL_clk_gating(void *pch)
1143{
1144	/* disable TL clk gating is located in bit 4 of PCIEControl (Offset 0x000) */
1145	pcicore_info_t *pi = (pcicore_info_t *)pch;
1146	si_t *sih = pi->sih;
1147
1148	if (!PCIE_GEN1(sih) && !PCIE_GEN2(sih))
1149		return;
1150
1151	si_corereg(sih, sih->buscoreidx, 0, 0x10, 0x10);
1152}
1153
1154void
1155pcie_set_L1_entry_time(void *pch, uint32 val)
1156{
1157	/* L1 entry time is located in bits [22:16] of register 0x1004 (pdl_control_1) */
1158	pcicore_info_t *pi = (pcicore_info_t *)pch;
1159	si_t *sih = pi->sih;
1160	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
1161	uint32 data;
1162
1163	if (!PCIE_GEN1(sih) && !PCIE_GEN2(sih))
1164		return;
1165
1166	if (val > 0x7F)
1167		return;
1168
1169	data = pcie_readreg(sih, pcieregs, PCIE_CONFIGREGS, PCIECFGREG_PDL_CTRL1);
	pcie_writereg(sih, pcieregs, PCIE_CONFIGREGS,
		PCIECFGREG_PDL_CTRL1, (data & ~0x7F0000) | (val << 16));
1172}
1173
1174/** mode : 0 -- reset, 1 -- tx, 2 -- rx */
1175void
1176pcie_set_error_injection(void *pch, uint32 mode)
1177{
1178	/* through reg_phy_ctl_7 - 0x181c */
1179	pcicore_info_t *pi = (pcicore_info_t *)pch;
1180	si_t *sih = pi->sih;
1181	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
1182
1183	if (!PCIE_GEN1(sih) && !PCIE_GEN2(sih))
1184		return;
1185
	if (mode == 0)
		pcie_writereg(sih, pcieregs, PCIE_CONFIGREGS, PCIECFGREG_REG_PHY_CTL7, 0);
	else if (mode == 1)
		pcie_writereg(sih, pcieregs, PCIE_CONFIGREGS, PCIECFGREG_REG_PHY_CTL7, 0x14031);
	else
		pcie_writereg(sih, pcieregs, PCIE_CONFIGREGS, PCIECFGREG_REG_PHY_CTL7, 0x2c031);
1192}
1193
1194void
1195pcie_set_L1substate(void *pch, uint32 substate)
1196{
1197	pcicore_info_t *pi = (pcicore_info_t *)pch;
1198	si_t *sih = pi->sih;
1199	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
1200	uint32 data;
1201
1202	ASSERT(PCIE_GEN2(sih));
1203	ASSERT(substate <= 3);
1204
1205	if (substate != 0) {
1206		/* turn on ASPM L1 */
1207		data = pcie_readreg(sih, pcieregs, PCIE_CONFIGREGS, pi->pciecap_lcreg_offset);
1208		pcie_writereg(sih, pcieregs, PCIE_CONFIGREGS, pi->pciecap_lcreg_offset, data | 2);
1209
1210		/* enable LTR */
1211		pcie_ltrenable(pch, 1, 1);
1212	}
1213
1214	/* PML1_sub_control1 can only be accessed by OSL_PCI_xxxx_CONFIG */
1215	data = OSL_PCI_READ_CONFIG(pi->osh, PCIECFGREG_PML1_SUB_CTRL1, sizeof(uint32)) & 0xfffffff0;
1216
1217	/* JIRA:SWWLAN-28455 */
1218	if (substate & 1)
1219		data |= PCI_PM_L1_2_ENA_MASK | ASPM_L1_2_ENA_MASK;
1220
1221	if (substate & 2)
1222		data |= PCI_PM_L1_1_ENA_MASK | ASPM_L1_1_ENA_MASK;
1223
1224	OSL_PCI_WRITE_CONFIG(pi->osh, PCIECFGREG_PML1_SUB_CTRL1, sizeof(uint32), data);
1225}
1226
1227uint32
1228pcie_get_L1substate(void *pch)
1229{
1230	pcicore_info_t *pi = (pcicore_info_t *)pch;
1231	si_t *sih = pi->sih;
1232	uint32 data, substate = 0;
1233
1234	ASSERT(PCIE_GEN2(sih));
1235	UNUSED_PARAMETER(sih);
1236
1237	data = OSL_PCI_READ_CONFIG(pi->osh, PCIECFGREG_PML1_SUB_CTRL1, sizeof(uint32));
1238
1239	/* JIRA:SWWLAN-28455 */
1240	if (data & (PCI_PM_L1_2_ENA_MASK | ASPM_L1_2_ENA_MASK))
1241		substate |= 1;
1242
1243	if (data & (PCI_PM_L1_1_ENA_MASK | ASPM_L1_1_ENA_MASK))
1244		substate |= 2;
1245
1246	return substate;
1247}
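/*
 * Usage sketch (illustrative, GEN2 cores only): request both L1.1 and L1.2; as a side
 * effect this also enables ASPM L1 and LTR (see pcie_set_L1substate() above).
 *
 *	pcie_set_L1substate(pch, 3);
 *	ASSERT(pcie_get_L1substate(pch) == 3);
 */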
1248
1249/* ***** Functions called during driver state changes ***** */
1250void
1251BCMATTACHFN(pcicore_attach)(void *pch, char *pvars, int state)
1252{
1253	pcicore_info_t *pi = (pcicore_info_t *)pch;
1254	si_t *sih = pi->sih;
1255
1256	if (!PCIE_GEN1(sih)) {
1257		if ((BCM4360_CHIP_ID == CHIPID(sih->chip)) ||
1258		    (BCM43460_CHIP_ID == CHIPID(sih->chip)) ||
1259		    (BCM4350_CHIP_ID == CHIPID(sih->chip)) ||
1260		    (BCM4352_CHIP_ID == CHIPID(sih->chip)) ||
1261		    (BCM4335_CHIP_ID == CHIPID(sih->chip)))
1262			pi->pcie_reqsize = PCIE_CAP_DEVCTRL_MRRS_1024B;
1263		return;
1264	}
1265
1266	if (PCIEGEN1_ASPM(sih)) {
1267		if (((sih->boardvendor == VENDOR_APPLE) &&
1268		     ((uint8)getintvar(pvars, "sromrev") == 4) &&
1269		     ((uint8)getintvar(pvars, "boardrev") <= 0x71)) ||
1270		    ((uint32)getintvar(pvars, "boardflags2") & BFL2_PCIEWAR_OVR)) {
1271			pi->pcie_war_aspm_ovr = PCIE_ASPM_DISAB;
1272		} else {
1273			pi->pcie_war_aspm_ovr = PCIE_ASPM_ENAB;
1274		}
1275	}
1276
1277	pi->pcie_reqsize = PCIE_CAP_DEVCTRL_MRRS_128B;
1278	if (BCM4331_CHIP_ID == CHIPID(sih->chip))
1279	    pi->pcie_reqsize = PCIE_CAP_DEVCTRL_MRRS_512B;
1280
1281	bzero(pi->pcie_configspace, PCI_CONFIG_SPACE_SIZE);
1282
1283	/* These need to happen in this order only */
1284	pcie_war_polarity(pi);
1285
1286	pcie_war_serdes(pi);
1287
1288	pcie_war_aspm_clkreq(pi);
1289
1290	pcie_clkreq_upd(pi, state);
1291
1292	pcie_war_pmebits(pi);
1293
1294	/* Alter default TX drive strength setting */
1295	if (sih->boardvendor == VENDOR_APPLE) {
1296		if (sih->boardtype == 0x8d)
1297			/* change the TX drive strength to max */
1298			pcicore_pcieserdesreg(pch, MDIO_DEV_TXCTRL0, 0x18, 0xff, 0x7f);
1299		else if (PCIE_DRIVE_STRENGTH_OVERRIDE(sih))
1300			/* change the drive strength to 700mv */
1301			pcicore_pcieserdesreg(pch, MDIO_DEV_TXCTRL0, 0x18, 0xff, 0x70);
1302	}
1303}
1304
1305void
1306pcicore_hwup(void *pch)
1307{
1308	pcicore_info_t *pi = (pcicore_info_t *)pch;
1309
1310	if (!pi || !PCIE_GEN1(pi->sih))
1311		return;
1312
1313	pcie_power_save_upd(pi, TRUE);
1314
1315	if (pi->sih->boardtype == CB2_4321_BOARD || pi->sih->boardtype == CB2_4321_AG_BOARD)
1316		pcicore_fixlatencytimer(pch, 0x20);
1317
1318	pcie_war_pci_setup(pi);
1319
1320	/* Alter default TX drive strength setting */
1321	if (pi->sih->boardvendor == VENDOR_APPLE) {
1322		if (pi->sih->boardtype == 0x8d)
1323			/* change the TX drive strength to max */
1324			pcicore_pcieserdesreg(pch, MDIO_DEV_TXCTRL0, 0x18, 0xff, 0x7f);
1325		else if (PCIE_DRIVE_STRENGTH_OVERRIDE(pi->sih))
1326			/* change the drive strength to 700mv */
1327			pcicore_pcieserdesreg(pch, MDIO_DEV_TXCTRL0, 0x18, 0xff, 0x70);
1328	}
1329}
1330
1331void
1332pcicore_up(void *pch, int state)
1333{
1334	pcicore_info_t *pi = (pcicore_info_t *)pch;
1335
1336	if (!pi)
1337		return;
1338
1339	if (PCIE_GEN2(pi->sih)) {
1340		pcie_devcontrol_mrrs(pi, PCIE_CAP_DEVCTRL_MRRS_MASK, pi->pcie_reqsize);
1341		return;
1342	}
1343
1344	pcie_power_save_upd(pi, TRUE);
1345
1346	/* Restore L1 timer for better performance */
1347	pcie_extendL1timer(pi, TRUE);
1348
1349	pcie_clkreq_upd(pi, state);
1350
	if (pi->sih->buscorerev == 18 ||
	    (pi->sih->buscorerev == 19 && !PCIE_MRRS_OVERRIDE(pi->sih)))
1353		pi->pcie_reqsize = PCIE_CAP_DEVCTRL_MRRS_128B;
1354
1355	pcie_devcontrol_mrrs(pi, PCIE_CAP_DEVCTRL_MRRS_MASK, pi->pcie_reqsize);
1356}
1357
/** Called when the device is going to enter the D3 state (or the system is going to enter S3/S4 states) */
1359void
1360pcicore_sleep(void *pch)
1361{
1362	pcicore_info_t *pi = (pcicore_info_t *)pch;
1363	uint32 w;
1364
1365	if (!pi || !PCIE_GEN1(pi->sih))
1366		return;
1367
1368	pcie_power_save_upd(pi, FALSE);
1369
1370
1371	if (!PCIEGEN1_ASPM(pi->sih))
1372		return;
1373
1374
1375	w = OSL_PCI_READ_CONFIG(pi->osh, pi->pciecap_lcreg_offset, sizeof(uint32));
1376	w &= ~PCIE_CAP_LCREG_ASPML1;
1377	OSL_PCI_WRITE_CONFIG(pi->osh, pi->pciecap_lcreg_offset, sizeof(uint32), w);
1378
1379
1380	pi->pcie_pr42767 = FALSE;
1381}
1382
1383void
1384pcicore_down(void *pch, int state)
1385{
1386	pcicore_info_t *pi = (pcicore_info_t *)pch;
1387
1388	if (!pi || !PCIE_GEN1(pi->sih))
1389		return;
1390
1391	pcie_clkreq_upd(pi, state);
1392
1393	/* Reduce L1 timer for better power savings */
1394	pcie_extendL1timer(pi, FALSE);
1395
1396	pcie_power_save_upd(pi, FALSE);
1397}
1398
1399/* ***** Wake-on-wireless-LAN (WOWL) support functions ***** */
1400/** Just uses PCI config accesses to find out, when needed before sb_attach is done */
1401bool
1402pcicore_pmecap_fast(osl_t *osh)
1403{
1404	uint8 cap_ptr;
1405	uint32 pmecap;
1406
1407	cap_ptr = pcicore_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID, NULL, NULL);
1408
1409	if (!cap_ptr)
1410		return FALSE;
1411
1412	pmecap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32));
1413
1414	return ((pmecap & PME_CAP_PM_STATES) != 0);
1415}
1416
1417/**
1418 * return TRUE if PM capability exists in the pci config space
1419 * Uses and caches the information using core handle
1420 */
1421static bool
1422pcicore_pmecap(pcicore_info_t *pi)
1423{
1424	uint8 cap_ptr;
1425	uint32 pmecap;
1426	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	uint16 *reg16;
1428
1429	if (!pi->pmecap_offset) {
1430		cap_ptr = pcicore_find_pci_capability(pi->osh, PCI_CAP_POWERMGMTCAP_ID, NULL, NULL);
1431		if (!cap_ptr)
1432			return FALSE;
1433
1434		pi->pmecap_offset = cap_ptr;
1435
1436		reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV8];
1437		pi->pmebits = R_REG(pi->osh, reg16);
1438
1439		pmecap = OSL_PCI_READ_CONFIG(pi->osh, pi->pmecap_offset, sizeof(uint32));
1440
1441		/* At least one state can generate PME */
1442		pi->pmecap = (pmecap & PME_CAP_PM_STATES) != 0;
1443	}
1444
1445	return (pi->pmecap);
1446}
1447
1448/** Enable PME generation */
1449void
1450pcicore_pmeen(void *pch)
1451{
1452	pcicore_info_t *pi = (pcicore_info_t *)pch;
1453	uint32 w;
1454
1455	/* if not pmecapable return */
1456	if (!pcicore_pmecap(pi))
1457		return;
1458
1459	pcie_war_pmebits(pi);
1460
1461	w = OSL_PCI_READ_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32));
1462	w |= (PME_CSR_PME_EN);
1463	OSL_PCI_WRITE_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32), w);
1464}
1465
1466/** Return TRUE if PME status set */
1467bool
1468pcicore_pmestat(void *pch)
1469{
1470	pcicore_info_t *pi = (pcicore_info_t *)pch;
1471	uint32 w;
1472
1473	if (!pcicore_pmecap(pi))
1474		return FALSE;
1475
1476	w = OSL_PCI_READ_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32));
1477
1478	return (w & PME_CSR_PME_STAT) == PME_CSR_PME_STAT;
1479}
1480
1481void
1482pcicore_pmestatclr(void *pch)
1483{
1484	pcicore_info_t *pi = (pcicore_info_t *)pch;
1485	uint32 w;
1486
1487	if (!pcicore_pmecap(pi))
1488		return;
1489
1490	pcie_war_pmebits(pi);
1491	w = OSL_PCI_READ_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32));
1492
1493	PCI_ERROR(("pcicore_pmestatclr PMECSR : 0x%x\n", w));
1494
1495	/* Writing a 1 to PMESTAT will clear it */
1496	if ((w & PME_CSR_PME_STAT) == PME_CSR_PME_STAT) {
1497		OSL_PCI_WRITE_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32),
1498			w);
1499	}
1500}
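/*
 * Hypothetical WOWL flow (illustrative): arm PME before entering the low power state,
 * then on resume check and clear the status before disabling PME generation again.
 *
 *	pcicore_pmeen(pch);			// before suspend
 *	...
 *	if (pcicore_pmestat(pch))		// after resume: did the device assert PME?
 *		pcicore_pmestatclr(pch);
 *	pcicore_pmeclr(pch);			// see below: also disables PME generation
 */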
1501
1502/** Disable PME generation, clear the PME status bit if set */
1503void
1504pcicore_pmeclr(void *pch)
1505{
1506	pcicore_info_t *pi = (pcicore_info_t *)pch;
1507	uint32 w;
1508
1509	if (!pcicore_pmecap(pi))
1510		return;
1511
1512	pcie_war_pmebits(pi);
1513
1514	w = OSL_PCI_READ_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32));
1515
1516	PCI_ERROR(("pcicore_pci_pmeclr PMECSR : 0x%x\n", w));
1517
	/* disable PME generation; PME_STAT is write-1-to-clear, so writing w back also clears it if set */
	w &= ~(PME_CSR_PME_EN);
1520
1521	OSL_PCI_WRITE_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET, sizeof(uint32), w);
1522}
1523
1524static void
1525pcicore_fixlatencytimer(pcicore_info_t* pch, uint8 timer_val)
1526{
1527	pcicore_info_t *pi = (pcicore_info_t *)pch;
1528	osl_t *osh;
1529	uint8 lattim;
1530
1531	osh = pi->osh;
1532	lattim = read_pci_cfg_byte(PCI_CFG_LATTIM);
1533
1534	if (!lattim) {
1535		PCI_ERROR(("%s: Modifying PCI_CFG_LATTIM from 0x%x to 0x%x\n",
1536		           __FUNCTION__, lattim, timer_val));
1537		write_pci_cfg_byte(PCI_CFG_LATTIM, timer_val);
1538	}
1539}
1540
1541uint32
1542pcie_lcreg(void *pch, uint32 mask, uint32 val)
1543{
1544	pcicore_info_t *pi = (pcicore_info_t *)pch;
1545	uint8 offset;
1546
1547	offset = pi->pciecap_lcreg_offset;
1548	if (!offset)
1549		return 0;
1550
1551	/* set operation */
1552	if (mask)
1553		OSL_PCI_WRITE_CONFIG(pi->osh, offset, sizeof(uint32), val);
1554
1555	return OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
1556}
1557
1558#ifdef BCMDBG
1559void
1560pcicore_dump(void *pch, struct bcmstrbuf *b)
1561{
1562	pcicore_info_t *pi = (pcicore_info_t *)pch;
1563
1564	bcm_bprintf(b, "FORCEHT %d pcie_polarity 0x%x pcie_aspm_ovr 0x%x\n",
1565	            pi->sih->pci_pr32414, pi->pcie_polarity, pi->pcie_war_aspm_ovr);
1566}
1567#endif /* BCMDBG */
1568
1569uint32
1570pcicore_pciereg(void *pch, uint32 offset, uint32 mask, uint32 val, uint type)
1571{
1572	uint32 reg_val = 0;
1573	pcicore_info_t *pi = (pcicore_info_t *)pch;
1574	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
1575
1576	if (mask) {
1577		PCI_ERROR(("PCIEREG: 0x%x writeval  0x%x\n", offset, val));
1578		pcie_writereg(pi->sih, pcieregs, type, offset, val);
1579	}
1580
1581	/* Should not read register 0x154 */
1582	if (PCIE_GEN1(pi->sih) &&
1583		pi->sih->buscorerev <= 5 && offset == PCIE_DLLP_PCIE11 && type == PCIE_PCIEREGS)
1584		return reg_val;
1585
1586	reg_val = pcie_readreg(pi->sih, pcieregs, type, offset);
1587	PCI_ERROR(("PCIEREG: 0x%x readval is 0x%x\n", offset, reg_val));
1588
1589	return reg_val;
1590}
1591
1592uint32
1593pcicore_pcieserdesreg(void *pch, uint32 mdioslave, uint32 offset, uint32 mask, uint32 val)
1594{
1595	uint32 reg_val = 0;
1596	pcicore_info_t *pi = (pcicore_info_t *)pch;
1597
1598	if (mask) {
1599		pcie_mdiowrite(pi, mdioslave, offset, val);
1600	}
1601
1602	if (pcie_mdioread(pi, mdioslave, offset, &reg_val))
1603		reg_val = 0xFFFFFFFF;
1604
1605	return reg_val;
1606}
1607
1608uint16
1609pcie_get_ssid(void* pch)
1610{
1611	uint32 ssid =
1612		OSL_PCI_READ_CONFIG(((pcicore_info_t *)pch)->osh, PCI_CFG_SVID, sizeof(uint32));
1613	return (uint16)(ssid >> 16);
1614}
1615
1616uint32
1617pcie_get_bar0(void* pch)
1618{
1619	return OSL_PCI_READ_CONFIG(((pcicore_info_t *)pch)->osh, PCI_CFG_BAR0, sizeof(uint32));
1620}
1621
1622int
1623pcie_configspace_cache(void* pch)
1624{
1625	pcicore_info_t *pi = (pcicore_info_t *)pch;
1626	uint offset = 0;
1627	uint32 *tmp = (uint32 *)pi->pcie_configspace;
1628
1629	while (offset < PCI_CONFIG_SPACE_SIZE) {
1630		*tmp++ = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
1631		offset += 4;
1632	}
1633	return 0;
1634}
1635
1636int
1637pcie_configspace_restore(void* pch)
1638{
1639	pcicore_info_t *pi = (pcicore_info_t *)pch;
1640	uint offset = 0;
1641	uint32 *tmp = (uint32 *)pi->pcie_configspace;
1642
	/* if config space was not buffered, then abort restore */
1644	if (*tmp == 0)
1645		return -1;
1646
1647	while (offset < PCI_CONFIG_SPACE_SIZE) {
1648		OSL_PCI_WRITE_CONFIG(pi->osh, offset, sizeof(uint32), *tmp);
1649		tmp++;
1650		offset += 4;
1651	}
1652	return 0;
1653}
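/*
 * Usage sketch (illustrative): snapshot the config space before an operation that may
 * reset the core and write it back afterwards; restore refuses to run if nothing was
 * cached (the first dword would still be 0).
 *
 *	pcie_configspace_cache(pch);
 *	...				// core reset / deep sleep
 *	if (pcie_configspace_restore(pch) != 0)
 *		PCI_ERROR(("config space was never cached\n"));
 */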
1654
1655int
1656pcie_configspace_get(void* pch, uint8 *buf, uint size)
1657{
1658	pcicore_info_t *pi = (pcicore_info_t *)pch;
1659	memcpy(buf, pi->pcie_configspace, size);
1660	return 0;
1661}
1662
1663uint32
1664pcie_get_link_speed(void* pch)
1665{
1666	pcicore_info_t *pi = (pcicore_info_t *)pch;
1667	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
1668	uint32 data;
1669
1670	data = pcie_readreg(pi->sih, pcieregs, PCIE_CONFIGREGS, pi->pciecap_lcreg_offset);
1671	return (data & PCIE_LINKSPEED_MASK) >> PCIE_LINKSPEED_SHIFT;
1672}
1673
1674uint32
1675pcie_survive_perst(void* pch, uint32 mask, uint32 val)
1676{
1677#ifdef SURVIVE_PERST_ENAB
1678	pcicore_info_t *pi = (pcicore_info_t *)pch;
1679	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
1680	uint32 w;
1681
1682	/* mask and set */
1683	if (mask || val) {
1684		w = (R_REG(pi->osh, (&pcieregs->control)) & ~mask) | val;
1685		W_REG(pi->osh, (&pcieregs->control), w);
1686	}
1687	/* readback */
1688	return R_REG(pi->osh, (&pcieregs->control));
1689#else
1690	return 0;
1691#endif /* SURVIVE_PERST_ENAB */
1692}
1693
1694
1695#if defined(WLTEST) || defined(BCMDBG)
1696/* Dump PCIE Info */
1697int
1698pcicore_dump_pcieinfo(void *pch, struct bcmstrbuf *b)
1699{
1700	pcicore_info_t *pi = (pcicore_info_t *)pch;
1701
1702	if (!PCIE_GEN1(pi->sih) && !PCIE_GEN2(pi->sih))
1703		return BCME_ERROR;
1704
1705	bcm_bprintf(b, "PCIE link speed: %d\n", pcie_get_link_speed(pch));
1706	return 0;
1707}
1708#endif
1709