1/*
2 * Intel e752x Memory Controller kernel module
3 * (C) 2004 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * See "enum e752x_chips" below for supported chipsets
8 *
9 * Written by Tom Zimmerman
10 *
11 * Contributors:
12 * 	Thayne Harbaugh at realmsys.com (?)
13 * 	Wang Zhenyu at intel.com
14 * 	Dave Jiang at mvista.com
15 *
16 * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 Exp $
17 *
18 */
19
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/pci.h>
23#include <linux/pci_ids.h>
24#include <linux/edac.h>
25#include "edac_core.h"
26
27#define E752X_REVISION	" Ver: 2.0.2 " __DATE__
28#define EDAC_MOD_STR	"e752x_edac"
29
30static int report_non_memory_errors;
31static int force_function_unhide;
32static int sysbus_parity = -1;
33
34static struct edac_pci_ctl_info *e752x_pci;
35
36#define e752x_printk(level, fmt, arg...) \
37	edac_printk(level, "e752x", fmt, ##arg)
38
39#define e752x_mc_printk(mci, level, fmt, arg...) \
40	edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg)
41
42#ifndef PCI_DEVICE_ID_INTEL_7520_0
43#define PCI_DEVICE_ID_INTEL_7520_0      0x3590
44#endif				/* PCI_DEVICE_ID_INTEL_7520_0      */
45
46#ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR
47#define PCI_DEVICE_ID_INTEL_7520_1_ERR  0x3591
48#endif				/* PCI_DEVICE_ID_INTEL_7520_1_ERR  */
49
50#ifndef PCI_DEVICE_ID_INTEL_7525_0
51#define PCI_DEVICE_ID_INTEL_7525_0      0x359E
52#endif				/* PCI_DEVICE_ID_INTEL_7525_0      */
53
54#ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR
55#define PCI_DEVICE_ID_INTEL_7525_1_ERR  0x3593
56#endif				/* PCI_DEVICE_ID_INTEL_7525_1_ERR  */
57
58#ifndef PCI_DEVICE_ID_INTEL_7320_0
59#define PCI_DEVICE_ID_INTEL_7320_0	0x3592
60#endif				/* PCI_DEVICE_ID_INTEL_7320_0 */
61
62#ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR
63#define PCI_DEVICE_ID_INTEL_7320_1_ERR	0x3593
64#endif				/* PCI_DEVICE_ID_INTEL_7320_1_ERR */
65
66#ifndef PCI_DEVICE_ID_INTEL_3100_0
67#define PCI_DEVICE_ID_INTEL_3100_0	0x35B0
68#endif				/* PCI_DEVICE_ID_INTEL_3100_0 */
69
70#ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR
71#define PCI_DEVICE_ID_INTEL_3100_1_ERR	0x35B1
72#endif				/* PCI_DEVICE_ID_INTEL_3100_1_ERR */
73
74#define E752X_NR_CSROWS		8	/* number of csrows */
75
76/* E752X register addresses - device 0 function 0 */
77#define E752X_MCHSCRB		0x52	/* Memory Scrub register (16b) */
78					/*
79					 * 6:5     Scrub Completion Count
80					 * 3:2     Scrub Rate (i3100 only)
81					 *      01=fast 10=normal
82					 * 1:0     Scrub Mode enable
83					 *      00=off 10=on
84					 */
85#define E752X_DRB		0x60	/* DRAM row boundary register (8b) */
86#define E752X_DRA		0x70	/* DRAM row attribute register (8b) */
87					/*
88					 * 31:30   Device width row 7
89					 *      01=x8 10=x4 11=x8 DDR2
90					 * 27:26   Device width row 6
91					 * 23:22   Device width row 5
					 * 19:18   Device width row 4
93					 * 15:14   Device width row 3
94					 * 11:10   Device width row 2
95					 *  7:6    Device width row 1
96					 *  3:2    Device width row 0
97					 */
98#define E752X_DRC		0x7C	/* DRAM controller mode reg (32b) */
99					/*
100					 * 22    Number channels 0=1,1=2
101					 * 19:18 DRB Granularity 32/64MB
102					 */
103#define E752X_DRM		0x80	/* Dimm mapping register */
104#define E752X_DDRCSR		0x9A	/* DDR control and status reg (16b) */
105					/*
106					 * 14:12 1 single A, 2 single B, 3 dual
107					 */
108#define E752X_TOLM		0xC4	/* DRAM top of low memory reg (16b) */
109#define E752X_REMAPBASE		0xC6	/* DRAM remap base address reg (16b) */
110#define E752X_REMAPLIMIT	0xC8	/* DRAM remap limit address reg (16b) */
111#define E752X_REMAPOFFSET	0xCA	/* DRAM remap limit offset reg (16b) */
112
113/* E752X register addresses - device 0 function 1 */
114#define E752X_FERR_GLOBAL	0x40	/* Global first error register (32b) */
115#define E752X_NERR_GLOBAL	0x44	/* Global next error register (32b) */
116#define E752X_HI_FERR		0x50	/* Hub interface first error reg (8b) */
117#define E752X_HI_NERR		0x52	/* Hub interface next error reg (8b) */
118#define E752X_HI_ERRMASK	0x54	/* Hub interface error mask reg (8b) */
119#define E752X_HI_SMICMD		0x5A	/* Hub interface SMI command reg (8b) */
#define E752X_SYSBUS_FERR	0x60	/* System bus first error reg (16b) */
#define E752X_SYSBUS_NERR	0x62	/* System bus next error reg (16b) */
#define E752X_SYSBUS_ERRMASK	0x64	/* System bus error mask reg (16b) */
#define E752X_SYSBUS_SMICMD	0x6A	/* System bus SMI command reg (16b) */
124#define E752X_BUF_FERR		0x70	/* Memory buffer first error reg (8b) */
125#define E752X_BUF_NERR		0x72	/* Memory buffer next error reg (8b) */
126#define E752X_BUF_ERRMASK	0x74	/* Memory buffer error mask reg (8b) */
127#define E752X_BUF_SMICMD	0x7A	/* Memory buffer SMI cmd reg (8b) */
128#define E752X_DRAM_FERR		0x80	/* DRAM first error register (16b) */
129#define E752X_DRAM_NERR		0x82	/* DRAM next error register (16b) */
130#define E752X_DRAM_ERRMASK	0x84	/* DRAM error mask register (8b) */
131#define E752X_DRAM_SMICMD	0x8A	/* DRAM SMI command register (8b) */
132#define E752X_DRAM_RETR_ADD	0xAC	/* DRAM Retry address register (32b) */
133#define E752X_DRAM_SEC1_ADD	0xA0	/* DRAM first correctable memory */
134					/*     error address register (32b) */
135					/*
136					 * 31    Reserved
					 * 30:2  CE address (64 byte block 34:6)
138					 * 1     Reserved
139					 * 0     HiLoCS
140					 */
#define E752X_DRAM_SEC2_ADD	0xC8	/* DRAM second correctable memory */
142					/*     error address register (32b) */
143					/*
144					 * 31    Reserved
145					 * 30:2  CE address (64 byte block 34:6)
146					 * 1     Reserved
147					 * 0     HiLoCS
148					 */
149#define E752X_DRAM_DED_ADD	0xA4	/* DRAM first uncorrectable memory */
150					/*     error address register (32b) */
151					/*
152					 * 31    Reserved
153					 * 30:2  CE address (64 byte block 34:6)
154					 * 1     Reserved
155					 * 0     HiLoCS
156					 */
157#define E752X_DRAM_SCRB_ADD	0xA8	/* DRAM 1st uncorrectable scrub mem */
158					/*     error address register (32b) */
159					/*
160					 * 31    Reserved
					 * 30:2  CE address (64 byte block 34:6)
162					 * 1     Reserved
163					 * 0     HiLoCS
164					 */
165#define E752X_DRAM_SEC1_SYNDROME 0xC4	/* DRAM first correctable memory */
166					/*     error syndrome register (16b) */
167#define E752X_DRAM_SEC2_SYNDROME 0xC6	/* DRAM second correctable memory */
168					/*     error syndrome register (16b) */
169#define E752X_DEVPRES1		0xF4	/* Device Present 1 register (8b) */
170
171/* 3100 IMCH specific register addresses - device 0 function 1 */
172#define I3100_NSI_FERR		0x48	/* NSI first error reg (32b) */
173#define I3100_NSI_NERR		0x4C	/* NSI next error reg (32b) */
174#define I3100_NSI_SMICMD	0x54	/* NSI SMI command register (32b) */
175#define I3100_NSI_EMASK		0x90	/* NSI error mask register (32b) */
176
177/* ICH5R register addresses - device 30 function 0 */
178#define ICH5R_PCI_STAT		0x06	/* PCI status register (16b) */
179#define ICH5R_PCI_2ND_STAT	0x1E	/* PCI status secondary reg (16b) */
180#define ICH5R_PCI_BRIDGE_CTL	0x3E	/* PCI bridge control register (16b) */
181
182enum e752x_chips {
183	E7520 = 0,
184	E7525 = 1,
185	E7320 = 2,
186	I3100 = 3
187};
188
189struct e752x_pvt {
190	struct pci_dev *bridge_ck;
191	struct pci_dev *dev_d0f0;
192	struct pci_dev *dev_d0f1;
193	u32 tolm;
194	u32 remapbase;
195	u32 remaplimit;
196	int mc_symmetric;
197	u8 map[8];
198	int map_type;
199	const struct e752x_dev_info *dev_info;
200};
201
202struct e752x_dev_info {
203	u16 err_dev;
204	u16 ctl_dev;
205	const char *ctl_name;
206};
207
208struct e752x_error_info {
209	u32 ferr_global;
210	u32 nerr_global;
211	u32 nsi_ferr;	/* 3100 only */
212	u32 nsi_nerr;	/* 3100 only */
213	u8 hi_ferr;	/* all but 3100 */
214	u8 hi_nerr;	/* all but 3100 */
215	u16 sysbus_ferr;
216	u16 sysbus_nerr;
217	u8 buf_ferr;
218	u8 buf_nerr;
219	u16 dram_ferr;
220	u16 dram_nerr;
221	u32 dram_sec1_add;
222	u32 dram_sec2_add;
223	u16 dram_sec1_syndrome;
224	u16 dram_sec2_syndrome;
225	u32 dram_ded_add;
226	u32 dram_scrb_add;
227	u32 dram_retr_add;
228};
229
230static const struct e752x_dev_info e752x_devs[] = {
231	[E7520] = {
232		.err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
233		.ctl_dev = PCI_DEVICE_ID_INTEL_7520_0,
234		.ctl_name = "E7520"},
235	[E7525] = {
236		.err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
237		.ctl_dev = PCI_DEVICE_ID_INTEL_7525_0,
238		.ctl_name = "E7525"},
239	[E7320] = {
240		.err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
241		.ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
242		.ctl_name = "E7320"},
243	[I3100] = {
244		.err_dev = PCI_DEVICE_ID_INTEL_3100_1_ERR,
245		.ctl_dev = PCI_DEVICE_ID_INTEL_3100_0,
246		.ctl_name = "3100"},
247};
248
249/* Valid scrub rates for the e752x/3100 hardware memory scrubber. We
250 * map the scrubbing bandwidth to a hardware register value. The 'set'
251 * operation finds the 'matching or higher value'.  Note that scrubbing
252 * on the e752x can only be enabled/disabled.  The 3100 supports
253 * a normal and fast mode.
254 */
255
256#define SDRATE_EOT 0xFFFFFFFF
257
258struct scrubrate {
259	u32 bandwidth;	/* bandwidth consumed by scrubbing in bytes/sec */
260	u16 scrubval;	/* register value for scrub rate */
261};
262
263/* Rate below assumes same performance as i3100 using PC3200 DDR2 in
264 * normal mode.  e752x bridges don't support choosing normal or fast mode,
265 * so the scrubbing bandwidth value isn't all that important - scrubbing is
266 * either on or off.
267 */
268static const struct scrubrate scrubrates_e752x[] = {
269	{0,		0x00},	/* Scrubbing Off */
270	{500000,	0x02},	/* Scrubbing On */
271	{SDRATE_EOT,	0x00}	/* End of Table */
272};
273
274/* Fast mode: 2 GByte PC3200 DDR2 scrubbed in 33s = 63161283 bytes/s
275 * Normal mode: 125 (32000 / 256) times slower than fast mode.
276 */
277static const struct scrubrate scrubrates_i3100[] = {
278	{0,		0x00},	/* Scrubbing Off */
279	{500000,	0x0a},	/* Normal mode - 32k clocks */
280	{62500000,	0x06},	/* Fast mode - 256 clocks */
281	{SDRATE_EOT,	0x00}	/* End of Table */
282};
283
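/* Translate a CPU 4 KiB page number into a DRAM page number.  As coded
 * below: pages under the top of low memory (TOLM) and pages from 4 GiB
 * (0x100000 pages) up to the remap base map 1:1, while pages in the hole
 * between TOLM and 4 GiB are shifted up into the remap window.
 */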
284static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
285				unsigned long page)
286{
287	u32 remap;
288	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
289
290	debugf3("%s()\n", __func__);
291
292	if (page < pvt->tolm)
293		return page;
294
295	if ((page >= 0x100000) && (page < pvt->remapbase))
296		return page;
297
298	remap = (page - pvt->tolm) + pvt->remapbase;
299
300	if (remap < pvt->remaplimit)
301		return remap;
302
303	e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
304	return pvt->tolm - 1;
305}
306
307static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
308			u32 sec1_add, u16 sec1_syndrome)
309{
310	u32 page;
311	int row;
312	int channel;
313	int i;
314	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
315
316	debugf3("%s()\n", __func__);
317
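	/* Per the E752X_DRAM_SEC1_ADD layout above, the register holds DRAM
	 * address bits 34:6 in bits 30:2, i.e. roughly (address >> 4), so a
	 * further shift by (PAGE_SHIFT - 4) gives the 4 KiB page number.
	 */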
318	/* convert the addr to 4k page */
319	page = sec1_add >> (PAGE_SHIFT - 4);
320
321	if (pvt->mc_symmetric) {
322		/* chip select are bits 14 & 13 */
323		row = ((page >> 1) & 3);
324		e752x_printk(KERN_WARNING,
325			"Test row %d Table %d %d %d %d %d %d %d %d\n", row,
326			pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3],
327			pvt->map[4], pvt->map[5], pvt->map[6],
328			pvt->map[7]);
329
330		/* test for channel remapping */
331		for (i = 0; i < 8; i++) {
332			if (pvt->map[i] == row)
333				break;
334		}
335
336		e752x_printk(KERN_WARNING, "Test computed row %d\n", i);
337
338		if (i < 8)
339			row = i;
340		else
341			e752x_mc_printk(mci, KERN_WARNING,
342					"row %d not found in remap table\n",
343					row);
344	} else
345		row = edac_mc_find_csrow_by_page(mci, page);
346
347	/* 0 = channel A, 1 = channel B */
348	channel = !(error_one & 1);
349
350	/* e752x mc reads 34:6 of the DRAM linear address */
351	edac_mc_handle_ce(mci, page, offset_in_page(sec1_add << 4),
352			sec1_syndrome, row, channel, "e752x CE");
353}
354
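/* The process_*() wrappers below only record that an error was seen; the
 * full do_process_*() decode runs only when the caller sets handle_error.
 */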
355static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
356			u32 sec1_add, u16 sec1_syndrome, int *error_found,
357			int handle_error)
358{
359	*error_found = 1;
360
361	if (handle_error)
362		do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
363}
364
365static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
366			u32 ded_add, u32 scrb_add)
367{
368	u32 error_2b, block_page;
369	int row;
370	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
371
372	debugf3("%s()\n", __func__);
373
374	if (error_one & 0x0202) {
375		error_2b = ded_add;
376
377		/* convert to 4k address */
378		block_page = error_2b >> (PAGE_SHIFT - 4);
379
380		row = pvt->mc_symmetric ?
381		/* chip select are bits 14 & 13 */
382			((block_page >> 1) & 3) :
383			edac_mc_find_csrow_by_page(mci, block_page);
384
385		/* e752x mc reads 34:6 of the DRAM linear address */
386		edac_mc_handle_ue(mci, block_page,
387				offset_in_page(error_2b << 4),
388				row, "e752x UE from Read");
389	}
390	if (error_one & 0x0404) {
391		error_2b = scrb_add;
392
393		/* convert to 4k address */
394		block_page = error_2b >> (PAGE_SHIFT - 4);
395
396		row = pvt->mc_symmetric ?
397		/* chip select are bits 14 & 13 */
398			((block_page >> 1) & 3) :
399			edac_mc_find_csrow_by_page(mci, block_page);
400
401		/* e752x mc reads 34:6 of the DRAM linear address */
402		edac_mc_handle_ue(mci, block_page,
403				offset_in_page(error_2b << 4),
				row, "e752x UE from Scrubber");
405	}
406}
407
408static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
409			u32 ded_add, u32 scrb_add, int *error_found,
410			int handle_error)
411{
412	*error_found = 1;
413
414	if (handle_error)
415		do_process_ue(mci, error_one, ded_add, scrb_add);
416}
417
418static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
419					 int *error_found, int handle_error)
420{
421	*error_found = 1;
422
423	if (!handle_error)
424		return;
425
426	debugf3("%s()\n", __func__);
427	edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
428}
429
430static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
431				 u32 retry_add)
432{
433	u32 error_1b, page;
434	int row;
435	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
436
437	error_1b = retry_add;
438	page = error_1b >> (PAGE_SHIFT - 4);  /* convert the addr to 4k page */
439
440	/* chip select are bits 14 & 13 */
441	row = pvt->mc_symmetric ? ((page >> 1) & 3) :
442		edac_mc_find_csrow_by_page(mci, page);
443
444	e752x_mc_printk(mci, KERN_WARNING,
445			"CE page 0x%lx, row %d : Memory read retry\n",
446			(long unsigned int)page, row);
447}
448
449static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
450				u32 retry_add, int *error_found,
451				int handle_error)
452{
453	*error_found = 1;
454
455	if (handle_error)
456		do_process_ded_retry(mci, error, retry_add);
457}
458
459static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
460					int *error_found, int handle_error)
461{
462	*error_found = 1;
463
464	if (handle_error)
465		e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n");
466}
467
468static char *global_message[11] = {
469	"PCI Express C1",
470	"PCI Express C",
471	"PCI Express B1",
472	"PCI Express B",
473	"PCI Express A1",
474	"PCI Express A",
475	"DMA Controller",
476	"HUB or NS Interface",
477	"System Bus",
	"DRAM Controller",  /* index 9 == DRAM_ENTRY */
479	"Internal Buffer"
480};
481
482#define DRAM_ENTRY	9
483
484static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };
485
486static void do_global_error(int fatal, u32 errors)
487{
488	int i;
489
490	for (i = 0; i < 11; i++) {
491		if (errors & (1 << i)) {
492			/* If the error is from DRAM Controller OR
493			 * we are to report ALL errors, then
494			 * report the error
495			 */
496			if ((i == DRAM_ENTRY) || report_non_memory_errors)
497				e752x_printk(KERN_WARNING, "%sError %s\n",
498					fatal_message[fatal],
499					global_message[i]);
500		}
501	}
502}
503
504static inline void global_error(int fatal, u32 errors, int *error_found,
505				int handle_error)
506{
507	*error_found = 1;
508
509	if (handle_error)
510		do_global_error(fatal, errors);
511}
512
513static char *hub_message[7] = {
514	"HI Address or Command Parity", "HI Illegal Access",
515	"HI Internal Parity", "Out of Range Access",
516	"HI Data Parity", "Enhanced Config Access",
517	"Hub Interface Target Abort"
518};
519
520static void do_hub_error(int fatal, u8 errors)
521{
522	int i;
523
524	for (i = 0; i < 7; i++) {
525		if (errors & (1 << i))
526			e752x_printk(KERN_WARNING, "%sError %s\n",
527				fatal_message[fatal], hub_message[i]);
528	}
529}
530
531static inline void hub_error(int fatal, u8 errors, int *error_found,
532			int handle_error)
533{
534	*error_found = 1;
535
536	if (handle_error)
537		do_hub_error(fatal, errors);
538}
539
540#define NSI_FATAL_MASK		0x0c080081
541#define NSI_NON_FATAL_MASK	0x23a0ba64
542#define NSI_ERR_MASK		(NSI_FATAL_MASK | NSI_NON_FATAL_MASK)
543
544static char *nsi_message[30] = {
545	"NSI Link Down",	/* NSI_FERR/NSI_NERR bit 0, fatal error */
546	"",						/* reserved */
547	"NSI Parity Error",				/* bit 2, non-fatal */
548	"",						/* reserved */
549	"",						/* reserved */
550	"Correctable Error Message",			/* bit 5, non-fatal */
551	"Non-Fatal Error Message",			/* bit 6, non-fatal */
552	"Fatal Error Message",				/* bit 7, fatal */
553	"",						/* reserved */
554	"Receiver Error",				/* bit 9, non-fatal */
555	"",						/* reserved */
556	"Bad TLP",					/* bit 11, non-fatal */
557	"Bad DLLP",					/* bit 12, non-fatal */
558	"REPLAY_NUM Rollover",				/* bit 13, non-fatal */
559	"",						/* reserved */
560	"Replay Timer Timeout",				/* bit 15, non-fatal */
561	"",						/* reserved */
562	"",						/* reserved */
563	"",						/* reserved */
564	"Data Link Protocol Error",			/* bit 19, fatal */
565	"",						/* reserved */
566	"Poisoned TLP",					/* bit 21, non-fatal */
567	"",						/* reserved */
568	"Completion Timeout",				/* bit 23, non-fatal */
569	"Completer Abort",				/* bit 24, non-fatal */
570	"Unexpected Completion",			/* bit 25, non-fatal */
571	"Receiver Overflow",				/* bit 26, fatal */
572	"Malformed TLP",				/* bit 27, fatal */
573	"",						/* reserved */
574	"Unsupported Request"				/* bit 29, non-fatal */
575};
576
577static void do_nsi_error(int fatal, u32 errors)
578{
579	int i;
580
581	for (i = 0; i < 30; i++) {
582		if (errors & (1 << i))
583			printk(KERN_WARNING "%sError %s\n",
584			       fatal_message[fatal], nsi_message[i]);
585	}
586}
587
588static inline void nsi_error(int fatal, u32 errors, int *error_found,
589		int handle_error)
590{
591	*error_found = 1;
592
593	if (handle_error)
594		do_nsi_error(fatal, errors);
595}
596
597static char *membuf_message[4] = {
598	"Internal PMWB to DRAM parity",
599	"Internal PMWB to System Bus Parity",
600	"Internal System Bus or IO to PMWB Parity",
601	"Internal DRAM to PMWB Parity"
602};
603
604static void do_membuf_error(u8 errors)
605{
606	int i;
607
608	for (i = 0; i < 4; i++) {
609		if (errors & (1 << i))
610			e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n",
611				membuf_message[i]);
612	}
613}
614
615static inline void membuf_error(u8 errors, int *error_found, int handle_error)
616{
617	*error_found = 1;
618
619	if (handle_error)
620		do_membuf_error(errors);
621}
622
623static char *sysbus_message[10] = {
624	"Addr or Request Parity",
625	"Data Strobe Glitch",
626	"Addr Strobe Glitch",
627	"Data Parity",
628	"Addr Above TOM",
629	"Non DRAM Lock Error",
630	"MCERR", "BINIT",
631	"Memory Parity",
632	"IO Subsystem Parity"
633};
634
635static void do_sysbus_error(int fatal, u32 errors)
636{
637	int i;
638
639	for (i = 0; i < 10; i++) {
640		if (errors & (1 << i))
641			e752x_printk(KERN_WARNING, "%sError System Bus %s\n",
642				fatal_message[fatal], sysbus_message[i]);
643	}
644}
645
646static inline void sysbus_error(int fatal, u32 errors, int *error_found,
647				int handle_error)
648{
649	*error_found = 1;
650
651	if (handle_error)
652		do_sysbus_error(fatal, errors);
653}
654
655static void e752x_check_hub_interface(struct e752x_error_info *info,
656				int *error_found, int handle_error)
657{
658	u8 stat8;
659
660	//pci_read_config_byte(dev,E752X_HI_FERR,&stat8);
661
662	stat8 = info->hi_ferr;
663
664	if (stat8 & 0x7f) {	/* Error, so process */
665		stat8 &= 0x7f;
666
667		if (stat8 & 0x2b)
668			hub_error(1, stat8 & 0x2b, error_found, handle_error);
669
670		if (stat8 & 0x54)
671			hub_error(0, stat8 & 0x54, error_found, handle_error);
672	}
673	//pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
674
675	stat8 = info->hi_nerr;
676
677	if (stat8 & 0x7f) {	/* Error, so process */
678		stat8 &= 0x7f;
679
680		if (stat8 & 0x2b)
681			hub_error(1, stat8 & 0x2b, error_found, handle_error);
682
683		if (stat8 & 0x54)
684			hub_error(0, stat8 & 0x54, error_found, handle_error);
685	}
686}
687
688static void e752x_check_ns_interface(struct e752x_error_info *info,
689				int *error_found, int handle_error)
690{
691	u32 stat32;
692
693	stat32 = info->nsi_ferr;
694	if (stat32 & NSI_ERR_MASK) { /* Error, so process */
695		if (stat32 & NSI_FATAL_MASK)	/* check for fatal errors */
696			nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
697				  handle_error);
698		if (stat32 & NSI_NON_FATAL_MASK) /* check for non-fatal ones */
699			nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
700				  handle_error);
701	}
702	stat32 = info->nsi_nerr;
703	if (stat32 & NSI_ERR_MASK) {
704		if (stat32 & NSI_FATAL_MASK)
705			nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
706				  handle_error);
707		if (stat32 & NSI_NON_FATAL_MASK)
708			nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
709				  handle_error);
710	}
711}
712
713static void e752x_check_sysbus(struct e752x_error_info *info,
714			int *error_found, int handle_error)
715{
716	u32 stat32, error32;
717
718	//pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32);
719	stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16);
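	/* stat32 now holds the 16-bit FERR value in its low half and NERR in
	 * its high half; each half is masked to its 10 valid bits below, with
	 * 0x087 treated as the fatal sources and 0x378 as the non-fatal ones.
	 */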
720
721	if (stat32 == 0)
722		return;		/* no errors */
723
724	error32 = (stat32 >> 16) & 0x3ff;
725	stat32 = stat32 & 0x3ff;
726
727	if (stat32 & 0x087)
728		sysbus_error(1, stat32 & 0x087, error_found, handle_error);
729
730	if (stat32 & 0x378)
731		sysbus_error(0, stat32 & 0x378, error_found, handle_error);
732
733	if (error32 & 0x087)
734		sysbus_error(1, error32 & 0x087, error_found, handle_error);
735
736	if (error32 & 0x378)
737		sysbus_error(0, error32 & 0x378, error_found, handle_error);
738}
739
740static void e752x_check_membuf(struct e752x_error_info *info,
741			int *error_found, int handle_error)
742{
743	u8 stat8;
744
745	stat8 = info->buf_ferr;
746
747	if (stat8 & 0x0f) {	/* Error, so process */
748		stat8 &= 0x0f;
749		membuf_error(stat8, error_found, handle_error);
750	}
751
752	stat8 = info->buf_nerr;
753
754	if (stat8 & 0x0f) {	/* Error, so process */
755		stat8 &= 0x0f;
756		membuf_error(stat8, error_found, handle_error);
757	}
758}
759
760static void e752x_check_dram(struct mem_ctl_info *mci,
761			struct e752x_error_info *info, int *error_found,
762			int handle_error)
763{
764	u16 error_one, error_next;
765
766	error_one = info->dram_ferr;
767	error_next = info->dram_nerr;
768
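	/* Each DRAM error class is tested with the same bit in both bytes of
	 * the 16-bit status (hence masks like 0x0101) - presumably one byte
	 * per channel, with do_process_ce() using the low bit to pick the
	 * channel.
	 */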
769	/* decode and report errors */
770	if (error_one & 0x0101)	/* check first error correctable */
771		process_ce(mci, error_one, info->dram_sec1_add,
772			info->dram_sec1_syndrome, error_found, handle_error);
773
774	if (error_next & 0x0101)	/* check next error correctable */
775		process_ce(mci, error_next, info->dram_sec2_add,
776			info->dram_sec2_syndrome, error_found, handle_error);
777
778	if (error_one & 0x4040)
779		process_ue_no_info_wr(mci, error_found, handle_error);
780
781	if (error_next & 0x4040)
782		process_ue_no_info_wr(mci, error_found, handle_error);
783
784	if (error_one & 0x2020)
785		process_ded_retry(mci, error_one, info->dram_retr_add,
786				error_found, handle_error);
787
788	if (error_next & 0x2020)
789		process_ded_retry(mci, error_next, info->dram_retr_add,
790				error_found, handle_error);
791
792	if (error_one & 0x0808)
793		process_threshold_ce(mci, error_one, error_found, handle_error);
794
795	if (error_next & 0x0808)
796		process_threshold_ce(mci, error_next, error_found,
797				handle_error);
798
799	if (error_one & 0x0606)
800		process_ue(mci, error_one, info->dram_ded_add,
801			info->dram_scrb_add, error_found, handle_error);
802
803	if (error_next & 0x0606)
804		process_ue(mci, error_next, info->dram_ded_add,
805			info->dram_scrb_add, error_found, handle_error);
806}
807
808static void e752x_get_error_info(struct mem_ctl_info *mci,
809				 struct e752x_error_info *info)
810{
811	struct pci_dev *dev;
812	struct e752x_pvt *pvt;
813
814	memset(info, 0, sizeof(*info));
815	pvt = (struct e752x_pvt *)mci->pvt_info;
816	dev = pvt->dev_d0f1;
817	pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);
818
819	if (info->ferr_global) {
820		if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
821			pci_read_config_dword(dev, I3100_NSI_FERR,
822					     &info->nsi_ferr);
823			info->hi_ferr = 0;
824		} else {
825			pci_read_config_byte(dev, E752X_HI_FERR,
826					     &info->hi_ferr);
827			info->nsi_ferr = 0;
828		}
829		pci_read_config_word(dev, E752X_SYSBUS_FERR,
830				&info->sysbus_ferr);
831		pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
832		pci_read_config_word(dev, E752X_DRAM_FERR, &info->dram_ferr);
833		pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
834				&info->dram_sec1_add);
835		pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
836				&info->dram_sec1_syndrome);
837		pci_read_config_dword(dev, E752X_DRAM_DED_ADD,
838				&info->dram_ded_add);
839		pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD,
840				&info->dram_scrb_add);
841		pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
842				&info->dram_retr_add);
843
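		/* Writing the flagged bits back below acknowledges them; the
		 * status registers are assumed to be write-1-to-clear.
		 */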
844		/* ignore the reserved bits just in case */
845		if (info->hi_ferr & 0x7f)
846			pci_write_config_byte(dev, E752X_HI_FERR,
847					info->hi_ferr);
848
849		if (info->nsi_ferr & NSI_ERR_MASK)
850			pci_write_config_dword(dev, I3100_NSI_FERR,
851					info->nsi_ferr);
852
853		if (info->sysbus_ferr)
854			pci_write_config_word(dev, E752X_SYSBUS_FERR,
855					info->sysbus_ferr);
856
857		if (info->buf_ferr & 0x0f)
858			pci_write_config_byte(dev, E752X_BUF_FERR,
859					info->buf_ferr);
860
861		if (info->dram_ferr)
862			pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR,
863					 info->dram_ferr, info->dram_ferr);
864
865		pci_write_config_dword(dev, E752X_FERR_GLOBAL,
866				info->ferr_global);
867	}
868
869	pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);
870
871	if (info->nerr_global) {
872		if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
873			pci_read_config_dword(dev, I3100_NSI_NERR,
874					     &info->nsi_nerr);
875			info->hi_nerr = 0;
876		} else {
877			pci_read_config_byte(dev, E752X_HI_NERR,
878					     &info->hi_nerr);
879			info->nsi_nerr = 0;
880		}
881		pci_read_config_word(dev, E752X_SYSBUS_NERR,
882				&info->sysbus_nerr);
883		pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
884		pci_read_config_word(dev, E752X_DRAM_NERR, &info->dram_nerr);
885		pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
886				&info->dram_sec2_add);
887		pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
888				&info->dram_sec2_syndrome);
889
890		if (info->hi_nerr & 0x7f)
891			pci_write_config_byte(dev, E752X_HI_NERR,
892					info->hi_nerr);
893
894		if (info->nsi_nerr & NSI_ERR_MASK)
895			pci_write_config_dword(dev, I3100_NSI_NERR,
896					info->nsi_nerr);
897
898		if (info->sysbus_nerr)
899			pci_write_config_word(dev, E752X_SYSBUS_NERR,
900					info->sysbus_nerr);
901
902		if (info->buf_nerr & 0x0f)
903			pci_write_config_byte(dev, E752X_BUF_NERR,
904					info->buf_nerr);
905
906		if (info->dram_nerr)
907			pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR,
908					 info->dram_nerr, info->dram_nerr);
909
910		pci_write_config_dword(dev, E752X_NERR_GLOBAL,
911				info->nerr_global);
912	}
913}
914
915static int e752x_process_error_info(struct mem_ctl_info *mci,
916				struct e752x_error_info *info,
917				int handle_errors)
918{
919	u32 error32, stat32;
920	int error_found;
921
922	error_found = 0;
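	/* In FERR/NERR_GLOBAL, bits 27:18 appear to flag the fatal error
	 * sources and bits 14:4 the non-fatal ones, indexed like
	 * global_message[]; that is how the masks below slice the register.
	 */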
923	error32 = (info->ferr_global >> 18) & 0x3ff;
924	stat32 = (info->ferr_global >> 4) & 0x7ff;
925
926	if (error32)
927		global_error(1, error32, &error_found, handle_errors);
928
929	if (stat32)
930		global_error(0, stat32, &error_found, handle_errors);
931
932	error32 = (info->nerr_global >> 18) & 0x3ff;
933	stat32 = (info->nerr_global >> 4) & 0x7ff;
934
935	if (error32)
936		global_error(1, error32, &error_found, handle_errors);
937
938	if (stat32)
939		global_error(0, stat32, &error_found, handle_errors);
940
941	e752x_check_hub_interface(info, &error_found, handle_errors);
942	e752x_check_ns_interface(info, &error_found, handle_errors);
943	e752x_check_sysbus(info, &error_found, handle_errors);
944	e752x_check_membuf(info, &error_found, handle_errors);
945	e752x_check_dram(mci, info, &error_found, handle_errors);
946	return error_found;
947}
948
949static void e752x_check(struct mem_ctl_info *mci)
950{
951	struct e752x_error_info info;
952
953	debugf3("%s()\n", __func__);
954	e752x_get_error_info(mci, &info);
955	e752x_process_error_info(mci, &info, 1);
956}
957
958/* Program byte/sec bandwidth scrub rate to hardware */
959static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
960{
961	const struct scrubrate *scrubrates;
962	struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
963	struct pci_dev *pdev = pvt->dev_d0f0;
964	int i;
965
966	if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
967		scrubrates = scrubrates_i3100;
968	else
969		scrubrates = scrubrates_e752x;
970
971	/* Translate the desired scrub rate to a e752x/3100 register value.
972	 * Search for the bandwidth that is equal or greater than the
	 * desired rate and program the corresponding register value.
974	 */
975	for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
976		if (scrubrates[i].bandwidth >= new_bw)
977			break;
978
979	if (scrubrates[i].bandwidth == SDRATE_EOT)
980		return -1;
981
982	pci_write_config_word(pdev, E752X_MCHSCRB, scrubrates[i].scrubval);
983
984	return 0;
985}
986
987/* Convert current scrub rate value into byte/sec bandwidth */
988static int get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
989{
990	const struct scrubrate *scrubrates;
991	struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
992	struct pci_dev *pdev = pvt->dev_d0f0;
993	u16 scrubval;
994	int i;
995
996	if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
997		scrubrates = scrubrates_i3100;
998	else
999		scrubrates = scrubrates_e752x;
1000
1001	/* Find the bandwidth matching the memory scrubber configuration */
1002	pci_read_config_word(pdev, E752X_MCHSCRB, &scrubval);
1003	scrubval = scrubval & 0x0f;
1004
1005	for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
1006		if (scrubrates[i].scrubval == scrubval)
1007			break;
1008
1009	if (scrubrates[i].bandwidth == SDRATE_EOT) {
1010		e752x_printk(KERN_WARNING,
1011			"Invalid sdram scrub control value: 0x%x\n", scrubval);
1012		return -1;
1013	}
1014
1015	*bw = scrubrates[i].bandwidth;
1016
1017	return 0;
1018}
1019
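/* DDRCSR bits 14:12 encode the channel population (1 = single channel A,
 * 2 = single channel B, 3 = dual, per the E752X_DDRCSR comment above), so
 * testing bits 13:12 for the value 3 detects dual-channel mode.
 */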
1020/* Return 1 if dual channel mode is active.  Else return 0. */
1021static inline int dual_channel_active(u16 ddrcsr)
1022{
1023	return (((ddrcsr >> 12) & 3) == 3);
1024}
1025
/* Remap csrow index numbers if map_type is "reverse" */
1028static inline int remap_csrow_index(struct mem_ctl_info *mci, int index)
1029{
1030	struct e752x_pvt *pvt = mci->pvt_info;
1031
1032	if (!pvt->map_type)
1033		return (7 - index);
1034
1035	return (index);
1036}
1037
1038static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
1039			u16 ddrcsr)
1040{
1041	struct csrow_info *csrow;
1042	unsigned long last_cumul_size;
1043	int index, mem_dev, drc_chan;
	int drc_drbg;		/* DRB granularity 0=64 MB, 1=128 MB */
1045	int drc_ddim;		/* DRAM Data Integrity Mode 0=none, 2=edac */
1046	u8 value;
1047	u32 dra, drc, cumul_size;
1048
1049	dra = 0;
1050	for (index = 0; index < 4; index++) {
1051		u8 dra_reg;
1052		pci_read_config_byte(pdev, E752X_DRA + index, &dra_reg);
1053		dra |= dra_reg << (index * 8);
1054	}
1055	pci_read_config_dword(pdev, E752X_DRC, &drc);
1056	drc_chan = dual_channel_active(ddrcsr);
1057	drc_drbg = drc_chan + 1;	/* 128 in dual mode, 64 in single */
1058	drc_ddim = (drc >> 20) & 0x3;
1059
1060	/* The dram row boundary (DRB) reg values are boundary address for
1061	 * each DRAM row with a granularity of 64 or 128MB (single/dual
1062	 * channel operation).  DRB regs are cumulative; therefore DRB7 will
1063	 * contain the total memory contained in all eight rows.
1064	 */
1065	for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
1066		/* mem_dev 0=x8, 1=x4 */
1067		mem_dev = (dra >> (index * 4 + 2)) & 0x3;
1068		csrow = &mci->csrows[remap_csrow_index(mci, index)];
1069
1070		mem_dev = (mem_dev == 2);
1071		pci_read_config_byte(pdev, E752X_DRB + index, &value);
1072		/* convert a 128 or 64 MiB DRB to a page size. */
1073		cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
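		/* each DRB unit is 2^(25 + drc_drbg) bytes: 64 MiB for single
		 * channel (drc_drbg == 1), 128 MiB for dual (drc_drbg == 2),
		 * so this yields the cumulative row size in 4 KiB pages.
		 */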
1074		debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
1075			cumul_size);
1076		if (cumul_size == last_cumul_size)
1077			continue;	/* not populated */
1078
1079		csrow->first_page = last_cumul_size;
1080		csrow->last_page = cumul_size - 1;
1081		csrow->nr_pages = cumul_size - last_cumul_size;
1082		last_cumul_size = cumul_size;
1083		csrow->grain = 1 << 12;	/* 4KiB - resolution of CELOG */
1084		csrow->mtype = MEM_RDDR;	/* only one type supported */
1085		csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
1086
1087		/*
1088		 * if single channel or x8 devices then SECDED
1089		 * if dual channel and x4 then S4ECD4ED
1090		 */
1091		if (drc_ddim) {
1092			if (drc_chan && mem_dev) {
1093				csrow->edac_mode = EDAC_S4ECD4ED;
1094				mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
1095			} else {
1096				csrow->edac_mode = EDAC_SECDED;
1097				mci->edac_cap |= EDAC_FLAG_SECDED;
1098			}
1099		} else
1100			csrow->edac_mode = EDAC_NONE;
1101	}
1102}
1103
1104static void e752x_init_mem_map_table(struct pci_dev *pdev,
1105				struct e752x_pvt *pvt)
1106{
1107	int index;
1108	u8 value, last, row;
1109
1110	last = 0;
1111	row = 0;
1112
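	/* DRB registers hold cumulative row boundaries, so a DRB equal to the
	 * previous value adds no memory and marks the slot (or the second
	 * side of a DIMM) as unpopulated.
	 */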
1113	for (index = 0; index < 8; index += 2) {
1114		pci_read_config_byte(pdev, E752X_DRB + index, &value);
1115		/* test if there is a dimm in this slot */
1116		if (value == last) {
1117			/* no dimm in the slot, so flag it as empty */
1118			pvt->map[index] = 0xff;
1119			pvt->map[index + 1] = 0xff;
1120		} else {	/* there is a dimm in the slot */
1121			pvt->map[index] = row;
1122			row++;
1123			last = value;
1124			/* test the next value to see if the dimm is double
1125			 * sided
1126			 */
1127			pci_read_config_byte(pdev, E752X_DRB + index + 1,
1128					&value);
1129
			/* if the next DRB matches the previous value the
			 * dimm is single sided, so flag the second half as
			 * empty; otherwise it is double sided and the second
			 * half maps to the next row number
			 */
			pvt->map[index + 1] = (value == last) ? 0xff : row;
1133			row++;
1134			last = value;
1135		}
1136	}
1137}
1138
1139/* Return 0 on success or 1 on failure. */
1140static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
1141			struct e752x_pvt *pvt)
1142{
1143	struct pci_dev *dev;
1144
1145	pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
1146				pvt->dev_info->err_dev, pvt->bridge_ck);
1147
1148	if (pvt->bridge_ck == NULL)
1149		pvt->bridge_ck = pci_scan_single_device(pdev->bus,
1150							PCI_DEVFN(0, 1));
1151
1152	if (pvt->bridge_ck == NULL) {
		e752x_printk(KERN_ERR, "error reporting device not found: "
1154			"vendor %x device 0x%x (broken BIOS?)\n",
1155			PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
1156		return 1;
1157	}
1158
1159	dev = pci_get_device(PCI_VENDOR_ID_INTEL,
1160				e752x_devs[dev_idx].ctl_dev,
1161				NULL);
1162
1163	if (dev == NULL)
1164		goto fail;
1165
1166	pvt->dev_d0f0 = dev;
1167	pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
1168
1169	return 0;
1170
1171fail:
1172	pci_dev_put(pvt->bridge_ck);
1173	return 1;
1174}
1175
1176/* Setup system bus parity mask register.
1177 * Sysbus parity supported on:
1178 * e7320/e7520/e7525 + Xeon
1179 */
1180static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt)
1181{
1182	char *cpu_id = cpu_data(0).x86_model_id;
1183	struct pci_dev *dev = pvt->dev_d0f1;
1184	int enable = 1;
1185
1186	/* Allow module parameter override, else see if CPU supports parity */
1187	if (sysbus_parity != -1) {
1188		enable = sysbus_parity;
1189	} else if (cpu_id[0] && !strstr(cpu_id, "Xeon")) {
1190		e752x_printk(KERN_INFO, "System Bus Parity not "
1191			     "supported by CPU, disabling\n");
1192		enable = 0;
1193	}
1194
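	/* 0x0309 sets the mask bits for the parity-related system bus errors
	 * (addr/request, data, memory and IO subsystem parity per
	 * sysbus_message[]); 0x0000 leaves everything unmasked.
	 */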
1195	if (enable)
1196		pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0000);
1197	else
1198		pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0309);
1199}
1200
1201static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt)
1202{
1203	struct pci_dev *dev;
1204
1205	dev = pvt->dev_d0f1;
1206	/* Turn off error disable & SMI in case the BIOS turned it on */
1207	if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
1208		pci_write_config_dword(dev, I3100_NSI_EMASK, 0);
1209		pci_write_config_dword(dev, I3100_NSI_SMICMD, 0);
1210	} else {
1211		pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
1212		pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
1213	}
1214
1215	e752x_init_sysbus_parity_mask(pvt);
1216
1217	pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
1218	pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
1219	pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
1220	pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
1221	pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
1222}
1223
1224static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
1225{
1226	u16 pci_data;
1227	u8 stat8;
1228	struct mem_ctl_info *mci;
1229	struct e752x_pvt *pvt;
1230	u16 ddrcsr;
1231	int drc_chan;		/* Number of channels 0=1chan,1=2chan */
1232	struct e752x_error_info discard;
1233
1234	debugf0("%s(): mci\n", __func__);
1235	debugf0("Starting Probe1\n");
1236
1237	/* check to see if device 0 function 1 is enabled; if it isn't, we
1238	 * assume the BIOS has reserved it for a reason and is expecting
1239	 * exclusive access, we take care not to violate that assumption and
1240	 * fail the probe. */
1241	pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
1242	if (!force_function_unhide && !(stat8 & (1 << 5))) {
1243		printk(KERN_INFO "Contact your BIOS vendor to see if the "
1244			"E752x error registers can be safely un-hidden\n");
1245		return -ENODEV;
1246	}
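	/* bit 5 of DEVPRES1 is what the check above treats as the "function 1
	 * visible" flag; set it (a no-op if the BIOS already had it set) so
	 * the error registers can be accessed.
	 */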
1247	stat8 |= (1 << 5);
1248	pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);
1249
1250	pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
1251	/* Dual channel = 1, Single channel = 0 */
1252	drc_chan = dual_channel_active(ddrcsr);
1253
1254	mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1, 0);
1255
1256	if (mci == NULL) {
1257		return -ENOMEM;
1258	}
1259
1260	debugf3("%s(): init mci\n", __func__);
1261	mci->mtype_cap = MEM_FLAG_RDDR;
	/* 3100 IMCH supports SECDED only */
1263	mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
1264		(EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED);
1265	mci->mod_name = EDAC_MOD_STR;
1266	mci->mod_ver = E752X_REVISION;
1267	mci->dev = &pdev->dev;
1268
1269	debugf3("%s(): init pvt\n", __func__);
1270	pvt = (struct e752x_pvt *)mci->pvt_info;
1271	pvt->dev_info = &e752x_devs[dev_idx];
1272	pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
1273
1274	if (e752x_get_devs(pdev, dev_idx, pvt)) {
1275		edac_mc_free(mci);
1276		return -ENODEV;
1277	}
1278
1279	debugf3("%s(): more mci init\n", __func__);
1280	mci->ctl_name = pvt->dev_info->ctl_name;
1281	mci->dev_name = pci_name(pdev);
1282	mci->edac_check = e752x_check;
1283	mci->ctl_page_to_phys = ctl_page_to_phys;
1284	mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
1285	mci->get_sdram_scrub_rate = get_sdram_scrub_rate;
1286
1287	/* set the map type.  1 = normal, 0 = reversed
1288	 * Must be set before e752x_init_csrows in case csrow mapping
1289	 * is reversed.
1290	 */
1291	pci_read_config_byte(pdev, E752X_DRM, &stat8);
1292	pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
1293
1294	e752x_init_csrows(mci, pdev, ddrcsr);
1295	e752x_init_mem_map_table(pdev, pvt);
1296
1297	if (dev_idx == I3100)
1298		mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
1299	else
1300		mci->edac_cap |= EDAC_FLAG_NONE;
1301	debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
1302
1303	/* load the top of low memory, remap base, and remap limit vars */
1304	pci_read_config_word(pdev, E752X_TOLM, &pci_data);
1305	pvt->tolm = ((u32) pci_data) << 4;
1306	pci_read_config_word(pdev, E752X_REMAPBASE, &pci_data);
1307	pvt->remapbase = ((u32) pci_data) << 14;
1308	pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data);
1309	pvt->remaplimit = ((u32) pci_data) << 14;
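	/* the shifts convert the register values into 4 KiB page numbers, so
	 * that ctl_page_to_phys() above can compare them directly against
	 * page numbers */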
1310	e752x_printk(KERN_INFO,
1311			"tolm = %x, remapbase = %x, remaplimit = %x\n",
1312			pvt->tolm, pvt->remapbase, pvt->remaplimit);
1313
1314	/* Here we assume that we will never see multiple instances of this
1315	 * type of memory controller.  The ID is therefore hardcoded to 0.
1316	 */
1317	if (edac_mc_add_mc(mci)) {
1318		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
1319		goto fail;
1320	}
1321
1322	e752x_init_error_reporting_regs(pvt);
1323	e752x_get_error_info(mci, &discard);	/* clear other MCH errors */
1324
1325	/* allocating generic PCI control info */
1326	e752x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
1327	if (!e752x_pci) {
1328		printk(KERN_WARNING
1329			"%s(): Unable to create PCI control\n", __func__);
1330		printk(KERN_WARNING
1331			"%s(): PCI error report via EDAC not setup\n",
1332			__func__);
1333	}
1334
1335	/* get this far and it's successful */
1336	debugf3("%s(): success\n", __func__);
1337	return 0;
1338
1339fail:
1340	pci_dev_put(pvt->dev_d0f0);
1341	pci_dev_put(pvt->dev_d0f1);
1342	pci_dev_put(pvt->bridge_ck);
1343	edac_mc_free(mci);
1344
1345	return -ENODEV;
1346}
1347
/* returns 0 on success, or negative on error */
1349static int __devinit e752x_init_one(struct pci_dev *pdev,
1350				const struct pci_device_id *ent)
1351{
1352	debugf0("%s()\n", __func__);
1353
1354	/* wake up and enable device */
1355	if (pci_enable_device(pdev) < 0)
1356		return -EIO;
1357
1358	return e752x_probe1(pdev, ent->driver_data);
1359}
1360
1361static void __devexit e752x_remove_one(struct pci_dev *pdev)
1362{
1363	struct mem_ctl_info *mci;
1364	struct e752x_pvt *pvt;
1365
1366	debugf0("%s()\n", __func__);
1367
1368	if (e752x_pci)
1369		edac_pci_release_generic_ctl(e752x_pci);
1370
1371	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
1372		return;
1373
1374	pvt = (struct e752x_pvt *)mci->pvt_info;
1375	pci_dev_put(pvt->dev_d0f0);
1376	pci_dev_put(pvt->dev_d0f1);
1377	pci_dev_put(pvt->bridge_ck);
1378	edac_mc_free(mci);
1379}
1380
1381static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
1382	{
1383	 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1384	 E7520},
1385	{
1386	 PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1387	 E7525},
1388	{
1389	 PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1390	 E7320},
1391	{
1392	 PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1393	 I3100},
1394	{
1395	 0,
1396	 }			/* 0 terminated list. */
1397};
1398
1399MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
1400
1401static struct pci_driver e752x_driver = {
1402	.name = EDAC_MOD_STR,
1403	.probe = e752x_init_one,
1404	.remove = __devexit_p(e752x_remove_one),
1405	.id_table = e752x_pci_tbl,
1406};
1407
1408static int __init e752x_init(void)
1409{
1410	int pci_rc;
1411
1412	debugf3("%s()\n", __func__);
1413
	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();
1416
1417	pci_rc = pci_register_driver(&e752x_driver);
1418	return (pci_rc < 0) ? pci_rc : 0;
1419}
1420
1421static void __exit e752x_exit(void)
1422{
1423	debugf3("%s()\n", __func__);
1424	pci_unregister_driver(&e752x_driver);
1425}
1426
1427module_init(e752x_init);
1428module_exit(e752x_exit);
1429
1430MODULE_LICENSE("GPL");
1431MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
1432MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");
1433
1434module_param(force_function_unhide, int, 0444);
1435MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
1436		 " 1=force unhide and hope BIOS doesn't fight driver for "
1437		"Dev0:Fun1 access");
1438
1439module_param(edac_op_state, int, 0444);
1440MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
1441
1442module_param(sysbus_parity, int, 0444);
1443MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking,"
1444		" 1=enable system bus parity checking, default=auto-detect");
1445module_param(report_non_memory_errors, int, 0644);
1446MODULE_PARM_DESC(report_non_memory_errors, "0=disable non-memory error "
1447		"reporting, 1=enable non-memory error reporting");
1448