/*
 * (Extraction artifact: code-browser navigation header neutralized.)
 * Origin: asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/
 *         linux/linux-2.6/arch/alpha/kernel/core_cia.c
 */
1/*
2 *	linux/arch/alpha/kernel/core_cia.c
3 *
4 * Written by David A Rusling (david.rusling@reo.mts.dec.com).
5 * December 1995.
6 *
7 *	Copyright (C) 1995  David A Rusling
8 *	Copyright (C) 1997, 1998  Jay Estabrook
9 *	Copyright (C) 1998, 1999, 2000  Richard Henderson
10 *
11 * Code common to all CIA core logic chips.
12 */
13
14#define __EXTERN_INLINE inline
15#include <asm/io.h>
16#include <asm/core_cia.h>
17#undef __EXTERN_INLINE
18
19#include <linux/types.h>
20#include <linux/pci.h>
21#include <linux/sched.h>
22#include <linux/init.h>
23#include <linux/bootmem.h>
24
25#include <asm/ptrace.h>
26
27#include "proto.h"
28#include "pci_impl.h"
29
30
31/*
32 * NOTE: Herein lie back-to-back mb instructions.  They are magic.
33 * One plausible explanation is that the i/o controller does not properly
34 * handle the system transaction.  Another involves timing.  Ho hum.
35 */
36
37#define DEBUG_CONFIG 0
38#if DEBUG_CONFIG
39# define DBGC(args)	printk args
40#else
41# define DBGC(args)
42#endif
43
44#define vip	volatile int  *
45
46/*
47 * Given a bus, device, and function number, compute resulting
48 * configuration space address.  It is therefore not safe to have
49 * concurrent invocations to configuration space access routines, but
50 * there really shouldn't be any need for this.
51 *
52 * Type 0:
53 *
54 *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
55 *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
56 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
57 * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0|
58 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
59 *
60 *	31:11	Device select bit.
61 * 	10:8	Function number
62 * 	 7:2	Register number
63 *
64 * Type 1:
65 *
66 *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
67 *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
68 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
69 * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
70 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
71 *
72 *	31:24	reserved
73 *	23:16	bus number (8 bits = 128 possible buses)
74 *	15:11	Device number (5 bits)
75 *	10:8	function number
76 *	 7:2	register number
77 *
78 * Notes:
79 *	The function number selects which function of a multi-function device
80 *	(e.g., SCSI and Ethernet).
81 *
82 *	The register selects a DWORD (32 bit) register offset.  Hence it
83 *	doesn't get shifted by 2 bits as we want to "drop" the bottom two
84 *	bits.
85 */
86
87static int
88mk_conf_addr(struct pci_bus *bus_dev, unsigned int device_fn, int where,
89	     unsigned long *pci_addr, unsigned char *type1)
90{
91	u8 bus = bus_dev->number;
92
93	*type1 = (bus != 0);
94	*pci_addr = (bus << 16) | (device_fn << 8) | where;
95
96	DBGC(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x,"
97	      " returning address 0x%p\n"
98	      bus, device_fn, where, *pci_addr));
99
100	return 0;
101}
102
/*
 * Read one longword from PCI configuration space at ADDR.
 *
 * The access is made with machine checks "expected": probing an empty
 * slot produces a master abort, delivered as a machine check, which is
 * swallowed here and converted to the all-ones value PCI mandates.
 * Interrupts are disabled for the whole sequence since the CFG
 * register and the mcheck_expected/taken flags are shared state.
 * The back-to-back mb()s are deliberate -- see the note at the top
 * of this file.
 */
static unsigned int
conf_read(unsigned long addr, unsigned char type1)
{
	unsigned long flags;
	int stat0, value;
	int cia_cfg = 0;

	DBGC(("conf_read(addr=0x%lx, type1=%d) ", addr, type1));
	local_irq_save(flags);

	/* Reset status register to avoid losing errors.  */
	stat0 = *(vip)CIA_IOC_CIA_ERR;
	*(vip)CIA_IOC_CIA_ERR = stat0;
	mb();
	*(vip)CIA_IOC_CIA_ERR; /* re-read to force write */

	/* If Type1 access, must set CIA CFG. */
	if (type1) {
		/* Save the old CFG contents so they can be restored below.  */
		cia_cfg = *(vip)CIA_IOC_CFG;
		*(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;
		mb();
		*(vip)CIA_IOC_CFG;
	}

	/* Drain outstanding writes, then arm the expected-mcheck flags
	   before touching config space.  */
	mb();
	draina();
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();

	/* Access configuration space.  */
	value = *(vip)addr;
	mb();
	mb();  /* magic */
	if (mcheck_taken(0)) {
		/* The read machine-checked (e.g. master abort on an
		   empty slot); report all-ones as PCI requires.  */
		mcheck_taken(0) = 0;
		value = 0xffffffff;
		mb();
	}
	mcheck_expected(0) = 0;
	mb();

	/* If Type1 access, must reset IOC CFG so normal IO space ops work.  */
	if (type1) {
		*(vip)CIA_IOC_CFG = cia_cfg;
		mb();
		*(vip)CIA_IOC_CFG;
	}

	local_irq_restore(flags);
	DBGC(("done\n"));

	return value;
}
157
/*
 * Write one longword VALUE to PCI configuration space at ADDR.
 * Mirrors conf_read(): errors are cleared first, a Type 1 cycle is
 * selected via the CFG register when needed, and the access is made
 * with machine checks expected so a failed write cannot take the
 * machine down.  Runs with interrupts disabled.
 */
static void
conf_write(unsigned long addr, unsigned int value, unsigned char type1)
{
	unsigned long flags;
	int stat0, cia_cfg = 0;

	DBGC(("conf_write(addr=0x%lx, type1=%d) ", addr, type1));
	local_irq_save(flags);

	/* Reset status register to avoid losing errors.  */
	stat0 = *(vip)CIA_IOC_CIA_ERR;
	*(vip)CIA_IOC_CIA_ERR = stat0;
	mb();
	*(vip)CIA_IOC_CIA_ERR; /* re-read to force write */

	/* If Type1 access, must set CIA CFG.  */
	if (type1) {
		/* Save the old CFG contents so they can be restored below.  */
		cia_cfg = *(vip)CIA_IOC_CFG;
		*(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;
		mb();
		*(vip)CIA_IOC_CFG;
	}

	/* Drain outstanding writes, then arm the expected-mcheck flags
	   before touching config space.  */
	mb();
	draina();
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();

	/* Access configuration space.  */
	*(vip)addr = value;
	mb();
	*(vip)addr; /* read back to force the write */

	mcheck_expected(0) = 0;
	mb();

	/* If Type1 access, must reset IOC CFG so normal IO space ops work.  */
	if (type1) {
		*(vip)CIA_IOC_CFG = cia_cfg;
		mb();
		*(vip)CIA_IOC_CFG;
	}

	local_irq_restore(flags);
	DBGC(("done\n"));
}
205
206static int
207cia_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
208		u32 *value)
209{
210	unsigned long addr, pci_addr;
211	long mask;
212	unsigned char type1;
213	int shift;
214
215	if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
216		return PCIBIOS_DEVICE_NOT_FOUND;
217
218	mask = (size - 1) * 8;
219	shift = (where & 3) * 8;
220	addr = (pci_addr << 5) + mask + CIA_CONF;
221	*value = conf_read(addr, type1) >> (shift);
222	return PCIBIOS_SUCCESSFUL;
223}
224
225static int
226cia_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
227		 u32 value)
228{
229	unsigned long addr, pci_addr;
230	long mask;
231	unsigned char type1;
232
233	if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
234		return PCIBIOS_DEVICE_NOT_FOUND;
235
236	mask = (size - 1) * 8;
237	addr = (pci_addr << 5) + mask + CIA_CONF;
238	conf_write(addr, value << ((where & 3) * 8), type1);
239	return PCIBIOS_SUCCESSFUL;
240}
241
/* Config-space accessors handed to the generic PCI layer.  */
struct pci_ops cia_pci_ops =
{
	.read = 	cia_read_config,
	.write =	cia_write_config,
};
247
248/*
249 * CIA Pass 1 and PYXIS Pass 1 and 2 have a broken scatter-gather tlb.
250 * It cannot be invalidated.  Rather than hard code the pass numbers,
251 * actually try the tbia to see if it works.
252 */
253
/*
 * Invalidate the scatter-gather TLB via the TBIA register.  The
 * start/end range is ignored: the whole TLB is flushed.  Only usable
 * on chip revisions where tbia actually works -- see the note above
 * and verify_tb_operation().
 */
void
cia_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	wmb();
	*(vip)CIA_IOC_PCI_TBIA = 3;	/* Flush all locked and unlocked.  */
	mb();
	*(vip)CIA_IOC_PCI_TBIA;	/* read back to force the write */
}
262
263/*
264 * On PYXIS, even if the tbia works, we cannot use it. It effectively locks
265 * the chip (as well as direct write to the tag registers) if there is a
266 * SG DMA operation in progress. This is true at least for PYXIS rev. 1,
267 * so always use the method below.
268 */
269/*
270 * This is the method NT and NetBSD use.
271 *
272 * Allocate mappings, and put the chip into DMA loopback mode to read a
273 * garbage page.  This works by causing TLB misses, causing old entries to
274 * be purged to make room for the new entries coming in for the garbage page.
275 */
276
277#define CIA_BROKEN_TBIA_BASE	0x30000000
278#define CIA_BROKEN_TBIA_SIZE	1024
279
/* Always called with interrupts disabled */
/*
 * TLB-invalidate workaround for chips with broken tbia: force the SG
 * TLB entries out by generating misses against a dedicated garbage
 * window (set up in cia_prepare_tbia_workaround()) while the chip is
 * in PCI loopback mode.  The start/end range is ignored; the effect
 * is a full flush of the usable tags.
 */
void
cia_pci_tbi_try2(struct pci_controller *hose,
		 dma_addr_t start, dma_addr_t end)
{
	void __iomem *bus_addr;
	int ctrl;

	/* Put the chip into PCI loopback mode.  */
	mb();
	ctrl = *(vip)CIA_IOC_CIA_CTRL;
	*(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();

	/* Read from PCI dense memory space at TBI_ADDR, skipping 32k on
	   each read.  This forces SG TLB misses.  NetBSD claims that the
	   TLB entries are not quite LRU, meaning that we need to read more
	   times than there are actual tags.  The 2117x docs claim strict
	   round-robin.  Oh well, we've come this far...  */
	/* Even better - as seen on the PYXIS rev 1 the TLB tags 0-3 can
	   be filled by the TLB misses *only once* after being invalidated
	   (by tbia or direct write). Next misses won't update them even
	   though the lock bits are cleared. Tags 4-7 are "quite LRU" though,
	   so use them and read at window 3 base exactly 4 times. Reading
	   more sometimes makes the chip crazy.  -ink */

	bus_addr = cia_ioremap(CIA_BROKEN_TBIA_BASE, 32768 * 4);

	cia_readl(bus_addr + 0x00000);
	cia_readl(bus_addr + 0x08000);
	cia_readl(bus_addr + 0x10000);
	cia_readl(bus_addr + 0x18000);

	cia_iounmap(bus_addr);

	/* Restore normal PCI operation.  */
	mb();
	*(vip)CIA_IOC_CIA_CTRL = ctrl;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();
}
324
/*
 * Set up the DMA window used by cia_pci_tbi_try2(): a small page
 * table in which every PTE points at the same physical page, mapped
 * through the given window at CIA_BROKEN_TBIA_BASE.  Loopback reads
 * through this window then evict SG TLB entries harmlessly.
 */
static inline void
cia_prepare_tbia_workaround(int window)
{
	unsigned long *ppte, pte;
	long i;

	/* Use minimal 1K map. */
	/* NOTE(review): 32768 alignment matches the NetBSD hint in
	   do_init_arch() that page tables must be 32K-aligned.  */
	ppte = __alloc_bootmem(CIA_BROKEN_TBIA_SIZE, 32768, 0);
	pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1;

	/* Every PTE points at the table page itself, valid bit set.  */
	for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i)
		ppte[i] = pte;

	/* Enable the window (base | 3 = enabled + scatter-gather).  */
	*(vip)CIA_IOC_PCI_Wn_BASE(window) = CIA_BROKEN_TBIA_BASE | 3;
	*(vip)CIA_IOC_PCI_Wn_MASK(window)
	  = (CIA_BROKEN_TBIA_SIZE*1024 - 1) & 0xfff00000;
	*(vip)CIA_IOC_PCI_Tn_BASE(window) = virt_to_phys(ppte) >> 2;
}
343
/*
 * Probe the scatter-gather TLB at boot, with the chip in PCI loopback
 * mode, to decide which invalidation method works on this revision
 * and whether the SG window is usable at all.  On total failure the
 * SG windows are disabled and DMA falls back to the direct-map
 * window.  Must run after machine-check handling is live (see
 * cia_init_pci()).
 */
static void __init
verify_tb_operation(void)
{
	/* Loopback target page; physical, page-aligned, discarded
	   after init.  */
	static int page[PAGE_SIZE/4]
		__attribute__((aligned(PAGE_SIZE)))
		__initdata = { 0 };

	struct pci_iommu_arena *arena = pci_isa_hose->sg_isa;
	int ctrl, addr0, tag0, pte0, data0;
	int temp, use_tbia_try2 = 0;
	void __iomem *bus_addr;

	/* pyxis -- tbia is broken */
	if (pci_isa_hose->dense_io_base)
		use_tbia_try2 = 1;

	/* Put the chip into PCI loopback mode.  */
	mb();
	ctrl = *(vip)CIA_IOC_CIA_CTRL;
	*(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();

	/* Write a valid entry directly into the TLB registers.  */

	addr0 = arena->dma_base;
	tag0 = addr0 | 1;
	pte0 = (virt_to_phys(page) >> (PAGE_SHIFT - 1)) | 1;

	*(vip)CIA_IOC_TB_TAGn(0) = tag0;
	*(vip)CIA_IOC_TB_TAGn(1) = 0;
	*(vip)CIA_IOC_TB_TAGn(2) = 0;
	*(vip)CIA_IOC_TB_TAGn(3) = 0;
	*(vip)CIA_IOC_TB_TAGn(4) = 0;
	*(vip)CIA_IOC_TB_TAGn(5) = 0;
	*(vip)CIA_IOC_TB_TAGn(6) = 0;
	*(vip)CIA_IOC_TB_TAGn(7) = 0;
	*(vip)CIA_IOC_TBn_PAGEm(0,0) = pte0;
	*(vip)CIA_IOC_TBn_PAGEm(0,1) = 0;
	*(vip)CIA_IOC_TBn_PAGEm(0,2) = 0;
	*(vip)CIA_IOC_TBn_PAGEm(0,3) = 0;
	mb();

	/* Get a usable bus address */
	bus_addr = cia_ioremap(addr0, 8*PAGE_SIZE);

	/* First, verify we can read back what we've written.  If
	   this fails, we can't be sure of any of the other testing
	   we're going to do, so bail.  */
	/* ??? Actually, we could do the work with machine checks.
	   By passing this register update test, we pretty much
	   guarantee that cia_pci_tbi_try1 works.  If this test
	   fails, cia_pci_tbi_try2 might still work.  */

	temp = *(vip)CIA_IOC_TB_TAGn(0);
	if (temp != tag0) {
		printk("pci: failed tb register update test "
		       "(tag0 %#x != %#x)\n", temp, tag0);
		goto failed;
	}
	temp = *(vip)CIA_IOC_TB_TAGn(1);
	if (temp != 0) {
		printk("pci: failed tb register update test "
		       "(tag1 %#x != 0)\n", temp);
		goto failed;
	}
	temp = *(vip)CIA_IOC_TBn_PAGEm(0,0);
	if (temp != pte0) {
		printk("pci: failed tb register update test "
		       "(pte0 %#x != %#x)\n", temp, pte0);
		goto failed;
	}
	printk("pci: passed tb register update test\n");

	/* Second, verify we can actually do I/O through this entry.  */

	data0 = 0xdeadbeef;
	page[0] = data0;
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr);
	mb();
	mcheck_expected(0) = 0;
	mb();
	if (mcheck_taken(0)) {
		printk("pci: failed sg loopback i/o read test (mcheck)\n");
		goto failed;
	}
	if (temp != data0) {
		printk("pci: failed sg loopback i/o read test "
		       "(%#x != %#x)\n", temp, data0);
		goto failed;
	}
	printk("pci: passed sg loopback i/o read test\n");

	/* Third, try to invalidate the TLB.  */

	if (! use_tbia_try2) {
		cia_pci_tbi(arena->hose, 0, -1);
		temp = *(vip)CIA_IOC_TB_TAGn(0);
		if (temp & 1) {
			/* Tag still valid after tbia: broken tbia,
			   fall back to the loopback workaround.  */
			use_tbia_try2 = 1;
			printk("pci: failed tbia test; workaround available\n");
		} else {
			printk("pci: passed tbia test\n");
		}
	}

	/* Fourth, verify the TLB snoops the EV5's caches when
	   doing a tlb fill.  */

	data0 = 0x5adda15e;
	page[0] = data0;
	arena->ptes[4] = pte0;
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr + 4*PAGE_SIZE);
	mb();
	mcheck_expected(0) = 0;
	mb();
	if (mcheck_taken(0)) {
		printk("pci: failed pte write cache snoop test (mcheck)\n");
		goto failed;
	}
	if (temp != data0) {
		printk("pci: failed pte write cache snoop test "
		       "(%#x != %#x)\n", temp, data0);
		goto failed;
	}
	printk("pci: passed pte write cache snoop test\n");

	/* Fifth, verify that a previously invalid PTE entry gets
	   filled from the page table.  */

	data0 = 0xabcdef12;
	page[0] = data0;
	arena->ptes[5] = pte0;
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr + 5*PAGE_SIZE);
	mb();
	mcheck_expected(0) = 0;
	mb();
	if (mcheck_taken(0)) {
		/* Not fatal: work around it by over-aligning SG
		   allocations (align_entry = 4).  */
		printk("pci: failed valid tag invalid pte reload test "
		       "(mcheck; workaround available)\n");
		arena->align_entry = 4;
	} else if (temp != data0) {
		printk("pci: failed valid tag invalid pte reload test "
		       "(%#x != %#x)\n", temp, data0);
		goto failed;
	} else {
		printk("pci: passed valid tag invalid pte reload test\n");
	}

	/* Sixth, verify machine checks are working.  Test invalid
	   pte under the same valid tag as we used above.  */

	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr + 6*PAGE_SIZE);
	mb();
	mcheck_expected(0) = 0;
	mb();
	printk("pci: %s pci machine check test\n",
	       mcheck_taken(0) ? "passed" : "failed");

	/* Clean up after the tests.  */
	arena->ptes[4] = 0;
	arena->ptes[5] = 0;

	if (use_tbia_try2) {
		alpha_mv.mv_pci_tbi = cia_pci_tbi_try2;

		/* Tags 0-3 must be disabled if we use this workaround. */
		wmb();
		*(vip)CIA_IOC_TB_TAGn(0) = 2;
		*(vip)CIA_IOC_TB_TAGn(1) = 2;
		*(vip)CIA_IOC_TB_TAGn(2) = 2;
		*(vip)CIA_IOC_TB_TAGn(3) = 2;

		printk("pci: tbia workaround enabled\n");
	}
	/* Flush whatever the tests left behind, using the chosen method.  */
	alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

exit:
	/* unmap the bus addr */
	cia_iounmap(bus_addr);

	/* Restore normal PCI operation.  */
	mb();
	*(vip)CIA_IOC_CIA_CTRL = ctrl;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();
	return;

failed:
	printk("pci: disabling sg translation window\n");
	*(vip)CIA_IOC_PCI_W0_BASE = 0;
	*(vip)CIA_IOC_PCI_W1_BASE = 0;
	pci_isa_hose->sg_isa = NULL;
	alpha_mv.mv_pci_tbi = NULL;
	goto exit;
}
554
555#if defined(ALPHA_RESTORE_SRM_SETUP)
/* Save CIA configuration data as the console had it set up.  */
/* Filled by cia_save_srm_settings() and written back by
   cia_restore_srm_settings() so the SRM console finds the chipset
   in the state it left it.  */
struct
{
    unsigned int hae_mem;
    unsigned int hae_io;
    unsigned int pci_dac_offset;
    unsigned int err_mask;
    unsigned int cia_ctrl;
    unsigned int cia_cnfg;		/* 0 on non-PYXIS parts */
    struct {
	unsigned int w_base;
	unsigned int w_mask;
	unsigned int t_base;
    } window[4];			/* the four DMA windows */
} saved_config __attribute((common));
571
/*
 * Snapshot the chipset registers the SRM console configured, so that
 * cia_restore_srm_settings() can put them back before returning
 * control to the console (see cia_kill_arch()).
 */
void
cia_save_srm_settings(int is_pyxis)
{
	int i;

	/* Save some important registers. */
	saved_config.err_mask       = *(vip)CIA_IOC_ERR_MASK;
	saved_config.cia_ctrl       = *(vip)CIA_IOC_CIA_CTRL;
	saved_config.hae_mem        = *(vip)CIA_IOC_HAE_MEM;
	saved_config.hae_io         = *(vip)CIA_IOC_HAE_IO;
	saved_config.pci_dac_offset = *(vip)CIA_IOC_PCI_W_DAC;

	/* CNFG exists only on PYXIS; 0 doubles as "not pyxis" for
	   the restore path.  */
	if (is_pyxis)
	    saved_config.cia_cnfg   = *(vip)CIA_IOC_CIA_CNFG;
	else
	    saved_config.cia_cnfg   = 0;

	/* Save DMA windows configuration. */
	for (i = 0; i < 4; i++) {
	    saved_config.window[i].w_base = *(vip)CIA_IOC_PCI_Wn_BASE(i);
	    saved_config.window[i].w_mask = *(vip)CIA_IOC_PCI_Wn_MASK(i);
	    saved_config.window[i].t_base = *(vip)CIA_IOC_PCI_Tn_BASE(i);
	}
	mb();
}
597
/*
 * Write back the register state captured by cia_save_srm_settings():
 * DMA windows first, then HAEs, DAC offset, error mask and control.
 */
void
cia_restore_srm_settings(void)
{
	int i;

	for (i = 0; i < 4; i++) {
	    *(vip)CIA_IOC_PCI_Wn_BASE(i) = saved_config.window[i].w_base;
	    *(vip)CIA_IOC_PCI_Wn_MASK(i) = saved_config.window[i].w_mask;
	    *(vip)CIA_IOC_PCI_Tn_BASE(i) = saved_config.window[i].t_base;
	}

	*(vip)CIA_IOC_HAE_MEM   = saved_config.hae_mem;
	*(vip)CIA_IOC_HAE_IO    = saved_config.hae_io;
	*(vip)CIA_IOC_PCI_W_DAC = saved_config.pci_dac_offset;
	*(vip)CIA_IOC_ERR_MASK  = saved_config.err_mask;
	*(vip)CIA_IOC_CIA_CTRL  = saved_config.cia_ctrl;

	if (saved_config.cia_cnfg) /* Must be pyxis. */
	    *(vip)CIA_IOC_CIA_CNFG  = saved_config.cia_cnfg;

	mb();
}
620#else /* ALPHA_RESTORE_SRM_SETUP */
621#define cia_save_srm_settings(p)	do {} while (0)
622#define cia_restore_srm_settings()	do {} while (0)
623#endif /* ALPHA_RESTORE_SRM_SETUP */
624
625
/*
 * Common CIA/PYXIS chipset initialization: error reporting, the
 * single PCI hose, and the PCI-to-memory DMA windows.  Called from
 * cia_init_arch() (is_pyxis=0) and pyxis_init_arch() (is_pyxis=1).
 */
static void __init
do_init_arch(int is_pyxis)
{
	struct pci_controller *hose;
	int temp, cia_rev, tbia_window;

	cia_rev = *(vip)CIA_IOC_CIA_REV & CIA_REV_MASK;
	printk("pci: cia revision %d%s\n",
	       cia_rev, is_pyxis ? " (pyxis)" : "");

	/* Remember the console's setup so we can hand it back on halt.  */
	if (alpha_using_srm)
		cia_save_srm_settings(is_pyxis);

	/* Set up error reporting.  */
	temp = *(vip)CIA_IOC_ERR_MASK;
	temp &= ~(CIA_ERR_CPU_PE | CIA_ERR_MEM_NEM | CIA_ERR_PA_PTE_INV
		  | CIA_ERR_RCVD_MAS_ABT | CIA_ERR_RCVD_TAR_ABT);
	*(vip)CIA_IOC_ERR_MASK = temp;

	/* Clear all currently pending errors.  */
	temp = *(vip)CIA_IOC_CIA_ERR;
	*(vip)CIA_IOC_CIA_ERR = temp;

	/* Turn on mchecks.  */
	temp = *(vip)CIA_IOC_CIA_CTRL;
	temp |= CIA_CTRL_FILL_ERR_EN | CIA_CTRL_MCHK_ERR_EN;
	*(vip)CIA_IOC_CIA_CTRL = temp;

	/* Clear the CFG register, which gets used for PCI config space
	   accesses.  That is the way we want to use it, and we do not
	   want to depend on what ARC or SRM might have left behind.  */
	*(vip)CIA_IOC_CFG = 0;

	/* Zero the HAEs.  */
	*(vip)CIA_IOC_HAE_MEM = 0;
	*(vip)CIA_IOC_HAE_IO = 0;

	/* For PYXIS, we always use BWX bus and i/o accesses.  To that end,
	   make sure they're enabled on the controller.  At the same time,
	   enable the monster window.  */
	if (is_pyxis) {
		temp = *(vip)CIA_IOC_CIA_CNFG;
		temp |= CIA_CNFG_IOA_BWEN | CIA_CNFG_PCI_MWEN;
		*(vip)CIA_IOC_CIA_CNFG = temp;
	}

	/* Synchronize with all previous changes.  */
	mb();
	*(vip)CIA_IOC_CIA_REV;

	/*
	 * Create our single hose.
	 */

	pci_isa_hose = hose = alloc_pci_controller();
	hose->io_space = &ioport_resource;
	hose->mem_space = &iomem_resource;
	hose->index = 0;

	if (! is_pyxis) {
		/* Plain CIA: sparse-space accesses through a HAE-limited
		   memory resource.  */
		struct resource *hae_mem = alloc_resource();
		hose->mem_space = hae_mem;

		hae_mem->start = 0;
		hae_mem->end = CIA_MEM_R1_MASK;
		hae_mem->name = pci_hae0_name;
		hae_mem->flags = IORESOURCE_MEM;

		if (request_resource(&iomem_resource, hae_mem) < 0)
			printk(KERN_ERR "Failed to request HAE_MEM\n");

		hose->sparse_mem_base = CIA_SPARSE_MEM - IDENT_ADDR;
		hose->dense_mem_base = CIA_DENSE_MEM - IDENT_ADDR;
		hose->sparse_io_base = CIA_IO - IDENT_ADDR;
		hose->dense_io_base = 0;
	} else {
		/* PYXIS: BWX dense-space only (no sparse windows).  */
		hose->sparse_mem_base = 0;
		hose->dense_mem_base = CIA_BW_MEM - IDENT_ADDR;
		hose->sparse_io_base = 0;
		hose->dense_io_base = CIA_BW_IO - IDENT_ADDR;
	}

	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Window 0 is S/G 8MB at 8MB (for isa)
	 * Window 1 is S/G 1MB at 768MB (for tbia) (unused for CIA rev 1)
	 * Window 2 is direct access 2GB at 2GB
	 * Window 3 is DAC access 4GB at 8GB (or S/G for tbia if CIA rev 1)
	 *
	 * ??? NetBSD hints that page tables must be aligned to 32K,
	 * possibly due to a hardware bug.  This is over-aligned
	 * from the 8K alignment one would expect for an 8MB window.
	 * No description of what revisions affected.
	 */

	hose->sg_pci = NULL;
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 32768);

	__direct_map_base = 0x80000000;
	__direct_map_size = 0x80000000;

	*(vip)CIA_IOC_PCI_W0_BASE = hose->sg_isa->dma_base | 3;
	*(vip)CIA_IOC_PCI_W0_MASK = (hose->sg_isa->size - 1) & 0xfff00000;
	*(vip)CIA_IOC_PCI_T0_BASE = virt_to_phys(hose->sg_isa->ptes) >> 2;

	*(vip)CIA_IOC_PCI_W2_BASE = __direct_map_base | 1;
	*(vip)CIA_IOC_PCI_W2_MASK = (__direct_map_size - 1) & 0xfff00000;
	*(vip)CIA_IOC_PCI_T2_BASE = 0 >> 2;


	/* Pick which window hosts the broken-tbia workaround map.  */
	tbia_window = 1;
	if (is_pyxis) {
		/* pyxis does not have a DAC window; disable window 3.  */
		*(vip)CIA_IOC_PCI_W3_BASE = 0;
	} else if (cia_rev == 1) {
		/* CIA rev 1 cannot use window 1; put the workaround in 3.  */
		*(vip)CIA_IOC_PCI_W1_BASE = 0;
		tbia_window = 3;
	} else if (max_low_pfn > (0x100000000UL >> PAGE_SHIFT)) {
		/* More than 4GB of memory: DAC addressing won't cover it,
		   disable window 3.  */
		*(vip)CIA_IOC_PCI_W3_BASE = 0;
	} else {
		/* Enable window 3 as the 4GB DAC window.  */
		*(vip)CIA_IOC_PCI_W3_BASE = 0x00000000 | 1 | 8;
		*(vip)CIA_IOC_PCI_W3_MASK = 0xfff00000;
		*(vip)CIA_IOC_PCI_T3_BASE = 0 >> 2;

		alpha_mv.pci_dac_offset = 0x200000000UL;
		*(vip)CIA_IOC_PCI_W_DAC = alpha_mv.pci_dac_offset >> 32;
	}

	cia_prepare_tbia_workaround(tbia_window);
}
756
/* Entry point for plain (non-PYXIS) CIA/ALCOR chipsets.  */
void __init
cia_init_arch(void)
{
	do_init_arch(0);
}
762
/* Entry point for PYXIS chipsets: calibrate the CPU clock from the
   PYXIS real-time counter, then run the common init.  */
void __init
pyxis_init_arch(void)
{
	/* On pyxis machines we can precisely calculate the
	   CPU clock frequency using pyxis real time counter.
	   It's especially useful for SX164 with broken RTC.

	   Both CPU and chipset are driven by the single 16.666M
	   or 16.667M crystal oscillator. PYXIS_RT_COUNT clock is
	   66.66 MHz. -ink */

	unsigned int cc0, cc1;
	unsigned long pyxis_cc;

	/* Count CPU cycles (rpcc) across 4096 RT ticks; 4096 ticks at
	   66.66 MHz is 61.44 us, so cycle_freq = cc1 / 61.44us, i.e.
	   (cc1 >> 11) * 10^8 / 3 below.  */
	__asm__ __volatile__ ("rpcc %0" : "=r"(cc0));
	pyxis_cc = *(vulp)PYXIS_RT_COUNT;
	do { } while(*(vulp)PYXIS_RT_COUNT - pyxis_cc < 4096);
	__asm__ __volatile__ ("rpcc %0" : "=r"(cc1));
	cc1 -= cc0;
	hwrpb->cycle_freq = ((cc1 >> 11) * 100000000UL) / 3;
	hwrpb_update_checksum(hwrpb);

	do_init_arch(1);
}
787
788void
789cia_kill_arch(int mode)
790{
791	if (alpha_using_srm)
792		cia_restore_srm_settings();
793}
794
/* Late PCI init: run the SG TLB self-test, then the generic setup.  */
void __init
cia_init_pci(void)
{
	/* Must delay this from init_arch, as we need machine checks.  */
	verify_tb_operation();
	common_init_pci();
}
802
/* Acknowledge all pending CIA errors by writing the error register
   back to itself (bits are write-one-to-clear).  */
static inline void
cia_pci_clr_err(void)
{
	int jd;

	jd = *(vip)CIA_IOC_CIA_ERR;
	*(vip)CIA_IOC_CIA_ERR = jd;
	mb();
	*(vip)CIA_IOC_CIA_ERR;		/* re-read to force write.  */
}
813
814#ifdef CONFIG_VERBOSE_MCHECK
815static void
816cia_decode_pci_error(struct el_CIA_sysdata_mcheck *cia, const char *msg)
817{
818	static const char * const pci_cmd_desc[16] = {
819		"Interrupt Acknowledge", "Special Cycle", "I/O Read",
820		"I/O Write", "Reserved 0x4", "Reserved 0x5", "Memory Read",
821		"Memory Write", "Reserved 0x8", "Reserved 0x9",
822		"Configuration Read", "Configuration Write",
823		"Memory Read Multiple", "Dual Address Cycle",
824		"Memory Read Line", "Memory Write and Invalidate"
825	};
826
827	if (cia->cia_err & (CIA_ERR_COR_ERR
828			    | CIA_ERR_UN_COR_ERR
829			    | CIA_ERR_MEM_NEM
830			    | CIA_ERR_PA_PTE_INV)) {
831		static const char * const window_desc[6] = {
832			"No window active", "Window 0 hit", "Window 1 hit",
833			"Window 2 hit", "Window 3 hit", "Monster window hit"
834		};
835
836		const char *window;
837		const char *cmd;
838		unsigned long addr, tmp;
839		int lock, dac;
840
841		cmd = pci_cmd_desc[cia->pci_err0 & 0x7];
842		lock = (cia->pci_err0 >> 4) & 1;
843		dac = (cia->pci_err0 >> 5) & 1;
844
845		tmp = (cia->pci_err0 >> 8) & 0x1F;
846		tmp = ffs(tmp);
847		window = window_desc[tmp];
848
849		addr = cia->pci_err1;
850		if (dac) {
851			tmp = *(vip)CIA_IOC_PCI_W_DAC & 0xFFUL;
852			addr |= tmp << 32;
853		}
854
855		printk(KERN_CRIT "CIA machine check: %s\n", msg);
856		printk(KERN_CRIT "  DMA command: %s\n", cmd);
857		printk(KERN_CRIT "  PCI address: %#010lx\n", addr);
858		printk(KERN_CRIT "  %s, Lock: %d, DAC: %d\n",
859		       window, lock, dac);
860	} else if (cia->cia_err & (CIA_ERR_PERR
861				   | CIA_ERR_PCI_ADDR_PE
862				   | CIA_ERR_RCVD_MAS_ABT
863				   | CIA_ERR_RCVD_TAR_ABT
864				   | CIA_ERR_IOA_TIMEOUT)) {
865		static const char * const master_st_desc[16] = {
866			"Idle", "Drive bus", "Address step cycle",
867			"Address cycle", "Data cycle", "Last read data cycle",
868			"Last write data cycle", "Read stop cycle",
869			"Write stop cycle", "Read turnaround cycle",
870			"Write turnaround cycle", "Reserved 0xB",
871			"Reserved 0xC", "Reserved 0xD", "Reserved 0xE",
872			"Unknown state"
873		};
874		static const char * const target_st_desc[16] = {
875			"Idle", "Busy", "Read data cycle", "Write data cycle",
876			"Read stop cycle", "Write stop cycle",
877			"Read turnaround cycle", "Write turnaround cycle",
878			"Read wait cycle", "Write wait cycle",
879			"Reserved 0xA", "Reserved 0xB", "Reserved 0xC",
880			"Reserved 0xD", "Reserved 0xE", "Unknown state"
881		};
882
883		const char *cmd;
884		const char *master, *target;
885		unsigned long addr, tmp;
886		int dac;
887
888		master = master_st_desc[(cia->pci_err0 >> 16) & 0xF];
889		target = target_st_desc[(cia->pci_err0 >> 20) & 0xF];
890		cmd = pci_cmd_desc[(cia->pci_err0 >> 24) & 0xF];
891		dac = (cia->pci_err0 >> 28) & 1;
892
893		addr = cia->pci_err2;
894		if (dac) {
895			tmp = *(volatile int *)CIA_IOC_PCI_W_DAC & 0xFFUL;
896			addr |= tmp << 32;
897		}
898
899		printk(KERN_CRIT "CIA machine check: %s\n", msg);
900		printk(KERN_CRIT "  PCI command: %s\n", cmd);
901		printk(KERN_CRIT "  Master state: %s, Target state: %s\n",
902		       master, target);
903		printk(KERN_CRIT "  PCI address: %#010lx, DAC: %d\n",
904		       addr, dac);
905	} else {
906		printk(KERN_CRIT "CIA machine check: %s\n", msg);
907		printk(KERN_CRIT "  Unknown PCI error\n");
908		printk(KERN_CRIT "  PCI_ERR0 = %#08lx", cia->pci_err0);
909		printk(KERN_CRIT "  PCI_ERR1 = %#08lx", cia->pci_err1);
910		printk(KERN_CRIT "  PCI_ERR2 = %#08lx", cia->pci_err2);
911	}
912}
913
/*
 * Decode and print the memory-port details of a CIA machine check:
 * fault address, command, sequencer state and DRAM set, all pulled
 * from MEM_ERR0/MEM_ERR1.  If the failing command was a DMA, the
 * PCI side is decoded first via cia_decode_pci_error().
 */
static void
cia_decode_mem_error(struct el_CIA_sysdata_mcheck *cia, const char *msg)
{
	unsigned long mem_port_addr;
	unsigned long mem_port_mask;
	const char *mem_port_cmd;
	const char *seq_state;
	const char *set_select;
	unsigned long tmp;

	/* If this is a DMA command, also decode the PCI bits.  */
	if ((cia->mem_err1 >> 20) & 1)
		cia_decode_pci_error(cia, msg);
	else
		printk(KERN_CRIT "CIA machine check: %s\n", msg);

	/* Reassemble the fault address: low bits from MEM_ERR0, high
	   bits from MEM_ERR1.  */
	mem_port_addr = cia->mem_err0 & 0xfffffff0;
	mem_port_addr |= (cia->mem_err1 & 0x83UL) << 32;

	mem_port_mask = (cia->mem_err1 >> 12) & 0xF;

	/* 5-bit command code: 4 bits from MEM_ERR1[11:8] plus the DMA
	   bit as bit 4.  */
	tmp = (cia->mem_err1 >> 8) & 0xF;
	tmp |= ((cia->mem_err1 >> 20) & 1) << 4;
	if ((tmp & 0x1E) == 0x06)
		mem_port_cmd = "WRITE BLOCK or WRITE BLOCK LOCK";
	else if ((tmp & 0x1C) == 0x08)
		mem_port_cmd = "READ MISS or READ MISS MODIFY";
	else if (tmp == 0x1C)
		mem_port_cmd = "BC VICTIM";
	else if ((tmp & 0x1E) == 0x0E)
		mem_port_cmd = "READ MISS MODIFY";
	else if ((tmp & 0x1C) == 0x18)
		mem_port_cmd = "DMA READ or DMA READ MODIFY";
	else if ((tmp & 0x1E) == 0x12)
		mem_port_cmd = "DMA WRITE";
	else
		mem_port_cmd = "Unknown";

	/* Memory sequencer state at the time of the error.  */
	tmp = (cia->mem_err1 >> 16) & 0xF;
	switch (tmp) {
	case 0x0:
		seq_state = "Idle";
		break;
	case 0x1:
		seq_state = "DMA READ or DMA WRITE";
		break;
	case 0x2: case 0x3:
		seq_state = "READ MISS (or READ MISS MODIFY) with victim";
		break;
	case 0x4: case 0x5: case 0x6:
		seq_state = "READ MISS (or READ MISS MODIFY) with no victim";
		break;
	case 0x8: case 0x9: case 0xB:
		seq_state = "Refresh";
		break;
	case 0xC:
		seq_state = "Idle, waiting for DMA pending read";
		break;
	case 0xE: case 0xF:
		seq_state = "Idle, ras precharge";
		break;
	default:
		seq_state = "Unknown";
		break;
	}

	/* Which DRAM set was selected.  */
	tmp = (cia->mem_err1 >> 24) & 0x1F;
	switch (tmp) {
	case 0x00: set_select = "Set 0 selected"; break;
	case 0x01: set_select = "Set 1 selected"; break;
	case 0x02: set_select = "Set 2 selected"; break;
	case 0x03: set_select = "Set 3 selected"; break;
	case 0x04: set_select = "Set 4 selected"; break;
	case 0x05: set_select = "Set 5 selected"; break;
	case 0x06: set_select = "Set 6 selected"; break;
	case 0x07: set_select = "Set 7 selected"; break;
	case 0x08: set_select = "Set 8 selected"; break;
	case 0x09: set_select = "Set 9 selected"; break;
	case 0x0A: set_select = "Set A selected"; break;
	case 0x0B: set_select = "Set B selected"; break;
	case 0x0C: set_select = "Set C selected"; break;
	case 0x0D: set_select = "Set D selected"; break;
	case 0x0E: set_select = "Set E selected"; break;
	case 0x0F: set_select = "Set F selected"; break;
	case 0x10: set_select = "No set selected"; break;
	case 0x1F: set_select = "Refresh cycle"; break;
	default:   set_select = "Unknown"; break;
	}

	printk(KERN_CRIT "  Memory port command: %s\n", mem_port_cmd);
	printk(KERN_CRIT "  Memory port address: %#010lx, mask: %#lx\n",
	       mem_port_addr, mem_port_mask);
	printk(KERN_CRIT "  Memory sequencer state: %s\n", seq_state);
	printk(KERN_CRIT "  Memory set: %s\n", set_select);
}
1009
/*
 * Decode an ECC machine check: print the memory-port details, then
 * map the 8-bit syndrome to the failing check bit or data bit.
 */
static void
cia_decode_ecc_error(struct el_CIA_sysdata_mcheck *cia, const char *msg)
{
	long syn;
	long i;
	const char *fmt;

	cia_decode_mem_error(cia, msg);

	syn = cia->cia_syn & 0xff;
	/* A power-of-two syndrome means a single check bit flipped.  */
	if (syn == (syn & -syn)) {
		fmt = KERN_CRIT "  ECC syndrome %#x -- check bit %d\n";
		i = ffs(syn) - 1;
	} else {
		/* Syndrome values for single-bit errors in each of the
		   64 data bits; a match identifies the failing bit.  */
		static unsigned char const data_bit[64] = {
			0xCE, 0xCB, 0xD3, 0xD5,
			0xD6, 0xD9, 0xDA, 0xDC,
			0x23, 0x25, 0x26, 0x29,
			0x2A, 0x2C, 0x31, 0x34,
			0x0E, 0x0B, 0x13, 0x15,
			0x16, 0x19, 0x1A, 0x1C,
			0xE3, 0xE5, 0xE6, 0xE9,
			0xEA, 0xEC, 0xF1, 0xF4,
			0x4F, 0x4A, 0x52, 0x54,
			0x57, 0x58, 0x5B, 0x5D,
			0xA2, 0xA4, 0xA7, 0xA8,
			0xAB, 0xAD, 0xB0, 0xB5,
			0x8F, 0x8A, 0x92, 0x94,
			0x97, 0x98, 0x9B, 0x9D,
			0x62, 0x64, 0x67, 0x68,
			0x6B, 0x6D, 0x70, 0x75
		};

		for (i = 0; i < 64; ++i)
			if (data_bit[i] == syn)
				break;

		if (i < 64)
			fmt = KERN_CRIT "  ECC syndrome %#x -- data bit %d\n";
		else
			fmt = KERN_CRIT "  ECC syndrome %#x -- unknown bit\n";
	}

	printk (fmt, syn, i);
}
1055
1056static void
1057cia_decode_parity_error(struct el_CIA_sysdata_mcheck *cia)
1058{
1059	static const char * const cmd_desc[16] = {
1060		"NOP", "LOCK", "FETCH", "FETCH_M", "MEMORY BARRIER",
1061		"SET DIRTY", "WRITE BLOCK", "WRITE BLOCK LOCK",
1062		"READ MISS0", "READ MISS1", "READ MISS MOD0",
1063		"READ MISS MOD1", "BCACHE VICTIM", "Spare",
1064		"READ MISS MOD STC0", "READ MISS MOD STC1"
1065	};
1066
1067	unsigned long addr;
1068	unsigned long mask;
1069	const char *cmd;
1070	int par;
1071
1072	addr = cia->cpu_err0 & 0xfffffff0;
1073	addr |= (cia->cpu_err1 & 0x83UL) << 32;
1074	cmd = cmd_desc[(cia->cpu_err1 >> 8) & 0xF];
1075	mask = (cia->cpu_err1 >> 12) & 0xF;
1076	par = (cia->cpu_err1 >> 21) & 1;
1077
1078	printk(KERN_CRIT "CIA machine check: System bus parity error\n");
1079	printk(KERN_CRIT "  Command: %s, Parity bit: %d\n", cmd, par);
1080	printk(KERN_CRIT "  Address: %#010lx, Mask: %#lx\n", addr, mask);
1081}
1082#endif /* CONFIG_VERBOSE_MCHECK */
1083
1084
/*
 * Decode the CIA machine-check logout frame at LA_PTR.
 *
 * Returns 0 if the frame carries no valid CIA error (the caller then
 * treats the machine check as unexpected), 1 otherwise.  With verbose
 * machine checks configured and enabled, also prints a description of
 * the first-latched error and of any "lost" errors that arrived while
 * the error registers were already locked.
 */
static int
cia_decode_mchk(unsigned long la_ptr)
{
	struct el_common *com;
	struct el_CIA_sysdata_mcheck *cia;

	/* The system-specific frame follows the common logout header.  */
	com = (void *)la_ptr;
	cia = (void *)(la_ptr + com->sys_offset);

	if ((cia->cia_err & CIA_ERR_VALID) == 0)
		return 0;

#ifdef CONFIG_VERBOSE_MCHECK
	if (!alpha_verbose_mcheck)
		return 1;

	/* Only the lowest-numbered error bit in CIA_ERR is decoded here;
	   later errors are recorded in the LOST_* bits handled below.  */
	switch (ffs(cia->cia_err & 0xfff) - 1) {
	case 0: /* CIA_ERR_COR_ERR */
		cia_decode_ecc_error(cia, "Corrected ECC error");
		break;
	case 1: /* CIA_ERR_UN_COR_ERR */
		cia_decode_ecc_error(cia, "Uncorrected ECC error");
		break;
	case 2: /* CIA_ERR_CPU_PE */
		cia_decode_parity_error(cia);
		break;
	case 3: /* CIA_ERR_MEM_NEM */
		cia_decode_mem_error(cia, "Access to nonexistent memory");
		break;
	case 4: /* CIA_ERR_PCI_SERR */
		cia_decode_pci_error(cia, "PCI bus system error");
		break;
	case 5: /* CIA_ERR_PERR */
		cia_decode_pci_error(cia, "PCI data parity error");
		break;
	case 6: /* CIA_ERR_PCI_ADDR_PE */
		cia_decode_pci_error(cia, "PCI address parity error");
		break;
	case 7: /* CIA_ERR_RCVD_MAS_ABT */
		cia_decode_pci_error(cia, "PCI master abort");
		break;
	case 8: /* CIA_ERR_RCVD_TAR_ABT */
		cia_decode_pci_error(cia, "PCI target abort");
		break;
	case 9: /* CIA_ERR_PA_PTE_INV */
		cia_decode_pci_error(cia, "PCI invalid PTE");
		break;
	case 10: /* CIA_ERR_FROM_WRT_ERR */
		cia_decode_mem_error(cia, "Write to flash ROM attempted");
		break;
	case 11: /* CIA_ERR_IOA_TIMEOUT */
		cia_decode_pci_error(cia, "I/O timeout");
		break;
	}

	/* Report any additional errors that were latched (but not fully
	   recorded) while the first error held the logging registers.  */
	if (cia->cia_err & CIA_ERR_LOST_CORR_ERR)
		printk(KERN_CRIT "CIA lost machine check: "
		       "Correctable ECC error\n");
	if (cia->cia_err & CIA_ERR_LOST_UN_CORR_ERR)
		printk(KERN_CRIT "CIA lost machine check: "
		       "Uncorrectable ECC error\n");
	if (cia->cia_err & CIA_ERR_LOST_CPU_PE)
		printk(KERN_CRIT "CIA lost machine check: "
		       "System bus parity error\n");
	if (cia->cia_err & CIA_ERR_LOST_MEM_NEM)
		printk(KERN_CRIT "CIA lost machine check: "
		       "Access to nonexistent memory\n");
	if (cia->cia_err & CIA_ERR_LOST_PERR)
		printk(KERN_CRIT "CIA lost machine check: "
		       "PCI data parity error\n");
	if (cia->cia_err & CIA_ERR_LOST_PCI_ADDR_PE)
		printk(KERN_CRIT "CIA lost machine check: "
		       "PCI address parity error\n");
	if (cia->cia_err & CIA_ERR_LOST_RCVD_MAS_ABT)
		printk(KERN_CRIT "CIA lost machine check: "
		       "PCI master abort\n");
	if (cia->cia_err & CIA_ERR_LOST_RCVD_TAR_ABT)
		printk(KERN_CRIT "CIA lost machine check: "
		       "PCI target abort\n");
	if (cia->cia_err & CIA_ERR_LOST_PA_PTE_INV)
		printk(KERN_CRIT "CIA lost machine check: "
		       "PCI invalid PTE\n");
	if (cia->cia_err & CIA_ERR_LOST_FROM_WRT_ERR)
		printk(KERN_CRIT "CIA lost machine check: "
		       "Write to flash ROM attempted\n");
	if (cia->cia_err & CIA_ERR_LOST_IOA_TIMEOUT)
		printk(KERN_CRIT "CIA lost machine check: "
		       "I/O timeout\n");
#endif /* CONFIG_VERBOSE_MCHECK */

	return 1;
}
1177
/*
 * Machine-check handler for CIA-based systems.
 *
 * VECTOR is the SCB vector (0x660 for a system machine check);
 * LA_PTR points at the logout frame built by the PALcode.
 *
 * NOTE(review): the back-to-back mb() sequence below is deliberate
 * "magic" per the comment at the top of this file — do not reorder.
 */
void
cia_machine_check(unsigned long vector, unsigned long la_ptr)
{
	int expected;

	/* Clear the error before any reporting.  */
	mb();
	mb();  /* magic */
	draina();
	cia_pci_clr_err();
	wrmces(rdmces());	/* reset machine check pending flag.  */
	mb();

	/* An "expected" machine check arises from probing config space;
	   otherwise, try to decode the logout frame before reporting.  */
	expected = mcheck_expected(0);
	if (!expected && vector == 0x660)
		expected = cia_decode_mchk(la_ptr);
	process_mcheck_info(vector, la_ptr, "CIA", expected);
}
1196