1// SPDX-License-Identifier: GPL-2.0
2/*
3 *	linux/arch/alpha/kernel/core_t2.c
4 *
5 * Written by Jay A Estabrook (jestabro@amt.tay1.dec.com).
6 * December 1996.
7 *
8 * based on CIA code by David A Rusling (david.rusling@reo.mts.dec.com)
9 *
10 * Code common to all T2 core logic chips.
11 */
12
13#define __EXTERN_INLINE
14#include <asm/io.h>
15#include <asm/core_t2.h>
16#undef __EXTERN_INLINE
17
18#include <linux/types.h>
19#include <linux/pci.h>
20#include <linux/sched.h>
21#include <linux/init.h>
22
23#include <asm/ptrace.h>
24#include <asm/delay.h>
25#include <asm/mce.h>
26
27#include "proto.h"
28#include "pci_impl.h"
29
30/* For dumping initial DMA window settings. */
31#define DEBUG_PRINT_INITIAL_SETTINGS 0
32
33/* For dumping final DMA window settings. */
34#define DEBUG_PRINT_FINAL_SETTINGS 0
35
36/*
37 * By default, we direct-map starting at 2GB, in order to allow the
38 * maximum size direct-map window (2GB) to match the maximum amount of
39 * memory (2GB) that can be present on SABLEs. But that limits the
40 * floppy to DMA only via the scatter/gather window set up for 8MB
41 * ISA DMA, since the maximum ISA DMA address is 2GB-1.
42 *
43 * For now, this seems a reasonable trade-off: even though most SABLEs
44 * have less than 1GB of memory, floppy usage/performance will not
45 * really be affected by forcing it to go via scatter/gather...
46 */
47#define T2_DIRECTMAP_2G 1
48
49#if T2_DIRECTMAP_2G
50# define T2_DIRECTMAP_START	0x80000000UL
51# define T2_DIRECTMAP_LENGTH	0x80000000UL
52#else
53# define T2_DIRECTMAP_START	0x40000000UL
54# define T2_DIRECTMAP_LENGTH	0x40000000UL
55#endif
56
57/* The ISA scatter/gather window settings. */
58#define T2_ISA_SG_START		0x00800000UL
59#define T2_ISA_SG_LENGTH	0x00800000UL
60
61/*
62 * NOTE: Herein lie back-to-back mb instructions.  They are magic.
63 * One plausible explanation is that the i/o controller does not properly
64 * handle the system transaction.  Another involves timing.  Ho hum.
65 */
66
67/*
68 * BIOS32-style PCI interface:
69 */
70
71#define DEBUG_CONFIG 0
72
73#if DEBUG_CONFIG
74# define DBG(args)	printk args
75#else
76# define DBG(args)
77#endif
78
/* Per-CPU bitmasks coordinating machine-check handling across CPUs during
   config-space probes.  SABLE machine checks are "broadcast" to all CPUs
   (see comment above t2_machine_check), so each CPU needs to know whether
   some other CPU is expecting one.  volatile: shared between the probing
   CPU and the mcheck handlers on all CPUs. */
static volatile unsigned int t2_mcheck_any_expected;
static volatile unsigned int t2_mcheck_last_taken;
81
82/* Place to save the DMA Window registers as set up by SRM
83   for restoration during shutdown. */
static struct
{
	/* DMA windows 1 and 2: base, mask and translated-base registers. */
	struct {
		unsigned long wbase;
		unsigned long wmask;
		unsigned long tbase;
	} window[2];
	unsigned long hae_1;	/* sparse-memory HAE (kept as srm_hae) */
	unsigned long hae_2;	/* sparse-I/O HAE */
	unsigned long hae_3;	/* config-space HAE */
	unsigned long hae_4;	/* dense-memory HAE */
	unsigned long hbase;	/* hole base */
} t2_saved_config __attribute((common));
97
98/*
99 * Given a bus, device, and function number, compute resulting
100 * configuration space address and setup the T2_HAXR2 register
101 * accordingly.  It is therefore not safe to have concurrent
102 * invocations to configuration space access routines, but there
103 * really shouldn't be any need for this.
104 *
105 * Type 0:
106 *
107 *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
108 *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
109 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
110 * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0|
111 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
112 *
113 *	31:11	Device select bit.
114 * 	10:8	Function number
115 * 	 7:2	Register number
116 *
117 * Type 1:
118 *
119 *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
120 *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
121 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
122 * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
123 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
124 *
125 *	31:24	reserved
 *	23:16	bus number (8 bits = 256 possible buses)
127 *	15:11	Device number (5 bits)
128 *	10:8	function number
129 *	 7:2	register number
130 *
131 * Notes:
132 *	The function number selects which function of a multi-function device
133 *	(e.g., SCSI and Ethernet).
134 *
135 *	The register selects a DWORD (32 bit) register offset.  Hence it
136 *	doesn't get shifted by 2 bits as we want to "drop" the bottom two
137 *	bits.
138 */
139
140static int
141mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
142	     unsigned long *pci_addr, unsigned char *type1)
143{
144	unsigned long addr;
145	u8 bus = pbus->number;
146
147	DBG(("mk_conf_addr(bus=%d, dfn=0x%x, where=0x%x,"
148	     " addr=0x%lx, type1=0x%x)\n",
149	     bus, device_fn, where, pci_addr, type1));
150
151	if (bus == 0) {
152		int device = device_fn >> 3;
153
154		/* Type 0 configuration cycle.  */
155
156		if (device > 8) {
157			DBG(("mk_conf_addr: device (%d)>20, returning -1\n",
158			     device));
159			return -1;
160		}
161
162		*type1 = 0;
163		addr = (0x0800L << device) | ((device_fn & 7) << 8) | (where);
164	} else {
165		/* Type 1 configuration cycle.  */
166		*type1 = 1;
167		addr = (bus << 16) | (device_fn << 8) | (where);
168	}
169	*pci_addr = addr;
170	DBG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
171	return 0;
172}
173
174/*
175 * NOTE: both conf_read() and conf_write() may set HAE_3 when needing
176 *       to do type1 access. This is protected by the use of spinlock IRQ
177 *       primitives in the wrapper functions pci_{read,write}_config_*()
178 *       defined in drivers/pci/pci.c.
179 */
/*
 * Read a longword from PCI configuration space at sparse address ADDR.
 * Accessing a non-existent device triggers a machine check, so we flag
 * the access as "expected" beforehand; if one is taken, return all-ones
 * (the PCI master-abort convention).
 */
static unsigned int
conf_read(unsigned long addr, unsigned char type1)
{
	unsigned int value, cpu, taken;
	unsigned long t2_cfg = 0;

	cpu = smp_processor_id();

	DBG(("conf_read(addr=0x%lx, type1=%d)\n", addr, type1));

	/* If Type1 access, must set T2 CFG.  */
	if (type1) {
		/* Save current HAE_3 (minus the CFG field) for restore,
		   and select Type 1 cycles via bit 30.  */
		t2_cfg = *(vulp)T2_HAE_3 & ~0xc0000000UL;
		*(vulp)T2_HAE_3 = 0x40000000UL | t2_cfg;
		mb();
	}
	mb();
	draina();	/* drain pending writes before the probe */

	/* Arm the machine-check handler: this CPU expects one.  */
	mcheck_expected(cpu) = 1;
	mcheck_taken(cpu) = 0;
	t2_mcheck_any_expected |= (1 << cpu);
	mb();

	/* Access configuration space. */
	value = *(vuip)addr;
	mb();
	mb();  /* magic */

	/* Wait for possible mcheck. Also, this lets other CPUs clear
	   their mchecks as well, as they can reliably tell when
	   another CPU is in the midst of handling a real mcheck via
	   the "taken" function. */
	udelay(100);

	if ((taken = mcheck_taken(cpu))) {
		/* Machine check taken: device absent, report all-ones.  */
		mcheck_taken(cpu) = 0;
		t2_mcheck_last_taken |= (1 << cpu);
		value = 0xffffffffU;
		mb();
	}
	/* Disarm.  NOTE: this clears every CPU's bit in any_expected,
	   not just ours -- presumably safe under the config-access
	   serialization noted above.  */
	mcheck_expected(cpu) = 0;
	t2_mcheck_any_expected = 0;
	mb();

	/* If Type1 access, must reset T2 CFG so normal IO space ops work.  */
	if (type1) {
		*(vulp)T2_HAE_3 = t2_cfg;
		mb();
	}

	return value;
}
233
/*
 * Write VALUE to PCI configuration space at sparse address ADDR.
 * Mirror image of conf_read(): arm the expected-mcheck machinery,
 * perform the write, and dismiss any machine check it provoked.
 */
static void
conf_write(unsigned long addr, unsigned int value, unsigned char type1)
{
	unsigned int cpu, taken;
	unsigned long t2_cfg = 0;

	cpu = smp_processor_id();

	/* If Type1 access, must set T2 CFG.  */
	if (type1) {
		/* Save HAE_3 (minus the CFG field) and select Type 1
		   cycles via bit 30.  */
		t2_cfg = *(vulp)T2_HAE_3 & ~0xc0000000UL;
		*(vulp)T2_HAE_3 = t2_cfg | 0x40000000UL;
		mb();
	}
	mb();
	draina();	/* drain pending writes before the probe */

	/* Arm the machine-check handler: this CPU expects one.  */
	mcheck_expected(cpu) = 1;
	mcheck_taken(cpu) = 0;
	t2_mcheck_any_expected |= (1 << cpu);
	mb();

	/* Access configuration space.  */
	*(vuip)addr = value;
	mb();
	mb();  /* magic */

	/* Wait for possible mcheck. Also, this lets other CPUs clear
	   their mchecks as well, as they can reliably tell when
	   this CPU is in the midst of handling a real mcheck via
	   the "taken" function. */
	udelay(100);

	if ((taken = mcheck_taken(cpu))) {
		/* Machine check taken: write went nowhere; just record it. */
		mcheck_taken(cpu) = 0;
		t2_mcheck_last_taken |= (1 << cpu);
		mb();
	}
	/* Disarm (clears all CPUs' any_expected bits, as in conf_read). */
	mcheck_expected(cpu) = 0;
	t2_mcheck_any_expected = 0;
	mb();

	/* If Type1 access, must reset T2 CFG so normal IO space ops work.  */
	if (type1) {
		*(vulp)T2_HAE_3 = t2_cfg;
		mb();
	}
}
282
283static int
284t2_read_config(struct pci_bus *bus, unsigned int devfn, int where,
285	       int size, u32 *value)
286{
287	unsigned long addr, pci_addr;
288	unsigned char type1;
289	int shift;
290	long mask;
291
292	if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
293		return PCIBIOS_DEVICE_NOT_FOUND;
294
295	mask = (size - 1) * 8;
296	shift = (where & 3) * 8;
297	addr = (pci_addr << 5) + mask + T2_CONF;
298	*value = conf_read(addr, type1) >> (shift);
299	return PCIBIOS_SUCCESSFUL;
300}
301
302static int
303t2_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
304		u32 value)
305{
306	unsigned long addr, pci_addr;
307	unsigned char type1;
308	long mask;
309
310	if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
311		return PCIBIOS_DEVICE_NOT_FOUND;
312
313	mask = (size - 1) * 8;
314	addr = (pci_addr << 5) + mask + T2_CONF;
315	conf_write(addr, value << ((where & 3) * 8), type1);
316	return PCIBIOS_SUCCESSFUL;
317}
318
/* Config-space accessors handed to the generic PCI layer. */
struct pci_ops t2_pci_ops =
{
	.read =		t2_read_config,
	.write =	t2_write_config,
};
324
/*
 * Program DMA window 1 as a direct map of LENGTH bytes of PCI bus
 * address space starting at BASE onto physical address 0.
 */
static void __init
t2_direct_map_window1(unsigned long base, unsigned long length)
{
	unsigned long temp;

	/* Record the mapping for the DMA API.  */
	__direct_map_base = base;
	__direct_map_size = length;

	/* WBASE: window base in the high 12 bits, window end in the low. */
	temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20);
	*(vulp)T2_WBASE1 = temp | 0x80000UL; /* OR in ENABLE bit */
	temp = (length - 1) & 0xfff00000UL;
	*(vulp)T2_WMASK1 = temp;
	*(vulp)T2_TBASE1 = 0;	/* translate to physical 0 (direct map) */

#if DEBUG_PRINT_FINAL_SETTINGS
	printk("%s: setting WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n",
	       __func__, *(vulp)T2_WBASE1, *(vulp)T2_WMASK1, *(vulp)T2_TBASE1);
#endif
}
344
/*
 * Program DMA window 2 as a scatter/gather window of LENGTH bytes at
 * bus address BASE, backed by a freshly allocated IOMMU arena.
 */
static void __init
t2_sg_map_window2(struct pci_controller *hose,
		  unsigned long base,
		  unsigned long length)
{
	unsigned long temp;

	/* Note we can only do 1 SG window, as the other is for direct, so
	   do an ISA SG area, especially for the floppy. */
	hose->sg_isa = iommu_arena_new(hose, base, length, SMP_CACHE_BYTES);
	hose->sg_pci = NULL;

	/* Same WBASE/WMASK layout as window 1, plus the SG bit.  */
	temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20);
	*(vulp)T2_WBASE2 = temp | 0xc0000UL; /* OR in ENABLE/SG bits */
	temp = (length - 1) & 0xfff00000UL;
	*(vulp)T2_WMASK2 = temp;
	/* TBASE holds the physical address of the PTE array, shifted.  */
	*(vulp)T2_TBASE2 = virt_to_phys(hose->sg_isa->ptes) >> 1;
	mb();

	t2_pci_tbi(hose, 0, -1); /* flush TLB all */

#if DEBUG_PRINT_FINAL_SETTINGS
	printk("%s: setting WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n",
	       __func__, *(vulp)T2_WBASE2, *(vulp)T2_WMASK2, *(vulp)T2_TBASE2);
#endif
}
371
/*
 * Snapshot the SRM-programmed DMA window and HAE registers so
 * t2_kill_arch() can restore them before returning to the console.
 */
static void __init
t2_save_configuration(void)
{
#if DEBUG_PRINT_INITIAL_SETTINGS
	printk("%s: HAE_1 was 0x%lx\n", __func__, srm_hae); /* HW is 0 */
	printk("%s: HAE_2 was 0x%lx\n", __func__, *(vulp)T2_HAE_2);
	printk("%s: HAE_3 was 0x%lx\n", __func__, *(vulp)T2_HAE_3);
	printk("%s: HAE_4 was 0x%lx\n", __func__, *(vulp)T2_HAE_4);
	printk("%s: HBASE was 0x%lx\n", __func__, *(vulp)T2_HBASE);

	printk("%s: WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n", __func__,
	       *(vulp)T2_WBASE1, *(vulp)T2_WMASK1, *(vulp)T2_TBASE1);
	printk("%s: WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n", __func__,
	       *(vulp)T2_WBASE2, *(vulp)T2_WMASK2, *(vulp)T2_TBASE2);
#endif

	/*
	 * Save the DMA Window registers.
	 */
	t2_saved_config.window[0].wbase = *(vulp)T2_WBASE1;
	t2_saved_config.window[0].wmask = *(vulp)T2_WMASK1;
	t2_saved_config.window[0].tbase = *(vulp)T2_TBASE1;
	t2_saved_config.window[1].wbase = *(vulp)T2_WBASE2;
	t2_saved_config.window[1].wmask = *(vulp)T2_WMASK2;
	t2_saved_config.window[1].tbase = *(vulp)T2_TBASE2;

	/* HAE_1 comes from the saved SRM value, not the register.  */
	t2_saved_config.hae_1 = srm_hae; /* HW is already set to 0 */
	t2_saved_config.hae_2 = *(vulp)T2_HAE_2;
	t2_saved_config.hae_3 = *(vulp)T2_HAE_3;
	t2_saved_config.hae_4 = *(vulp)T2_HAE_4;
	t2_saved_config.hbase = *(vulp)T2_HBASE;
}
404
/*
 * One-time T2 core-logic initialization: reset mcheck bookkeeping,
 * enable the SG TLB, save SRM register state, create the single PCI
 * hose, program the DMA windows, and zero the HAE registers.
 */
void __init
t2_init_arch(void)
{
	struct pci_controller *hose;
	struct resource *hae_mem;
	unsigned long temp;
	unsigned int i;

	/* Start with a clean machine-check slate on every CPU.  */
	for (i = 0; i < NR_CPUS; i++) {
		mcheck_expected(i) = 0;
		mcheck_taken(i) = 0;
	}
	t2_mcheck_any_expected = 0;
	t2_mcheck_last_taken = 0;

	/* Enable scatter/gather TLB use.  */
	temp = *(vulp)T2_IOCSR;
	if (!(temp & (0x1UL << 26))) {
		printk("t2_init_arch: enabling SG TLB, IOCSR was 0x%lx\n",
		       temp);
		*(vulp)T2_IOCSR = temp | (0x1UL << 26);
		mb();
		*(vulp)T2_IOCSR; /* read it back to make sure */
	}

	/* Snapshot SRM state before we start overwriting registers.  */
	t2_save_configuration();

	/*
	 * Create our single hose.
	 */
	pci_isa_hose = hose = alloc_pci_controller();
	hose->io_space = &ioport_resource;
	hae_mem = alloc_resource();
	hae_mem->start = 0;
	hae_mem->end = T2_MEM_R1_MASK;
	hae_mem->name = pci_hae0_name;
	if (request_resource(&iomem_resource, hae_mem) < 0)
		printk(KERN_ERR "Failed to request HAE_MEM\n");
	hose->mem_space = hae_mem;
	hose->index = 0;

	hose->sparse_mem_base = T2_SPARSE_MEM - IDENT_ADDR;
	hose->dense_mem_base = T2_DENSE_MEM - IDENT_ADDR;
	hose->sparse_io_base = T2_IO - IDENT_ADDR;
	hose->dense_io_base = 0;

	/*
	 * Set up the PCI->physical memory translation windows.
	 *
	 * Window 1 is direct mapped.
	 * Window 2 is scatter/gather (for ISA).
	 */

	t2_direct_map_window1(T2_DIRECTMAP_START, T2_DIRECTMAP_LENGTH);

	/* Always make an ISA DMA window. */
	t2_sg_map_window2(hose, T2_ISA_SG_START, T2_ISA_SG_LENGTH);

	*(vulp)T2_HBASE = 0x0; /* Disable HOLES. */

	/* Zero HAE.  */
	*(vulp)T2_HAE_1 = 0; mb(); /* Sparse MEM HAE */
	*(vulp)T2_HAE_2 = 0; mb(); /* Sparse I/O HAE */
	*(vulp)T2_HAE_3 = 0; mb(); /* Config Space HAE */

	/*
	 * We also now zero out HAE_4, the dense memory HAE, so that
	 * we need not account for its "offset" when accessing dense
	 * memory resources which we allocated in our normal way. This
	 * HAE would need to stay untouched were we to keep the SRM
	 * resource settings.
	 *
	 * Thus we can now run standard X servers on SABLE/LYNX. :-)
	 */
	*(vulp)T2_HAE_4 = 0; mb();
}
481
/*
 * Shutdown hook: restore the DMA window and HAE registers to the
 * values SRM had programmed (saved in t2_save_configuration), so the
 * console firmware finds the hardware as it left it.
 */
void
t2_kill_arch(int mode)
{
	/*
	 * Restore the DMA Window registers.
	 */
	*(vulp)T2_WBASE1 = t2_saved_config.window[0].wbase;
	*(vulp)T2_WMASK1 = t2_saved_config.window[0].wmask;
	*(vulp)T2_TBASE1 = t2_saved_config.window[0].tbase;
	*(vulp)T2_WBASE2 = t2_saved_config.window[1].wbase;
	*(vulp)T2_WMASK2 = t2_saved_config.window[1].wmask;
	*(vulp)T2_TBASE2 = t2_saved_config.window[1].tbase;
	mb();

	*(vulp)T2_HAE_1 = srm_hae;
	*(vulp)T2_HAE_2 = t2_saved_config.hae_2;
	*(vulp)T2_HAE_3 = t2_saved_config.hae_3;
	*(vulp)T2_HAE_4 = t2_saved_config.hae_4;
	*(vulp)T2_HBASE = t2_saved_config.hbase;
	mb();
	*(vulp)T2_HBASE; /* READ it back to ensure WRITE occurred. */
}
504
/*
 * Invalidate the scatter/gather TLB.  The T2 has no per-entry
 * invalidate, so START/END are ignored and the whole TLB is flushed
 * by pulsing the TLB Clear bit in IOCSR.
 */
void
t2_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	unsigned long t2_iocsr;

	t2_iocsr = *(vulp)T2_IOCSR;

	/* set the TLB Clear bit */
	*(vulp)T2_IOCSR = t2_iocsr | (0x1UL << 28);
	mb();
	*(vulp)T2_IOCSR; /* read it back to make sure */

	/* clear the TLB Clear bit */
	*(vulp)T2_IOCSR = t2_iocsr & ~(0x1UL << 28);
	mb();
	*(vulp)T2_IOCSR; /* read it back to make sure */
}
522
523#define SIC_SEIC (1UL << 33)    /* System Event Clear */
524
/*
 * Acknowledge/clear all latched error state for CPU.  The error
 * registers appear to be write-1-to-clear: reading the register and
 * OR-ing the value back in rewrites each set bit with 1, clearing it.
 */
static void
t2_clear_errors(int cpu)
{
	struct sable_cpu_csr *cpu_regs;

	cpu_regs = (struct sable_cpu_csr *)T2_CPUn_BASE(cpu);

	/* Clear the System Event interrupt condition.  */
	cpu_regs->sic &= ~SIC_SEIC;

	/* Clear CPU errors.  */
	cpu_regs->bcce |= cpu_regs->bcce;
	cpu_regs->cbe  |= cpu_regs->cbe;
	cpu_regs->bcue |= cpu_regs->bcue;
	cpu_regs->dter |= cpu_regs->dter;

	/* Clear T2 chip errors (same write-back-to-clear idiom).  */
	*(vulp)T2_CERR1 |= *(vulp)T2_CERR1;
	*(vulp)T2_PERR1 |= *(vulp)T2_PERR1;

	mb();
	mb();  /* magic */
}
546
547/*
548 * SABLE seems to have a "broadcast" style machine check, in that all
549 * CPUs receive it. And, the issuing CPU, in the case of PCI Config
550 * space read/write faults, will also receive a second mcheck, upon
551 * lowering IPL during completion processing in pci_read_config_byte()
552 * et al.
553 *
554 * Hence all the taken/expected/any_expected/last_taken stuff...
555 */
/*
 * Machine-check handler.  Because SABLE broadcasts machine checks to
 * every CPU, we must distinguish (a) the mcheck this CPU provoked by a
 * config-space probe, (b) the broadcast echo of another CPU's probe,
 * and (c) the follow-up mcheck delivered after completion processing.
 * Anything that doesn't match one of those is handed to the generic
 * reporting code.
 */
void
t2_machine_check(unsigned long vector, unsigned long la_ptr)
{
	int cpu = smp_processor_id();
#ifdef CONFIG_VERBOSE_MCHECK
	struct el_common *mchk_header = (struct el_common *)la_ptr;
#endif

	/* Clear the error before any reporting.  */
	mb();
	mb();  /* magic */
	draina();
	t2_clear_errors(cpu);

	/* This should not actually be done until the logout frame is
	   examined, but, since we don't do that, go on and do this... */
	wrmces(0x7);
	mb();

	/* Now, do testing for the anomalous conditions. */
	if (!mcheck_expected(cpu) && t2_mcheck_any_expected) {
		/*
		 * FUNKY: Received mcheck on a CPU and not
		 * expecting it, but another CPU is expecting one.
		 *
		 * Just dismiss it for now on this CPU...
		 */
#ifdef CONFIG_VERBOSE_MCHECK
		if (alpha_verbose_mcheck > 1) {
			printk("t2_machine_check(cpu%d): any_expected 0x%x -"
			       " (assumed) spurious -"
			       " code 0x%x\n", cpu, t2_mcheck_any_expected,
			       (unsigned int)mchk_header->code);
		}
#endif
		return;
	}

	/* Nobody is expecting an mcheck.  If this CPU recently took one
	   during a probe, treat this as the IPL-lowering follow-up and
	   dismiss it; either way clear the last_taken record.  */
	if (!mcheck_expected(cpu) && !t2_mcheck_any_expected) {
		if (t2_mcheck_last_taken & (1 << cpu)) {
#ifdef CONFIG_VERBOSE_MCHECK
		    if (alpha_verbose_mcheck > 1) {
			printk("t2_machine_check(cpu%d): last_taken 0x%x - "
			       "unexpected mcheck - code 0x%x\n",
			       cpu, t2_mcheck_last_taken,
			       (unsigned int)mchk_header->code);
		    }
#endif
		    t2_mcheck_last_taken = 0;
		    mb();
		    return;
		} else {
			t2_mcheck_last_taken = 0;
			mb();
		}
	}

#ifdef CONFIG_VERBOSE_MCHECK
	if (alpha_verbose_mcheck > 1) {
		printk("%s t2_mcheck(cpu%d): last_taken 0x%x - "
		       "any_expected 0x%x - code 0x%x\n",
		       (mcheck_expected(cpu) ? "EX" : "UN"), cpu,
		       t2_mcheck_last_taken, t2_mcheck_any_expected,
		       (unsigned int)mchk_header->code);
	}
#endif

	/* Normal path: report, noting whether this one was expected.  */
	process_mcheck_info(vector, la_ptr, "T2", mcheck_expected(cpu));
}
625