1/*
2 * Board setup routines for the Sky Computers HDPU Compute Blade.
3 *
4 * Written by Brian Waite <waite@skycomputers.com>
5 *
6 * Based on code done by - Mark A. Greer <mgreer@mvista.com>
7 *                         Rabeeh Khoury - rabeeh@galileo.co.il
8 *
9 * This program is free software; you can redistribute  it and/or modify it
10 * under  the terms of  the GNU General  Public License as published by the
11 * Free Software Foundation;  either version 2 of the  License, or (at your
12 * option) any later version.
13 */
14
15
16#include <linux/pci.h>
17#include <linux/delay.h>
18#include <linux/irq.h>
19#include <linux/ide.h>
20#include <linux/seq_file.h>
21#include <linux/platform_device.h>
22
23#include <linux/initrd.h>
24#include <linux/root_dev.h>
25#include <linux/smp.h>
26
27#include <asm/time.h>
28#include <asm/machdep.h>
29#include <asm/todc.h>
30#include <asm/mv64x60.h>
31#include <asm/ppcboot.h>
32#include <platforms/hdpu.h>
33#include <linux/mv643xx.h>
34#include <linux/hdpu_features.h>
35#include <linux/device.h>
36#include <linux/mtd/physmap.h>
37
#define BOARD_VENDOR	"Sky Computers"
#define BOARD_MACHINE	"HDPU-CB-A"

/* Board info copied out of the bootloader's bd_t by parse_bootinfo() */
bd_t ppcboot_bd;
int ppcboot_bd_valid = 0;	/* nonzero once ppcboot_bd holds valid data */

/* Handle for the MV64360 host bridge, populated by mv64x60_init() */
static mv64x60_handle_t bh;

extern char cmd_line[];

unsigned long hdpu_find_end_of_memory(void);
void hdpu_mpsc_progress(char *s, unsigned short hex);
void hdpu_heartbeat(void);

static void parse_bootinfo(unsigned long r3,
			   unsigned long r4, unsigned long r5,
			   unsigned long r6, unsigned long r7);
static void hdpu_set_l1pe(void);
static void hdpu_cpustate_set(unsigned char new_state);
#ifdef CONFIG_SMP
/* Protects the timebase value hand-off between the two CPUs */
static DEFINE_SPINLOCK(timebase_lock);
/* Timebase snapshot published by CPU 0, consumed by CPU 1 */
static unsigned int timebase_upper = 0, timebase_lower = 0;
extern int smp_tb_synchronized;

void __devinit hdpu_tben_give(void);
void __devinit hdpu_tben_take(void);
#endif
65
/*
 * ppc_md.pci_map_irq hook: map a device's slot (idsel) and interrupt
 * pin to a board IRQ.  All slots on a given hose share one IRQ line.
 * Note: PCI_IRQ_TABLE_LOOKUP expands in terms of the local names
 * pci_irq_table, min_idsel, max_idsel and irqs_per_slot.
 */
static int __init
hdpu_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
{
	struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);

	if (hose->index == 0) {
		/* Hose 0: idsels 1-2, both wired to HDPU_PCI_0_IRQ */
		static char pci_irq_table[][4] = {
			{HDPU_PCI_0_IRQ, 0, 0, 0},
			{HDPU_PCI_0_IRQ, 0, 0, 0},
		};

		const long min_idsel = 1, max_idsel = 2, irqs_per_slot = 4;
		return PCI_IRQ_TABLE_LOOKUP;
	} else {
		/* Hose 1: single idsel wired to HDPU_PCI_1_IRQ */
		static char pci_irq_table[][4] = {
			{HDPU_PCI_1_IRQ, 0, 0, 0},
		};

		const long min_idsel = 1, max_idsel = 1, irqs_per_slot = 4;
		return PCI_IRQ_TABLE_LOOKUP;
	}
}
88
/*
 * Configure the MV64360 GPP pins and route the board's GPP interrupt
 * sources (pins 8 and 13) to CPU 0.
 */
static void __init hdpu_intr_setup(void)
{
	/* Program the listed GPP pins as outputs (LEDs, resets, etc.) */
	mv64x60_write(&bh, MV64x60_GPP_IO_CNTL,
		      (1 | (1 << 2) | (1 << 3) | (1 << 4) | (1 << 5) |
		       (1 << 6) | (1 << 7) | (1 << 12) | (1 << 16) |
		       (1 << 18) | (1 << 19) | (1 << 20) | (1 << 21) |
		       (1 << 22) | (1 << 23) | (1 << 24) | (1 << 25) |
		       (1 << 26) | (1 << 27) | (1 << 28) | (1 << 29)));

	/* Clear bits 5 and 9 in both PCI command registers */
	mv64x60_clr_bits(&bh, MV64x60_PCI0_CMD, (1 << 5) | (1 << 9));
	mv64x60_clr_bits(&bh, MV64x60_PCI1_CMD, (1 << 5) | (1 << 9));

	/*
	 * Dismiss and then enable interrupt on GPP interrupt cause
	 * for CPU #0
	 */
	mv64x60_write(&bh, MV64x60_GPP_INTR_CAUSE, ~((1 << 8) | (1 << 13)));
	mv64x60_set_bits(&bh, MV64x60_GPP_INTR_MASK, (1 << 8) | (1 << 13));

	/*
	 * Dismiss and then enable interrupt on CPU #0 high cause reg
	 * BIT25 summarizes GPP interrupts 8-15
	 */
	mv64x60_set_bits(&bh, MV64360_IC_CPU0_INTR_MASK_HI, (1 << 25));
}
114
/*
 * Set up the MV64360 CPU-to-device address windows (boot flash, TBEN
 * CPLD, nexus-ID CPLD, internal SRAM), tune the SDRAM read buffers,
 * and configure the internal SRAM, then hook up the GPP interrupts.
 */
static void __init hdpu_setup_peripherals(void)
{
	unsigned int val;

	/* Window for the embedded boot flash (boot chip select) */
	mv64x60_set_32bit_window(&bh, MV64x60_CPU2BOOT_WIN,
				 HDPU_EMB_FLASH_BASE, HDPU_EMB_FLASH_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN);

	/* Window for the timebase-enable (TBEN) CPLD register */
	mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_0_WIN,
				 HDPU_TBEN_BASE, HDPU_TBEN_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2DEV_0_WIN);

	/* Window for the nexus-ID (CPU count) CPLD register */
	mv64x60_set_32bit_window(&bh, MV64x60_CPU2DEV_1_WIN,
				 HDPU_NEXUS_ID_BASE, HDPU_NEXUS_ID_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2DEV_1_WIN);

	/* Window for the bridge's internal SRAM */
	mv64x60_set_32bit_window(&bh, MV64x60_CPU2SRAM_WIN,
				 HDPU_INTERNAL_SRAM_BASE,
				 HDPU_INTERNAL_SRAM_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);

	/* Unused ethernet-to-memory window: disable and zero it */
	bh.ci->disable_window_32bit(&bh, MV64x60_ENET2MEM_4_WIN);
	mv64x60_set_32bit_window(&bh, MV64x60_ENET2MEM_4_WIN, 0, 0, 0);

	mv64x60_clr_bits(&bh, MV64x60_PCI0_PCI_DECODE_CNTL, (1 << 3));
	mv64x60_clr_bits(&bh, MV64x60_PCI1_PCI_DECODE_CNTL, (1 << 3));
	/* Stop timers/counters 0-3 (clear each enable bit) */
	mv64x60_clr_bits(&bh, MV64x60_TIMR_CNTR_0_3_CNTL,
			 ((1 << 0) | (1 << 8) | (1 << 16) | (1 << 24)));

	/* Enable pipelining */
	mv64x60_set_bits(&bh, MV64x60_CPU_CONFIG, (1 << 13));
	/* Enable Snoop Pipelining */
	mv64x60_set_bits(&bh, MV64360_D_UNIT_CONTROL_HIGH, (1 << 24));

	/*
	 * Change DRAM read buffer assignment.
	 * Assign read buffer 0 dedicated only for CPU,
	 * and the rest read buffer 1.
	 */
	val = mv64x60_read(&bh, MV64360_SDRAM_CONFIG);
	val = val & 0x03ffffff;
	val = val | 0xf8000000;
	mv64x60_write(&bh, MV64360_SDRAM_CONFIG, val);

	/*
	 * Configure internal SRAM -
	 * Cache coherent write back, if CONFIG_MV64360_SRAM_CACHE_COHERENT set
	 * Parity enabled.
	 * Parity error propagation
	 * Arbitration not parked for CPU only
	 * Other bits are reserved.
	 */
#ifdef CONFIG_MV64360_SRAM_CACHE_COHERENT
	mv64x60_write(&bh, MV64360_SRAM_CONFIG, 0x001600b2);
#else
	mv64x60_write(&bh, MV64360_SRAM_CONFIG, 0x001600b0);
#endif

	hdpu_intr_setup();
}
175
/*
 * Initialize the MV64360 host bridge: describe both PCI buses' I/O and
 * memory windows, pick cache-snoop options for the DMA masters, then
 * probe/scan both hoses and install the PCI callbacks.
 */
static void __init hdpu_setup_bridge(void)
{
	struct mv64x60_setup_info si;
	int i;

	memset(&si, 0, sizeof(si));

	si.phys_reg_base = HDPU_BRIDGE_REG_BASE;

	/* PCI bus 0: I/O and one memory window, no byte swapping */
	si.pci_0.enable_bus = 1;
	si.pci_0.pci_io.cpu_base = HDPU_PCI0_IO_START_PROC_ADDR;
	si.pci_0.pci_io.pci_base_hi = 0;
	si.pci_0.pci_io.pci_base_lo = HDPU_PCI0_IO_START_PCI_ADDR;
	si.pci_0.pci_io.size = HDPU_PCI0_IO_SIZE;
	si.pci_0.pci_io.swap = MV64x60_CPU2PCI_SWAP_NONE;
	si.pci_0.pci_mem[0].cpu_base = HDPU_PCI0_MEM_START_PROC_ADDR;
	si.pci_0.pci_mem[0].pci_base_hi = HDPU_PCI0_MEM_START_PCI_HI_ADDR;
	si.pci_0.pci_mem[0].pci_base_lo = HDPU_PCI0_MEM_START_PCI_LO_ADDR;
	si.pci_0.pci_mem[0].size = HDPU_PCI0_MEM_SIZE;
	si.pci_0.pci_mem[0].swap = MV64x60_CPU2PCI_SWAP_NONE;
	si.pci_0.pci_cmd_bits = 0;
	si.pci_0.latency_timer = 0x80;

	/* PCI bus 1: same layout with its own address ranges */
	si.pci_1.enable_bus = 1;
	si.pci_1.pci_io.cpu_base = HDPU_PCI1_IO_START_PROC_ADDR;
	si.pci_1.pci_io.pci_base_hi = 0;
	si.pci_1.pci_io.pci_base_lo = HDPU_PCI1_IO_START_PCI_ADDR;
	si.pci_1.pci_io.size = HDPU_PCI1_IO_SIZE;
	si.pci_1.pci_io.swap = MV64x60_CPU2PCI_SWAP_NONE;
	si.pci_1.pci_mem[0].cpu_base = HDPU_PCI1_MEM_START_PROC_ADDR;
	si.pci_1.pci_mem[0].pci_base_hi = HDPU_PCI1_MEM_START_PCI_HI_ADDR;
	si.pci_1.pci_mem[0].pci_base_lo = HDPU_PCI1_MEM_START_PCI_LO_ADDR;
	si.pci_1.pci_mem[0].size = HDPU_PCI1_MEM_SIZE;
	si.pci_1.pci_mem[0].swap = MV64x60_CPU2PCI_SWAP_NONE;
	si.pci_1.pci_cmd_bits = 0;
	si.pci_1.latency_timer = 0x80;

	/* Per-window snoop policy: none when the cache is not coherent,
	 * otherwise write-back snooping (chip errata workaround). */
	for (i = 0; i < MV64x60_CPU2MEM_WINDOWS; i++) {
#if defined(CONFIG_NOT_COHERENT_CACHE)
		si.cpu_prot_options[i] = 0;
		si.enet_options[i] = MV64360_ENET2MEM_SNOOP_NONE;
		si.mpsc_options[i] = MV64360_MPSC2MEM_SNOOP_NONE;
		si.idma_options[i] = MV64360_IDMA2MEM_SNOOP_NONE;

		si.pci_1.acc_cntl_options[i] =
		    MV64360_PCI_ACC_CNTL_SNOOP_NONE |
		    MV64360_PCI_ACC_CNTL_SWAP_NONE |
		    MV64360_PCI_ACC_CNTL_MBURST_128_BYTES |
		    MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES;

		si.pci_0.acc_cntl_options[i] =
		    MV64360_PCI_ACC_CNTL_SNOOP_NONE |
		    MV64360_PCI_ACC_CNTL_SWAP_NONE |
		    MV64360_PCI_ACC_CNTL_MBURST_128_BYTES |
		    MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES;

#else
		si.cpu_prot_options[i] = 0;
		si.enet_options[i] = MV64360_ENET2MEM_SNOOP_WB;	/* errata */
		si.mpsc_options[i] = MV64360_MPSC2MEM_SNOOP_WB;	/* errata */
		si.idma_options[i] = MV64360_IDMA2MEM_SNOOP_WB;	/* errata */

		si.pci_0.acc_cntl_options[i] =
		    MV64360_PCI_ACC_CNTL_SNOOP_WB |
		    MV64360_PCI_ACC_CNTL_SWAP_NONE |
		    MV64360_PCI_ACC_CNTL_MBURST_32_BYTES |
		    MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES;

		si.pci_1.acc_cntl_options[i] =
		    MV64360_PCI_ACC_CNTL_SNOOP_WB |
		    MV64360_PCI_ACC_CNTL_SWAP_NONE |
		    MV64360_PCI_ACC_CNTL_MBURST_32_BYTES |
		    MV64360_PCI_ACC_CNTL_RDSIZE_256_BYTES;
#endif
	}

	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_INIT_PCI);

	/* Lookup PCI host bridges */
	mv64x60_init(&bh, &si);
	pci_dram_offset = 0;	/* System mem at same addr on PCI & cpu bus */
	ppc_md.pci_swizzle = common_swizzle;
	ppc_md.pci_map_irq = hdpu_map_irq;

	/* Scan hose A starting at bus 0, hose B at the next free bus */
	mv64x60_set_bus(&bh, 0, 0);
	bh.hose_a->first_busno = 0;
	bh.hose_a->last_busno = 0xff;
	bh.hose_a->last_busno = pciauto_bus_scan(bh.hose_a, 0);

	bh.hose_b->first_busno = bh.hose_a->last_busno + 1;
	mv64x60_set_bus(&bh, 1, bh.hose_b->first_busno);
	bh.hose_b->last_busno = 0xff;
	bh.hose_b->last_busno = pciauto_bus_scan(bh.hose_b,
		bh.hose_b->first_busno);

	ppc_md.pci_exclude_device = mv64x60_pci_exclude_device;

	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_INIT_REG);
	/*
	 * Enabling of PCI internal-vs-external arbitration
	 * is a platform- and errata-dependent decision.
	 */
	return;
}
279
#if defined(CONFIG_SERIAL_MPSC_CONSOLE)
/*
 * Early MPSC console hook.  Only does work when KGDB is configured:
 * initializes the chosen MPSC port once (9600 8N1) for the debugger.
 */
static void __init hdpu_early_serial_map(void)
{
#ifdef	CONFIG_KGDB
	static char first_time = 1;	/* init the port only once */

#if defined(CONFIG_KGDB_TTYS0)
#define KGDB_PORT 0
#elif defined(CONFIG_KGDB_TTYS1)
#define KGDB_PORT 1
#else
#error "Invalid kgdb_tty port"
#endif

	if (first_time) {
		gt_early_mpsc_init(KGDB_PORT,
				   B9600 | CS8 | CREAD | HUPCL | CLOCAL);
		first_time = 0;
	}

	return;
#endif
}
#endif
304
/* Late platform init hook (ppc_md.init); nothing to do on this board. */
static void hdpu_init2(void)
{
}
309
#if defined(CONFIG_MV643XX_ETH)
/*
 * Fill in board-specific mv643xx_eth platform data before the driver
 * binds: fixed 100 Mbit full duplex, PHY address equal to the device
 * id, and the tx/rx descriptor queue sizes.
 */
static void __init hdpu_fixup_eth_pdata(struct platform_device *pd)
{

	struct mv643xx_eth_platform_data *eth_pd;
	eth_pd = pd->dev.platform_data;

	eth_pd->force_phy_addr = 1;
	eth_pd->phy_addr = pd->id;	/* PHY address tracks the port id */
	eth_pd->speed = SPEED_100;
	eth_pd->duplex = DUPLEX_FULL;
	eth_pd->tx_queue_size = 400;
	eth_pd->rx_queue_size = 800;
}
#endif
325
/*
 * Fill in MPSC (serial) platform data: idle timeout, baud rate from
 * the bootloader's board info when available, and the baud-rate
 * generator clock source/frequency for this board.
 */
static void __init hdpu_fixup_mpsc_pdata(struct platform_device *pd)
{

	struct mpsc_pdata *pdata;

	pdata = (struct mpsc_pdata *)pd->dev.platform_data;

	pdata->max_idle = 40;
	/* Prefer the baud rate the bootloader was already using */
	if (ppcboot_bd_valid)
		pdata->default_baud = ppcboot_bd.bi_baudrate;
	else
		pdata->default_baud = HDPU_DEFAULT_BAUD;
	pdata->brg_clk_src = HDPU_MPSC_CLK_SRC;
	pdata->brg_clk_freq = HDPU_MPSC_CLK_FREQ;
}
341
#if defined(CONFIG_HDPU_FEATURES)
/*
 * Let the common mv64x60 code patch the cpustate device's resources
 * (translate bridge-relative register offsets to bus addresses).
 */
static void __init hdpu_fixup_cpustate_pdata(struct platform_device *pd)
{
	struct platform_device *pds[1];
	pds[0] = pd;
	mv64x60_pd_fixup(&bh, pds, 1);
}
#endif
350
/*
 * Bus notifier called for every device added: match known platform
 * device names and run the corresponding platform-data fixup before
 * the driver probes.  Always returns 0 (notification consumed).
 */
static int hdpu_platform_notify(struct device *dev)
{
	/* Table mapping bus_id -> fixup routine */
	static struct {
		char *bus_id;
		void ((*rtn) (struct platform_device * pdev));
	} dev_map[] = {
		{
		MPSC_CTLR_NAME ".0", hdpu_fixup_mpsc_pdata},
#if defined(CONFIG_MV643XX_ETH)
		{
		MV643XX_ETH_NAME ".0", hdpu_fixup_eth_pdata},
#endif
#if defined(CONFIG_HDPU_FEATURES)
		{
		HDPU_CPUSTATE_NAME ".0", hdpu_fixup_cpustate_pdata},
#endif
	};
	struct platform_device *pdev;
	int i;

	if (dev && dev->bus_id)
		for (i = 0; i < ARRAY_SIZE(dev_map); i++)
			if (!strncmp(dev->bus_id, dev_map[i].bus_id,
				     BUS_ID_SIZE)) {

				pdev = container_of(dev,
						    struct platform_device,
						    dev);
				dev_map[i].rtn(pdev);
			}

	return 0;
}
384
/*
 * ppc_md.setup_arch: choose the root device, enable L1 parity and the
 * L2 cache, set up the MV64360 bridge and peripherals, and map the
 * early serial console.
 */
static void __init hdpu_setup_arch(void)
{
	if (ppc_md.progress)
		ppc_md.progress("hdpu_setup_arch: enter", 0);
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start)
		ROOT_DEV = Root_RAM0;
	else
#endif
#ifdef	CONFIG_ROOT_NFS
		ROOT_DEV = Root_NFS;
#else
		ROOT_DEV = Root_SDA2;
#endif

	/* Blink the heartbeat LED from the timer interrupt */
	ppc_md.heartbeat = hdpu_heartbeat;

	ppc_md.heartbeat_reset = HZ;
	ppc_md.heartbeat_count = 1;

	if (ppc_md.progress)
		ppc_md.progress("hdpu_setup_arch: Enabling L2 cache", 0);

	/* Enable L1 Parity Bits */
	hdpu_set_l1pe();

	/* Enable L2 and L3 caches (if 745x) */
	_set_L2CR(0x80080000);

	/* NOTE(review): this progress string repeats "enter"; it looks
	 * like it was meant to be a distinct post-cache marker — confirm */
	if (ppc_md.progress)
		ppc_md.progress("hdpu_setup_arch: enter", 0);

	hdpu_setup_bridge();

	hdpu_setup_peripherals();

#ifdef CONFIG_SERIAL_MPSC_CONSOLE
	hdpu_early_serial_map();
#endif

	printk("SKY HDPU Compute Blade \n");

	if (ppc_md.progress)
		ppc_md.progress("hdpu_setup_arch: exit", 0);

	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_OK);
	return;
}
/* ppc_md.init_IRQ hook: defer to the common MV64360 interrupt setup. */
static void __init hdpu_init_irq(void)
{
	mv64360_init_irq();
}
437
438static void __init hdpu_set_l1pe()
439{
440	unsigned long ictrl;
441	asm volatile ("mfspr %0, 1011":"=r" (ictrl):);
442	ictrl |= ICTRL_EICE | ICTRL_EDC | ICTRL_EICP;
443	asm volatile ("mtspr 1011, %0"::"r" (ictrl));
444}
445
/*
 * Set BAT 1 to map 0xf1000000 to end of physical memory space.
 * Maps the MV64360 register region with a data BAT so early code can
 * reach the bridge before the MMU page tables are up.
 */
static __inline__ void hdpu_set_bat(void)
{
	mb();
	mtspr(SPRN_DBAT1U, 0xf10001fe);	/* BEPI=0xf1000000, Vs=1 */
	mtspr(SPRN_DBAT1L, 0xf100002a);	/* cache-inhibited, guarded, r/w */
	mb();

	return;
}
458
/* ppc_md.find_end_of_memory: ask the MV64360 how much SDRAM it decodes. */
unsigned long __init hdpu_find_end_of_memory(void)
{
	return mv64x60_get_mem_size(CONFIG_MV64X60_NEW_BASE,
				    MV64x60_TYPE_MV64360);
}
464
465static void hdpu_reset_board(void)
466{
467	volatile int infinite = 1;
468
469	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_RESET);
470
471	local_irq_disable();
472
473	/* Clear all the LEDs */
474	mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, ((1 << 4) |
475						   (1 << 5) | (1 << 6)));
476
477	/* disable and invalidate the L2 cache */
478	_set_L2CR(0);
479	_set_L2CR(0x200000);
480
481	/* flush and disable L1 I/D cache */
482	__asm__ __volatile__
483	    ("\n"
484	     "mfspr   3,1008\n"
485	     "ori	5,5,0xcc00\n"
486	     "ori	4,3,0xc00\n"
487	     "andc	5,3,5\n"
488	     "sync\n"
489	     "mtspr	1008,4\n"
490	     "isync\n" "sync\n" "mtspr	1008,5\n" "isync\n" "sync\n");
491
492	/* Hit the reset bit */
493	mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, (1 << 3));
494
495	while (infinite)
496		infinite = infinite;
497
498	return;
499}
500
501static void hdpu_restart(char *cmd)
502{
503	volatile ulong i = 10000000;
504
505	hdpu_reset_board();
506
507	while (i-- > 0) ;
508	panic("restart failed\n");
509}
510
511static void hdpu_halt(void)
512{
513	local_irq_disable();
514
515	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_HALT);
516
517	/* Clear all the LEDs */
518	mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, ((1 << 4) | (1 << 5) |
519						   (1 << 6)));
520	while (1) ;
521	/* NOTREACHED */
522}
523
/* ppc_md.power_off hook: no software power control, so just halt. */
static void hdpu_power_off(void)
{
	hdpu_halt();
	/* NOTREACHED */
}
529
530static int hdpu_show_cpuinfo(struct seq_file *m)
531{
532	uint pvid;
533
534	pvid = mfspr(SPRN_PVR);
535	seq_printf(m, "vendor\t\t: Sky Computers\n");
536	seq_printf(m, "machine\t\t: HDPU Compute Blade\n");
537	seq_printf(m, "PVID\t\t: 0x%x, vendor: %s\n",
538		   pvid, (pvid & (1 << 15) ? "IBM" : "Motorola"));
539
540	return 0;
541}
542
543static void __init hdpu_calibrate_decr(void)
544{
545	ulong freq;
546
547	if (ppcboot_bd_valid)
548		freq = ppcboot_bd.bi_busfreq / 4;
549	else
550		freq = 133000000;
551
552	printk("time_init: decrementer frequency = %lu.%.6lu MHz\n",
553	       freq / 1000000, freq % 1000000);
554
555	tb_ticks_per_jiffy = freq / HZ;
556	tb_to_us = mulhwu_scale_factor(freq, 1000000);
557
558	return;
559}
560
/*
 * Decode the register values handed over by the bootloader:
 *   r3     - pointer to a ppcboot/U-Boot bd_t board-info structure
 *   r4, r5 - initrd start and end
 *   r6, r7 - kernel command line start and end
 * Addresses with a clear top nibble are physical and are relocated
 * into kernel virtual space by adding KERNELBASE.
 */
static void parse_bootinfo(unsigned long r3,
			   unsigned long r4, unsigned long r5,
			   unsigned long r6, unsigned long r7)
{
	bd_t *bd = NULL;
	char *cmdline_start = NULL;
	int cmdline_len = 0;

	if (r3) {
		if ((r3 & 0xf0000000) == 0)
			r3 += KERNELBASE;
		if ((r3 & 0xf0000000) == KERNELBASE) {
			bd = (void *)r3;

			/* Keep a private copy; the bootloader's memory
			 * will be reclaimed later */
			memcpy(&ppcboot_bd, bd, sizeof(ppcboot_bd));
			ppcboot_bd_valid = 1;
		}
	}
#ifdef CONFIG_BLK_DEV_INITRD
	if (r4 && r5 && r5 > r4) {
		if ((r4 & 0xf0000000) == 0)
			r4 += KERNELBASE;
		if ((r5 & 0xf0000000) == 0)
			r5 += KERNELBASE;
		if ((r4 & 0xf0000000) == KERNELBASE) {
			initrd_start = r4;
			initrd_end = r5;
			initrd_below_start_ok = 1;
		}
	}
#endif				/* CONFIG_BLK_DEV_INITRD */

	if (r6 && r7 && r7 > r6) {
		if ((r6 & 0xf0000000) == 0)
			r6 += KERNELBASE;
		if ((r7 & 0xf0000000) == 0)
			r7 += KERNELBASE;
		if ((r6 & 0xf0000000) == KERNELBASE) {
			cmdline_start = (void *)r6;
			cmdline_len = (r7 - r6);
			/* NOTE(review): strncpy does not guarantee NUL
			 * termination; assumes the bootloader includes
			 * the terminator within r6..r7 — confirm */
			strncpy(cmd_line, cmdline_start, cmdline_len);
		}
	}
}
605
606#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
/*
 * IDE hook: reserve the I/O port range for a drive interface.
 * NOTE(review): the request_region() result is ignored, so a conflict
 * is silently dropped — confirm this is acceptable for this board.
 */
static void
hdpu_ide_request_region(ide_ioreg_t from, unsigned int extent, const char *name)
{
	request_region(from, extent, name);
	return;
}
613
/* IDE hook: release a port range reserved by hdpu_ide_request_region(). */
static void hdpu_ide_release_region(ide_ioreg_t from, unsigned int extent)
{
	release_region(from, extent);
	return;
}
619
/*
 * IDE hook: walk all PCI devices and adopt the IRQ of any IDE/RAID
 * class device for this hwif.  If several match, the last one found
 * wins (single-controller assumption on this board).
 */
static void __init
hdpu_ide_pci_init_hwif_ports(hw_regs_t * hw, ide_ioreg_t data_port,
			     ide_ioreg_t ctrl_port, int *irq)
{
	struct pci_dev *dev;

	pci_for_each_dev(dev) {
		if (((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) ||
		    ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)) {
			hw->irq = dev->irq;

			if (irq != NULL) {
				*irq = dev->irq;
			}
		}
	}

	return;
}
639#endif
640
641void hdpu_heartbeat(void)
642{
643	if (mv64x60_read(&bh, MV64x60_GPP_VALUE) & (1 << 5))
644		mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, (1 << 5));
645	else
646		mv64x60_write(&bh, MV64x60_GPP_VALUE_SET, (1 << 5));
647
648	ppc_md.heartbeat_count = ppc_md.heartbeat_reset;
649
650}
651
/* ppc_md.setup_io_mappings: 1:1 map the MV64360 register block as I/O. */
static void __init hdpu_map_io(void)
{
	io_block_mapping(0xf1000000, 0xf1000000, 0x20000, _PAGE_IO);
}
656
657#ifdef CONFIG_SMP
/* Interrupt names shown in /proc/interrupts for the doorbell IRQs */
char hdpu_smp0[] = "SMP Cpu #0";
char hdpu_smp1[] = "SMP Cpu #1";
660
661static irqreturn_t hdpu_smp_cpu0_int_handler(int irq, void *dev_id)
662{
663	volatile unsigned int doorbell;
664
665	doorbell = mv64x60_read(&bh, MV64360_CPU0_DOORBELL);
666
667	/* Ack the doorbell interrupts */
668	mv64x60_write(&bh, MV64360_CPU0_DOORBELL_CLR, doorbell);
669
670	if (doorbell & 1) {
671		smp_message_recv(0);
672	}
673	if (doorbell & 2) {
674		smp_message_recv(1);
675	}
676	if (doorbell & 4) {
677		smp_message_recv(2);
678	}
679	if (doorbell & 8) {
680		smp_message_recv(3);
681	}
682	return IRQ_HANDLED;
683}
684
685static irqreturn_t hdpu_smp_cpu1_int_handler(int irq, void *dev_id)
686{
687	volatile unsigned int doorbell;
688
689	doorbell = mv64x60_read(&bh, MV64360_CPU1_DOORBELL);
690
691	/* Ack the doorbell interrupts */
692	mv64x60_write(&bh, MV64360_CPU1_DOORBELL_CLR, doorbell);
693
694	if (doorbell & 1) {
695		smp_message_recv(0);
696	}
697	if (doorbell & 2) {
698		smp_message_recv(1);
699	}
700	if (doorbell & 4) {
701		smp_message_recv(2);
702	}
703	if (doorbell & 8) {
704		smp_message_recv(3);
705	}
706	return IRQ_HANDLED;
707}
708
/*
 * Secondary-CPU entry stub.  smp_hdpu_kick_cpu() copies this code into
 * internal SRAM mapped at the reset vector region; it loads SRR0 with
 * 0xc0, clears SRR1 (MSR = 0), and rfi's to physical address 0xc0.
 */
static void smp_hdpu_CPU_two(void)
{
	__asm__ __volatile__
	    ("\n"
	     "lis     3,0x0000\n"
	     "ori     3,3,0x00c0\n"
	     "mtspr   26, 3\n" "li      4,0\n" "mtspr   27,4\n" "rfi");

}
718
719static int smp_hdpu_probe(void)
720{
721	int *cpu_count_reg;
722	int num_cpus = 0;
723
724	cpu_count_reg = ioremap(HDPU_NEXUS_ID_BASE, HDPU_NEXUS_ID_SIZE);
725	if (cpu_count_reg) {
726		num_cpus = (*cpu_count_reg >> 20) & 0x3;
727		iounmap(cpu_count_reg);
728	}
729
730	/* Validate the bits in the CPLD. If we could not map the reg, return 2.
731	 * If the register reported 0 or 3, return 2.
732	 * Older CPLD revisions set these bits to all ones (val = 3).
733	 */
734	if ((num_cpus < 1) || (num_cpus > 2)) {
735		printk
736		    ("Unable to determine the number of processors %d . deafulting to 2.\n",
737		     num_cpus);
738		num_cpus = 2;
739	}
740	return num_cpus;
741}
742
743static void
744smp_hdpu_message_pass(int target, int msg)
745{
746	if (msg > 0x3) {
747		printk("SMP %d: smp_message_pass: unknown msg %d\n",
748		       smp_processor_id(), msg);
749		return;
750	}
751	switch (target) {
752	case MSG_ALL:
753		mv64x60_write(&bh, MV64360_CPU0_DOORBELL, 1 << msg);
754		mv64x60_write(&bh, MV64360_CPU1_DOORBELL, 1 << msg);
755		break;
756	case MSG_ALL_BUT_SELF:
757		if (smp_processor_id())
758			mv64x60_write(&bh, MV64360_CPU0_DOORBELL, 1 << msg);
759		else
760			mv64x60_write(&bh, MV64360_CPU1_DOORBELL, 1 << msg);
761		break;
762	default:
763		if (target == 0)
764			mv64x60_write(&bh, MV64360_CPU0_DOORBELL, 1 << msg);
765		else
766			mv64x60_write(&bh, MV64360_CPU1_DOORBELL, 1 << msg);
767		break;
768	}
769}
770
/*
 * Release secondary CPU `nr`: stage the entry stub in internal SRAM
 * mapped over the reset vector, enable CPU 1 bus arbitration, pass
 * the cpu number via KERNELBASE, then restore the normal SRAM and
 * boot-flash windows.
 */
static void smp_hdpu_kick_cpu(int nr)
{
	volatile unsigned int *bootaddr;

	if (ppc_md.progress)
		ppc_md.progress("smp_hdpu_kick_cpu", 0);

	hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR | CPUSTATE_KERNEL_CPU1_KICK);

       /* Disable BootCS. Must also reduce the windows size to zero. */
	bh.ci->disable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN);
	mv64x60_set_32bit_window(&bh, MV64x60_CPU2BOOT_WIN, 0, 0, 0);

	bootaddr = ioremap(HDPU_INTERNAL_SRAM_BASE, HDPU_INTERNAL_SRAM_SIZE);
	if (!bootaddr) {
		if (ppc_md.progress)
			ppc_md.progress("smp_hdpu_kick_cpu: ioremap failed", 0);
		return;
	}

	/* Copy the entry stub to SRAM byte offset 0x100 (bootaddr is a
	 * u32 pointer, so + 0x40 elements) */
	memcpy((void *)(bootaddr + 0x40), (void *)&smp_hdpu_CPU_two, 0x20);

	/* map SRAM to 0xfff00000 */
	bh.ci->disable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);

	mv64x60_set_32bit_window(&bh, MV64x60_CPU2SRAM_WIN,
				 0xfff00000, HDPU_INTERNAL_SRAM_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);

	/* Enable CPU1 arbitration */
	mv64x60_clr_bits(&bh, MV64x60_CPU_MASTER_CNTL, (1 << 9));

	/*
	 * Wait 100ms until the other CPU has reached __secondary_start.
	 * After that it is safe to revert the SRAM mapping, etc.
	 */
	mdelay(100);
	/* Hand the cpu number to the secondary via the word at KERNELBASE */
	*(unsigned long *)KERNELBASE = nr;
	asm volatile ("dcbf 0,%0"::"r" (KERNELBASE):"memory");

	iounmap(bootaddr);

	/* Set up window for internal sram (256KB in size) */
	bh.ci->disable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);
	mv64x60_set_32bit_window(&bh, MV64x60_CPU2SRAM_WIN,
				 HDPU_INTERNAL_SRAM_BASE,
				 HDPU_INTERNAL_SRAM_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);
	/*
	 * Set up windows for embedded FLASH (using boot CS window).
	 */

	bh.ci->disable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN);
	mv64x60_set_32bit_window(&bh, MV64x60_CPU2BOOT_WIN,
				 HDPU_EMB_FLASH_BASE, HDPU_EMB_FLASH_SIZE, 0);
	bh.ci->enable_window_32bit(&bh, MV64x60_CPU2BOOT_WIN);
}
828
829static void smp_hdpu_setup_cpu(int cpu_nr)
830{
831	if (cpu_nr == 0) {
832		if (ppc_md.progress)
833			ppc_md.progress("smp_hdpu_setup_cpu 0", 0);
834		mv64x60_write(&bh, MV64360_CPU0_DOORBELL_CLR, 0xff);
835		mv64x60_write(&bh, MV64360_CPU0_DOORBELL_MASK, 0xff);
836		request_irq(60, hdpu_smp_cpu0_int_handler,
837			    IRQF_DISABLED, hdpu_smp0, 0);
838	}
839
840	if (cpu_nr == 1) {
841		if (ppc_md.progress)
842			ppc_md.progress("smp_hdpu_setup_cpu 1", 0);
843
844		hdpu_cpustate_set(CPUSTATE_KERNEL_MAJOR |
845				  CPUSTATE_KERNEL_CPU1_OK);
846
847		/* Enable L1 Parity Bits */
848		hdpu_set_l1pe();
849
850		/* Enable L2 cache */
851		_set_L2CR(0);
852		_set_L2CR(0x80080000);
853
854		mv64x60_write(&bh, MV64360_CPU1_DOORBELL_CLR, 0x0);
855		mv64x60_write(&bh, MV64360_CPU1_DOORBELL_MASK, 0xff);
856		request_irq(28, hdpu_smp_cpu1_int_handler,
857			    IRQF_DISABLED, hdpu_smp1, 0);
858	}
859
860}
861
/*
 * Timebase sync, primary side: freeze both CPUs' timebases through
 * the TBEN CPLD register, publish TBU/TBL for the secondary, wait for
 * it to consume them (hdpu_tben_take zeroes the pair), then thaw so
 * both timebases restart together.
 */
void __devinit hdpu_tben_give()
{
	volatile unsigned long *val = 0;

	/* By writing 0 to the TBEN_BASE, the timebases is frozen */
	val = ioremap(HDPU_TBEN_BASE, 4);
	*val = 0;
	mb();

	spin_lock(&timebase_lock);
	timebase_upper = get_tbu();
	timebase_lower = get_tbl();
	spin_unlock(&timebase_lock);

	/* Wait for the secondary to zero the published values */
	while (timebase_upper || timebase_lower)
		barrier();

	/* By writing 1 to the TBEN_BASE, the timebases is thawed */
	*val = 1;
	mb();

	iounmap(val);

}
886
/*
 * Timebase sync, secondary side: wait for the primary to publish the
 * frozen timebase, load it into this CPU, then zero the shared pair
 * to signal the primary that the value has been consumed.
 */
void __devinit hdpu_tben_take()
{
	while (!(timebase_upper || timebase_lower))
		barrier();

	spin_lock(&timebase_lock);
	set_tb(timebase_upper, timebase_lower);
	timebase_upper = 0;
	timebase_lower = 0;
	spin_unlock(&timebase_lock);
}
898
/* SMP operations handed to the generic PPC SMP code via platform_init() */
static struct smp_ops_t hdpu_smp_ops = {
	.message_pass = smp_hdpu_message_pass,
	.probe = smp_hdpu_probe,
	.kick_cpu = smp_hdpu_kick_cpu,
	.setup_cpu = smp_hdpu_setup_cpu,
	.give_timebase = hdpu_tben_give,
	.take_timebase = hdpu_tben_take,
};
907#endif				/* CONFIG_SMP */
908
/*
 * Board entry point called from the generic arch/ppc startup code
 * with the bootloader's r3-r7: decode boot info, install all ppc_md
 * callbacks, map the bridge registers, and hook SMP/console support.
 */
void __init
platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
	      unsigned long r6, unsigned long r7)
{
	parse_bootinfo(r3, r4, r5, r6, r7);

	isa_mem_base = 0;

	ppc_md.setup_arch = hdpu_setup_arch;
	ppc_md.init = hdpu_init2;
	ppc_md.show_cpuinfo = hdpu_show_cpuinfo;
	ppc_md.init_IRQ = hdpu_init_irq;
	ppc_md.get_irq = mv64360_get_irq;
	ppc_md.restart = hdpu_restart;
	ppc_md.power_off = hdpu_power_off;
	ppc_md.halt = hdpu_halt;
	ppc_md.find_end_of_memory = hdpu_find_end_of_memory;
	ppc_md.calibrate_decr = hdpu_calibrate_decr;
	ppc_md.setup_io_mappings = hdpu_map_io;

	/* Bridge registers are mapped 1:1 (see hdpu_set_bat/hdpu_map_io) */
	bh.p_base = CONFIG_MV64X60_NEW_BASE;
	bh.v_base = (unsigned long *)bh.p_base;

	hdpu_set_bat();

#if defined(CONFIG_SERIAL_TEXT_DEBUG)
	ppc_md.progress = hdpu_mpsc_progress;	/* embedded UART */
	mv64x60_progress_init(bh.p_base);
#endif				/* CONFIG_SERIAL_TEXT_DEBUG */

#ifdef CONFIG_SMP
	smp_ops = &hdpu_smp_ops;
#endif				/* CONFIG_SMP */

#if defined(CONFIG_SERIAL_MPSC) || defined(CONFIG_MV643XX_ETH)
	platform_notify = hdpu_platform_notify;
#endif
	return;
}
948
949#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(CONFIG_SERIAL_MPSC_CONSOLE)
/*
 * SMP-safe serial text debug routine: spin until this CPU owns
 * hardware semaphore 0 (the semaphore register returns our WHO_AM_I
 * value when acquired), print, then release the semaphore.
 */
void hdpu_mpsc_progress(char *s, unsigned short hex)
{
	while (mv64x60_read(&bh, MV64360_WHO_AM_I) !=
	       mv64x60_read(&bh, MV64360_SEMAPHORE_0)) {
	}
	mv64x60_mpsc_progress(s, hex);
	/* Release semaphore 0 */
	mv64x60_write(&bh, MV64360_SEMAPHORE_0, 0xff);
}
959#endif
960
961static void hdpu_cpustate_set(unsigned char new_state)
962{
963	unsigned int state = (new_state << 21);
964	mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, (0xff << 21));
965	mv64x60_write(&bh, MV64x60_GPP_VALUE_CLR, state);
966}
967
968#ifdef CONFIG_MTD_PHYSMAP
/* Fixed layout of the 64MB on-board flash, handed to physmap */
static struct mtd_partition hdpu_partitions[] = {
	{
	 .name = "Root FS",
	 .size = 0x03400000,
	 .offset = 0,
	 .mask_flags = 0,
	 },{
	 .name = "User FS",
	 .size = 0x00800000,
	 .offset = 0x03400000,
	 .mask_flags = 0,
	 },{
	 .name = "Kernel Image",
	 .size = 0x002C0000,
	 .offset = 0x03C00000,
	 .mask_flags = 0,
	 },{
	 .name = "bootEnv",
	 .size = 0x00040000,
	 .offset = 0x03EC0000,
	 .mask_flags = 0,
	 },{
	 .name = "bootROM",
	 .size = 0x00100000,
	 .offset = 0x03F00000,
	 .mask_flags = 0,
	 }
};
997
998static int __init hdpu_setup_mtd(void)
999{
1000
1001	physmap_set_partitions(hdpu_partitions, 5);
1002	return 0;
1003}
1004
1005arch_initcall(hdpu_setup_mtd);
1006#endif
1007
1008#ifdef CONFIG_HDPU_FEATURES
1009
/* Register range handed to the hdpu_cpustate driver (GPP set/clear).
 * NOTE(review): resource .end is inclusive; "CLR + 1" does not match
 * the usual start + size - 1 convention — confirm against the driver. */
static struct resource hdpu_cpustate_resources[] = {
	[0] = {
	       .name = "addr base",
	       .start = MV64x60_GPP_VALUE_SET,
	       .end = MV64x60_GPP_VALUE_CLR + 1,
	       .flags = IORESOURCE_MEM,
	       },
};
1018
/* CPLD nexus-ID register range for the hdpu_nexus driver.
 * NOTE(review): .end is inclusive, so BASE + SIZE overshoots by one
 * byte (expected BASE + SIZE - 1) — confirm against the driver. */
static struct resource hdpu_nexus_resources[] = {
	[0] = {
	       .name = "nexus register",
	       .start = HDPU_NEXUS_ID_BASE,
	       .end = HDPU_NEXUS_ID_BASE + HDPU_NEXUS_ID_SIZE,
	       .flags = IORESOURCE_MEM,
	       },
};
1027
/* Platform device for the cpu-state LED driver (CONFIG_HDPU_FEATURES) */
static struct platform_device hdpu_cpustate_device = {
	.name = HDPU_CPUSTATE_NAME,
	.id = 0,
	.num_resources = ARRAY_SIZE(hdpu_cpustate_resources),
	.resource = hdpu_cpustate_resources,
};
1034
/* Platform device exposing the CPLD nexus-ID register */
static struct platform_device hdpu_nexus_device = {
	.name = HDPU_NEXUS_NAME,
	.id = 0,
	.num_resources = ARRAY_SIZE(hdpu_nexus_resources),
	.resource = hdpu_nexus_resources,
};
1041
/* arch_initcall: register the board-feature platform devices */
static int __init hdpu_add_pds(void)
{
	platform_device_register(&hdpu_cpustate_device);
	platform_device_register(&hdpu_nexus_device);
	return 0;
}
1048
1049arch_initcall(hdpu_add_pds);
1050#endif
1051