/*
 * Copyright 2004-2010 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/delay.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/pfn.h>

#ifdef CONFIG_MTD_UCLINUX
#include <linux/mtd/map.h>
#include <linux/ext2_fs.h>
#include <linux/cramfs_fs.h>
#include <linux/romfs_fs.h>
#endif

#include <asm/cplb.h>
#include <asm/cacheflush.h>
#include <asm/blackfin.h>
#include <asm/cplbinit.h>
#include <asm/div64.h>
#include <asm/cpu.h>
#include <asm/fixed_code.h>
#include <asm/early_printk.h>

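/* shadow of the reset status register (SWRST, mirrored in SYSCR on newer
 * parts), captured once in setup_arch() and checked there for double
 * fault/watchdog/software reset causes */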
u16 _bfin_swrst;
EXPORT_SYMBOL(_bfin_swrst);

unsigned long memory_start, memory_end, physical_mem_end;
unsigned long _rambase, _ramstart, _ramend;
unsigned long reserved_mem_dcache_on;
unsigned long reserved_mem_icache_on;
EXPORT_SYMBOL(memory_start);
EXPORT_SYMBOL(memory_end);
EXPORT_SYMBOL(physical_mem_end);
EXPORT_SYMBOL(_ramend);
EXPORT_SYMBOL(reserved_mem_dcache_on);

#ifdef CONFIG_MTD_UCLINUX
extern struct map_info uclinux_ram_map;
unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
unsigned long _ebss;
EXPORT_SYMBOL(memory_mtd_end);
EXPORT_SYMBOL(memory_mtd_start);
EXPORT_SYMBOL(mtd_size);
#endif

char __initdata command_line[COMMAND_LINE_SIZE];
void __initdata *init_retx, *init_saved_retx, *init_saved_seqstat,
	*init_saved_icplb_fault_addr, *init_saved_dcplb_fault_addr;

/* boot memmap, for parsing "memmap=" */
#define BFIN_MEMMAP_MAX		128 /* number of entries in bfin_memmap */
#define BFIN_MEMMAP_RAM		1
#define BFIN_MEMMAP_RESERVED	2
static struct bfin_memmap {
	int nr_map;
	struct bfin_memmap_entry {
		unsigned long long addr; /* start of memory segment */
		unsigned long long size;
		unsigned long type;
	} map[BFIN_MEMMAP_MAX];
} bfin_memmap __initdata;

/* for memmap sanitization */
struct change_member {
	struct bfin_memmap_entry *pentry; /* pointer to original entry */
	unsigned long long addr; /* address for this change point */
};
static struct change_member change_point_list[2*BFIN_MEMMAP_MAX] __initdata;
static struct change_member *change_point[2*BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata;

DEFINE_PER_CPU(struct blackfin_cpudata, cpu_data);

static int early_init_clkin_hz(char *buf);

#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
void __init generate_cplb_tables(void)
{
	unsigned int cpu;

	generate_cplb_tables_all();
	/* Generate per-CPU I&D CPLB tables */
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		generate_cplb_tables_cpu(cpu);
}
#endif

void __cpuinit bfin_setup_caches(unsigned int cpu)
{
#ifdef CONFIG_BFIN_ICACHE
	bfin_icache_init(icplb_tbl[cpu]);
#endif

#ifdef CONFIG_BFIN_DCACHE
	bfin_dcache_init(dcplb_tbl[cpu]);
#endif

	/*
	 * In cache coherence emulation mode, we need to have the
	 * D-cache enabled before running any atomic operation which
	 * might involve cache invalidation (e.g. spinlock, rwlock).
	 * So printk's are deferred until then.
	 */
#ifdef CONFIG_BFIN_ICACHE
	printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
	printk(KERN_INFO "  External memory:"
# ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
	       " cacheable"
# else
	       " uncacheable"
# endif
	       " in instruction cache\n");
	if (L2_LENGTH)
		printk(KERN_INFO "  L2 SRAM        :"
# ifdef CONFIG_BFIN_L2_ICACHEABLE
		       " cacheable"
# else
		       " uncacheable"
# endif
		       " in instruction cache\n");

#else
	printk(KERN_INFO "Instruction Cache Disabled for CPU%u\n", cpu);
#endif

#ifdef CONFIG_BFIN_DCACHE
	printk(KERN_INFO "Data Cache Enabled for CPU%u\n", cpu);
	printk(KERN_INFO "  External memory:"
# if defined CONFIG_BFIN_EXTMEM_WRITEBACK
	       " cacheable (write-back)"
# elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH
	       " cacheable (write-through)"
# else
	       " uncacheable"
# endif
	       " in data cache\n");
	if (L2_LENGTH)
		printk(KERN_INFO "  L2 SRAM        :"
# if defined CONFIG_BFIN_L2_WRITEBACK
		       " cacheable (write-back)"
# elif defined CONFIG_BFIN_L2_WRITETHROUGH
		       " cacheable (write-through)"
# else
		       " uncacheable"
# endif
		       " in data cache\n");
#else
	printk(KERN_INFO "Data Cache Disabled for CPU%u\n", cpu);
#endif
}

void __cpuinit bfin_setup_cpudata(unsigned int cpu)
{
	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);

	cpudata->idle = current;
	cpudata->imemctl = bfin_read_IMEM_CONTROL();
	cpudata->dmemctl = bfin_read_DMEM_CONTROL();
}

void __init bfin_cache_init(void)
{
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
	generate_cplb_tables();
#endif
	bfin_setup_caches(0);
}

void __init bfin_relocate_l1_mem(void)
{
	unsigned long text_l1_len = (unsigned long)_text_l1_len;
	unsigned long data_l1_len = (unsigned long)_data_l1_len;
	unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;
	unsigned long l2_len = (unsigned long)_l2_len;

	early_shadow_stamp();

	/*
	 * Due to the ALIGN(4) in arch/blackfin/kernel/vmlinux.lds.S,
	 * we know that all of the L1 text/data is nicely aligned, so
	 * copy in 4-byte chunks and don't worry about overlapping
	 * src/dest.
	 *
	 * We can't use the dma_memcpy functions, since they can call
	 * scheduler functions which might be in L1 :( and core writes
	 * into L1 instruction SRAM cause bad access errors, so we are
	 * stuck: we are required to use DMA, but can't use the common
	 * DMA functions. We can't use memcpy either, since that might
	 * be going to be in the relocated L1.
	 */

	blackfin_dma_early_init();

	/* if necessary, copy L1 text to L1 instruction SRAM */
	if (L1_CODE_LENGTH && text_l1_len)
		early_dma_memcpy(_stext_l1, _text_l1_lma, text_l1_len);

	/* if necessary, copy L1 data to L1 data bank A SRAM */
	if (L1_DATA_A_LENGTH && data_l1_len)
		early_dma_memcpy(_sdata_l1, _data_l1_lma, data_l1_len);

	/* if necessary, copy L1 data B to L1 data bank B SRAM */
	if (L1_DATA_B_LENGTH && data_b_l1_len)
		early_dma_memcpy(_sdata_b_l1, _data_b_l1_lma, data_b_l1_len);

	early_dma_memcpy_done();

	/* if necessary, copy L2 text/data to L2 SRAM */
	if (L2_LENGTH && l2_len)
		memcpy(_stext_l2, _l2_lma, l2_len);
}

#ifdef CONFIG_ROMKERNEL
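/* For XIP (ROM) kernels, copy the writable data sections from their load
 * (LMA) addresses in flash into RAM.  Note the size math on _data_len:
 * the THREAD_SIZE init stack is not copied, apart from its thread_info. */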
void __init bfin_relocate_xip_data(void)
{
	early_shadow_stamp();

	memcpy(_sdata, _data_lma, (unsigned long)_data_len - THREAD_SIZE + sizeof(struct thread_info));
	memcpy(_sinitdata, _init_data_lma, (unsigned long)_init_data_len);
}
#endif

/* add_memory_region to memmap */
static void __init add_memory_region(unsigned long long start,
			      unsigned long long size, int type)
{
	int i;

	i = bfin_memmap.nr_map;

	if (i == BFIN_MEMMAP_MAX) {
		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
		return;
	}

	bfin_memmap.map[i].addr = start;
	bfin_memmap.map[i].size = size;
	bfin_memmap.map[i].type = type;
	bfin_memmap.nr_map++;
}

/*
 * Sanitize the boot memmap, removing overlaps.
 */
static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
{
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_entry;
	int old_nr, new_nr, chg_nr;
	int i;

	/*
		Visually we're performing the following (1,2,3,4 = memory types)

		Sample memory map (w/overlaps):
		   ____22__________________
		   ______________________4_
		   ____1111________________
		   _44_____________________
		   11111111________________
		   ____________________33__
		   ___________44___________
		   __________33333_________
		   ______________22________
		   ___________________2222_
		   _________111111111______
		   _____________________11_
		   _________________4______

		Sanitized equivalent (no overlap):
		   1_______________________
		   _44_____________________
		   ___1____________________
		   ____22__________________
		   ______11________________
		   _________1______________
		   __________3_____________
		   ___________44___________
		   _____________33_________
		   _______________2________
		   ________________1_______
		   _________________4______
		   ___________________2____
		   ____________________33__
		   ______________________4_
	*/
	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;

	/* bail out if we find any unreasonable addresses in memmap */
	for (i = 0; i < old_nr; i++)
		if (map[i].addr + map[i].size < map[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i = 0; i < 2*old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i = 0; i < old_nr; i++) {
		if (map[i].size != 0) {
			change_point[chgidx]->addr = map[i].addr;
			change_point[chgidx++]->pentry = &map[i];
			change_point[chgidx]->addr = map[i].addr + map[i].size;
			change_point[chgidx++]->pentry = &map[i];
		}
	}
	chg_nr = chgidx;	/* true number of change-points */

	/* sort change-point list by memory addresses (low -> high) */
	still_changing = 1;
	while (still_changing) {
		still_changing = 0;
		for (i = 1; i < chg_nr; i++) {
			/* if <current_addr> > <last_addr>, swap */
			/* or, if current=<start_addr> & last=<end_addr>, swap */
			if ((change_point[i]->addr < change_point[i-1]->addr) ||
				((change_point[i]->addr == change_point[i-1]->addr) &&
				 (change_point[i]->addr == change_point[i]->pentry->addr) &&
				 (change_point[i-1]->addr != change_point[i-1]->pentry->addr))
			   ) {
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing = 1;
			}
		}
	}

	/* create a new memmap, removing overlaps */
	overlap_entries = 0;	/* number of entries in the overlap table */
	new_entry = 0;		/* index for creating new memmap entries */
	last_type = 0;		/* start with undefined memory type */
	last_addr = 0;		/* start with 0 as last starting address */
	/* loop through change-points, determining effect on the new memmap */
	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
		/* keep track of all overlapping memmap entries */
		if (change_point[chgidx]->addr == change_point[chgidx]->pentry->addr) {
			/* add map entry to overlap list (> 1 entry implies an overlap) */
			overlap_list[overlap_entries++] = change_point[chgidx]->pentry;
		} else {
			/* remove entry from list (order independent, so swap with last) */
			for (i = 0; i < overlap_entries; i++) {
				if (overlap_list[i] == change_point[chgidx]->pentry)
					overlap_list[i] = overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/* if there are overlapping entries, decide which "type" to use */
		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
		current_type = 0;
		for (i = 0; i < overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/* continue building up new memmap based on this information */
		if (current_type != last_type) {
			if (last_type != 0) {
				new_map[new_entry].size =
					change_point[chgidx]->addr - last_addr;
				/* move forward only if the new size was non-zero */
				if (new_map[new_entry].size != 0)
					if (++new_entry >= BFIN_MEMMAP_MAX)
						break;	/* no more space left for new entries */
			}
			if (current_type != 0) {
				new_map[new_entry].addr = change_point[chgidx]->addr;
				new_map[new_entry].type = current_type;
				last_addr = change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	new_nr = new_entry;	/* retain count for new entries */

	/* copy new mapping into original location */
	memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry));
	*pnr_map = new_nr;

	return 0;
}

static void __init print_memory_map(char *who)
{
	int i;

	for (i = 0; i < bfin_memmap.nr_map; i++) {
		printk(KERN_DEBUG " %s: %016Lx - %016Lx ", who,
			bfin_memmap.map[i].addr,
			bfin_memmap.map[i].addr + bfin_memmap.map[i].size);
		switch (bfin_memmap.map[i].type) {
		case BFIN_MEMMAP_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case BFIN_MEMMAP_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		default:
			printk(KERN_CONT "type %lu\n", bfin_memmap.map[i].type);
			break;
		}
	}
}

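/* "memmap=size@addr" adds a usable RAM region, "memmap=size$addr" a
 * reserved one.  Both numbers go through memparse(), so K/M/G suffixes
 * work. */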
static __init int parse_memmap(char *arg)
{
	unsigned long long start_at, mem_size;

	if (!arg)
		return -EINVAL;

	mem_size = memparse(arg, &arg);
	if (*arg == '@') {
		start_at = memparse(arg+1, &arg);
		add_memory_region(start_at, mem_size, BFIN_MEMMAP_RAM);
	} else if (*arg == '$') {
		start_at = memparse(arg+1, &arg);
		add_memory_region(start_at, mem_size, BFIN_MEMMAP_RESERVED);
	}

	return 0;
}

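/* Minimal early scan of the command line, run before the normal __setup
 * machinery: handles mem=, max_mem= (an optional '$' or '#' suffix keeps
 * dcache or icache on for the reserved region), clkin_hz=, earlyprintk=
 * and memmap=. */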
static __init void parse_cmdline_early(char *cmdline_p)
{
	char c = ' ', *to = cmdline_p;
	unsigned int memsize;
	for (;;) {
		if (c == ' ') {
			if (!memcmp(to, "mem=", 4)) {
				to += 4;
				memsize = memparse(to, &to);
				if (memsize)
					_ramend = memsize;

			} else if (!memcmp(to, "max_mem=", 8)) {
				to += 8;
				memsize = memparse(to, &to);
				if (memsize) {
					physical_mem_end = memsize;
					if (*to != ' ') {
						if (*to == '$'
						    || *(to + 1) == '$')
							reserved_mem_dcache_on = 1;
						if (*to == '#'
						    || *(to + 1) == '#')
							reserved_mem_icache_on = 1;
					}
				}
			} else if (!memcmp(to, "clkin_hz=", 9)) {
				to += 9;
				early_init_clkin_hz(to);
#ifdef CONFIG_EARLY_PRINTK
			} else if (!memcmp(to, "earlyprintk=", 12)) {
				to += 12;
				setup_early_printk(to);
#endif
			} else if (!memcmp(to, "memmap=", 7)) {
				to += 7;
				parse_memmap(to);
			}
		}
		c = *(to++);
		if (!c)
			break;
	}
}

/*
 * Setup memory defaults from user config.
 * The physical memory layout looks like:
 *
 *  [_rambase, _ramstart]:		kernel image
 *  [memory_start, memory_end]:		dynamic memory managed by kernel
 *  [memory_end, _ramend]:		reserved memory
 *  	[memory_mtd_start(memory_end),
 *  		memory_mtd_start + mtd_size]:	rootfs (if any)
 *	[_ramend - DMA_UNCACHED_REGION,
 *		_ramend]:			uncached DMA region
 *  [_ramend, physical_mem_end]:	memory not managed by kernel
 */
static __init void memory_setup(void)
{
#ifdef CONFIG_MTD_UCLINUX
	unsigned long mtd_phys = 0;
#endif
	unsigned long max_mem;

	_rambase = CONFIG_BOOT_LOAD;
	_ramstart = (unsigned long)_end;

	if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
		console_init();
		panic("DMA region exceeds memory limit: %lu.",
			_ramend - _ramstart);
	}
	max_mem = memory_end = _ramend - DMA_UNCACHED_REGION;

#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
	/* Due to a Hardware Anomaly we need to limit the size of usable
	 * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
	 * 05000263 - Hardware loop corrupted when taking an ICPLB exception
	 */
# if defined(CONFIG_DEBUG_HUNT_FOR_ZERO)
	if (max_mem >= 56 * 1024 * 1024)
		max_mem = 56 * 1024 * 1024;
# else
	if (max_mem >= 60 * 1024 * 1024)
		max_mem = 60 * 1024 * 1024;
# endif				/* CONFIG_DEBUG_HUNT_FOR_ZERO */
#endif				/* ANOMALY_05000263 */


#ifdef CONFIG_MPU
	/* Round up to multiple of 4MB */
	memory_start = (_ramstart + 0x3fffff) & ~0x3fffff;
#else
	memory_start = PAGE_ALIGN(_ramstart);
#endif

#if defined(CONFIG_MTD_UCLINUX)
	/* generic memory mapped MTD driver */
	memory_mtd_end = memory_end;

	mtd_phys = _ramstart;
	mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8)));

# if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS)
	if (*((unsigned short *)(mtd_phys + 0x438)) == EXT2_SUPER_MAGIC)
		mtd_size =
		    PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x404)) << 10);
# endif

# if defined(CONFIG_CRAMFS)
	if (*((unsigned long *)(mtd_phys)) == CRAMFS_MAGIC)
		mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x4)));
# endif

# if defined(CONFIG_ROMFS_FS)
	if (((unsigned long *)mtd_phys)[0] == ROMSB_WORD0
	    && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1) {
		mtd_size =
		    PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));

		/* ROM_FS is XIP, so if we found it, we need to limit memory */
		if (memory_end > max_mem) {
			pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n", max_mem >> 20);
			memory_end = max_mem;
		}
	}
# endif				/* CONFIG_ROMFS_FS */

	/* Since the default MTD_UCLINUX has no magic number, we just blindly
	 * read the word 8 bytes past the end of the kernel image and use it.
	 * When no image is attached, mtd_size ends up as a random number, so
	 * do some basic sanity checks before operating on it.
	 */
	if (mtd_size == 0 || memory_end <= mtd_size) {
		pr_emerg("Could not find valid ram mtd attached.\n");
	} else {
		memory_end -= mtd_size;

		/* Relocate MTD image to the top of memory after the uncached memory area */
		uclinux_ram_map.phys = memory_mtd_start = memory_end;
		uclinux_ram_map.size = mtd_size;
		pr_info("Found mtd partition at 0x%p (len=0x%lx), moving to 0x%p\n",
			_end, mtd_size, (void *)memory_mtd_start);
		dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size);
	}
#endif				/* CONFIG_MTD_UCLINUX */

	/* We need to limit memory, since everything could have a text section
	 * of userspace in it, and expose anomaly 05000263. If the anomaly
	 * doesn't exist, or we don't need to, then don't.
	 */
	if (memory_end > max_mem) {
		pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n", max_mem >> 20);
		memory_end = max_mem;
	}

#ifdef CONFIG_MPU
#if defined(CONFIG_ROMFS_ON_MTD) && defined(CONFIG_MTD_ROM)
	page_mask_nelts = (((_ramend + ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE -
					ASYNC_BANK0_BASE) >> PAGE_SHIFT) + 31) / 32;
#else
	page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32;
#endif
	page_mask_order = get_order(3 * page_mask_nelts * sizeof(long));
#endif

	init_mm.start_code = (unsigned long)_stext;
	init_mm.end_code = (unsigned long)_etext;
	init_mm.end_data = (unsigned long)_edata;
	init_mm.brk = (unsigned long)0;

	printk(KERN_INFO "Board Memory: %ldMB\n", physical_mem_end >> 20);
	printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20);

	printk(KERN_INFO "Memory map:\n"
	       "  fixedcode = 0x%p-0x%p\n"
	       "  text      = 0x%p-0x%p\n"
	       "  rodata    = 0x%p-0x%p\n"
	       "  bss       = 0x%p-0x%p\n"
	       "  data      = 0x%p-0x%p\n"
	       "    stack   = 0x%p-0x%p\n"
	       "  init      = 0x%p-0x%p\n"
	       "  available = 0x%p-0x%p\n"
#ifdef CONFIG_MTD_UCLINUX
	       "  rootfs    = 0x%p-0x%p\n"
#endif
#if DMA_UNCACHED_REGION > 0
	       "  DMA Zone  = 0x%p-0x%p\n"
#endif
		, (void *)FIXED_CODE_START, (void *)FIXED_CODE_END,
		_stext, _etext,
		__start_rodata, __end_rodata,
		__bss_start, __bss_stop,
		_sdata, _edata,
		(void *)&init_thread_union,
		(void *)((int)(&init_thread_union) + THREAD_SIZE),
		__init_begin, __init_end,
		(void *)_ramstart, (void *)memory_end
#ifdef CONFIG_MTD_UCLINUX
		, (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size)
#endif
#if DMA_UNCACHED_REGION > 0
		, (void *)(_ramend - DMA_UNCACHED_REGION), (void *)(_ramend)
#endif
		);
}

/*
 * Find the lowest, highest page frame number we have available
 */
void __init find_min_max_pfn(void)
{
	int i;

	max_pfn = 0;
	min_low_pfn = memory_end;

	for (i = 0; i < bfin_memmap.nr_map; i++) {
		unsigned long start, end;
		/* RAM? */
		if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
			continue;
		start = PFN_UP(bfin_memmap.map[i].addr);
		end = PFN_DOWN(bfin_memmap.map[i].addr +
				bfin_memmap.map[i].size);
		if (start >= end)
			continue;
		if (end > max_pfn)
			max_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
	}
}

static __init void setup_bootmem_allocator(void)
{
	int bootmap_size;
	int i;
	unsigned long start_pfn, end_pfn;
	unsigned long curr_pfn, last_pfn, size;

	/* mark memory between memory_start and memory_end usable */
	add_memory_region(memory_start,
		memory_end - memory_start, BFIN_MEMMAP_RAM);
	/* sanity check for overlap */
	sanitize_memmap(bfin_memmap.map, &bfin_memmap.nr_map);
	print_memory_map("boot memmap");

	/* initialize globals in linux/bootmem.h */
	find_min_max_pfn();
	/* pfn of the last usable page frame */
	if (max_pfn > memory_end >> PAGE_SHIFT)
		max_pfn = memory_end >> PAGE_SHIFT;
	/* pfn of last page frame directly mapped by kernel */
	max_low_pfn = max_pfn;
	/* pfn of the first usable page frame after kernel image */
	if (min_low_pfn < memory_start >> PAGE_SHIFT)
		min_low_pfn = memory_start >> PAGE_SHIFT;

	start_pfn = PAGE_OFFSET >> PAGE_SHIFT;
	end_pfn = memory_end >> PAGE_SHIFT;

	/*
	 * give all the memory to the bootmap allocator, tell it to put the
	 * boot mem_map at the start of memory.
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0),
			memory_start >> PAGE_SHIFT,	/* map goes here */
			start_pfn, end_pfn);

	/* register the memmap regions with the bootmem allocator */
	for (i = 0; i < bfin_memmap.nr_map; i++) {
		/*
		 * Reserve usable memory
		 */
		if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
			continue;
		/*
		 * We are rounding up the start address of usable memory:
		 */
		curr_pfn = PFN_UP(bfin_memmap.map[i].addr);
		if (curr_pfn >= end_pfn)
			continue;
		/*
		 * ... and at the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(bfin_memmap.map[i].addr +
					 bfin_memmap.map[i].size);

		if (last_pfn > end_pfn)
			last_pfn = end_pfn;

		/*
		 * .. finally, did all the rounding and playing
		 * around just make the area go away?
		 */
		if (last_pfn <= curr_pfn)
			continue;

		size = last_pfn - curr_pfn;
		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
	}

	/* reserve memory before memory_start, including bootmap */
	reserve_bootmem(PAGE_OFFSET,
		memory_start + bootmap_size + PAGE_SIZE - 1 - PAGE_OFFSET,
		BOOTMEM_DEFAULT);
}

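/* Decode one 4-bit EBSZ (SDRAM bank size) field of EBIU_SDBCTL into
 * megabytes; codes not listed decode to 0. */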
#define EBSZ_TO_MEG(ebsz) \
({ \
	int meg = 0; \
	switch (ebsz & 0xf) { \
		case 0x1: meg =  16; break; \
		case 0x3: meg =  32; break; \
		case 0x5: meg =  64; break; \
		case 0x7: meg = 128; break; \
		case 0x9: meg = 256; break; \
		case 0xb: meg = 512; break; \
	} \
	meg; \
})
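/* Work out how much external memory is attached from the memory controller
 * settings left by the bootloader (SDRAM via EBIU_SDBCTL, DDR via
 * EBIU_DDRCTL1).  Returns megabytes: e.g. DEVSZ_256 with DEVWD_8 gives
 * (256 / 8) * 2 = 64MB. */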
static inline int __init get_mem_size(void)
{
#if defined(EBIU_SDBCTL)
# if defined(BF561_FAMILY)
	int ret = 0;
	u32 sdbctl = bfin_read_EBIU_SDBCTL();
	ret += EBSZ_TO_MEG(sdbctl >>  0);
	ret += EBSZ_TO_MEG(sdbctl >>  8);
	ret += EBSZ_TO_MEG(sdbctl >> 16);
	ret += EBSZ_TO_MEG(sdbctl >> 24);
	return ret;
# else
	return EBSZ_TO_MEG(bfin_read_EBIU_SDBCTL());
# endif
#elif defined(EBIU_DDRCTL1)
	u32 ddrctl = bfin_read_EBIU_DDRCTL1();
	int ret = 0;
	switch (ddrctl & 0xc0000) {
		case DEVSZ_64:  ret = 64 / 8;  break;
		case DEVSZ_128: ret = 128 / 8; break;
		case DEVSZ_256: ret = 256 / 8; break;
		case DEVSZ_512: ret = 512 / 8; break;
	}
	switch (ddrctl & 0x30000) {
		/* intentional fall-through: each halving of the device width
		 * doubles the device count, and so the total size */
		case DEVWD_4:  ret *= 2;
		case DEVWD_8:  ret *= 2;
		case DEVWD_16: break;
	}
	if ((ddrctl & 0xc000) == 0x4000)
		ret *= 2;
	return ret;
#endif
	BUG();
}

__attribute__((weak))
void __init native_machine_early_platform_add_devices(void)
{
}

void __init setup_arch(char **cmdline_p)
{
	unsigned long sclk, cclk;

	native_machine_early_platform_add_devices();

	enable_shadow_console();

	/* Check to make sure we are running on the right processor */
	if (unlikely(CPUID != bfin_cpuid()))
		printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
			CPU, bfin_cpuid(), bfin_revid());

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

#if defined(CONFIG_CMDLINE_BOOL)
	strncpy(&command_line[0], CONFIG_CMDLINE, sizeof(command_line));
	command_line[sizeof(command_line) - 1] = 0;
#endif

	/* Keep a copy of command line */
	*cmdline_p = &command_line[0];
	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';

	memset(&bfin_memmap, 0, sizeof(bfin_memmap));

	/* If the user does not specify things on the command line, use
	 * what the bootloader set up
	 */
	physical_mem_end = 0;
	parse_cmdline_early(&command_line[0]);

	if (_ramend == 0)
		_ramend = get_mem_size() * 1024 * 1024;

	if (physical_mem_end == 0)
		physical_mem_end = _ramend;

	memory_setup();

	/* Initialize Async memory banks */
	bfin_write_EBIU_AMBCTL0(AMBCTL0VAL);
	bfin_write_EBIU_AMBCTL1(AMBCTL1VAL);
	bfin_write_EBIU_AMGCTL(AMGCTLVAL);
#ifdef CONFIG_EBIU_MBSCTLVAL
	bfin_write_EBIU_MBSCTL(CONFIG_EBIU_MBSCTLVAL);
	bfin_write_EBIU_MODE(CONFIG_EBIU_MODEVAL);
	bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL);
#endif
#ifdef CONFIG_BFIN_HYSTERESIS_CONTROL
	bfin_write_PORTF_HYSTERISIS(HYST_PORTF_0_15);
	bfin_write_PORTG_HYSTERISIS(HYST_PORTG_0_15);
	bfin_write_PORTH_HYSTERISIS(HYST_PORTH_0_15);
	bfin_write_MISCPORT_HYSTERISIS((bfin_read_MISCPORT_HYSTERISIS() &
					~HYST_NONEGPIO_MASK) | HYST_NONEGPIO);
#endif

	cclk = get_cclk();
	sclk = get_sclk();

	if ((ANOMALY_05000273 || ANOMALY_05000274) && (cclk >> 1) < sclk)
		panic("ANOMALY 05000273 or 05000274: CCLK must be >= 2*SCLK");

#ifdef BF561_FAMILY
	if (ANOMALY_05000266) {
		bfin_read_IMDMA_D0_IRQ_STATUS();
		bfin_read_IMDMA_D1_IRQ_STATUS();
	}
#endif
	printk(KERN_INFO "Hardware Trace ");
	if (bfin_read_TBUFCTL() & 0x1)
		printk(KERN_CONT "Active ");
	else
		printk(KERN_CONT "Off ");
	if (bfin_read_TBUFCTL() & 0x2)
		printk(KERN_CONT "and Enabled\n");
	else
		printk(KERN_CONT "and Disabled\n");

	printk(KERN_INFO "Boot Mode: %i\n", bfin_read_SYSCR() & 0xF);

	/* Newer parts mirror SWRST bits in SYSCR */
#if defined(CONFIG_BF53x) || defined(CONFIG_BF561) || defined(CONFIG_BF538) || \
	defined(CONFIG_BF539)
	_bfin_swrst = bfin_read_SWRST();
#else
	/* Clear boot mode field */
	_bfin_swrst = bfin_read_SYSCR() & ~0xf;
#endif

#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
	bfin_write_SWRST(_bfin_swrst & ~DOUBLE_FAULT);
#endif
#ifdef CONFIG_DEBUG_DOUBLEFAULT_RESET
	bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT);
#endif

#ifdef CONFIG_SMP
	if (_bfin_swrst & SWRST_DBL_FAULT_A) {
#else
	if (_bfin_swrst & RESET_DOUBLE) {
#endif
		printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
		/* We assume the crashing kernel and the current symbol table match */
		printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
			(int)init_saved_seqstat & SEQSTAT_EXCAUSE, init_saved_retx);
		printk(KERN_NOTICE "   DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr);
		printk(KERN_NOTICE "   ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr);
#endif
		printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
			init_retx);
	} else if (_bfin_swrst & RESET_WDOG)
		printk(KERN_INFO "Recovering from Watchdog event\n");
	else if (_bfin_swrst & RESET_SOFTWARE)
		printk(KERN_NOTICE "Reset caused by Software reset\n");

	printk(KERN_INFO "Blackfin support (C) 2004-2010 Analog Devices, Inc.\n");
	if (bfin_compiled_revid() == 0xffff)
		printk(KERN_INFO "Compiled for ADSP-%s Rev any, running on 0.%d\n", CPU, bfin_revid());
	else if (bfin_compiled_revid() == -1)
		printk(KERN_INFO "Compiled for ADSP-%s Rev none\n", CPU);
	else
		printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid());

	if (likely(CPUID == bfin_cpuid())) {
		if (bfin_revid() != bfin_compiled_revid()) {
			if (bfin_compiled_revid() == -1)
				printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
				       bfin_revid());
			else if (bfin_compiled_revid() != 0xffff) {
				printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
				       bfin_compiled_revid(), bfin_revid());
				if (bfin_compiled_revid() > bfin_revid())
					panic("Error: you are missing anomaly workarounds for this rev");
			}
		}
		if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX)
			printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n",
			       CPU, bfin_revid());
	}

	printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");

	printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
	       cclk / 1000000, sclk / 1000000);

	setup_bootmem_allocator();

	paging_init();

	/* Copy atomic sequences to their fixed location, and sanity check that
	   these locations are the ones that we advertise to userspace.  */
	memcpy((void *)FIXED_CODE_START, &fixed_code_start,
	       FIXED_CODE_END - FIXED_CODE_START);
	BUG_ON((char *)&sigreturn_stub - (char *)&fixed_code_start
	       != SIGRETURN_STUB - FIXED_CODE_START);
	BUG_ON((char *)&atomic_xchg32 - (char *)&fixed_code_start
	       != ATOMIC_XCHG32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_cas32 - (char *)&fixed_code_start
	       != ATOMIC_CAS32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_add32 - (char *)&fixed_code_start
	       != ATOMIC_ADD32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_sub32 - (char *)&fixed_code_start
	       != ATOMIC_SUB32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_ior32 - (char *)&fixed_code_start
	       != ATOMIC_IOR32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_and32 - (char *)&fixed_code_start
	       != ATOMIC_AND32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_xor32 - (char *)&fixed_code_start
	       != ATOMIC_XOR32 - FIXED_CODE_START);
	BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start
		!= SAFE_USER_INSTRUCTION - FIXED_CODE_START);

#ifdef CONFIG_SMP
	platform_init_cpus();
#endif
	init_exception_vectors();
	bfin_cache_init();	/* Initialize caches for the boot CPU */
}

static int __init topology_init(void)
{
	unsigned int cpu;
	/* Record CPU-private information for the boot processor. */
	bfin_setup_cpudata(0);

	for_each_possible_cpu(cpu) {
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
	}

	return 0;
}

subsys_initcall(topology_init);

/* Get the input clock frequency */
static u_long cached_clkin_hz = CONFIG_CLKIN_HZ;
static u_long get_clkin_hz(void)
{
	return cached_clkin_hz;
}
static int __init early_init_clkin_hz(char *buf)
{
	cached_clkin_hz = simple_strtoul(buf, NULL, 0);
#ifdef BFIN_KERNEL_CLOCK
	if (cached_clkin_hz != CONFIG_CLKIN_HZ)
		panic("cannot change clkin_hz when reprogramming clocks");
#endif
	return 1;
}
early_param("clkin_hz=", early_init_clkin_hz);

/* Get the VCO (voltage controlled oscillator) frequency */
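/* VCO = (CLKIN >> DF) * MSEL, where DF is PLL_CTL bit 0 and MSEL is
 * PLL_CTL bits 9..14 (an MSEL of 0 means 64) */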
static u_long get_vco(void)
{
	static u_long cached_vco;
	u_long msel, pll_ctl;

	/* The assumption here is that VCO never changes at runtime.
	 * If, someday, we support that, then we'll have to change this.
	 */
	if (cached_vco)
		return cached_vco;

	pll_ctl = bfin_read_PLL_CTL();
	msel = (pll_ctl >> 9) & 0x3F;
	if (0 == msel)
		msel = 64;

	cached_vco = get_clkin_hz();
	cached_vco >>= (1 & pll_ctl);	/* DF bit */
	cached_vco *= msel;
	return cached_vco;
}

/* Get the Core clock */
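/* CCLK = VCO >> CSEL (PLL_DIV bits 4..5), or CLKIN when the PLL is
 * bypassed.  If the CSEL result would be slower than SCLK, the SSEL
 * divider is used instead (the "SCLK > CCLK" case below). */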
u_long get_cclk(void)
{
	static u_long cached_cclk_pll_div, cached_cclk;
	u_long csel, ssel;

	if (bfin_read_PLL_STAT() & 0x1)
		return get_clkin_hz();

	ssel = bfin_read_PLL_DIV();
	if (ssel == cached_cclk_pll_div)
		return cached_cclk;
	else
		cached_cclk_pll_div = ssel;

	csel = ((ssel >> 4) & 0x03);
	ssel &= 0xf;
	if (ssel && ssel < (1 << csel))	/* SCLK > CCLK */
		cached_cclk = get_vco() / ssel;
	else
		cached_cclk = get_vco() >> csel;
	return cached_cclk;
}
EXPORT_SYMBOL(get_cclk);

/* Get the System clock */
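/* SCLK = VCO / SSEL (PLL_DIV bits 0..3), or CLKIN when the PLL is
 * bypassed; an SSEL of 0 is invalid and treated as 1 */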
u_long get_sclk(void)
{
	static u_long cached_sclk;
	u_long ssel;

	/* The assumption here is that SCLK never changes at runtime.
	 * If, someday, we support that, then we'll have to change this.
	 */
	if (cached_sclk)
		return cached_sclk;

	if (bfin_read_PLL_STAT() & 0x1)
		return get_clkin_hz();

	ssel = bfin_read_PLL_DIV() & 0xf;
	if (0 == ssel) {
		printk(KERN_WARNING "Invalid System Clock\n");
		ssel = 1;
	}

	cached_sclk = get_vco() / ssel;
	return cached_sclk;
}
EXPORT_SYMBOL(get_sclk);

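/* Convert between SCLK cycle counts and microseconds, using 64-bit
 * intermediates to avoid overflow: e.g. 1330 cycles at SCLK = 133MHz
 * is 10us. */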
unsigned long sclk_to_usecs(unsigned long sclk)
{
	u64 tmp = USEC_PER_SEC * (u64)sclk;
	do_div(tmp, get_sclk());
	return tmp;
}
EXPORT_SYMBOL(sclk_to_usecs);

unsigned long usecs_to_sclk(unsigned long usecs)
{
	u64 tmp = get_sclk() * (u64)usecs;
	do_div(tmp, USEC_PER_SEC);
	return tmp;
}
EXPORT_SYMBOL(usecs_to_sclk);

/*
 *	Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	char *cpu, *mmu, *fpu, *vendor, *cache;
	uint32_t revid;
	int cpu_num = *(unsigned int *)v;
	u_long sclk, cclk;
	u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0;
	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu_num);

	cpu = CPU;
	mmu = "none";
	fpu = "none";
	revid = bfin_revid();

	sclk = get_sclk();
	cclk = get_cclk();

	switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) {
	case 0xca:
		vendor = "Analog Devices";
		break;
	default:
		vendor = "unknown";
		break;
	}

	seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n", cpu_num, vendor);

	if (CPUID == bfin_cpuid())
		seq_printf(m, "cpu family\t: 0x%04x\n", CPUID);
	else
		seq_printf(m, "cpu family\t: Compiled for:0x%04x, running on:0x%04x\n",
			CPUID, bfin_cpuid());

	seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
		"stepping\t: %d ",
		cpu, cclk/1000000, sclk/1000000,
#ifdef CONFIG_MPU
		"mpu on",
#else
		"mpu off",
#endif
		revid);

	if (bfin_revid() != bfin_compiled_revid()) {
		if (bfin_compiled_revid() == -1)
			seq_printf(m, "(Compiled for Rev none)");
		else if (bfin_compiled_revid() == 0xffff)
			seq_printf(m, "(Compiled for Rev any)");
		else
			seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
	}

	seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
		cclk/1000000, cclk%1000000,
		sclk/1000000, sclk%1000000);
	seq_printf(m, "bogomips\t: %lu.%02lu\n"
		"Calibration\t: %lu loops\n",
		(loops_per_jiffy * HZ) / 500000,
		((loops_per_jiffy * HZ) / 5000) % 100,
		(loops_per_jiffy * HZ));

	/* Check Cache configuration */
	switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) {
	case ACACHE_BSRAM:
		cache = "dbank-A/B\t: cache/sram";
		dcache_size = 16;
		dsup_banks = 1;
		break;
	case ACACHE_BCACHE:
		cache = "dbank-A/B\t: cache/cache";
		dcache_size = 32;
		dsup_banks = 2;
		break;
	case ASRAM_BSRAM:
		cache = "dbank-A/B\t: sram/sram";
		dcache_size = 0;
		dsup_banks = 0;
		break;
	default:
		cache = "unknown";
		dcache_size = 0;
		dsup_banks = 0;
		break;
	}

	/* Is it turned on? */
	if ((cpudata->dmemctl & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
		dcache_size = 0;

	if ((cpudata->imemctl & (IMC | ENICPLB)) != (IMC | ENICPLB))
		icache_size = 0;

	seq_printf(m, "cache size\t: %d KB(L1 icache) "
		"%d KB(L1 dcache) %d KB(L2 cache)\n",
		icache_size, dcache_size, 0);
	seq_printf(m, "%s\n", cache);
	seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
		   "cacheable"
#else
		   "uncacheable"
#endif
		   " in instruction cache\n");
	seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
		      "cacheable (write-back)"
#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
		      "cacheable (write-through)"
#else
		      "uncacheable"
#endif
		      " in data cache\n");

	if (icache_size)
		seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
			   BFIN_ISUBBANKS, BFIN_IWAYS, BFIN_ILINES);
	else
		seq_printf(m, "icache setup\t: off\n");

	seq_printf(m,
		   "dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n",
		   dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
		   BFIN_DLINES);
#ifdef __ARCH_SYNC_CORE_DCACHE
	seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", dcache_invld_count[cpu_num]);
#endif
#ifdef __ARCH_SYNC_CORE_ICACHE
	seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", icache_invld_count[cpu_num]);
#endif

	if (cpu_num != num_possible_cpus() - 1)
		return 0;

	if (L2_LENGTH) {
		seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
		seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_ICACHEABLE)
			      "cacheable"
#else
			      "uncacheable"
#endif
			      " in instruction cache\n");
		seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_WRITEBACK)
			      "cacheable (write-back)"
#elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
			      "cacheable (write-through)"
#else
			      "uncacheable"
#endif
			      " in data cache\n");
	}
	seq_printf(m, "board name\t: %s\n", bfin_board_name);
	seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
		 physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
	seq_printf(m, "kernel memory\t: %d kB (0x%p -> 0x%p)\n",
		((int)memory_end - (int)_rambase) >> 10,
		(void *)_rambase,
		(void *)memory_end);
	seq_printf(m, "\n");

	return 0;
}

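/* seq_file iteration over the online CPUs, so /proc/cpuinfo emits one
 * show_cpuinfo() block per CPU */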
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)
		*pos = first_cpu(cpu_online_map);
	if (*pos >= num_online_cpus())
		return NULL;

	return pos;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = next_cpu(*pos, cpu_online_map);

	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};

void __init cmdline_init(const char *r0)
{
	early_shadow_stamp();
	if (r0)
		strncpy(command_line, r0, COMMAND_LINE_SIZE);
}