/*
 * arch/sh/kernel/setup.c
 *
 * This file handles the architecture-dependent parts of initialization
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2010 Paul Mundt
 */
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/utsname.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/pfn.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/err.h>
#include <linux/debugfs.h>
#include <linux/crash_dump.h>
#include <linux/mmzone.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/memblock.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/elf.h>
#include <asm/sections.h>
#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/clock.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>

/*
 * Initialize loops_per_jiffy to 10000000 (1000 MIPS).
 * This value is used at the very early stages of serial setup;
 * erring on the large side is harmless here.
 */
struct sh_cpuinfo cpu_data[NR_CPUS] __read_mostly = {
	[0] = {
		.type			= CPU_SH_NONE,
		.family			= CPU_FAMILY_UNKNOWN,
		.loops_per_jiffy	= 10000000,
	},
};
EXPORT_SYMBOL(cpu_data);

/*
 * The machine vector. First entry in .machvec.init, or clobbered by
 * sh_mv= on the command line, prior to .machvec.init teardown.
 */
struct sh_machine_vector sh_mv = { .mv_name = "generic", };
EXPORT_SYMBOL(sh_mv);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

extern int root_mountflags;

#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000

static char __initdata command_line[COMMAND_LINE_SIZE] = { 0, };

static struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_end = 0;
EXPORT_SYMBOL(memory_end);
unsigned long memory_limit = 0;

static struct resource mem_resources[MAX_NUMNODES];

int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;

static int __init early_parse_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = PAGE_ALIGN(memparse(p, &p));

	pr_notice("Memory limited to %ldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_parse_mem);
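/*
 * Example (illustrative values only): booting with "mem=64M" on the
 * kernel command line sets memory_limit to 0x04000000.  memparse()
 * accepts the usual K/M/G suffixes, and the result is rounded to a
 * page boundary by PAGE_ALIGN() before it is later used to cap the
 * amount of memory handed to the allocator.
 */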

void __init check_for_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long start, end;

	/*
	 * Check for the rare cases where boot loaders adhere to the boot
	 * ABI.
	 */
	if (!LOADER_TYPE || !INITRD_START || !INITRD_SIZE)
		goto disable;

	start = INITRD_START + __MEMORY_START;
	end = start + INITRD_SIZE;

	if (unlikely(end <= start))
		goto disable;
	if (unlikely(start & ~PAGE_MASK)) {
		pr_err("initrd must be page aligned\n");
		goto disable;
	}

	if (unlikely(start < PAGE_OFFSET)) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	if (unlikely(end > memblock_end_of_DRAM())) {
		pr_err("initrd extends beyond end of memory "
		       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
		       end, (unsigned long)memblock_end_of_DRAM());
		goto disable;
	}
	/*
	 * If we got this far in spite of the boot loader's best efforts
	 * to the contrary, assume we actually have a valid initrd and
	 * fix up the root dev.
	 */
	ROOT_DEV = Root_RAM0;

	/*
	 * Address sanitization
	 */
	initrd_start = (unsigned long)__va(__pa(start));
	initrd_end = initrd_start + INITRD_SIZE;

	memblock_reserve(__pa(initrd_start), INITRD_SIZE);

	return;

disable:
	pr_info("initrd disabled\n");
	initrd_start = initrd_end = 0;
#endif
}

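/*
 * calibrate_delay() is overridden here: rather than timing a delay
 * loop, loops_per_jiffy is preset from the CPU clock as
 * (clk_get_rate(clk) >> 1) / HZ.  As an illustrative example, a
 * hypothetical 400 MHz cpu_clk with HZ=100 gives lpj = 2,000,000,
 * which the printk below reports as "400.00 BogoMIPS PRESET".
 */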
void __cpuinit calibrate_delay(void)
{
	struct clk *clk = clk_get(NULL, "cpu_clk");

	if (IS_ERR(clk))
		panic("Need a sane CPU clock definition!");

	loops_per_jiffy = (clk_get_rate(clk) >> 1) / HZ;

	printk(KERN_INFO "Calibrating delay loop (skipped)... "
			 "%lu.%02lu BogoMIPS PRESET (lpj=%lu)\n",
			 loops_per_jiffy/(500000/HZ),
			 (loops_per_jiffy/(5000/HZ)) % 100,
			 loops_per_jiffy);
}

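/*
 * Register a node's RAM range with the resource tree and the early
 * memory bookkeeping.  Each node gets a single "System RAM" resource
 * for now; the kernel code/data/bss resources are then nested inside
 * whichever region actually contains them.
 */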
void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
						unsigned long end_pfn)
{
	struct resource *res = &mem_resources[nid];
	unsigned long start, end;

	WARN_ON(res->name); /* max one active range per node for now */

	start = start_pfn << PAGE_SHIFT;
	end = end_pfn << PAGE_SHIFT;

	res->name = "System RAM";
	res->start = start;
	res->end = end - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	if (request_resource(&iomem_resource, res)) {
		pr_err("unable to request memory_resource 0x%lx 0x%lx\n",
		       start_pfn, end_pfn);
		return;
	}

	/*
	 * We don't know which RAM region contains the kernel data, so we
	 * try to nest the code/data/bss resources in each region and let
	 * the resource manager reject the requests that don't fit.
	 */
	request_resource(res, &code_resource);
	request_resource(res, &data_resource);
	request_resource(res, &bss_resource);

	/*
	 * Also make sure that there is a PMB mapping that covers this
	 * range before we attempt to activate it, to avoid a reset by the
	 * MMU.  We can hit this path with NUMA or memory hot-add.
	 */
	pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
			 PAGE_KERNEL);

	add_active_range(nid, start_pfn, end_pfn);
}

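/*
 * Weak stub: platform code may override this to register early
 * platform devices; setup_arch() calls it after parse_early_param()
 * and before the machine vector setup runs.
 */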
void __init __weak plat_early_device_setup(void)
{
}

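/*
 * setup_arch() is the architecture-specific entry point called from
 * the generic start_kernel() path.  It decodes the boot parameter
 * block, sets up the kernel resources and command line, initializes
 * paging, and finally hands off to the board's machine vector setup.
 */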
void __init setup_arch(char **cmdline_p)
{
	enable_mmu();

	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);

	printk(KERN_NOTICE "Boot params:\n"
			   "... MOUNT_ROOT_RDONLY - %08lx\n"
			   "... RAMDISK_FLAGS     - %08lx\n"
			   "... ORIG_ROOT_DEV     - %08lx\n"
			   "... LOADER_TYPE       - %08lx\n"
			   "... INITRD_START      - %08lx\n"
			   "... INITRD_SIZE       - %08lx\n",
			   MOUNT_ROOT_RDONLY, RAMDISK_FLAGS,
			   ORIG_ROOT_DEV, LOADER_TYPE,
			   INITRD_START, INITRD_SIZE);

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(_etext)-1;
	data_resource.start = virt_to_phys(_etext);
	data_resource.end = virt_to_phys(_edata)-1;
	bss_resource.start = virt_to_phys(__bss_start);
	bss_resource.end = virt_to_phys(_ebss)-1;

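	/*
	 * Select the kernel command line: CONFIG_CMDLINE_OVERWRITE forces
	 * the built-in CONFIG_CMDLINE, otherwise the bootloader-provided
	 * COMMAND_LINE is used and, with CONFIG_CMDLINE_EXTEND, the
	 * built-in string is appended to it.
	 */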
#ifdef CONFIG_CMDLINE_OVERWRITE
	strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
#else
	strlcpy(command_line, COMMAND_LINE, sizeof(command_line));
#ifdef CONFIG_CMDLINE_EXTEND
	strlcat(command_line, " ", sizeof(command_line));
	strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line));
#endif
#endif

	/* Save unparsed command line copy for /proc/cmdline */
	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	parse_early_param();

	plat_early_device_setup();

	sh_mv_setup();

	/* Let earlyprintk output early console messages */
	early_platform_driver_probe("earlyprintk", 1, 1);

	paging_init();

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	/* Perform the machine specific initialisation */
	if (likely(sh_mv.mv_setup))
		sh_mv.mv_setup(cmdline_p);

	plat_smp_setup();
}

/* processor boot mode configuration */
int generic_mode_pins(void)
{
	pr_warning("generic_mode_pins(): missing mode pin configuration\n");
	return 0;
}

int test_mode_pin(int pin)
{
	return sh_mv.mv_mode_pins() & pin;
}

static const char *cpu_name[] = {
	[CPU_SH7201]	= "SH7201",
	[CPU_SH7203]	= "SH7203",	[CPU_SH7263]	= "SH7263",
	[CPU_SH7206]	= "SH7206",	[CPU_SH7619]	= "SH7619",
	[CPU_SH7705]	= "SH7705",	[CPU_SH7706]	= "SH7706",
	[CPU_SH7707]	= "SH7707",	[CPU_SH7708]	= "SH7708",
	[CPU_SH7709]	= "SH7709",	[CPU_SH7710]	= "SH7710",
	[CPU_SH7712]	= "SH7712",	[CPU_SH7720]	= "SH7720",
	[CPU_SH7721]	= "SH7721",	[CPU_SH7729]	= "SH7729",
	[CPU_SH7750]	= "SH7750",	[CPU_SH7750S]	= "SH7750S",
	[CPU_SH7750R]	= "SH7750R",	[CPU_SH7751]	= "SH7751",
	[CPU_SH7751R]	= "SH7751R",	[CPU_SH7760]	= "SH7760",
	[CPU_SH4_202]	= "SH4-202",	[CPU_SH4_501]	= "SH4-501",
	[CPU_SH7763]	= "SH7763",	[CPU_SH7770]	= "SH7770",
	[CPU_SH7780]	= "SH7780",	[CPU_SH7781]	= "SH7781",
	[CPU_SH7343]	= "SH7343",	[CPU_SH7785]	= "SH7785",
	[CPU_SH7786]	= "SH7786",	[CPU_SH7757]	= "SH7757",
	[CPU_SH7722]	= "SH7722",	[CPU_SHX3]	= "SH-X3",
	[CPU_SH5_101]	= "SH5-101",	[CPU_SH5_103]	= "SH5-103",
	[CPU_MXG]	= "MX-G",	[CPU_SH7723]	= "SH7723",
	[CPU_SH7366]	= "SH7366",	[CPU_SH7724]	= "SH7724",
	[CPU_SH_NONE]	= "Unknown"
};

const char *get_cpu_subtype(struct sh_cpuinfo *c)
{
	return cpu_name[c->type];
}
EXPORT_SYMBOL(get_cpu_subtype);

#ifdef CONFIG_PROC_FS
/* Symbolic CPU flags, keep in sync with asm/cpu-features.h */
static const char *cpu_flags[] = {
	"none", "fpu", "p2flush", "mmuassoc", "dsp", "perfctr",
	"ptea", "llsc", "l2", "op32", "pteaex", NULL
};

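/*
 * Note on indexing: flag bit i in cpuinfo->flags corresponds to
 * cpu_flags[i + 1], since slot 0 holds the "none" placeholder that is
 * printed when no feature flags are set at all.
 */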
static void show_cpuflags(struct seq_file *m, struct sh_cpuinfo *c)
{
	unsigned long i;

	seq_printf(m, "cpu flags\t:");

	if (!c->flags) {
		seq_printf(m, " %s\n", cpu_flags[0]);
		return;
	}

	for (i = 0; cpu_flags[i]; i++)
		if ((c->flags & (1 << i)))
			seq_printf(m, " %s", cpu_flags[i+1]);

	seq_printf(m, "\n");
}

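/*
 * Total cache size is ways * sets * line size.  As an illustrative
 * example (hypothetical geometry), a 4-way cache with 256 sets and
 * 32-byte lines reports "32KiB (4-way)".
 */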
static void show_cacheinfo(struct seq_file *m, const char *type,
			   struct cache_info info)
{
	unsigned int cache_size;

	cache_size = info.ways * info.sets * info.linesz;

	seq_printf(m, "%s size\t: %2dKiB (%d-way)\n",
		   type, cache_size >> 10, info.ways);
}

/*
 *	Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct sh_cpuinfo *c = v;
	unsigned int cpu = c - cpu_data;

	if (!cpu_online(cpu))
		return 0;

	if (cpu == 0)
		seq_printf(m, "machine\t\t: %s\n", get_system_type());
	else
		seq_printf(m, "\n");

	seq_printf(m, "processor\t: %d\n", cpu);
	seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine);
	seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype(c));
	if (c->cut_major == -1)
		seq_printf(m, "cut\t\t: unknown\n");
	else if (c->cut_minor == -1)
		seq_printf(m, "cut\t\t: %d.x\n", c->cut_major);
	else
		seq_printf(m, "cut\t\t: %d.%d\n", c->cut_major, c->cut_minor);

	show_cpuflags(m, c);

	seq_printf(m, "cache type\t: ");

	/*
	 * Check what type of cache we have: we support both the unified
	 * cache of the SH-2 and SH-3, as well as the Harvard-style split
	 * cache of the SH-4.
	 */
	if (c->icache.flags & SH_CACHE_COMBINED) {
		seq_printf(m, "unified\n");
		show_cacheinfo(m, "cache", c->icache);
	} else {
		seq_printf(m, "split (harvard)\n");
		show_cacheinfo(m, "icache", c->icache);
		show_cacheinfo(m, "dcache", c->dcache);
	}

	/* Optional secondary cache */
	if (c->flags & CPU_HAS_L2_CACHE)
		show_cacheinfo(m, "scache", c->scache);

	seq_printf(m, "bogomips\t: %lu.%02lu\n",
		     c->loops_per_jiffy/(500000/HZ),
		     (c->loops_per_jiffy/(5000/HZ)) % 100);

	return 0;
}

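/*
 * seq_file iterator for /proc/cpuinfo: walk cpu_data[] one entry per
 * possible CPU; show_cpuinfo() itself skips CPUs that are not online.
 */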
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
#endif /* CONFIG_PROC_FS */

struct dentry *sh_debugfs_root;

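/*
 * Create the top-level "sh" directory in debugfs.  debugfs_create_dir()
 * signals failure with either a NULL pointer or an ERR_PTR() value
 * (for instance when debugfs is not configured), so both conventions
 * are handled below.
 */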
static int __init sh_debugfs_init(void)
{
	sh_debugfs_root = debugfs_create_dir("sh", NULL);
	if (!sh_debugfs_root)
		return -ENOMEM;
	if (IS_ERR(sh_debugfs_root))
		return PTR_ERR(sh_debugfs_root);

	return 0;
}
arch_initcall(sh_debugfs_init);