/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/apm_bios.h>
#ifdef CONFIG_BLK_DEV_RAM
#include <linux/blk.h>
#endif
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/seq_file.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/cobalt.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/e820.h>
#include <asm/dma.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>

/*
 * Machine setup..
 */

char ignore_irq13;		/* set if exception 16 works */
struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };

unsigned long mmu_cr4_features;

/*
 * Bus types ..
 */
#ifdef CONFIG_EISA
int EISA_bus;
#endif
int MCA_bus;

/* for MCA, but anyone else can use it if they want */
unsigned int machine_id;
unsigned int machine_submodel_id;
unsigned int BIOS_revision;
unsigned int mca_pentium_flag;

/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0x10000000;

/* user-defined highmem size */
static unsigned int highmem_pages __initdata = -1;

/*
 * Setup options
 */
struct drive_info_struct { char dummy[32]; } drive_info;
struct screen_info screen_info;
struct apm_info apm_info;
struct sys_desc_table_struct {
	unsigned short length;
	unsigned char table[0];
};

struct e820map e820;

unsigned char aux_device_present;

extern void mcheck_init(struct cpuinfo_x86 *c);
extern void dmi_scan_machine(void);
extern int root_mountflags;
extern char _text, _etext, _edata, _end;

static int have_cpuid_p(void) __init;

static int disable_x86_serial_nr __initdata = 1;
static int disable_x86_ht __initdata = 0;
static u32 disabled_x86_caps[NCAPINTS] __initdata = { 0 };
extern int blk_nohighio;

int enable_acpi_smp_table;

/*
 * This is set up by the setup-routine at boot-time
 */
#define PARAM	((unsigned char *)empty_zero_page)
#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
#define E820_MAP_NR (*(char*) (PARAM+E820NR))
#define E820_MAP    ((struct e820entry *) (PARAM+E820MAP))
#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
#define KERNEL_START (*(unsigned long *) (PARAM+0x214))
#define INITRD_START (*(unsigned long *) (PARAM+0x218))
#define INITRD_SIZE (*(unsigned long *) (PARAM+0x21c))
#define COMMAND_LINE ((char *) (PARAM+2048))
#define COMMAND_LINE_SIZE 256
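
/*
 * These offsets mirror the i386 boot parameter block ("zero page")
 * that the real-mode setup code fills in before the kernel proper
 * starts; empty_zero_page still holds a copy of it at this point.
 */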

#define RAMDISK_IMAGE_START_MASK  	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000

#ifdef	CONFIG_VISWS
char visws_board_type = -1;
char visws_board_rev = -1;

#define	PIIX_PM_START		0x0F80

#define	SIO_GPIO_START		0x0FC0

#define	SIO_PM_START		0x0FC8

#define	PMBASE			PIIX_PM_START
#define	GPIREG0			(PMBASE+0x30)
#define	GPIREG(x)		(GPIREG0+((x)/8))
#define	PIIX_GPI_BD_ID1		18
#define	PIIX_GPI_BD_REG		GPIREG(PIIX_GPI_BD_ID1)

#define	PIIX_GPI_BD_SHIFT	(PIIX_GPI_BD_ID1 % 8)

#define	SIO_INDEX	0x2e
#define	SIO_DATA	0x2f

#define	SIO_DEV_SEL	0x7
#define	SIO_DEV_ENB	0x30
#define	SIO_DEV_MSB	0x60
#define	SIO_DEV_LSB	0x61

#define	SIO_GP_DEV	0x7

#define	SIO_GP_BASE	SIO_GPIO_START
#define	SIO_GP_MSB	(SIO_GP_BASE>>8)
#define	SIO_GP_LSB	(SIO_GP_BASE&0xff)

#define	SIO_GP_DATA1	(SIO_GP_BASE+0)

#define	SIO_PM_DEV	0x8

#define	SIO_PM_BASE	SIO_PM_START
#define	SIO_PM_MSB	(SIO_PM_BASE>>8)
#define	SIO_PM_LSB	(SIO_PM_BASE&0xff)
#define	SIO_PM_INDEX	(SIO_PM_BASE+0)
#define	SIO_PM_DATA	(SIO_PM_BASE+1)

#define	SIO_PM_FER2	0x1

#define	SIO_PM_GP_EN	0x80

static void __init visws_get_board_type_and_rev(void)
{
	int raw;

	/* The original masked the GPI value with the register *address*
	   (PIIX_GPI_BD_REG), which cannot be what was meant; the board-type
	   input is the bit at PIIX_GPI_BD_SHIFT, so mask with that bit. */
	visws_board_type = (char)((inb_p(PIIX_GPI_BD_REG) &
				   (1 << PIIX_GPI_BD_SHIFT)) >> PIIX_GPI_BD_SHIFT);
/*
 * Get Board rev.
 * First, we have to initialize the 307 part to allow us access
 * to the GPIO registers.  Let's map them at 0x0fc0 which is right
 * after the PIIX4 PM section.
 */
	outb_p(SIO_DEV_SEL, SIO_INDEX);
	outb_p(SIO_GP_DEV, SIO_DATA);	/* Talk to GPIO regs. */

	outb_p(SIO_DEV_MSB, SIO_INDEX);
	outb_p(SIO_GP_MSB, SIO_DATA);	/* MSB of GPIO base address */

	outb_p(SIO_DEV_LSB, SIO_INDEX);
	outb_p(SIO_GP_LSB, SIO_DATA);	/* LSB of GPIO base address */

	outb_p(SIO_DEV_ENB, SIO_INDEX);
	outb_p(1, SIO_DATA);		/* Enable GPIO registers. */

/*
 * Now, we have to map the power management section to write
 * a bit which enables access to the GPIO registers.
 * What lunatic came up with this shit?
 */
	outb_p(SIO_DEV_SEL, SIO_INDEX);
	outb_p(SIO_PM_DEV, SIO_DATA);	/* Talk to PM regs. */

	outb_p(SIO_DEV_MSB, SIO_INDEX);
	outb_p(SIO_PM_MSB, SIO_DATA);	/* MSB of PM base address */

	outb_p(SIO_DEV_LSB, SIO_INDEX);
	outb_p(SIO_PM_LSB, SIO_DATA);	/* LSB of PM base address */

	outb_p(SIO_DEV_ENB, SIO_INDEX);
	outb_p(1, SIO_DATA);		/* Enable PM registers. */

/*
 * Now, write the PM register which enables the GPIO registers.
 */
	outb_p(SIO_PM_FER2, SIO_PM_INDEX);
	outb_p(SIO_PM_GP_EN, SIO_PM_DATA);

/*
 * Now, initialize the GPIO registers.
 * We want them all to be inputs which is the
 * power on default, so let's leave them alone.
 * So, let's just read the board rev!
 */
	raw = inb_p(SIO_GP_DATA1);
	raw &= 0x7f;	/* 7 bits of valid board revision ID. */

	if (visws_board_type == VISWS_320) {
		if (raw < 0x6) {
			visws_board_rev = 4;
		} else if (raw < 0xc) {
			visws_board_rev = 5;
		} else {
			visws_board_rev = 6;
		}
	} else if (visws_board_type == VISWS_540) {
		visws_board_rev = 2;
	} else {
		visws_board_rev = raw;
	}

	printk(KERN_INFO "Silicon Graphics %s (rev %d)\n",
		visws_board_type == VISWS_320 ? "320" :
		(visws_board_type == VISWS_540 ? "540" : "unknown"),
		visws_board_rev);
}
#endif


static char command_line[COMMAND_LINE_SIZE];
       char saved_command_line[COMMAND_LINE_SIZE];

struct resource standard_io_resources[] = {
	{ "dma1", 0x00, 0x1f, IORESOURCE_BUSY },
	{ "pic1", 0x20, 0x3f, IORESOURCE_BUSY },
	{ "timer", 0x40, 0x5f, IORESOURCE_BUSY },
	{ "keyboard", 0x60, 0x6f, IORESOURCE_BUSY },
	{ "dma page reg", 0x80, 0x8f, IORESOURCE_BUSY },
	{ "pic2", 0xa0, 0xbf, IORESOURCE_BUSY },
	{ "dma2", 0xc0, 0xdf, IORESOURCE_BUSY },
	{ "fpu", 0xf0, 0xff, IORESOURCE_BUSY }
};

#define STANDARD_IO_RESOURCES (sizeof(standard_io_resources)/sizeof(struct resource))

static struct resource code_resource = { "Kernel code", 0x100000, 0 };
static struct resource data_resource = { "Kernel data", 0, 0 };
static struct resource vram_resource = { "Video RAM area", 0xa0000, 0xbffff, IORESOURCE_BUSY };

/* System ROM resources */
#define MAXROMS 6
static struct resource rom_resources[MAXROMS] = {
	{ "System ROM", 0xF0000, 0xFFFFF, IORESOURCE_BUSY },
	{ "Video ROM", 0xc0000, 0xc7fff, IORESOURCE_BUSY }
};

#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
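/* An option ROM begins with the bytes 0x55, 0xaa - read here as the
   little-endian word 0xaa55 - and byte 2 holds the ROM length in
   512-byte units, which the checksum loop below relies on. */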

static void __init probe_roms(void)
{
	int roms = 1;
	unsigned long base;
	unsigned char *romstart;

	request_resource(&iomem_resource, rom_resources+0);

	/* Video ROM is standard at C000:0000 - C7FF:0000, check signature */
	for (base = 0xC0000; base < 0xE0000; base += 2048) {
		romstart = bus_to_virt(base);
		if (!romsignature(romstart))
			continue;
		request_resource(&iomem_resource, rom_resources + roms);
		roms++;
		break;
	}

	/* Extension roms at C800:0000 - DFFF:0000 */
	for (base = 0xC8000; base < 0xE0000; base += 2048) {
		unsigned long length;

		romstart = bus_to_virt(base);
		if (!romsignature(romstart))
			continue;
		length = romstart[2] * 512;
		if (length) {
			unsigned int i;
			unsigned char chksum;

			chksum = 0;
			for (i = 0; i < length; i++)
				chksum += romstart[i];

			/* Good checksum? */
			if (!chksum) {
				rom_resources[roms].start = base;
				rom_resources[roms].end = base + length - 1;
				rom_resources[roms].name = "Extension ROM";
				rom_resources[roms].flags = IORESOURCE_BUSY;

				request_resource(&iomem_resource, rom_resources + roms);
				roms++;
				if (roms >= MAXROMS)
					return;
			}
		}
	}

	/* Final check for motherboard extension rom at E000:0000 */
	base = 0xE0000;
	romstart = bus_to_virt(base);

	if (romsignature(romstart)) {
		rom_resources[roms].start = base;
		rom_resources[roms].end = base + 65535;
		rom_resources[roms].name = "Extension ROM";
		rom_resources[roms].flags = IORESOURCE_BUSY;

		request_resource(&iomem_resource, rom_resources + roms);
	}
}

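/*
 * Trim the e820 map so that no usable RAM region extends beyond
 * 'size' bytes; used by the "mem=" command line handling below.
 */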
static void __init limit_regions (unsigned long long size)
{
	unsigned long long current_addr = 0;
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		if (e820.map[i].type == E820_RAM) {
			current_addr = e820.map[i].addr + e820.map[i].size;
			if (current_addr >= size) {
				e820.map[i].size -= current_addr-size;
				e820.nr_map = i + 1;
				return;
			}
		}
	}
}

static void __init add_memory_region(unsigned long long start,
                                  unsigned long long size, int type)
{
	int x = e820.nr_map;

	if (x == E820MAX) {
	    printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
	    return;
	}

	e820.map[x].addr = start;
	e820.map[x].size = size;
	e820.map[x].type = type;
	e820.nr_map++;
} /* add_memory_region */

#define E820_DEBUG	1

static void __init print_memory_map(char *who)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		printk(" %s: %016Lx - %016Lx ", who,
			e820.map[i].addr,
			e820.map[i].addr + e820.map[i].size);
		switch (e820.map[i].type) {
		case E820_RAM:	printk("(usable)\n");
				break;
		case E820_RESERVED:
				printk("(reserved)\n");
				break;
		case E820_ACPI:
				printk("(ACPI data)\n");
				break;
		case E820_NVS:
				printk("(ACPI NVS)\n");
				break;
		default:	printk("type %lu\n", e820.map[i].type);
				break;
		}
	}
}

/*
 * Sanitize the BIOS e820 map.
 *
 * Some e820 responses include overlapping entries.  The following
 * replaces the original e820 map with a new one, removing overlaps.
 *
 */
static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
{
	struct change_member {
		struct e820entry *pbios; /* pointer to original bios entry */
		unsigned long long addr; /* address for this change point */
	};
	struct change_member change_point_list[2*E820MAX];
	struct change_member *change_point[2*E820MAX];
	struct e820entry *overlap_list[E820MAX];
	struct e820entry new_bios[E820MAX];
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_bios_entry;
	int old_nr, new_nr;
	int i;

	/*
		Visually we're performing the following (1,2,3,4 = memory types)...

		Sample memory map (w/overlaps):
		   ____22__________________
		   ______________________4_
		   ____1111________________
		   _44_____________________
		   11111111________________
		   ____________________33__
		   ___________44___________
		   __________33333_________
		   ______________22________
		   ___________________2222_
		   _________111111111______
		   _____________________11_
		   _________________4______

		Sanitized equivalent (no overlap):
		   1_______________________
		   _44_____________________
		   ___1____________________
		   ____22__________________
		   ______11________________
		   _________1______________
		   __________3_____________
		   ___________44___________
		   _____________33_________
		   _______________2________
		   ________________1_______
		   _________________4______
		   ___________________2____
		   ____________________33__
		   ______________________4_
	*/

	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;

	/* bail out if we find any unreasonable addresses in bios map */
	for (i=0; i<old_nr; i++)
		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i=0; i < 2*old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses) */
	chgidx = 0;
	for (i=0; i < old_nr; i++)	{
		change_point[chgidx]->addr = biosmap[i].addr;
		change_point[chgidx++]->pbios = &biosmap[i];
		change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
		change_point[chgidx++]->pbios = &biosmap[i];
	}

	/* sort change-point list by memory addresses (low -> high) */
	still_changing = 1;
	while (still_changing)	{
		still_changing = 0;
		for (i=1; i < 2*old_nr; i++)  {
			/* if <current_addr> > <last_addr>, swap */
			/* or, if current=<start_addr> & last=<end_addr>, swap */
			if ((change_point[i]->addr < change_point[i-1]->addr) ||
				((change_point[i]->addr == change_point[i-1]->addr) &&
				 (change_point[i]->addr == change_point[i]->pbios->addr) &&
				 (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
			   )
			{
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing=1;
			}
		}
	}

	/* create a new bios memory map, removing overlaps */
	overlap_entries=0;	 /* number of entries in the overlap table */
	new_bios_entry=0;	 /* index for creating new bios map entries */
	last_type = 0;		 /* start with undefined memory type */
	last_addr = 0;		 /* start with 0 as last starting address */
	/* loop through change-points, determining the effect on the new bios map */
	for (chgidx=0; chgidx < 2*old_nr; chgidx++)
	{
		/* keep track of all overlapping bios entries */
		if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
		{
			/* add map entry to overlap list (> 1 entry implies an overlap) */
			overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
		}
		else
		{
			/* remove entry from list (order independent, so swap with last) */
			for (i=0; i<overlap_entries; i++)
			{
				if (overlap_list[i] == change_point[chgidx]->pbios)
					overlap_list[i] = overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/* if there are overlapping entries, decide which "type" to use */
		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
		current_type = 0;
		for (i=0; i<overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/* continue building up new bios map based on this information */
		if (current_type != last_type)	{
			if (last_type != 0)	 {
				new_bios[new_bios_entry].size =
					change_point[chgidx]->addr - last_addr;
				/* move forward only if the new size was non-zero */
				if (new_bios[new_bios_entry].size != 0)
					if (++new_bios_entry >= E820MAX)
						break; 	/* no more space left for new bios entries */
			}
			if (current_type != 0)	{
				new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
				new_bios[new_bios_entry].type = current_type;
				last_addr=change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	new_nr = new_bios_entry;   /* retain count for new bios entries */

	/* copy new bios mapping into original location */
	memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
	*pnr_map = new_nr;

	return 0;
}

/*
 * Copy the BIOS e820 map into a safe place.
 *
 * Sanity-check it while we're at it..
 *
 * If we're lucky and live on a modern system, the setup code
 * will have given us a memory map that we can use to properly
 * set up memory.  If we aren't, we'll fake a memory map.
 *
 * We check to see that the memory map contains at least 2 elements
 * before we'll use it, because the detection code in setup.S may
 * not be perfect and most every PC known to man has two memory
 * regions: one from 0 to 640k, and one from 1mb up.  (The IBM
 * thinkpad 560x, for example, does not cooperate with the memory
 * detection code.)
 */
static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
{
	/* Only one memory region (or negative)? Ignore it */
	if (nr_map < 2)
		return -1;

	do {
		unsigned long long start = biosmap->addr;
		unsigned long long size = biosmap->size;
		unsigned long long end = start + size;
		unsigned long type = biosmap->type;

		/* Overflow in 64 bits? Ignore the memory map. */
		if (start > end)
			return -1;

		/*
		 * Some BIOSes claim RAM in the 640k - 1M region.
		 * Not right. Fix it up.
		 */
		if (type == E820_RAM) {
			if (start < 0x100000ULL && end > 0xA0000ULL) {
				if (start < 0xA0000ULL)
					add_memory_region(start, 0xA0000ULL-start, type);
				if (end <= 0x100000ULL)
					continue;
				start = 0x100000ULL;
				size = end - start;
			}
		}
		add_memory_region(start, size, type);
	} while (biosmap++,--nr_map);
	return 0;
}

/*
 * Do NOT EVER look at the BIOS memory size location.
 * It does not work on many machines.
 */
#define LOWMEMSIZE()	(0x9f000)
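/* 0x9f000 = 636K: deliberately short of 640K, leaving room for the EBDA. */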

static void __init setup_memory_region(void)
{
	char *who = "BIOS-e820";

	/*
	 * Try to copy the BIOS-supplied E820-map.
	 *
	 * Otherwise fake a memory map; one section from 0k->640k,
	 * the next section from 1mb->appropriate_mem_k
	 */
	sanitize_e820_map(E820_MAP, &E820_MAP_NR);
	if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
		unsigned long mem_size;

		/* compare results from other methods and take the greater */
		if (ALT_MEM_K < EXT_MEM_K) {
			mem_size = EXT_MEM_K;
			who = "BIOS-88";
		} else {
			mem_size = ALT_MEM_K;
			who = "BIOS-e801";
		}

		e820.nr_map = 0;
		add_memory_region(0, LOWMEMSIZE(), E820_RAM);
		add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
	}
	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
	print_memory_map(who);
} /* setup_memory_region */


static void __init parse_cmdline_early (char ** cmdline_p)
{
	char c = ' ', *to = command_line, *from = COMMAND_LINE;
	int len = 0;
	int userdef = 0;

	/* Save unparsed command line copy for /proc/cmdline */
	memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';

	for (;;) {
		if (c != ' ')
			goto nextchar;
		if (!memcmp(from, "mem=", 4)) {
			if (to != command_line)
				to--;
			if (!memcmp(from+4, "nopentium", 9)) {
				from += 9+4;
				clear_bit(X86_FEATURE_PSE, &boot_cpu_data.x86_capability);
				set_bit(X86_FEATURE_PSE, &disabled_x86_caps);
			} else if (!memcmp(from+4, "exactmap", 8)) {
				from += 8+4;
				e820.nr_map = 0;
				userdef = 1;
			} else {
				/* If the user specifies memory size, we
				 * limit the BIOS-provided memory map to
				 * that size. exactmap can be used to specify
				 * the exact map. mem=number can be used to
				 * trim the existing memory map.
				 */
				unsigned long long start_at, mem_size;

				mem_size = memparse(from+4, &from);
				if (*from == '@') {
					start_at = memparse(from+1, &from);
					add_memory_region(start_at, mem_size, E820_RAM);
				} else {
					limit_regions(mem_size);
					userdef=1;
				}
			}
		}

		/* "noht" disables HyperThreading (2 logical cpus per Xeon) */
		else if (!memcmp(from, "noht", 4)) {
			disable_x86_ht = 1;
			set_bit(X86_FEATURE_HT, disabled_x86_caps);
		}

		/* "acpismp=force" forces parsing and use of the ACPI SMP table */
		else if (!memcmp(from, "acpismp=force", 13))
			enable_acpi_smp_table = 1;

		/*
		 * highmem=size forces highmem to be exactly 'size' bytes.
		 * This works even on boxes that have no highmem otherwise.
		 * This also works to reduce highmem size on bigger boxes.
		 */
		else if (!memcmp(from, "highmem=", 8))
			highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT;
nextchar:
		c = *(from++);
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*(to++) = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
	if (userdef) {
		printk(KERN_INFO "user-defined physical RAM map:\n");
		print_memory_map("user");
	}
}

#define PFN_UP(x)	(((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((x) << PAGE_SHIFT)
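
/* For example, with 4K pages: PFN_UP(0x1001) == 2, PFN_DOWN(0x1fff) == 1,
   and PFN_PHYS(2) == 0x2000. */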

/*
 * Reserved space for vmalloc and iomap - defined in asm/page.h
 */
#define MAXMEM_PFN	PFN_DOWN(MAXMEM)
#define MAX_NONPAE_PFN	(1 << 20)
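/* 2^20 page frames of 4K each = the 4GB physical limit without PAE. */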

/*
 * Find the highest page frame number we have available
 */
static void __init find_max_pfn(void)
{
	int i;

	max_pfn = 0;
	for (i = 0; i < e820.nr_map; i++) {
		unsigned long start, end;
		/* RAM? */
		if (e820.map[i].type != E820_RAM)
			continue;
		start = PFN_UP(e820.map[i].addr);
		end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
		if (start >= end)
			continue;
		if (end > max_pfn)
			max_pfn = end;
	}
}

/*
 * Determine low and high memory ranges:
 */
static unsigned long __init find_max_low_pfn(void)
{
	unsigned long max_low_pfn;

	max_low_pfn = max_pfn;
	if (max_low_pfn > MAXMEM_PFN) {
		if (highmem_pages == -1)
			highmem_pages = max_pfn - MAXMEM_PFN;
		if (highmem_pages + MAXMEM_PFN < max_pfn)
			max_pfn = MAXMEM_PFN + highmem_pages;
		if (highmem_pages + MAXMEM_PFN > max_pfn) {
			printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages));
			highmem_pages = 0;
		}
		max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
		/* Maximum memory usable is what is directly addressable */
		printk(KERN_WARNING "Warning only %ldMB will be used.\n",
					MAXMEM>>20);
		if (max_pfn > MAX_NONPAE_PFN)
			printk(KERN_WARNING "Use a PAE enabled kernel.\n");
		else
			printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_X86_PAE
		if (max_pfn > MAX_NONPAE_PFN) {
			max_pfn = MAX_NONPAE_PFN;
			printk(KERN_WARNING "Warning only 4GB will be used.\n");
			printk(KERN_WARNING "Use a PAE enabled kernel.\n");
		}
#endif /* !CONFIG_X86_PAE */
#endif /* !CONFIG_HIGHMEM */
	} else {
		if (highmem_pages == -1)
			highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
		if (highmem_pages >= max_pfn) {
			printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
			highmem_pages = 0;
		}
		if (highmem_pages) {
			if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
				printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
				highmem_pages = 0;
			}
			max_low_pfn -= highmem_pages;
		}
#else
		if (highmem_pages)
			printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
	}

	return max_low_pfn;
}

/*
 * Register fully available low RAM pages with the bootmem allocator.
 */
static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		unsigned long curr_pfn, last_pfn, size;
		/*
		 * Reserve usable low memory
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		/*
		 * We are rounding up the start address of usable memory:
		 */
		curr_pfn = PFN_UP(e820.map[i].addr);
		if (curr_pfn >= max_low_pfn)
			continue;
		/*
		 * ... and at the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);

		if (last_pfn > max_low_pfn)
			last_pfn = max_low_pfn;

		/*
		 * .. finally, did all the rounding and playing
		 * around just make the area go away?
		 */
		if (last_pfn <= curr_pfn)
			continue;

		size = last_pfn - curr_pfn;
		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
	}
}

static unsigned long __init setup_memory(void)
{
	unsigned long bootmap_size, start_pfn, max_low_pfn;

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(&_end));

	find_max_pfn();

	max_low_pfn = find_max_low_pfn();

#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn) {
		highstart_pfn = max_low_pfn;
	}
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		pages_to_mb(highend_pfn - highstart_pfn));
#endif
	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(max_low_pfn));
	/*
	 * Initialize the boot-time allocator (with low memory only):
	 */
	bootmap_size = init_bootmem(start_pfn, max_low_pfn);

	register_bootmem_low_pages(max_low_pfn);

	/*
	 * Reserve the bootmem bitmap itself as well. We do this in two
	 * steps (first step was init_bootmem()) because this catches
	 * the (very unlikely) case of us accidentally initializing the
	 * bootmem allocator with an invalid RAM area.
	 */
	reserve_bootmem(HIGH_MEMORY, (PFN_PHYS(start_pfn) +
			 bootmap_size + PAGE_SIZE-1) - (HIGH_MEMORY));

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem(0, PAGE_SIZE);

#ifdef CONFIG_SMP
	reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
			reserve_bootmem(INITRD_START, INITRD_SIZE);
			initrd_start =
				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
			initrd_end = initrd_start+INITRD_SIZE;
		}
		else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			    INITRD_START + INITRD_SIZE,
			    max_low_pfn << PAGE_SHIFT);
			initrd_start = 0;
		}
	}
#endif

	return max_low_pfn;
}

/*
 * Request address space for all standard RAM and ROM resources
 * and also for regions reported as reserved by the e820.
 */
static void __init register_memory(unsigned long max_low_pfn)
{
	unsigned long low_mem_size;
	int i;

	probe_roms();
	for (i = 0; i < e820.nr_map; i++) {
		struct resource *res;
		if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
			continue;
		res = alloc_bootmem_low(sizeof(struct resource));
		switch (e820.map[i].type) {
		case E820_RAM:	res->name = "System RAM"; break;
		case E820_ACPI:	res->name = "ACPI Tables"; break;
		case E820_NVS:	res->name = "ACPI Non-volatile Storage"; break;
		default:	res->name = "reserved";
		}
		res->start = e820.map[i].addr;
		res->end = res->start + e820.map[i].size - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
		if (e820.map[i].type == E820_RAM) {
			/*
			 *  We don't know which RAM region contains kernel data,
			 *  so we try it repeatedly and let the resource manager
			 *  test it.
			 */
			request_resource(res, &code_resource);
			request_resource(res, &data_resource);
		}
	}
	request_resource(&iomem_resource, &vram_resource);

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
		request_resource(&ioport_resource, standard_io_resources+i);

	/* Tell the PCI layer not to allocate too close to the RAM area.. */
	low_mem_size = ((max_low_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
	if (low_mem_size > pci_mem_start)
		pci_mem_start = low_mem_size;
}

void __init setup_arch(char **cmdline_p)
{
	unsigned long max_low_pfn;

#ifdef CONFIG_VISWS
	visws_get_board_type_and_rev();
#endif

#ifndef CONFIG_HIGHIO
	blk_nohighio = 1;
#endif

	ROOT_DEV = to_kdev_t(ORIG_ROOT_DEV);
	drive_info = DRIVE_INFO;
	screen_info = SCREEN_INFO;
	apm_info.bios = APM_BIOS_INFO;
	if( SYS_DESC_TABLE.length != 0 ) {
		MCA_bus = SYS_DESC_TABLE.table[3] & 0x2;
		machine_id = SYS_DESC_TABLE.table[0];
		machine_submodel_id = SYS_DESC_TABLE.table[1];
		BIOS_revision = SYS_DESC_TABLE.table[2];
	}
	aux_device_present = AUX_DEVICE_INFO;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
	setup_memory_region();

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_bus(&_text);
	code_resource.end = virt_to_bus(&_etext)-1;
	data_resource.start = virt_to_bus(&_etext);
	data_resource.end = virt_to_bus(&_edata)-1;

	parse_cmdline_early(cmdline_p);

	max_low_pfn = setup_memory();

	/*
	 * If enable_acpi_smp_table and HT feature present, acpitable.c
	 * will find all logical cpus despite disable_x86_ht: so if both
	 * "noht" and "acpismp=force" are specified, let "noht" override
	 * "acpismp=force" cleanly.  Why retain "acpismp=force"? because
	 * parsing ACPI SMP table might prove useful on some non-HT cpu.
	 */
	if (disable_x86_ht) {
		clear_bit(X86_FEATURE_HT, &boot_cpu_data.x86_capability[0]);
		set_bit(X86_FEATURE_HT, disabled_x86_caps);
		enable_acpi_smp_table = 0;
	}
	if (test_bit(X86_FEATURE_HT, &boot_cpu_data.x86_capability[0]))
		enable_acpi_smp_table = 1;


	/*
	 * NOTE: before this point _nobody_ is allowed to allocate
	 * any memory using the bootmem allocator.
	 */

#ifdef CONFIG_SMP
	smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
#endif
	paging_init();
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
#endif

	register_memory(max_low_pfn);

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	dmi_scan_machine();
}

static int cachesize_override __initdata = -1;
static int __init cachesize_setup(char *str)
{
	get_option (&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);


#ifndef CONFIG_X86_TSC
static int tsc_disable __initdata = 0;

static int __init notsc_setup(char *str)
{
	tsc_disable = 1;
	return 1;
}
#else
static int __init notsc_setup(char *str)
{
	printk("notsc: Kernel compiled with CONFIG_X86_TSC, cannot disable TSC.\n");
	return 1;
}
#endif
__setup("notsc", notsc_setup);

static int __init highio_setup(char *str)
{
	printk("i386: disabling HIGHMEM block I/O\n");
	blk_nohighio = 1;
	return 1;
}
__setup("nohighio", highio_setup);

static int __init get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (cpuid_eax(0x80000000) < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while ( *p == ' ' )
	     p++;
	if ( p != q ) {
	     while ( *p )
		  *q++ = *p++;
	     while ( q <= &c->x86_model_id[48] )
		  *q++ = '\0';	/* Zero-pad the rest */
	}

	return 1;
}


static void __init display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ecx, edx, l2size;

	n = cpuid_eax(0x80000000);

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size=(ecx>>24)+(edx>>24);
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	ecx = cpuid_ecx(0x80000006);
	l2size = ecx >> 16;

	/* AMD errata T13 (order #21922) */
	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
		if (c->x86_model == 3 && c->x86_mask == 0)	/* Duron Rev A0 */
			l2size = 64;
		if (c->x86_model == 4 &&
			(c->x86_mask==0 || c->x86_mask==1))	/* Tbird rev A1/A2 */
			l2size = 256;
	}

	/* Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86_vendor == X86_VENDOR_INTEL) && (c->x86 == 6) &&
		(c->x86_model == 11) && (l2size == 0))
		l2size = 256;

	/* VIA C3 CPUs (670-68F) need further shifting. */
	if (c->x86_vendor == X86_VENDOR_CENTAUR && (c->x86 == 6) &&
		((c->x86_model == 7) || (c->x86_model == 8))) {
		l2size = l2size >> 8;
	}

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if ( l2size == 0 )
		return;		/* Again, no L2 cache is possible */

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
	       l2size, ecx & 0xFF);
}

/*
 *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 *	misexecution of code under Linux. Owners of such processors should
 *	contact AMD for precise details and a CPU swap.
 *
 *	See	http://www.multimania.com/poulot/k6bug.html
 *		http://www.amd.com/K6/k6docs/revgd.html
 *
 *	The following test is erm.. interesting. AMD neglected to bump
 *	the chip stepping when fixing the bug but they also tweaked some
 *	performance at the same time..
 */

extern void vide(void);
__asm__(".align 4\nvide: ret");
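
/* vide() is just a "ret"; init_amd() below times a loop of indirect
   calls to it with rdtscl() to distinguish fixed (post-B9730xxxx)
   parts, which run indirect calls noticeably faster. */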

static int __init init_amd(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int mbytes = max_mapnr >> (20-PAGE_SHIFT);
	int r;


	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	r = get_model_name(c);

	switch(c->x86)
	{
		case 5:
			if( c->x86_model < 6 )
			{
				/* Based on AMD doc 20734R - June 2000 */
				if ( c->x86_model == 0 ) {
					clear_bit(X86_FEATURE_APIC, &c->x86_capability);
					set_bit(X86_FEATURE_PGE, &c->x86_capability);
				}
				break;
			}

			if ( c->x86_model == 6 && c->x86_mask == 1 ) {
				const int K6_BUG_LOOP = 1000000;
				int n;
				void (*f_vide)(void);
				unsigned long d, d2;

				printk(KERN_INFO "AMD K6 stepping B detected - ");

				/*
				 * It looks like AMD fixed the 2.6.2 bug and improved indirect
				 * calls at the same time.
				 */

				n = K6_BUG_LOOP;
				f_vide = vide;
				rdtscl(d);
				while (n--)
					f_vide();
				rdtscl(d2);
				d = d2-d;

				/* Knock these two lines out if it debugs out ok */
				printk(KERN_INFO "K6 BUG %ld %d (Report these if test report is incorrect)\n", d, 20*K6_BUG_LOOP);
				printk(KERN_INFO "AMD K6 stepping B detected - ");
				/* -- cut here -- */
				if (d > 20*K6_BUG_LOOP)
					printk("system stability may be impaired when more than 32 MB are used.\n");
				else
					printk("probably OK (after B9730xxxx).\n");
				printk(KERN_INFO "Please see http://www.mygale.com/~poulot/k6bug.html\n");
			}

			/* K6 with old style WHCR */
			if (c->x86_model < 8 ||
			   (c->x86_model== 8 && c->x86_mask < 8)) {
				/* We can only write allocate on the low 508Mb */
				if(mbytes>508)
					mbytes=508;

				rdmsr(MSR_K6_WHCR, l, h);
				if ((l&0x0000FFFF)==0) {
					unsigned long flags;
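					/* As programmed here, the low half of the
					   old-style WHCR holds the write-allocate
					   enable bit (bit 0) plus the limit in 4MB
					   units starting at bit 1. */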
					l=(1<<0)|((mbytes/4)<<1);
					local_irq_save(flags);
					wbinvd();
					wrmsr(MSR_K6_WHCR, l, h);
					local_irq_restore(flags);
					printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
						mbytes);
				}
				break;
			}

			if ((c->x86_model == 8 && c->x86_mask >7) ||
			     c->x86_model == 9 || c->x86_model == 13) {
				/* The more serious chips .. */

				if(mbytes>4092)
					mbytes=4092;

				rdmsr(MSR_K6_WHCR, l, h);
				if ((l&0xFFFF0000)==0) {
					unsigned long flags;
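					/* New-style WHCR as programmed here: the
					   limit in 4MB units goes in the top bits
					   (from bit 22), the enable bit is bit 16. */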
					l=((mbytes>>2)<<22)|(1<<16);
					local_irq_save(flags);
					wbinvd();
					wrmsr(MSR_K6_WHCR, l, h);
					local_irq_restore(flags);
					printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
						mbytes);
				}

				/*  Set MTRR capability flag if appropriate */
				if (c->x86_model == 13 || c->x86_model == 9 ||
				   (c->x86_model == 8 && c->x86_mask >= 8))
					set_bit(X86_FEATURE_K6_MTRR, &c->x86_capability);
				break;
			}
			break;

		case 6: /* An Athlon/Duron */

			/* Bit 15 of the Athlon-specific MSR 15 needs to be 0
			 * to enable SSE on Palomino/Morgan CPUs.
			 * If the BIOS didn't enable it already, enable it
			 * here.
			 */
			if (c->x86_model == 6 || c->x86_model == 7) {
				if (!test_bit(X86_FEATURE_XMM,
					      &c->x86_capability)) {
					printk(KERN_INFO
					       "Enabling Disabled K7/SSE Support...\n");
					rdmsr(MSR_K7_HWCR, l, h);
					l &= ~0x00008000;
					wrmsr(MSR_K7_HWCR, l, h);
					set_bit(X86_FEATURE_XMM,
						&c->x86_capability);
				}
			}
			break;

	}

	display_cacheinfo(c);
	return r;
}

/*
 * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
 */
static void __init do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
{
	unsigned char ccr2, ccr3;
	unsigned long flags;

	/* we test for DEVID by checking whether CCR3 is writable */
	local_irq_save(flags);
	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, ccr3 ^ 0x80);
	getCx86(0xc0);   /* dummy to change bus */

	if (getCx86(CX86_CCR3) == ccr3) {       /* no DEVID regs. */
		ccr2 = getCx86(CX86_CCR2);
		setCx86(CX86_CCR2, ccr2 ^ 0x04);
		getCx86(0xc0);  /* dummy */

		if (getCx86(CX86_CCR2) == ccr2) /* old Cx486SLC/DLC */
			*dir0 = 0xfd;
		else {                          /* Cx486S A step */
			setCx86(CX86_CCR2, ccr2);
			*dir0 = 0xfe;
		}
	}
	else {
		setCx86(CX86_CCR3, ccr3);  /* restore CCR3 */

		/* read DIR0 and DIR1 CPU registers */
		*dir0 = getCx86(CX86_DIR0);
		*dir1 = getCx86(CX86_DIR1);
	}
	local_irq_restore(flags);
}

/*
 * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
 * order to identify the Cyrix CPU model after we're out of setup.c
 *
 * Actually since bugs.h doesn't even reference this perhaps someone should
 * fix the documentation ???
 */
static unsigned char Cx86_dir0_msb __initdata = 0;

static char Cx86_model[][9] __initdata = {
	"Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
	"M II ", "Unknown"
};
static char Cx486_name[][5] __initdata = {
	"SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
	"SRx2", "DRx2"
};
static char Cx486S_name[][4] __initdata = {
	"S", "S2", "Se", "S2e"
};
static char Cx486D_name[][4] __initdata = {
	"DX", "DX2", "?", "?", "?", "DX4"
};
static char Cx86_cb[] __initdata = "?.5x Core/Bus Clock";
static char cyrix_model_mult1[] __initdata = "12??43";
static char cyrix_model_mult2[] __initdata = "12233445";


extern void calibrate_delay(void) __init;

static void __init check_cx686_slop(struct cpuinfo_x86 *c)
{
	unsigned long flags;

	if (Cx86_dir0_msb == 3) {
		unsigned char ccr3, ccr5;

		local_irq_save(flags);
		ccr3 = getCx86(CX86_CCR3);
		setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
		ccr5 = getCx86(CX86_CCR5);
		if (ccr5 & 2)
			setCx86(CX86_CCR5, ccr5 & 0xfd);  /* reset SLOP */
		setCx86(CX86_CCR3, ccr3);                 /* disable MAPEN */
		local_irq_restore(flags);

		if (ccr5 & 2) { /* possible wrong calibration done */
			printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
			calibrate_delay();
			c->loops_per_jiffy = loops_per_jiffy;
		}
	}
}

static void __init init_cyrix(struct cpuinfo_x86 *c)
{
	unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
	char *buf = c->x86_model_id;
	const char *p = NULL;

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	/* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */
	if ( test_bit(1*32+24, &c->x86_capability) ) {
		clear_bit(1*32+24, &c->x86_capability);
		set_bit(X86_FEATURE_CXMMX, &c->x86_capability);
	}

	do_cyrix_devid(&dir0, &dir1);

	check_cx686_slop(c);

	Cx86_dir0_msb = dir0_msn = dir0 >> 4; /* identifies CPU "family"   */
	dir0_lsn = dir0 & 0xf;                /* model or clock multiplier */

	/* common case step number/rev -- exceptions handled below */
	c->x86_model = (dir1 >> 4) + 1;
	c->x86_mask = dir1 & 0xf;

	/* Now cook; the original recipe is by Channing Corn, from Cyrix.
	 * We do the same thing for each generation: we work out
	 * the model, multiplier and stepping.  Black magic included,
	 * to make the silicon step/rev numbers match the printed ones.
	 */

	switch (dir0_msn) {
		unsigned char tmp;

	case 0: /* Cx486SLC/DLC/SRx/DRx */
		p = Cx486_name[dir0_lsn & 7];
		break;

	case 1: /* Cx486S/DX/DX2/DX4 */
		p = (dir0_lsn & 8) ? Cx486D_name[dir0_lsn & 5]
			: Cx486S_name[dir0_lsn & 3];
		break;

	case 2: /* 5x86 */
		Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
		p = Cx86_cb+2;
		break;

	case 3: /* 6x86/6x86L */
		Cx86_cb[1] = ' ';
		Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
		if (dir1 > 0x21) { /* 686L */
			Cx86_cb[0] = 'L';
			p = Cx86_cb;
			(c->x86_model)++;
		} else             /* 686 */
			p = Cx86_cb+1;
		/* Emulate MTRRs using Cyrix's ARRs. */
		set_bit(X86_FEATURE_CYRIX_ARR, &c->x86_capability);
		/* 6x86's contain this bug */
		c->coma_bug = 1;
		break;

	case 4: /* MediaGX/GXm */
#ifdef CONFIG_PCI
		/* It isn't really a PCI quirk directly, but the cure is the
		   same. The MediaGX has deep magic SMM stuff that handles the
		   SB emulation. It throws away the fifo on disable_dma() which
		   is wrong and ruins the audio.

		   Bug2: VSA1 has a wrap bug so that using maximum-sized DMA
		   causes bad things. According to NatSemi VSA2 has another
		   bug to do with 'hlt'. I've not seen any boards using VSA2
		   and X doesn't seem to support it either so who cares 8).
		   VSA1 we work around however.
		*/

		printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
		isa_dma_bridge_buggy = 2;
#endif
		c->x86_cache_size=16;	/* Yep 16K integrated cache that's it */

		/* GXm supports extended cpuid levels 'ala' AMD */
		if (c->cpuid_level == 2) {
			get_model_name(c);  /* get CPU marketing name */
			/*
			 *	The 5510/5520 companion chips have a funky PIT
			 *	that breaks the TSC synchronizing, so turn it off
			 */
			if(pci_find_device(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, NULL) ||
			   pci_find_device(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, NULL))
				clear_bit(X86_FEATURE_TSC, c->x86_capability);
			return;
		}
		else {  /* MediaGX */
			Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
			p = Cx86_cb+2;
			c->x86_model = (dir1 & 0x20) ? 1 : 2;
			if(pci_find_device(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, NULL) ||
			   pci_find_device(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, NULL))
				clear_bit(X86_FEATURE_TSC, &c->x86_capability);
		}
		break;

	case 5: /* 6x86MX/M II */
		if (dir1 > 7)
		{
			dir0_msn++;  /* M II */
			/* Enable MMX extensions (App note 108) */
			setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
		}
		else
		{
			c->coma_bug = 1;      /* 6x86MX, it has the bug. */
		}
		tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
		Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
		p = Cx86_cb+tmp;
		if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
			(c->x86_model)++;
		/* Emulate MTRRs using Cyrix's ARRs. */
		set_bit(X86_FEATURE_CYRIX_ARR, &c->x86_capability);
		break;

	case 0xf:  /* Cyrix 486 without DEVID registers */
		switch (dir0_lsn) {
		case 0xd:  /* either a 486SLC or DLC w/o DEVID */
			dir0_msn = 0;
			p = Cx486_name[(c->hard_math) ? 1 : 0];
			break;

		case 0xe:  /* a 486S A step */
			dir0_msn = 0;
			p = Cx486S_name[0];
			break;
		}
		break;

	default:  /* unknown (shouldn't happen, we know everyone ;-) */
		dir0_msn = 7;
		break;
	}
	strcpy(buf, Cx86_model[dir0_msn & 7]);
	if (p) strcat(buf, p);
	return;
}

#ifdef CONFIG_X86_OOSTORE

static u32 __init power2(u32 x)
{
	u32 s=1;
	while(s<=x)
		s<<=1;
	return s>>=1;
}
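
/* power2(x) is the largest power of two <= x (0 for x == 0),
   e.g. power2(192*1024*1024) == 128*1024*1024. */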

/*
 *	Set up an actual MCR
 */

static void __init winchip_mcr_insert(int reg, u32 base, u32 size, int key)
{
	u32 lo, hi;

	hi = base & ~0xFFF;
	lo = ~(size-1);		/* Size is a power of 2 so this makes a mask */
	lo &= ~0xFFF;		/* Remove the ctrl value bits */
	lo |= key;		/* Attribute we wish to set */
	wrmsr(reg+MSR_IDT_MCR0, lo, hi);
	mtrr_centaur_report_mcr(reg, lo, hi);	/* Tell the mtrr driver */
}

/*
 *	Figure what we can cover with MCR's
 *
 *	Shortcut: We know you can't put 4Gig of RAM on a winchip
 */

static u32 __init ramtop(void)		/* 16388 */
{
	int i;
	u32 top = 0;
	u32 clip = 0xFFFFFFFFUL;

	for (i = 0; i < e820.nr_map; i++) {
		unsigned long start, end;

		if (e820.map[i].addr > 0xFFFFFFFFUL)
			continue;
		/*
		 *	Don't MCR over reserved space. Ignore the ISA hole;
		 *	we frob around that catastrophe already.
		 */

		if (e820.map[i].type == E820_RESERVED)
		{
			if(e820.map[i].addr >= 0x100000UL && e820.map[i].addr < clip)
				clip = e820.map[i].addr;
			continue;
		}
		start = e820.map[i].addr;
		end = e820.map[i].addr + e820.map[i].size;
		if (start >= end)
			continue;
		if (end > top)
			top = end;
	}
	/* Everything below 'top' should be RAM except for the ISA hole.
	   Because of the limited MCRs we want to map NV/ACPI into our
	   MCR range for gunk in RAM.

	   Clip might cause us to MCR insufficient RAM but that is an
	   acceptable failure mode and should only bite obscure boxes with
	   a VESA hole at 15Mb.

	   The second case Clip sometimes kicks in is when the EBDA is marked
	   as reserved. Again we fail safe with reasonable results.
	*/
1665
1666	if(top>clip)
1667		top=clip;
1668
1669	return top;
1670}
1671
1672/*
1673 *	Compute a set of MCR's to give maximum coverage
1674 */
1675
1676static int __init winchip_mcr_compute(int nr, int key)
1677{
1678	u32 mem = ramtop();
1679	u32 root = power2(mem);
1680	u32 base = root;
1681	u32 top = root;
1682	u32 floor = 0;
1683	int ct = 0;
1684
1685	while(ct<nr)
1686	{
1687		u32 fspace = 0;
1688
1689		/*
1690		 *	Find the largest block we will fill going upwards
1691		 */
1692
1693		u32 high = power2(mem-top);
1694
1695		/*
1696		 *	Find the largest block we will fill going downwards
1697		 */
1698
1699		u32 low = base/2;
1700
1701		/*
1702		 *	Don't fill below 1Mb going downwards as there
1703		 *	is an ISA hole in the way.
1704		 */
1705
1706		if(base <= 1024*1024)
1707			low = 0;
1708
1709		/*
1710		 *	See how much space we could cover by filling below
1711		 *	the ISA hole
1712		 */
1713
1714		if(floor == 0)
1715			fspace = 512*1024;
1716		else if(floor ==512*1024)
1717			fspace = 128*1024;
1718
1719		/* And forget ROM space */
1720
1721		/*
1722		 *	Now install the largest coverage we get
1723		 */
1724
1725		if(fspace > high && fspace > low)
1726		{
1727			winchip_mcr_insert(ct, floor, fspace, key);
1728			floor += fspace;
1729		}
1730		else if(high > low)
1731		{
1732			winchip_mcr_insert(ct, top, high, key);
1733			top += high;
1734		}
1735		else if(low > 0)
1736		{
1737			base -= low;
1738			winchip_mcr_insert(ct, base, low, key);
1739		}
1740		else break;
1741		ct++;
1742	}
1743	/*
1744	 *	We loaded ct values. We now need to set the mask. The caller
1745	 *	must do this bit.
1746	 */
1747
1748	return ct;
1749}
1750
1751static void __init winchip_create_optimal_mcr(void)
1752{
1753	int i;
1754	/*
1755	 *	Allocate up to 6 mcrs to mark as much of ram as possible
1756	 *	as write combining and weak write ordered.
1757	 *
1758	 *	To experiment with: Linux never uses stack operations for
1759	 *	mmio spaces so we could globally enable stack operation wc
1760	 *
1761	 *	Load the registers with type 31 - full write combining, all
1762	 *	writes weakly ordered.
1763	 */
1764	int used = winchip_mcr_compute(6, 31);
1765
1766	/*
1767	 *	Wipe unused MCRs
1768	 */
1769
1770	for(i=used;i<8;i++)
1771		wrmsr(MSR_IDT_MCR0+i, 0, 0);
1772}
1773
1774static void __init winchip2_create_optimal_mcr(void)
1775{
1776	u32 lo, hi;
1777	int i;
1778
1779	/*
1780	 *	Allocate up to 6 mcrs to mark as much of ram as possible
1781	 *	as write combining, weak store ordered.
1782	 *
1783	 *	Load the registers with type 25
1784	 *		8	-	weak write ordering
1785	 *		16	-	weak read ordering
1786	 *		1	-	write combining
1787	 */
1788
1789	int used = winchip_mcr_compute(6, 25);
1790
1791	/*
1792	 *	Mark the registers we are using.
1793	 */
1794
1795	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
1796	for(i=0;i<used;i++)
1797		lo|=1<<(9+i);
1798	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
1799
1800	/*
1801	 *	Wipe unused MCRs
1802	 */
1803
1804	for(i=used;i<8;i++)
1805		wrmsr(MSR_IDT_MCR0+i, 0, 0);
1806}
1807
1808/*
1809 *	Handle the MCR key on the Winchip 2.
1810 */
1811
1812static void __init winchip2_unprotect_mcr(void)
1813{
1814	u32 lo, hi;
1815	u32 key;
1816
1817	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
1818	lo&=~0x1C0;	/* blank bits 8-6 */
1819	key = (lo>>17) & 7;
1820	lo |= key<<6;	/* replace with unlock key */
1821	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
1822}
1823
1824static void __init winchip2_protect_mcr(void)
1825{
1826	u32 lo, hi;
1827
1828	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
1829	lo&=~0x1C0;	/* blank bits 8-6 */
1830	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
1831}
1832
1833#endif
1834
1835static void __init init_centaur(struct cpuinfo_x86 *c)
1836{
1837	enum {
1838		ECX8=1<<1,
1839		EIERRINT=1<<2,
1840		DPM=1<<3,
1841		DMCE=1<<4,
1842		DSTPCLK=1<<5,
1843		ELINEAR=1<<6,
1844		DSMC=1<<7,
1845		DTLOCK=1<<8,
1846		EDCTLB=1<<8,
1847		EMMX=1<<9,
1848		DPDC=1<<11,
1849		EBRPRED=1<<12,
1850		DIC=1<<13,
1851		DDC=1<<14,
1852		DNA=1<<15,
1853		ERETSTK=1<<16,
1854		E2MMX=1<<19,
1855		EAMD3D=1<<20,
1856	};
1857
1858	char *name;
1859	u32  fcr_set=0;
1860	u32  fcr_clr=0;
1861	u32  lo,hi,newlo;
1862	u32  aa,bb,cc,dd;

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	switch (c->x86) {

		case 5:
			switch(c->x86_model) {
			case 4:
				name="C6";
				fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
				fcr_clr=DPDC;
				printk(KERN_NOTICE "Disabling bugged TSC.\n");
				clear_bit(X86_FEATURE_TSC, &c->x86_capability);
#ifdef CONFIG_X86_OOSTORE
				winchip_create_optimal_mcr();
				/* Enable
					write combining on non-stack, non-string
					write combining on string, all types
					weak write ordering

				   The C6 original lacks weak read order

				   Note 0x120 is write only on Winchip 1 */

				wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
#endif
				break;
			case 8:
				switch(c->x86_mask) {
				default:
					name="2";
					break;
				case 7 ... 9:
					name="2A";
					break;
				case 10 ... 15:
					name="2B";
					break;
				}
				fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
				fcr_clr=DPDC;
#ifdef CONFIG_X86_OOSTORE
				winchip2_unprotect_mcr();
				winchip2_create_optimal_mcr();
				rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
				/* Enable
					write combining on non-stack, non-string
					write combining on string, all types
					weak write ordering
				*/
				lo|=31;
				wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
				winchip2_protect_mcr();
#endif
				break;
			case 9:
				name="3";
				fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
				fcr_clr=DPDC;
#ifdef CONFIG_X86_OOSTORE
				winchip2_unprotect_mcr();
				winchip2_create_optimal_mcr();
				rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
				/* Enable
					write combining on non-stack, non-string
					write combining on string, all types
					weak write ordering
				*/
				lo|=31;
				wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
				winchip2_protect_mcr();
#endif
				break;
			case 10:
				name="4";
				/* no info on the WC4 yet */
				break;
			default:
				name="??";
			}

			/* Apply the per-model FCR changes collected above */
			rdmsr(MSR_IDT_FCR1, lo, hi);
			newlo=(lo|fcr_set) & (~fcr_clr);

			if (newlo!=lo) {
				printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", lo, newlo );
				wrmsr(MSR_IDT_FCR1, newlo, hi );
			} else {
				printk(KERN_INFO "Centaur FCR is 0x%X\n",lo);
			}
			/* Emulate MTRRs using Centaur's MCR. */
			set_bit(X86_FEATURE_CENTAUR_MCR, &c->x86_capability);
			/* Report CX8 */
			set_bit(X86_FEATURE_CX8, &c->x86_capability);
			/* Set 3DNow! on Winchip 2 and above. */
			if (c->x86_model >=8)
				set_bit(X86_FEATURE_3DNOW, &c->x86_capability);
			/* See if we can find out some more. */
			if ( cpuid_eax(0x80000000) >= 0x80000005 ) {
				/* Yes, we can. */
				cpuid(0x80000005,&aa,&bb,&cc,&dd);
				/* Add L1 data and code cache sizes. */
				c->x86_cache_size = (cc>>24)+(dd>>24);
			}
			sprintf( c->x86_model_id, "WinChip %s", name );
			break;

		case 6:
			switch (c->x86_model) {
				case 6 ... 8:		/* Cyrix III family */
					rdmsr (MSR_VIA_FCR, lo, hi);
					lo |= (1<<1 | 1<<7);	/* Report CX8 & enable PGE */
					wrmsr (MSR_VIA_FCR, lo, hi);

					set_bit(X86_FEATURE_CX8, &c->x86_capability);
					set_bit(X86_FEATURE_3DNOW, &c->x86_capability);

					get_model_name(c);
					display_cacheinfo(c);
					break;
			}
			break;
	}
}
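
#if 0
/*
 * Illustration only, not part of the build: the FCR update in
 * init_centaur() above is a plain read-modify-write,
 *
 *	newlo = (lo | fcr_set) & ~fcr_clr;
 *
 * e.g. with lo = 0x0002, fcr_set = ECX8|EMMX (0x0202) and
 * fcr_clr = DPDC (0x0800), newlo comes out as 0x0202.
 */
#endif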


static void __init init_transmeta(struct cpuinfo_x86 *c)
{
	unsigned int cap_mask, uk, max, dummy;
	unsigned int cms_rev1, cms_rev2;
	unsigned int cpu_rev, cpu_freq, cpu_flags;
	char cpu_info[65];

	get_model_name(c);	/* Same as AMD/Cyrix */
	display_cacheinfo(c);

	/* Print CMS and CPU revision */
	max = cpuid_eax(0x80860000);
	if ( max >= 0x80860001 ) {
		cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags);
		printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
		       (cpu_rev >> 24) & 0xff,
		       (cpu_rev >> 16) & 0xff,
		       (cpu_rev >> 8) & 0xff,
		       cpu_rev & 0xff,
		       cpu_freq);
	}
	if ( max >= 0x80860002 ) {
		cpuid(0x80860002, &dummy, &cms_rev1, &cms_rev2, &dummy);
		printk(KERN_INFO "CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
		       (cms_rev1 >> 24) & 0xff,
		       (cms_rev1 >> 16) & 0xff,
		       (cms_rev1 >> 8) & 0xff,
		       cms_rev1 & 0xff,
		       cms_rev2);
	}
	if ( max >= 0x80860006 ) {
		cpuid(0x80860003,
		      (void *)&cpu_info[0],
		      (void *)&cpu_info[4],
		      (void *)&cpu_info[8],
		      (void *)&cpu_info[12]);
		cpuid(0x80860004,
		      (void *)&cpu_info[16],
		      (void *)&cpu_info[20],
		      (void *)&cpu_info[24],
		      (void *)&cpu_info[28]);
		cpuid(0x80860005,
		      (void *)&cpu_info[32],
		      (void *)&cpu_info[36],
		      (void *)&cpu_info[40],
		      (void *)&cpu_info[44]);
		cpuid(0x80860006,
		      (void *)&cpu_info[48],
		      (void *)&cpu_info[52],
		      (void *)&cpu_info[56],
		      (void *)&cpu_info[60]);
		cpu_info[64] = '\0';
		printk(KERN_INFO "CPU: %s\n", cpu_info);
	}

	/* Unhide possibly hidden capability flags */
	rdmsr(0x80860004, cap_mask, uk);
	wrmsr(0x80860004, ~0, uk);
	c->x86_capability[0] = cpuid_edx(0x00000001);
	wrmsr(0x80860004, cap_mask, uk);

	/* If we can run i686 user-space code, call us an i686 */
	/* X86_FEATURE_* are bit numbers, so build masks from them */
#define USER686 ((1 << X86_FEATURE_TSC) | \
		 (1 << X86_FEATURE_CX8) | \
		 (1 << X86_FEATURE_CMOV))
	if ( c->x86 == 5 && (c->x86_capability[0] & USER686) == USER686 )
	     c->x86 = 6;
}
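
#if 0
/*
 * Illustration only, not part of the build: with USER686 above, a CPU
 * whose CPUID level 1 EDX advertises TSC (bit 4), CX8 (bit 8) and CMOV
 * (bit 15) passes the i686 test:
 *
 *	u32 edx = (1 << 4) | (1 << 8) | (1 << 15);
 *	(edx & USER686) == USER686	-> true
 */
#endif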


static void __init init_rise(struct cpuinfo_x86 *c)
{
	printk(KERN_INFO "CPU: Rise iDragon");
	if (c->x86_model > 2)
		printk(" II");
	printk("\n");

	/* Unhide possibly hidden capability flags.
	   The mp6 iDragon family doesn't have MSRs;
	   we switch on extra features with this cpuid weirdness: */
	__asm__ (
		"movl $0x6363452a, %%eax\n\t"
		"movl $0x3231206c, %%ecx\n\t"
		"movl $0x2a32313a, %%edx\n\t"
		"cpuid\n\t"
		"movl $0x63634523, %%eax\n\t"
		"movl $0x32315f6c, %%ecx\n\t"
		"movl $0x2333313a, %%edx\n\t"
		"cpuid\n\t" : : : "eax", "ebx", "ecx", "edx"
	);
	set_bit(X86_FEATURE_CX8, &c->x86_capability);
}


extern void trap_init_f00f_bug(void);

#define LVL_1_INST      1
#define LVL_1_DATA      2
#define LVL_2           3
#define LVL_3           4

struct _cache_table
{
	unsigned char descriptor;
	char cache_type;
	short size;
};

/* all the cache descriptor types we care about (no TLB or trace cache entries) */
static struct _cache_table cache_table[] __initdata =
{
	{ 0x06, LVL_1_INST, 8 },
	{ 0x08, LVL_1_INST, 16 },
	{ 0x0A, LVL_1_DATA, 8 },
	{ 0x0C, LVL_1_DATA, 16 },
	{ 0x22, LVL_3,      512 },
	{ 0x23, LVL_3,      1024 },
	{ 0x25, LVL_3,      2048 },
	{ 0x29, LVL_3,      4096 },
	{ 0x41, LVL_2,      128 },
	{ 0x42, LVL_2,      256 },
	{ 0x43, LVL_2,      512 },
	{ 0x44, LVL_2,      1024 },
	{ 0x45, LVL_2,      2048 },
	{ 0x66, LVL_1_DATA, 8 },
	{ 0x67, LVL_1_DATA, 16 },
	{ 0x68, LVL_1_DATA, 32 },
	{ 0x79, LVL_2,      128 },
	{ 0x7A, LVL_2,      256 },
	{ 0x7B, LVL_2,      512 },
	{ 0x7C, LVL_2,      1024 },
	{ 0x82, LVL_2,      256 },
	{ 0x84, LVL_2,      1024 },
	{ 0x85, LVL_2,      2048 },
	{ 0x00, 0, 0}
};

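#if 0
/*
 * Illustration only, not part of the build: how a single CPUID leaf-2
 * descriptor byte maps through cache_table[] above.  The helper name is
 * hypothetical.
 */
static int __init cache_table_lookup_example(unsigned char des)
{
	int k;

	for (k = 0; cache_table[k].descriptor != 0; k++)
		if (cache_table[k].descriptor == des)
			return cache_table[k].size;	/* size in KB */
	return 0;	/* a descriptor we don't care about */
}
/* cache_table_lookup_example(0x43) == 512: a 512K L2 */
#endif
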
static void __init init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	char *p = NULL;
#ifndef CONFIG_X86_F00F_WORKS_OK
	static int f00f_workaround_enabled = 0;

	c->f00f_bug = 0;
	if (c->x86 == 5) {
		c->f00f_bug = 1;
		if (!f00f_workaround_enabled) {
			trap_init_f00f_bug();
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif /* CONFIG_X86_F00F_WORKS_OK */

	if (c->cpuid_level > 1) {
		/* supports eax=2  call */
		int i, j, n;
		int regs[4];
		unsigned char *dp = (unsigned char *)regs;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for ( i = 0 ; i < n ; i++ ) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, the register holds no valid
			   descriptors; this applies to all four registers */
			for ( j = 0 ; j < 4 ; j++ ) {
				if ( regs[j] < 0 ) regs[j] = 0;
			}

			/* Byte 0 is level count, not a descriptor */
			for ( j = 1 ; j < 16 ; j++ ) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0)
				{
					if (cache_table[k].descriptor == des) {
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
		if ( l1i || l1d )
			printk(KERN_INFO "CPU: L1 I cache: %dK, L1 D cache: %dK\n",
			       l1i, l1d);
		if ( l2 )
			printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
		if ( l3 )
			printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

		/*
		 * This assumes the L3 cache is shared; it typically lives in
		 * the northbridge.  The L1 caches are included in the L2
		 * cache, and so should not be counted for the purpose of
		 * SMP switching weights.
		 */
		c->x86_cache_size = l2 ? l2 : (l1i+l1d);
	}

	/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it */
	if ( c->x86 == 6 && c->x86_model < 3 && c->x86_mask < 3 )
		clear_bit(X86_FEATURE_SEP, &c->x86_capability);

	/* Names for the Pentium II/Celeron processors
	   detectable only by also checking the cache size.
	   Dixon is NOT a Celeron. */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}
	}

	if ( p )
		strcpy(c->x86_model_id, p);

#ifdef CONFIG_SMP
	if (test_bit(X86_FEATURE_HT, &c->x86_capability) && !disable_x86_ht) {
		extern	int phys_proc_id[NR_CPUS];

		u32 	eax, ebx, ecx, edx;
		int 	index_lsb, index_msb, tmp;
		int	initial_apic_id;
		int 	cpu = smp_processor_id();

		cpuid(1, &eax, &ebx, &ecx, &edx);
		smp_num_siblings = (ebx & 0xff0000) >> 16;

		if (smp_num_siblings == 1) {
			printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
		} else if (smp_num_siblings > 1 ) {
			index_lsb = 0;
			index_msb = 31;
			/*
			 * At this point we only support two siblings per
			 * processor package.
			 */
#define NR_SIBLINGS	2
			if (smp_num_siblings != NR_SIBLINGS) {
				printk(KERN_WARNING "CPU: Unsupported number of siblings %d\n", smp_num_siblings);
				smp_num_siblings = 1;
				return;
			}
			tmp = smp_num_siblings;
			while ((tmp & 1) == 0) {
				tmp >>=1 ;
				index_lsb++;
			}
			tmp = smp_num_siblings;
			while ((tmp & 0x80000000 ) == 0) {
				tmp <<=1 ;
				index_msb--;
			}
			if (index_lsb != index_msb )
				index_msb++;
			initial_apic_id = (ebx >> 24) & 0xff;
			phys_proc_id[cpu] = initial_apic_id >> index_msb;

			printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
			       phys_proc_id[cpu]);
		}

	}
#endif
}
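
#if 0
/*
 * Illustration only, not part of the build: the sibling arithmetic above
 * for the supported two-sibling case.  smp_num_siblings == 2 gives
 * index_lsb = 1 and index_msb = 1 (a power of two, so index_msb is not
 * bumped), and the physical package id is the initial APIC id with the
 * sibling bit shifted off:
 *
 *	initial APIC id 0 or 1	-> phys_proc_id 0
 *	initial APIC id 2 or 3	-> phys_proc_id 1
 */
#endif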

void __init get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;

	if (!strcmp(v, "GenuineIntel"))
		c->x86_vendor = X86_VENDOR_INTEL;
	else if (!strcmp(v, "AuthenticAMD"))
		c->x86_vendor = X86_VENDOR_AMD;
	else if (!strcmp(v, "CyrixInstead"))
		c->x86_vendor = X86_VENDOR_CYRIX;
	else if (!strcmp(v, "Geode by NSC"))
		c->x86_vendor = X86_VENDOR_NSC;
	else if (!strcmp(v, "UMC UMC UMC "))
		c->x86_vendor = X86_VENDOR_UMC;
	else if (!strcmp(v, "CentaurHauls"))
		c->x86_vendor = X86_VENDOR_CENTAUR;
	else if (!strcmp(v, "NexGenDriven"))
		c->x86_vendor = X86_VENDOR_NEXGEN;
	else if (!strcmp(v, "RiseRiseRise"))
		c->x86_vendor = X86_VENDOR_RISE;
	else if (!strcmp(v, "GenuineTMx86") ||
		 !strcmp(v, "TransmetaCPU"))
		c->x86_vendor = X86_VENDOR_TRANSMETA;
	else
		c->x86_vendor = X86_VENDOR_UNKNOWN;
}

struct cpu_model_info {
	int vendor;
	int family;
	char *model_names[16];
};

/* Naming convention should be: <Name> [(<Codename>)] */
/* This table is only used if init_<vendor>() hasn't set the model name; */
/* in particular, if CPUID levels 0x80000002..4 are supported, it isn't used */
static struct cpu_model_info cpu_models[] __initdata = {
	{ X86_VENDOR_INTEL,	4,
	  { "486 DX-25/33", "486 DX-50", "486 SX", "486 DX/2", "486 SL",
	    "486 SX/2", NULL, "486 DX/2-WB", "486 DX/4", "486 DX/4-WB", NULL,
	    NULL, NULL, NULL, NULL, NULL }},
	{ X86_VENDOR_INTEL,	5,
	  { "Pentium 60/66 A-step", "Pentium 60/66", "Pentium 75 - 200",
	    "OverDrive PODP5V83", "Pentium MMX", NULL, NULL,
	    "Mobile Pentium 75 - 200", "Mobile Pentium MMX", NULL, NULL, NULL,
	    NULL, NULL, NULL, NULL }},
	{ X86_VENDOR_INTEL,	6,
	  { "Pentium Pro A-step", "Pentium Pro", NULL, "Pentium II (Klamath)",
	    NULL, "Pentium II (Deschutes)", "Mobile Pentium II",
	    "Pentium III (Katmai)", "Pentium III (Coppermine)", NULL,
	    "Pentium III (Cascades)", NULL, NULL, NULL, NULL, NULL }},
	{ X86_VENDOR_AMD,	4,
	  { NULL, NULL, NULL, "486 DX/2", NULL, NULL, NULL, "486 DX/2-WB",
	    "486 DX/4", "486 DX/4-WB", NULL, NULL, NULL, NULL, "Am5x86-WT",
	    "Am5x86-WB" }},
	{ X86_VENDOR_AMD,	5, /* Is this really necessary?? */
	  { "K5/SSA5", "K5",
	    "K5", "K5", NULL, NULL,
	    "K6", "K6", "K6-2",
	    "K6-3", NULL, NULL, NULL, NULL, NULL, NULL }},
	{ X86_VENDOR_AMD,	6, /* Is this really necessary?? */
	  { "Athlon", "Athlon",
	    "Athlon", NULL, "Athlon", NULL,
	    NULL, NULL, NULL,
	    NULL, NULL, NULL, NULL, NULL, NULL, NULL }},
	{ X86_VENDOR_UMC,	4,
	  { NULL, "U5D", "U5S", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	    NULL, NULL, NULL, NULL, NULL, NULL }},
	{ X86_VENDOR_NEXGEN,	5,
	  { "Nx586", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	    NULL, NULL, NULL, NULL, NULL, NULL, NULL }},
	{ X86_VENDOR_RISE,	5,
	  { "iDragon", NULL, "iDragon", NULL, NULL, NULL, NULL,
	    NULL, "iDragon II", "iDragon II", NULL, NULL, NULL, NULL, NULL, NULL }},
};

/* Look up CPU names by table lookup. */
static char __init *table_lookup_model(struct cpuinfo_x86 *c)
{
	struct cpu_model_info *info = cpu_models;
	int i;

	if ( c->x86_model >= 16 )
		return NULL;	/* Range check */

	for ( i = 0 ; i < sizeof(cpu_models)/sizeof(struct cpu_model_info) ; i++ ) {
		if ( info->vendor == c->x86_vendor &&
		     info->family == c->x86 ) {
			return info->model_names[c->x86_model];
		}
		info++;
	}
	return NULL;		/* Not found */
}

/*
 *	Detect a NexGen CPU running without BIOS hypercode new enough
 *	to have CPUID. (Thanks to Herbert Oppmann)
 */

static int __init deep_magic_nexgen_probe(void)
{
	int ret;

	/* The xorw sets ZF.  A NexGen preserves the flags across the DIV,
	   so the jnz falls through and we return 1; other CPUs leave ZF
	   in an undefined (in practice clear) state after DIV and
	   return 0. */
	__asm__ __volatile__ (
		"	movw	$0x5555, %%ax\n"
		"	xorw	%%dx,%%dx\n"
		"	movw	$2, %%cx\n"
		"	divw	%%cx\n"
		"	movl	$0, %%eax\n"
		"	jnz	1f\n"
		"	movl	$1, %%eax\n"
		"1:\n"
		: "=a" (ret) : : "cx", "dx" );
	return ret;
}

static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	if( test_bit(X86_FEATURE_PN, &c->x86_capability) &&
	    disable_x86_serial_nr ) {
		/* Disable processor serial number */
		unsigned long lo,hi;
		rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
		lo |= 0x200000;
		wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
		printk(KERN_NOTICE "CPU serial number disabled.\n");
		clear_bit(X86_FEATURE_PN, &c->x86_capability);

		/* Disabling the serial number may affect the cpuid level */
		c->cpuid_level = cpuid_eax(0);
	}
}


static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);

static int __init x86_fxsr_setup(char * s)
{
	set_bit(X86_FEATURE_XMM, disabled_x86_caps);
	set_bit(X86_FEATURE_FXSR, disabled_x86_caps);
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);


/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	asm("pushfl\n\t"		/* save EFLAGS */
	    "pushfl\n\t"
	    "popl %0\n\t"		/* read EFLAGS into f1 */
	    "movl %0,%1\n\t"		/* keep a copy in f2 */
	    "xorl %2,%0\n\t"		/* toggle the flag under test */
	    "pushl %0\n\t"
	    "popfl\n\t"			/* try to load the modified EFLAGS */
	    "pushfl\n\t"
	    "popl %0\n\t"		/* read back what actually stuck */
	    "popfl\n\t"			/* restore the original EFLAGS */
	    : "=&r" (f1), "=&r" (f2)
	    : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}


/* Probe for the CPUID instruction */
static int __init have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

/*
 * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
 * by the fact that they preserve the flags across the division of 5/2.
 * PII and PPro exhibit this behavior too, but they have cpuid available.
 */

/*
 * Perform the Cyrix 5/2 test. A Cyrix won't change
 * the flags, while other 486 chips will.
 */
static inline int test_cyrix_52div(void)
{
	unsigned int test;

	__asm__ __volatile__(
	     "sahf\n\t"		/* clear flags (%eax = 0x0005) */
	     "div %b2\n\t"	/* divide 5 by 2 */
	     "lahf"		/* store flags into %ah */
	     : "=a" (test)
	     : "0" (5), "q" (2)
	     : "cc");

	/* AH is 0x02 on Cyrix after the divide: the flags cleared by the
	   sahf survive the div, and EFLAGS bit 1 always reads as 1.
	   Other CPUs clobber the flags in div. */
	return (unsigned char) (test >> 8) == 0x02;
}

/* Try to detect a CPU with disabled CPUID, and if so, enable.  This routine
   may also be used to detect non-CPUID processors and fill in some of
   the information manually. */
static int __init id_and_try_enable_cpuid(struct cpuinfo_x86 *c)
{
	/* First of all, decide if this is a 486 or higher */
	/* It's a 486 if we can modify the AC flag */
	if ( flag_is_changeable_p(X86_EFLAGS_AC) )
		c->x86 = 4;
	else
		c->x86 = 3;

	/* Detect Cyrix with disabled CPUID */
	if ( c->x86 == 4 && test_cyrix_52div() ) {
		unsigned char dir0, dir1;

		strcpy(c->x86_vendor_id, "CyrixInstead");
		c->x86_vendor = X86_VENDOR_CYRIX;

		/* Actually enable cpuid on the older cyrix */

		/* Retrieve CPU revisions */

		do_cyrix_devid(&dir0, &dir1);

		dir0>>=4;

		/* Check it is an affected model */

		if (dir0 == 5 || dir0 == 3)
		{
			unsigned char ccr3, ccr4;
			unsigned long flags;
			printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
			local_irq_save(flags);
			ccr3 = getCx86(CX86_CCR3);
			setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
			ccr4 = getCx86(CX86_CCR4);
			setCx86(CX86_CCR4, ccr4 | 0x80);          /* enable cpuid  */
			setCx86(CX86_CCR3, ccr3);                 /* disable MAPEN */
			local_irq_restore(flags);
		}
	} else if ( deep_magic_nexgen_probe() ) {
		/* Detect NexGen with old hypercode */
		strcpy(c->x86_vendor_id, "NexGenDriven");
	}

	return have_cpuid_p();	/* Check whether CPUID is now enabled */
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __init identify_cpu(struct cpuinfo_x86 *c)
{
	int junk, i;
	u32 xlvl, tfms;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	if ( !have_cpuid_p() && !id_and_try_enable_cpuid(c) ) {
		/* CPU doesn't have CPUID */

		/* If there are any capabilities, they're vendor-specific */
		/* id_and_try_enable_cpuid() has set c->x86 for us. */
	} else {
		/* CPU does have CPUID */

		/* Get vendor name */
		cpuid(0x00000000, &c->cpuid_level,
		      (int *)&c->x86_vendor_id[0],
		      (int *)&c->x86_vendor_id[8],
		      (int *)&c->x86_vendor_id[4]);

		get_cpu_vendor(c);
		/* Initialize the standard set of capabilities */
		/* Note that the vendor-specific code below might override */

		/* Intel-defined flags: level 0x00000001 */
		if ( c->cpuid_level >= 0x00000001 ) {
			cpuid(0x00000001, &tfms, &junk, &junk,
			      &c->x86_capability[0]);
			/* e.g. tfms 0x0673: family 6, model 7, stepping 3 */
			c->x86 = (tfms >> 8) & 15;
			c->x86_model = (tfms >> 4) & 15;
			c->x86_mask = tfms & 15;
		} else {
			/* Have CPUID level 0 only - unheard of */
			c->x86 = 4;
		}

		/* AMD-defined flags: level 0x80000001 */
		xlvl = cpuid_eax(0x80000000);
		if ( (xlvl & 0xffff0000) == 0x80000000 ) {
			if ( xlvl >= 0x80000001 )
				c->x86_capability[1] = cpuid_edx(0x80000001);
			if ( xlvl >= 0x80000004 )
				get_model_name(c); /* Default name */
		}

		/* Transmeta-defined flags: level 0x80860001 */
		xlvl = cpuid_eax(0x80860000);
		if ( (xlvl & 0xffff0000) == 0x80860000 ) {
			if (  xlvl >= 0x80860001 )
				c->x86_capability[2] = cpuid_edx(0x80860001);
		}
	}

	/*
	 * Vendor-specific initialization.  Here we canonicalize the
	 * feature flags: if a CPU has features that CPUID doesn't
	 * report, or CPUID claims flags incorrectly, or there are
	 * other bugs, we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch ( c->x86_vendor ) {
	case X86_VENDOR_UNKNOWN:
	default:
		/* Not much we can do here... */
		/* Check if at least it has cpuid */
		if (c->cpuid_level == -1)
		{
			/* No cpuid. It must be an ancient CPU */
			if (c->x86 == 4)
				strcpy(c->x86_model_id, "486");
			else if (c->x86 == 3)
				strcpy(c->x86_model_id, "386");
		}
		break;

	case X86_VENDOR_CYRIX:
		init_cyrix(c);
		break;

	case X86_VENDOR_NSC:
		init_cyrix(c);
		break;

	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_CENTAUR:
		init_centaur(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;

	case X86_VENDOR_NEXGEN:
		c->x86_cache_size = 256; /* A few had 1 MB... */
		break;

	case X86_VENDOR_TRANSMETA:
		init_transmeta(c);
		break;

	case X86_VENDOR_RISE:
		init_rise(c);
		break;
	}

	/*
	 * The vendor-specific functions might have changed features.  Now
	 * we do "generic changes."
	 */

	/* TSC disabled? */
#ifndef CONFIG_X86_TSC
	if ( tsc_disable )
		clear_bit(X86_FEATURE_TSC, &c->x86_capability);
#endif

	/* check for caps that have been disabled earlier */
	for (i = 0; i < NCAPINTS; i++) {
		c->x86_capability[i] &= ~disabled_x86_caps[i];
	}

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/* Init Machine Check Exception if available. */
	mcheck_init(c);

	/* If the model name is still unset, do table lookup. */
	if ( !c->x86_model_id[0] ) {
		char *p;
		p = table_lookup_model(c);
		if ( p )
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86_vendor, c->x86_model);
	}

	/* Now the feature flags better reflect actual CPU features! */

	printk(KERN_DEBUG "CPU:     After generic, caps: %08x %08x %08x %08x\n",
	       c->x86_capability[0],
	       c->x86_capability[1],
	       c->x86_capability[2],
	       c->x86_capability[3]);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if ( c != &boot_cpu_data ) {
		/* AND the already accumulated flags with these */
		for ( i = 0 ; i < NCAPINTS ; i++ )
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	printk(KERN_DEBUG "CPU:             Common caps: %08x %08x %08x %08x\n",
	       boot_cpu_data.x86_capability[0],
	       boot_cpu_data.x86_capability[1],
	       boot_cpu_data.x86_capability[2],
	       boot_cpu_data.x86_capability[3]);
}

/*
 *	Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
 */

void __init dodgy_tsc(void)
{
	get_cpu_vendor(&boot_cpu_data);

	if ( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX ||
	     boot_cpu_data.x86_vendor == X86_VENDOR_NSC )
		init_cyrix(&boot_cpu_data);
}


/* These need to match <asm/processor.h> */
static char *cpu_vendor_names[] __initdata = {
	"Intel", "Cyrix", "AMD", "UMC", "NexGen",
	"Centaur", "Rise", "Transmeta", "NSC"
};


void __init print_cpu_info(struct cpuinfo_x86 *c)
{
	char *vendor = NULL;

	if (c->x86_vendor < sizeof(cpu_vendor_names)/sizeof(char *))
		vendor = cpu_vendor_names[c->x86_vendor];
	else if (c->cpuid_level >= 0)
		vendor = c->x86_vendor_id;

	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
		printk("%s ", vendor);

	if (!c->x86_model_id[0])
		printk("%d86", c->x86);
	else
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}
2772
2773/*
2774 *	Get CPU information for use by the procfs.
2775 */
2776static int show_cpuinfo(struct seq_file *m, void *v)
2777{
2778	/*
2779	 * These flag bits must match the definitions in <asm/cpufeature.h>.
2780	 * NULL means this bit is undefined or reserved; either way it doesn't
2781	 * have meaning as far as Linux is concerned.  Note that it's important
2782	 * to realize there is a difference between this table and CPUID -- if
2783	 * applications want to get the raw CPUID data, they should access
2784	 * /dev/cpu/<cpu_nr>/cpuid instead.
2785	 */
2786	static char *x86_cap_flags[] = {
2787		/* Intel-defined */
2788	        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
2789	        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
2790	        "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
2791	        "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
2792
2793		/* AMD-defined */
2794		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2795		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
2796		NULL, NULL, NULL, NULL, NULL, NULL, "mmxext", NULL,
2797		NULL, NULL, NULL, NULL, NULL, "lm", "3dnowext", "3dnow",
2798
2799		/* Transmeta-defined */
2800		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
2801		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2802		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2803		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2804
2805		/* Other (Linux-defined) */
2806		"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", NULL, NULL, NULL, NULL,
2807		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2808		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2809		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2810	};
2811	struct cpuinfo_x86 *c = v;
2812	int i, n = c - cpu_data;
2813	int fpu_exception;
2814
2815#ifdef CONFIG_SMP
2816	if (!(cpu_online_map & (1<<n)))
2817		return 0;
2818#endif
	seq_printf(m, "processor\t: %d\n"
		"vendor_id\t: %s\n"
		"cpu family\t: %d\n"
		"model\t\t: %d\n"
		"model name\t: %s\n",
		n,
		c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		c->x86,
		c->x86_model,
		c->x86_model_id[0] ? c->x86_model_id : "unknown");

	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");

	if ( test_bit(X86_FEATURE_TSC, &c->x86_capability) ) {
		seq_printf(m, "cpu MHz\t\t: %lu.%03lu\n",
			cpu_khz / 1000, (cpu_khz % 1000));
	}

	/* Cache size */
	if (c->x86_cache_size >= 0)
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

	/* We use exception 16 if we have hardware math and we've either
	   seen it or the CPU claims it is internal */
	fpu_exception = c->hard_math && (ignore_irq13 || cpu_has_fpu);
	seq_printf(m, "fdiv_bug\t: %s\n"
			"hlt_bug\t\t: %s\n"
			"f00f_bug\t: %s\n"
			"coma_bug\t: %s\n"
			"fpu\t\t: %s\n"
			"fpu_exception\t: %s\n"
			"cpuid level\t: %d\n"
			"wp\t\t: %s\n"
			"flags\t\t:",
		     c->fdiv_bug ? "yes" : "no",
		     c->hlt_works_ok ? "no" : "yes",
		     c->f00f_bug ? "yes" : "no",
		     c->coma_bug ? "yes" : "no",
		     c->hard_math ? "yes" : "no",
		     fpu_exception ? "yes" : "no",
		     c->cpuid_level,
		     c->wp_works_ok ? "yes" : "no");

	for ( i = 0 ; i < 32*NCAPINTS ; i++ )
		if ( test_bit(i, &c->x86_capability) &&
		     x86_cap_flags[i] != NULL )
			seq_printf(m, " %s", x86_cap_flags[i]);

	/* bogomips == loops_per_jiffy * HZ / 500000, to two decimals */
	seq_printf(m, "\nbogomips\t: %lu.%02lu\n\n",
		     c->loops_per_jiffy/(500000/HZ),
		     (c->loops_per_jiffy/(5000/HZ)) % 100);
	return 0;
}
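
#if 0
/*
 * Illustration only, not part of the build: the BogoMIPS arithmetic
 * above, assuming HZ = 100.  With loops_per_jiffy = 24950:
 *
 *	24950 / (500000/100)		= 24950 / 5000	= 4
 *	(24950 / (5000/100)) % 100	= 499 % 100	= 99
 *
 * so the line printed is "bogomips	: 4.99".
 */
#endif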

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
struct seq_operations cpuinfo_op = {
	start:	c_start,
	next:	c_next,
	stop:	c_stop,
	show:	show_cpuinfo,
};

unsigned long cpu_initialized __initdata = 0;

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 */
void __init cpu_init (void)
{
	int nr = smp_processor_id();
	struct tss_struct * t = &init_tss[nr];

	if (test_and_set_bit(nr, &cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", nr);
		for (;;) __sti();
	}
	printk(KERN_INFO "Initializing CPU#%d\n", nr);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
#ifndef CONFIG_X86_TSC
	if (tsc_disable && cpu_has_tsc) {
		printk(KERN_NOTICE "Disabling TSC...\n");
		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
		set_in_cr4(X86_CR4_TSD);
	}
#endif

	/* The descriptor table pointers are read, not written, by lgdt/lidt */
	__asm__ __volatile__("lgdt %0" : : "m" (gdt_descr));
	__asm__ __volatile__("lidt %0" : : "m" (idt_descr));

	/*
	 * Delete NT (the nested-task flag, bit 14 of EFLAGS)
	 */
	__asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");

	/*
	 * set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if(current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current, nr);

	t->esp0 = current->thread.esp0;
	set_tss_desc(nr,t);
	gdt_table[__TSS(nr)].b &= 0xfffffdff;	/* clear the TSS busy bit */
	load_TR(nr);
	load_LDT(&init_mm);

	/*
	 * Clear all 6 debug registers:
	 */

#define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) );

	CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);

#undef CD

	/*
	 * Force FPU initialization:
	 */
	current->flags &= ~PF_USEDFPU;
	current->used_math = 0;
	stts();
}

/*
 *	Early probe support logic for ppro memory erratum #50
 *
 *	This is called before we do cpu ident work
 */

int __init ppro_with_ram_bug(void)
{
	char vendor_id[16];
	int ident;

	/* Must have CPUID */
	if(!have_cpuid_p())
		return 0;
	if(cpuid_eax(0)<1)
		return 0;

	/* Must be Intel */
	cpuid(0, &ident,
		(int *)&vendor_id[0],
		(int *)&vendor_id[8],
		(int *)&vendor_id[4]);

	if(memcmp(vendor_id, "GenuineIntel", 12))
		return 0;

	ident = cpuid_eax(1);

	/* Family 6 */

	if(((ident>>8)&15)!=6)
		return 0;

	/* Model 1 (Pentium Pro) */

	if(((ident>>4)&15)!=1)
		return 0;

	/* Steppings earlier than 8 have the erratum */

	if((ident&15) < 8)
	{
		printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	printk(KERN_INFO "Your Pentium Pro seems ok.\n");
	return 0;
}
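
#if 0
/*
 * Illustration only, not part of the build: the CPUID signature decoding
 * above.  cpuid_eax(1) packs stepping in bits 0-3, model in bits 4-7 and
 * family in bits 8-11, so e.g. ident = 0x0617 means family 6, model 1,
 * stepping 7 -- a Pentium Pro that this routine would flag as affected.
 */
#endif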

/*
 * Local Variables:
 * mode:c
 * c-file-style:"k&r"
 * c-basic-offset:8
 * End:
 */