1/*
2 * include/asm-i386/processor.h
3 *
4 * Copyright (C) 1994 Linus Torvalds
5 */
6
7#ifndef __ASM_I386_PROCESSOR_H
8#define __ASM_I386_PROCESSOR_H
9
10#include <asm/vm86.h>
11#include <asm/math_emu.h>
12#include <asm/segment.h>
13#include <asm/page.h>
14#include <asm/types.h>
15#include <asm/sigcontext.h>
16#include <asm/cpufeature.h>
17#include <linux/cache.h>
18#include <linux/config.h>
19#include <linux/threads.h>
20
21/*
22 * Default implementation of macro that returns current
23 * instruction pointer ("program counter").
24 */
25#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
26
27/*
28 *  CPU type and hardware bug flags. Kept separately for each CPU.
29 *  Members of this structure are referenced in head.S, so think twice
30 *  before touching them. [mj]
31 */
32
struct cpuinfo_x86 {
	__u8	x86;		/* CPU family */
	__u8	x86_vendor;	/* CPU vendor (X86_VENDOR_* below) */
	__u8	x86_model;	/* model within the family */
	__u8	x86_mask;	/* stepping */
	char	wp_works_ok;	/* It doesn't on 386's */
	char	hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
	char	hard_math;	/* hardware FPU present */
	char	rfu;		/* reserved for future use (padding) */
       	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
	__u32	x86_capability[NCAPINTS];	/* feature words tested by cpu_has_* */
	char	x86_vendor_id[16];
	char	x86_model_id[64];
	int 	x86_cache_size;  /* in KB - valid for CPUS which support this
				    call  */
	int	fdiv_bug;	/* Pentium FDIV erratum present */
	int	f00f_bug;	/* F0 0F (invalid-opcode lockup) erratum present */
	int	coma_bug;	/* Cyrix 6x86 "coma" erratum present */
	unsigned long loops_per_jiffy;	/* delay-loop calibration (BogoMIPS) */
	/* per-CPU page-table quicklists and their total cached-page count */
	unsigned long *pgd_quick;
	unsigned long *pmd_quick;
	unsigned long *pte_quick;
	unsigned long pgtable_cache_sz;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
57
/* Values for cpuinfo_x86.x86_vendor */
#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
#define X86_VENDOR_UMC 3
#define X86_VENDOR_NEXGEN 4
#define X86_VENDOR_CENTAUR 5
#define X86_VENDOR_RISE 6
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC 8
#define X86_VENDOR_UNKNOWN 0xff
68
69/*
70 * capabilities of CPUs
71 */
72
73extern struct cpuinfo_x86 boot_cpu_data;
74extern struct tss_struct init_tss[NR_CPUS];
75
#ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else
/* UP: only the boot CPU exists, so alias everything to boot_cpu_data */
#define cpu_data (&boot_cpu_data)
#define current_cpu_data boot_cpu_data
#endif

/*
 * Feature tests.  These deliberately test the BOOT CPU's capability
 * words, so all CPUs are assumed to share the boot CPU's feature set.
 */
#define cpu_has_pge	(test_bit(X86_FEATURE_PGE,  boot_cpu_data.x86_capability))
#define cpu_has_pse	(test_bit(X86_FEATURE_PSE,  boot_cpu_data.x86_capability))
#define cpu_has_pae	(test_bit(X86_FEATURE_PAE,  boot_cpu_data.x86_capability))
#define cpu_has_tsc	(test_bit(X86_FEATURE_TSC,  boot_cpu_data.x86_capability))
#define cpu_has_de	(test_bit(X86_FEATURE_DE,   boot_cpu_data.x86_capability))
#define cpu_has_vme	(test_bit(X86_FEATURE_VME,  boot_cpu_data.x86_capability))
#define cpu_has_fxsr	(test_bit(X86_FEATURE_FXSR, boot_cpu_data.x86_capability))
#define cpu_has_xmm	(test_bit(X86_FEATURE_XMM,  boot_cpu_data.x86_capability))
#define cpu_has_fpu	(test_bit(X86_FEATURE_FPU,  boot_cpu_data.x86_capability))
#define cpu_has_msr     (test_bit(X86_FEATURE_MSR,  boot_cpu_data.x86_capability))
#define cpu_has_apic	(test_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability))
95
96extern char ignore_irq13;
97
98extern void identify_cpu(struct cpuinfo_x86 *);
99extern void print_cpu_info(struct cpuinfo_x86 *);
100extern void dodgy_tsc(void);
101
102/*
103 * EFLAGS bits
104 */
105#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
106#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
107#define X86_EFLAGS_AF	0x00000010 /* Auxillary carry Flag */
108#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
109#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
110#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
111#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
112#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
113#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
114#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
115#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
116#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
117#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
118#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
119#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
120#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
121#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
122
123/*
124 * Generic CPUID function
125 */
126static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
127{
128	__asm__("cpuid"
129		: "=a" (*eax),
130		  "=b" (*ebx),
131		  "=c" (*ecx),
132		  "=d" (*edx)
133		: "0" (op));
134}
135
136/*
137 * CPUID functions returning a single datum
138 */
139static inline unsigned int cpuid_eax(unsigned int op)
140{
141	unsigned int eax;
142
143	__asm__("cpuid"
144		: "=a" (eax)
145		: "0" (op)
146		: "bx", "cx", "dx");
147	return eax;
148}
/* Return the EBX word of CPUID leaf 'op'. */
static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx;

	__asm__("cpuid"
		: "=a" (eax), "=b" (ebx)
		: "0" (op)
		: "cx", "dx" );
	return ebx;
}
/* Return the ECX word of CPUID leaf 'op'. */
static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ecx;

	__asm__("cpuid"
		: "=a" (eax), "=c" (ecx)
		: "0" (op)
		: "bx", "dx" );
	return ecx;
}
/* Return the EDX word of CPUID leaf 'op'. */
static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, edx;

	__asm__("cpuid"
		: "=a" (eax), "=d" (edx)
		: "0" (op)
		: "bx", "cx");
	return edx;
}
179
180/*
181 * Intel CPU features in CR4
182 */
183#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
184#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
185#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
186#define X86_CR4_DE		0x0008	/* enable debugging extensions */
187#define X86_CR4_PSE		0x0010	/* enable page size extensions */
188#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
189#define X86_CR4_MCE		0x0040	/* Machine check enable */
190#define X86_CR4_PGE		0x0080	/* enable global pages */
191#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
192#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
193#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */
194
/*
 * Load a new page directory into CR3.  Takes the kernel-virtual
 * address of the pgd; __pa() converts it to the physical address
 * the hardware wants.  The macro deliberately has NO trailing
 * semicolon: the old definition ended in ';', which made the usual
 * "load_cr3(pgdir);" call expand to two statements and broke
 * unbraced "if (...) load_cr3(p); else ..." constructs.
 */
#define load_cr3(pgdir) \
	asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir)))
197
198/*
199 * Save the cr4 feature set we're using (ie
200 * Pentium 4MB enable and PPro Global page
201 * enable), so that any CPU's that boot up
202 * after us can get the correct flags.
203 */
204extern unsigned long mmu_cr4_features;
205
/*
 * Set bits in CR4, recording them in mmu_cr4_features first so CPUs
 * that boot later can reproduce the same CR4 state.
 */
static inline void set_in_cr4 (unsigned long mask)
{
	mmu_cr4_features |= mask;
	__asm__("movl %%cr4,%%eax\n\t"
		"orl %0,%%eax\n\t"
		"movl %%eax,%%cr4\n"
		: : "irg" (mask)
		:"ax");
}
215
/*
 * Clear bits in CR4, removing them from mmu_cr4_features as well so
 * later-booting CPUs see the same CR4 state.
 */
static inline void clear_in_cr4 (unsigned long mask)
{
	mmu_cr4_features &= ~mask;
	__asm__("movl %%cr4,%%eax\n\t"
		"andl %0,%%eax\n\t"
		"movl %%eax,%%cr4\n"
		: : "irg" (~mask)
		:"ax");
}
225
226/*
227 *      Cyrix CPU configuration register indexes
228 */
229#define CX86_CCR0 0xc0
230#define CX86_CCR1 0xc1
231#define CX86_CCR2 0xc2
232#define CX86_CCR3 0xc3
233#define CX86_CCR4 0xe8
234#define CX86_CCR5 0xe9
235#define CX86_CCR6 0xea
236#define CX86_CCR7 0xeb
237#define CX86_DIR0 0xfe
238#define CX86_DIR1 0xff
239#define CX86_ARR_BASE 0xc4
240#define CX86_RCR_BASE 0xdc
241
242/*
243 *      Cyrix CPU indexed register access macros
244 */
245
246#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
247
248#define setCx86(reg, data) do { \
249	outb((reg), 0x22); \
250	outb((data), 0x23); \
251} while (0)
252
253/*
254 * Bus types (default is ISA, but people can check others with these..)
255 */
256#ifdef CONFIG_EISA
257extern int EISA_bus;
258#else
259#define EISA_bus (0)
260#endif
261extern int MCA_bus;
262
263/* from system description table in BIOS.  Mostly for MCA use, but
264others may find it useful. */
265extern unsigned int machine_id;
266extern unsigned int machine_submodel_id;
267extern unsigned int BIOS_revision;
268extern unsigned int mca_pentium_flag;
269
270/*
271 * User space process size: 3GB (default).
272 */
273#define TASK_SIZE	(PAGE_OFFSET)
274
275/* This decides where the kernel will search for a free chunk of vm
276 * space during mmap's.
277 */
278#define TASK_UNMAPPED_BASE	(TASK_SIZE / 3)
279
280/*
281 * Size of io_bitmap in longwords: 32 is ports 0-0x3ff.
282 */
283#define IO_BITMAP_SIZE	32
284#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
285#define INVALID_IO_BITMAP_OFFSET 0x8000
286
/* FPU state in the legacy FSAVE/FRSTOR memory layout. */
struct i387_fsave_struct {
	long	cwd;		/* control word */
	long	swd;		/* status word */
	long	twd;		/* tag word */
	long	fip;		/* FPU instruction pointer */
	long	fcs;		/* FPU instruction pointer selector */
	long	foo;		/* FPU operand pointer */
	long	fos;		/* FPU operand pointer selector */
	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	long	status;		/* software status information */
};
298
/*
 * FPU/SSE state in the FXSAVE/FXRSTOR memory layout.  The hardware
 * requires this area to be 16-byte aligned, hence the attribute.
 */
struct i387_fxsave_struct {
	unsigned short	cwd;	/* control word */
	unsigned short	swd;	/* status word */
	unsigned short	twd;	/* tag word */
	unsigned short	fop;	/* last instruction opcode */
	long	fip;		/* FPU instruction pointer */
	long	fcs;		/* FPU instruction pointer selector */
	long	foo;		/* FPU operand pointer */
	long	fos;		/* FPU operand pointer selector */
	long	mxcsr;		/* SSE control/status register */
	long	reserved;
	long	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	long	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
	long	padding[56];
} __attribute__ ((aligned (16)));
314
/* FPU state kept by the software math emulator (no hardware FPU). */
struct i387_soft_struct {
	long	cwd;		/* control word */
	long	swd;		/* status word */
	long	twd;		/* tag word */
	long	fip;		/* instruction pointer */
	long	fcs;		/* instruction pointer selector */
	long	foo;		/* operand pointer */
	long	fos;		/* operand pointer selector */
	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	/* emulator-internal bookkeeping (see asm/math_emu.h) */
	unsigned char	ftop, changed, lookahead, no_update, rm, alimit;
	struct info	*info;
	unsigned long	entry_eip;
};
328
/*
 * The three possible FPU state formats overlaid; which member is
 * valid depends on the CPU (fsave vs fxsave) or on soft emulation.
 * NOTE: INIT_THREAD initializes this positionally via the first
 * member — keep fsave first.
 */
union i387_union {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct soft;
};
334
/* Address-limit token; presumably carried by set_fs()/get_fs() — see asm/uaccess.h. */
typedef struct {
	unsigned long seg;
} mm_segment_t;
338
/*
 * Hardware task-state segment.  Field order and widths up to and
 * including 'bitmap' are mandated by the i386 architecture — do not
 * reorder or resize them.  The __*h members are the high padding
 * halves of 32-bit TSS slots that hold only 16-bit values.
 */
struct tss_struct {
	unsigned short	back_link,__blh;
	unsigned long	esp0;
	unsigned short	ss0,__ss0h;
	unsigned long	esp1;
	unsigned short	ss1,__ss1h;
	unsigned long	esp2;
	unsigned short	ss2,__ss2h;
	unsigned long	__cr3;
	unsigned long	eip;
	unsigned long	eflags;
	unsigned long	eax,ecx,edx,ebx;
	unsigned long	esp;
	unsigned long	ebp;
	unsigned long	esi;
	unsigned long	edi;
	unsigned short	es, __esh;
	unsigned short	cs, __csh;
	unsigned short	ss, __ssh;
	unsigned short	ds, __dsh;
	unsigned short	fs, __fsh;
	unsigned short	gs, __gsh;
	unsigned short	ldt, __ldth;
	unsigned short	trace, bitmap;	/* bitmap = offset of io_bitmap in the TSS */
	unsigned long	io_bitmap[IO_BITMAP_SIZE+1];
	/*
	 * pads the TSS to be cacheline-aligned (size is 0x100)
	 */
	unsigned long __cacheline_filler[5];
};
369
/*
 * Per-thread CPU state saved across context switches.
 * NOTE: INIT_THREAD below initializes this positionally — keep the
 * member order in sync with it.
 */
struct thread_struct {
	unsigned long	esp0;		/* kernel stack top, loaded into tss.esp0 */
	unsigned long	eip;
	unsigned long	esp;
	unsigned long	fs;
	unsigned long	gs;
/* Hardware debugging registers */
	unsigned long	debugreg[8];  /* %%db0-7 debug registers */
/* fault info */
	unsigned long	cr2, trap_no, error_code;
/* floating point info */
	union i387_union	i387;
/* virtual 86 mode info */
	struct vm86_struct	* vm86_info;
	unsigned long		screen_bitmap;
	unsigned long		v86flags, v86mask, saved_esp0;
/* IO permissions */
	int		ioperm;
	unsigned long	io_bitmap[IO_BITMAP_SIZE+1];
};
390
/* Positional initializer for struct thread_struct — must match member order. */
#define INIT_THREAD  {						\
	0,							\
	0, 0, 0, 0, 						\
	{ [0 ... 7] = 0 },	/* debugging registers */	\
	0, 0, 0,						\
	{ { 0, }, },		/* 387 state */			\
	0,0,0,0,0,						\
	0,{~0,}			/* io permissions */		\
}
400
/* Positional initializer for struct tss_struct — must match member order. */
#define INIT_TSS  {						\
	0,0, /* back_link, __blh */				\
	sizeof(init_stack) + (long) &init_stack, /* esp0 */	\
	__KERNEL_DS, 0, /* ss0 */				\
	0,0,0,0,0,0, /* stack1, stack2 */			\
	0, /* cr3 */						\
	0,0, /* eip,eflags */					\
	0,0,0,0, /* eax,ecx,edx,ebx */				\
	0,0,0,0, /* esp,ebp,esi,edi */				\
	0,0,0,0,0,0, /* es,cs,ss */				\
	0,0,0,0,0,0, /* ds,fs,gs */				\
	__LDT(0),0, /* ldt */					\
	0, INVALID_IO_BITMAP_OFFSET, /* trace, bitmap */	\
	{~0, } /* io_bitmap */					\
}
416
/*
 * Set up user-mode register state for exec: clear fs/gs, select the
 * flat user segments, and point eip/esp at the new program.
 */
#define start_thread(regs, new_eip, new_esp) do {		\
	__asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0));	\
	set_fs(USER_DS);					\
	regs->xds = __USER_DS;					\
	regs->xes = __USER_DS;					\
	regs->xss = __USER_DS;					\
	regs->xcs = __USER_CS;					\
	regs->eip = new_eip;					\
	regs->esp = new_esp;					\
} while (0)
427
428/* Forward declaration, a strange C thing */
429struct task_struct;
430struct mm_struct;
431
432/* Free all resources held by a thread. */
433extern void release_thread(struct task_struct *);
434/*
435 * create a kernel thread without removing it from tasklists
436 */
437extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
438
439/* Copy and release all segment info associated with a VM */
440extern void copy_segments(struct task_struct *p, struct mm_struct * mm);
441extern void release_segments(struct mm_struct * mm);
442
443/*
444 * Return saved PC of a blocked thread.
445 */
446static inline unsigned long thread_saved_pc(struct thread_struct *t)
447{
448	return ((unsigned long *)t->esp)[3];
449}
450
unsigned long get_wchan(struct task_struct *p);
/*
 * Fish the user-mode eip/esp out of the 8K task stack (task_struct at
 * the base, registers saved near the top).  The indices 1019/1022
 * presumably address slots of the pt_regs pushed at kernel entry —
 * verify against entry.S before changing.
 */
#define KSTK_EIP(tsk)	(((unsigned long *)(4096+(unsigned long)(tsk)))[1019])
#define KSTK_ESP(tsk)	(((unsigned long *)(4096+(unsigned long)(tsk)))[1022])

/* Each task gets a two-page (8K) kernel stack, order-1 allocation. */
#define THREAD_SIZE (2*PAGE_SIZE)
#define alloc_task_struct() ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
#define free_task_struct(p) free_pages((unsigned long) (p), 1)
458#define get_task_struct(tsk)      atomic_inc(&virt_to_page(tsk)->count)
459
460#define init_task	(init_task_union.task)
461#define init_stack	(init_task_union.stack)
462
/* One microcode update block as fed to the microcode driver. */
struct microcode {
	unsigned int hdrver;	/* header format version */
	unsigned int rev;	/* update revision */
	unsigned int date;	/* creation date */
	unsigned int sig;	/* CPU signature the update applies to */
	unsigned int cksum;	/* checksum of the block */
	unsigned int ldrver;	/* required loader version */
	unsigned int pf;	/* processor flags */
	unsigned int reserved[5];
	unsigned int bits[500];	/* the encrypted update payload */
};
474
475/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
476#define MICROCODE_IOCFREE	_IO('6',0)
477
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	/* "rep;nop" encodes PAUSE on SSE2 CPUs and is a plain NOP on older ones. */
	__asm__ __volatile__("rep;nop");
}

/* Architecture hook used by generic spin/busy-wait loops. */
#define cpu_relax()	rep_nop()
485
/* Prefetch instructions for Pentium III and AMD Athlon */
#ifdef 	CONFIG_MPENTIUMIII

#define ARCH_HAS_PREFETCH
/* Non-temporal prefetch: pull the line in with minimal cache pollution. */
extern inline void prefetch(const void *x)
{
	__asm__ __volatile__ ("prefetchnta (%0)" : : "r"(x));
}

/*
 * Use defined() here to match the #ifdef above: a bare "#elif CONFIG_..."
 * breaks if the symbol is defined with an empty expansion and trips
 * -Wundef when it is absent.
 */
#elif defined(CONFIG_X86_USE_3DNOW)

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

/* 3DNow! read prefetch. */
extern inline void prefetch(const void *x)
{
	 __asm__ __volatile__ ("prefetch (%0)" : : "r"(x));
}

/* 3DNow! prefetch with intent to write. */
extern inline void prefetchw(const void *x)
{
	 __asm__ __volatile__ ("prefetchw (%0)" : : "r"(x));
}
/* Prefetch a lock cacheline for writing before taking it. */
#define spin_lock_prefetch(x)	prefetchw(x)

#endif
513
514#endif /* __ASM_I386_PROCESSOR_H */
515