/*
 * IA32 helper functions
 *
 * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
 * Copyright (C) 2000 Asit K. Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 2001-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 thread context
 * 02/19/01	D. Mosberger	dropped tssd; it's not needed
 * 09/14/01	D. Mosberger	fixed memory management for gdt/tss page
 * 09/29/01	D. Mosberger	added ia32_load_segment_descriptors()
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/sched.h>

#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/uaccess.h>

#include "ia32priv.h"

extern void die_if_kernel (char *str, struct pt_regs *regs, long err);

struct exec_domain ia32_exec_domain;
struct page *ia32_shared_page[NR_CPUS];
unsigned long *ia32_boot_gdt;
unsigned long *cpu_gdt_table[NR_CPUS];
struct page *ia32_gate_page;

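/*
 * Fetch the (unscrambled) segment descriptor named by an IA-32 segment
 * selector.  Bit 2 of the selector (IA32_SEGSEL_TI) picks the LDT over
 * the GDT; the high bits index into the chosen table.  The raw IA-32
 * descriptor is kept in the architected "scrambled" layout, so
 * IA32_SEG_UNSCRAMBLE() converts it into the flat format that the ia64
 * ar.csd/ar.ssd registers expect.  A null selector or an out-of-range
 * index yields a null descriptor.
 */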
static unsigned long
load_desc (u16 selector)
{
	unsigned long *table, limit, index;

	if (!selector)
		return 0;
	if (selector & IA32_SEGSEL_TI) {
		table = (unsigned long *) IA32_LDT_OFFSET;
		limit = IA32_LDT_ENTRIES;
	} else {
		table = cpu_gdt_table[smp_processor_id()];
		limit = IA32_PAGE_SIZE / sizeof(ia32_boot_gdt[0]);
	}
	index = selector >> IA32_SEGSEL_INDEX_SHIFT;
	if (index >= limit)
		return 0;
	return IA32_SEG_UNSCRAMBLE(table[index]);
}

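/*
 * Refresh the cached segment descriptors from the selectors packed into
 * the task's pt_regs: r16 holds the DS/ES/FS/GS selectors (16 bits each,
 * low to high) and the low 32 bits of r17 hold CS and SS.  The resulting
 * descriptors land in the scratch registers and in ar.csd/ar.ssd, where
 * the ia32 execution path expects them.
 */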
void
ia32_load_segment_descriptors (struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);

	/* Setup the segment descriptors */
	regs->r24 = load_desc(regs->r16 >> 16);		/* ESD */
	regs->r27 = load_desc(regs->r16 >>  0);		/* DSD */
	regs->r28 = load_desc(regs->r16 >> 32);		/* FSD */
	regs->r29 = load_desc(regs->r16 >> 48);		/* GSD */
	regs->ar_csd = load_desc(regs->r17 >>  0);	/* CSD */
	regs->ar_ssd = load_desc(regs->r17 >> 16);	/* SSD */
}

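/*
 * Set up the child's thread-local-storage descriptor for an IA-32
 * clone() with CLONE_SETTLS: r14 in the child's register frame carries
 * the 32-bit user pointer to the ia32_user_desc to install.  The new
 * TLS array is loaded briefly so the child's segment descriptors can be
 * recomputed, then the parent's TLS entries are put back.
 */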
int
ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs)
{
	struct desc_struct *desc;
	struct ia32_user_desc info;
	int idx;

	if (copy_from_user(&info, (void __user *)(childregs->r14 & 0xffffffff), sizeof(info)))
		return -EFAULT;
	if (LDT_empty(&info))
		return -EINVAL;

	idx = info.entry_number;
	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
	desc->a = LDT_entry_a(&info);
	desc->b = LDT_entry_b(&info);

	load_TLS(&child->thread, smp_processor_id());
	ia32_load_segment_descriptors(child);
	load_TLS(&current->thread, smp_processor_id());

	return 0;
}

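/*
 * Called on the context-switch path when switching away from an ia32
 * task: snapshot the IA-32 application registers into the thread
 * structure and restore the ia64 values of the IO_BASE and TSSD kernel
 * registers that ia32_load_state() stashed earlier.
 */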
void
ia32_save_state (struct task_struct *t)
{
	t->thread.eflag = ia64_getreg(_IA64_REG_AR_EFLAG);
	t->thread.fsr   = ia64_getreg(_IA64_REG_AR_FSR);
	t->thread.fcr   = ia64_getreg(_IA64_REG_AR_FCR);
	t->thread.fir   = ia64_getreg(_IA64_REG_AR_FIR);
	t->thread.fdr   = ia64_getreg(_IA64_REG_AR_FDR);
	ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob);
	ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1);
}

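/*
 * Counterpart of ia32_save_state(), run when switching to an ia32 task:
 * reload the IA-32 application registers from the thread structure,
 * stash the current IO_BASE/TSSD kernel registers, and point them at
 * the IA-32 I/O port space and the task's TSS descriptor.
 */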
void
ia32_load_state (struct task_struct *t)
{
	unsigned long eflag, fsr, fcr, fir, fdr, tssd;
	struct pt_regs *regs = task_pt_regs(t);

	eflag = t->thread.eflag;
	fsr = t->thread.fsr;
	fcr = t->thread.fcr;
	fir = t->thread.fir;
	fdr = t->thread.fdr;
	tssd = load_desc(_TSS);					/* TSSD */

	ia64_setreg(_IA64_REG_AR_EFLAG, eflag);
	ia64_setreg(_IA64_REG_AR_FSR, fsr);
	ia64_setreg(_IA64_REG_AR_FCR, fcr);
	ia64_setreg(_IA64_REG_AR_FIR, fir);
	ia64_setreg(_IA64_REG_AR_FDR, fdr);
	current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE);
	current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD);
	ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
	ia64_set_kr(IA64_KR_TSSD, tssd);

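	/*
	 * Reinstall the TSS and LDT selectors in the upper half of r17;
	 * the CS/SS selectors in the low 32 bits are left untouched.
	 */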
	regs->r17 = (_TSS << 48) | (_LDT << 32) | (__u32) regs->r17;
	regs->r30 = load_desc(_LDT);				/* LDTD */
	load_TLS(&t->thread, smp_processor_id());
}

/*
 * Set up this CPU's IA32 GDT: allocate the per-CPU shared page and
 * copy the boot CPU's GDT into it.
 */
void
ia32_gdt_init (void)
{
	int cpu = smp_processor_id();

	ia32_shared_page[cpu] = alloc_page(GFP_KERNEL);
	if (!ia32_shared_page[cpu])
		panic("failed to allocate ia32_shared_page[%d]\n", cpu);

	cpu_gdt_table[cpu] = page_address(ia32_shared_page[cpu]);

	/* Copy from the boot cpu's GDT */
	memcpy(cpu_gdt_table[cpu], ia32_boot_gdt, PAGE_SIZE);
}

/*
 * Set up the boot CPU's IA32 GDT: install the user code and data
 * segment descriptors plus the TSS and LDT descriptors.
 */
static void
ia32_boot_gdt_init (void)
{
	unsigned long ldt_size;

	ia32_shared_page[0] = alloc_page(GFP_KERNEL);
	if (!ia32_shared_page[0])
		panic("failed to allocate ia32_shared_page[0]\n");

	ia32_boot_gdt = page_address(ia32_shared_page[0]);
	cpu_gdt_table[0] = ia32_boot_gdt;

	/* CS descriptor in IA-32 (scrambled) format */
	ia32_boot_gdt[__USER_CS >> 3]
		= IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT,
				      0xb, 1, 3, 1, 1, 1, 1);

	/* DS descriptor in IA-32 (scrambled) format */
	ia32_boot_gdt[__USER_DS >> 3]
		= IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT,
				      0x3, 1, 3, 1, 1, 1, 1);

	ldt_size = PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
	ia32_boot_gdt[TSS_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_TSS_OFFSET, 235,
						       0xb, 0, 3, 1, 1, 1, 0);
	ia32_boot_gdt[LDT_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_LDT_OFFSET, ldt_size - 1,
						       0x2, 0, 3, 1, 1, 1, 0);
}

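/*
 * Populate the gate page with the two signal-return trampolines that
 * IA-32 signal frames point their return address at.  Each stub is
 * written as a single little-endian 64-bit word; byte by byte the
 * first one decodes as
 *
 *	0x58		popl %eax
 *	0xb8 imm32	movl $__IA32_NR_sigreturn, %eax
 *	0xcd 0x80	int  $0x80
 *
 * and the second drops the popl and uses __IA32_NR_rt_sigreturn.
 */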
static void
ia32_gate_page_init(void)
{
	unsigned long *sr;

	ia32_gate_page = alloc_page(GFP_KERNEL);
	if (!ia32_gate_page)
		panic("failed to allocate ia32_gate_page\n");
	sr = page_address(ia32_gate_page);
	/* This is popl %eax ; movl $,%eax ; int $0x80 */
	*sr++ = 0xb858 | (__IA32_NR_sigreturn << 16) | (0x80cdUL << 48);

	/* This is movl $,%eax ; int $0x80 */
	*sr = 0xb8 | (__IA32_NR_rt_sigreturn << 8) | (0x80cdUL << 40);
}

void
ia32_mem_init(void)
{
	ia32_boot_gdt_init();
	ia32_gate_page_init();
}

/*
 * Handle a bad IA-32 interrupt: an ia32 task executed an "int n" with
 * a vector that is not serviced as a system call.
 */
void
ia32_bad_interrupt (unsigned long int_num, struct pt_regs *regs)
{
	siginfo_t siginfo;

	die_if_kernel("Bad IA-32 interrupt", regs, int_num);

	siginfo.si_signo = SIGTRAP;
	siginfo.si_errno = int_num;
	siginfo.si_flags = 0;
	siginfo.si_isr = 0;
	siginfo.si_addr = NULL;
	siginfo.si_imm = 0;
	siginfo.si_code = TRAP_BRKPT;
	force_sig_info(SIGTRAP, &siginfo, current);
}

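/*
 * Per-CPU IA-32 initialization: ar.cflg holds the emulated control
 * registers, with CR0 in the low 32 bits and CR4 in the high 32 bits.
 */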
void
ia32_cpu_init (void)
{
	/* initialize global ia32 state - CR0 and CR4 */
	ia64_setreg(_IA64_REG_AR_CFLAG, (((ulong) IA32_CR4 << 32) | IA32_CR0));
}

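/*
 * Register the "Linux/x86" execution domain so that PER_LINUX32
 * personalities resolve to it.  When the native page size exceeds the
 * IA-32 4KB page size, also create the slab cache for the partial_page
 * structures that track sub-page IA-32 mappings.
 */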
static int __init
ia32_init (void)
{
	ia32_exec_domain.name = "Linux/x86";
	ia32_exec_domain.handler = NULL;
	ia32_exec_domain.pers_low = PER_LINUX32;
	ia32_exec_domain.pers_high = PER_LINUX32;
	ia32_exec_domain.signal_map = default_exec_domain.signal_map;
	ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
	register_exec_domain(&ia32_exec_domain);

#if PAGE_SHIFT > IA32_PAGE_SHIFT
	{
		extern struct kmem_cache *partial_page_cachep;

		partial_page_cachep = kmem_cache_create("partial_page_cache",
						sizeof(struct partial_page),
						0, SLAB_PANIC, NULL, NULL);
	}
#endif
	return 0;
}

__initcall(ia32_init);