/*
 * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Distributed under the terms of the MIT License.
 */


#include <arch/x86/descriptors.h>

#include <string.h>

#include <new>

#include <boot/kernel_args.h>
#include <cpu.h>
#include <debug.h>
#include <smp.h>
#include <tls.h>
#include <vm/vm.h>
#include <vm/vm_priv.h>

#include <arch/int.h>
#include <arch/user_debugger.h>


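// Expands an index parameter pack recursively so that a table of N entries can
// be filled at construction time as { Function(0), Function(1), ... }. Used
// below to build the 256-entry IDT from InterruptDescriptor::Generate().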
template<typename T, T (*Function)(unsigned), unsigned N, unsigned ...Index>
struct GenerateTable : GenerateTable<T, Function, N - 1, N - 1, Index...> {
};

template<typename T, T (*Function)(unsigned), unsigned ...Index>
struct GenerateTable<T, Function, 0, Index...> {
	GenerateTable()
		:
		fTable { Function(Index)... }
	{
	}

	T	fTable[sizeof...(Index)];
};

enum class DescriptorType : unsigned {
	DataWritable		= 0x2,
	CodeExecuteOnly		= 0x8,
	TSS					= 0x9,
};

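// A single 8-byte GDT entry. The bit fields mirror the x86-64 segment
// descriptor layout; the union allows access as two raw 32-bit words.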
class Descriptor {
public:
	constexpr				Descriptor();
	inline					Descriptor(uint32_t first, uint32_t second);
	constexpr				Descriptor(DescriptorType type, bool kernelOnly,
								bool compatMode = false);

protected:
	union {
		struct [[gnu::packed]] {
			uint16_t		fLimit0;
			unsigned		fBase0			:24;
			unsigned		fType			:4;
			unsigned		fSystem			:1;
			unsigned		fDPL			:2;
			unsigned		fPresent		:1;
			unsigned		fLimit1			:4;
			unsigned		fUnused			:1;
			unsigned		fLong			:1;
			unsigned		fDB				:1;
			unsigned		fGranularity	:1;
			uint8_t			fBase1;
		};

		uint32_t			fDescriptor[2];
	};
};

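// In long mode a TSS descriptor spans two consecutive GDT slots; fSecond is
// the upper 8 bytes and carries bits 32-63 of the TSS base address.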
class TSSDescriptor : public Descriptor {
public:
	inline						TSSDescriptor(uintptr_t base, size_t limit);

			const Descriptor&	GetLower() const	{ return *this; }
			const Descriptor&	GetUpper() const	{ return fSecond; }

	static	void				LoadTSS(unsigned index);

private:
			Descriptor			fSecond;
};


class UserTLSDescriptor : public Descriptor {
public:
	inline						UserTLSDescriptor(uintptr_t base, size_t limit);

			const Descriptor&	GetDescriptor() const	{ return *this; }
};

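// The GDT: kFirstTSS fixed selectors followed by three slots per CPU, two for
// that CPU's TSS descriptor and one for its user TLS segment.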
class GlobalDescriptorTable {
public:
	constexpr						GlobalDescriptorTable();

	inline	void					Load() const;

			unsigned				SetTSS(unsigned cpu,
										const TSSDescriptor& tss);
			unsigned				SetUserTLS(unsigned cpu,
										addr_t base, size_t limit);

private:
	static constexpr	unsigned	kFirstTSS = 6;
	static constexpr	unsigned	kDescriptorCount
										= kFirstTSS + SMP_MAX_CPUS * 3;

	alignas(uint64_t)	Descriptor	fTable[kDescriptorCount];
};

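// 64-bit IDT gate types: 0xe is an interrupt gate (clears IF on entry),
// 0xf is a trap gate (leaves IF unchanged).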
enum class InterruptDescriptorType : unsigned {
	Interrupt		= 14,
	Trap,
};

class [[gnu::packed]] InterruptDescriptor {
public:
	constexpr						InterruptDescriptor(uintptr_t isr,
										unsigned ist, bool kernelOnly);
	constexpr						InterruptDescriptor(uintptr_t isr);

	static		InterruptDescriptor	Generate(unsigned index);

private:
						uint16_t	fBase0;
						uint16_t	fSelector;
						unsigned	fIST		:3;
						unsigned	fReserved0	:5;
						unsigned	fType		:4;
						unsigned	fReserved1	:1;
						unsigned	fDPL		:2;
						unsigned	fPresent	:1;
						uint16_t	fBase1;
						uint32_t	fBase2;
						uint32_t	fReserved2;
};

class InterruptDescriptorTable {
public:
	inline				void		Load() const;

	static constexpr	unsigned	kDescriptorCount = 256;

private:
	typedef GenerateTable<InterruptDescriptor, InterruptDescriptor::Generate,
			kDescriptorCount> TableType;
	alignas(uint64_t)	TableType	fTable;
};

class InterruptServiceRoutine {
	alignas(16)	uint8_t	fDummy[16];
};

extern const InterruptServiceRoutine
	isr_array[InterruptDescriptorTable::kDescriptorCount];

static GlobalDescriptorTable	sGDT;
static InterruptDescriptorTable	sIDT;
static uint32 sGDTIDTConstructed = 0;

typedef void interrupt_handler_function(iframe* frame);
interrupt_handler_function*
	gInterruptHandlerTable[InterruptDescriptorTable::kDescriptorCount];


constexpr bool
is_code_segment(DescriptorType type)
{
	return type == DescriptorType::CodeExecuteOnly;
}


constexpr
Descriptor::Descriptor()
	:
	fDescriptor { 0, 0 }
{
	static_assert(sizeof(Descriptor) == sizeof(uint64_t),
		"Invalid Descriptor size.");
}


Descriptor::Descriptor(uint32_t first, uint32_t second)
	:
	fDescriptor { first, second }
{
}


constexpr
Descriptor::Descriptor(DescriptorType type, bool kernelOnly, bool compatMode)
	:
	fLimit0(-1),
	fBase0(0),
	fType(static_cast<unsigned>(type)),
	fSystem(1),
	fDPL(kernelOnly ? 0 : 3),
	fPresent(1),
	fLimit1(0xf),
	fUnused(0),
	fLong(is_code_segment(type) && !compatMode ? 1 : 0),
	fDB(is_code_segment(type) && !compatMode ? 0 : 1),
	fGranularity(1),
	fBase1(0)
{
}


TSSDescriptor::TSSDescriptor(uintptr_t base, size_t limit)
	:
	fSecond(base >> 32, 0)
{
	fLimit0 = static_cast<uint16_t>(limit);
	fBase0 = base & 0xffffff;
	fType = static_cast<unsigned>(DescriptorType::TSS);
	fPresent = 1;
	fLimit1 = (limit >> 16) & 0xf;
	fBase1 = static_cast<uint8_t>(base >> 24);
}


void
TSSDescriptor::LoadTSS(unsigned index)
{
	// ltr takes a selector: the GDT index in the upper bits, with the table
	// indicator and RPL left at 0, hence the shift by 3.
	asm volatile("ltr %w0" : : "r" (index << 3));
}


UserTLSDescriptor::UserTLSDescriptor(uintptr_t base, size_t limit)
	: Descriptor(DescriptorType::DataWritable, false)
{
	fLimit0 = static_cast<uint16_t>(limit);
	fBase0 = base & 0xffffff;
	fLimit1 = (limit >> 16) & 0xf;
	fBase1 = static_cast<uint8_t>(base >> 24);
}


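// The fixed part of the GDT: null descriptor, kernel code, kernel data,
// compatibility mode (32-bit) user code, user data and 64-bit user code.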
constexpr
GlobalDescriptorTable::GlobalDescriptorTable()
	:
	fTable {
		Descriptor(),
		Descriptor(DescriptorType::CodeExecuteOnly, true),
		Descriptor(DescriptorType::DataWritable, true),
		Descriptor(DescriptorType::CodeExecuteOnly, false, true),
		Descriptor(DescriptorType::DataWritable, false),
		Descriptor(DescriptorType::CodeExecuteOnly, false),
	}
{
	static_assert(kDescriptorCount <= 8192,
		"GDT cannot contain more than 8192 descriptors");
}


void
GlobalDescriptorTable::Load() const
{
	struct [[gnu::packed]] {
		uint16_t	fLimit;
		const void*	fAddress;
	} gdtDescriptor = {
		sizeof(fTable) - 1,
		static_cast<const void*>(fTable),
	};

	asm volatile("lgdt	%0" : : "m" (gdtDescriptor));
}


unsigned
GlobalDescriptorTable::SetTSS(unsigned cpu, const TSSDescriptor& tss)
{
	auto index = kFirstTSS + cpu * 3;
	ASSERT(index + 1 < kDescriptorCount);
	fTable[index] = tss.GetLower();
	fTable[index + 1] = tss.GetUpper();
	return index;
}


unsigned
GlobalDescriptorTable::SetUserTLS(unsigned cpu, addr_t base, size_t limit)
{
	auto index = kFirstTSS + cpu * 3 + 2;
	ASSERT(index < kDescriptorCount);
	UserTLSDescriptor desc(base, limit);
	fTable[index] = desc.GetDescriptor();
	return index;
}


constexpr
InterruptDescriptor::InterruptDescriptor(uintptr_t isr, unsigned ist,
	bool kernelOnly)
	:
	fBase0(isr),
	fSelector(KERNEL_CODE_SELECTOR),
	fIST(ist),
	fReserved0(0),
	fType(static_cast<unsigned>(InterruptDescriptorType::Interrupt)),
	fReserved1(0),
	fDPL(kernelOnly ? 0 : 3),
	fPresent(1),
	fBase1(isr >> 16),
	fBase2(isr >> 32),
	fReserved2(0)
{
	static_assert(sizeof(InterruptDescriptor) == sizeof(uint64_t) * 2,
		"Invalid InterruptDescriptor size.");
}


constexpr
InterruptDescriptor::InterruptDescriptor(uintptr_t isr)
	:
	InterruptDescriptor(isr, 0, true)
{
}


void
InterruptDescriptorTable::Load() const
{
	struct [[gnu::packed]] {
		uint16_t	fLimit;
		const void*	fAddress;
	} idtDescriptor = {
		sizeof(fTable) - 1,
		static_cast<const void*>(fTable.fTable),
	};

	asm volatile("lidt	%0" : : "m" (idtDescriptor));
}


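// Vector 3 (#BP) gets DPL 3 so that int3 can be used from userland, and
// vector 8 (#DF) runs on IST1, the dedicated double fault stack set up in
// x86_descriptors_preboot_init_percpu(). All other vectors use a kernel-only
// gate on the regular stack.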
InterruptDescriptor
InterruptDescriptor::Generate(unsigned index)
{
	return index == 3
		? InterruptDescriptor(uintptr_t(isr_array + index), 0, false)
		: (index == 8
			? InterruptDescriptor(uintptr_t(isr_array + index), 1, true)
			: InterruptDescriptor(uintptr_t(isr_array + index)));
}


//	#pragma mark - Exception handlers


static void
x86_64_general_protection_fault(iframe* frame)
{
	if (debug_debugger_running()) {
		// Handle GPFs if there is a debugger fault handler installed, for
		// non-canonical address accesses.
		cpu_ent* cpu = &gCPU[smp_get_current_cpu()];
		if (cpu->fault_handler != 0) {
			debug_set_page_fault_info(0, frame->ip, DEBUG_PAGE_FAULT_NO_INFO);
			frame->ip = cpu->fault_handler;
			frame->bp = cpu->fault_handler_stack_pointer;
			return;
		}
	}

	x86_unexpected_exception(frame);
}


static void
x86_64_stack_fault_exception(iframe* frame)
{
	// Non-canonical address accesses which reference the stack cause a stack
	// fault exception instead of a GPF. However, we can treat it like a GPF.
	x86_64_general_protection_fault(frame);
}


// #pragma mark -


void
x86_descriptors_preboot_init_percpu(kernel_args* args, int cpu)
{
	if (cpu == 0) {
		new(&sGDT) GlobalDescriptorTable;
		new(&sIDT) InterruptDescriptorTable;
	}

	smp_cpu_rendezvous(&sGDTIDTConstructed);
	sGDT.Load();

	memset(&gCPU[cpu].arch.tss, 0, sizeof(struct tss));
	gCPU[cpu].arch.tss.io_map_base = sizeof(struct tss);

	// Set up the double fault IST entry (see x86_descriptors_init()).
	struct tss* tss = &gCPU[cpu].arch.tss;
	size_t stackSize;
	tss->ist1 = (addr_t)x86_get_double_fault_stack(cpu, &stackSize);
	tss->ist1 += stackSize;

	// Set up the descriptor for this TSS.
	auto tssIndex = sGDT.SetTSS(cpu,
			TSSDescriptor(uintptr_t(&gCPU[cpu].arch.tss), sizeof(struct tss)));
	TSSDescriptor::LoadTSS(tssIndex);

	sGDT.SetUserTLS(cpu, 0, TLS_COMPAT_SIZE);

	sIDT.Load();
}


void
x86_descriptors_init(kernel_args* args)
{
	// Initialize the interrupt handler table.
	interrupt_handler_function** table = gInterruptHandlerTable;
	for (uint32 i = 0; i < ARCH_INTERRUPT_BASE; i++)
		table[i] = x86_invalid_exception;
	for (uint32 i = ARCH_INTERRUPT_BASE;
		i < InterruptDescriptorTable::kDescriptorCount; i++) {
		table[i] = x86_hardware_interrupt;
	}

	table[0]  = x86_unexpected_exception;	// Divide Error Exception (#DE)
	table[1]  = x86_handle_debug_exception;	// Debug Exception (#DB)
	table[2]  = x86_fatal_exception;		// NMI Interrupt
	table[3]  = x86_handle_breakpoint_exception;	// Breakpoint Exception (#BP)
	table[4]  = x86_unexpected_exception;	// Overflow Exception (#OF)
	table[5]  = x86_unexpected_exception;	// BOUND Range Exceeded Exception (#BR)
	table[6]  = x86_unexpected_exception;	// Invalid Opcode Exception (#UD)
	table[7]  = x86_fatal_exception;		// Device Not Available Exception (#NM)
	table[8]  = x86_fatal_exception;		// Double Fault Exception (#DF)
	table[9]  = x86_fatal_exception;		// Coprocessor Segment Overrun
	table[10] = x86_fatal_exception;		// Invalid TSS Exception (#TS)
	table[11] = x86_fatal_exception;		// Segment Not Present (#NP)
	table[12] = x86_64_stack_fault_exception;	// Stack Fault Exception (#SS)
	table[13] = x86_64_general_protection_fault;	// General Protection Exception (#GP)
	table[14] = x86_page_fault_exception;	// Page-Fault Exception (#PF)
	table[16] = x86_unexpected_exception;	// x87 FPU Floating-Point Error (#MF)
	table[17] = x86_unexpected_exception;	// Alignment Check Exception (#AC)
	table[18] = x86_fatal_exception;		// Machine-Check Exception (#MC)
	table[19] = x86_unexpected_exception;	// SIMD Floating-Point Exception (#XF)
}


unsigned
x86_64_set_user_tls_segment_base(int cpu, addr_t base)
{
	return sGDT.SetUserTLS(cpu, base, TLS_COMPAT_SIZE);
}