/*
 * arch/ia64/kvm/vmm_ivt.S
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *      Stephane Eranian <eranian@hpl.hp.com>
 *      David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 2000, 2002-2003 Intel Co
 *      Asit Mallick <asit.k.mallick@intel.com>
 *      Suresh Siddha <suresh.b.siddha@intel.com>
 *      Kenneth Chen <kenneth.w.chen@intel.com>
 *      Fenghua Yu <fenghua.yu@intel.com>
 *
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
 *
 * 07/6/20 Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 *              Supporting Intel virtualization architecture
 *
 */

/*
 * This file defines the interruption vector table used by the CPU.
 * It does not include one entry per possible cause of interruption.
 *
 * The first 20 entries of the table contain 64 bundles each while the
 * remaining 48 entries contain only 16 bundles each.
 *
 * The 64 bundles are used to allow inlining the whole handler for
 * critical interruptions like TLB misses.
 *
 *  For each entry, the comment is as follows:
 *
 *              // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 *  entry offset ----/     /         /                  /
 *  entry number ---------/         /                  /
 *  size of the entry -------------/                  /
 *  vector name -------------------------------------/
 *  interruptions triggering this vector ----------------------/
 *
 * The table is 32KB in size and must be aligned on 32KB boundary.
 * (The CPU ignores the 15 lower bits of the address)
 *
 * Table is based upon EAS2.6 (Oct 1999)
 */


#include <asm/asmmacro.h>
#include <asm/cache.h>
#include <asm/pgtable.h>

#include "asm-offsets.h"
#include "vcpu.h"
#include "kvm_minstate.h"
#include "vti.h"

# define PSR_DEFAULT_BITS   psr.ac

#define KVM_FAULT(n)    \
	kvm_fault_##n:;          \
	mov r19=n;;          \
	br.sptk.many kvm_vmm_panic;         \
	;;                  \

#define KVM_REFLECT(n)    \
	mov r31=pr;           \
	mov r19=n;       /* prepare to save predicates */ \
	mov r29=cr.ipsr;      \
	;;      \
	tbit.z p6,p7=r29,IA64_PSR_VM_BIT;       \
(p7)	br.sptk.many kvm_dispatch_reflection;        \
	br.sptk.many kvm_vmm_panic;      \

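/*
 * Dispatch convention used by the stubs below, as read from the code:
 * cr.ipsr.vm tells us where the interruption was raised.  If psr.vm is
 * set, the fault came from guest execution and KVM_REFLECT forwards it
 * to kvm_dispatch_reflection so it can be reflected back into the
 * guest; if the VMM itself faulted, there is no sane way to recover and
 * we fall through to kvm_vmm_panic.  KVM_FAULT(n) marks vectors the
 * VMM never expects to take at all: it records the vector number in
 * r19 and panics unconditionally.
 */
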
GLOBAL_ENTRY(kvm_vmm_panic)
	KVM_SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,1,0
	mov out0=r15
	adds r3=8,r2                // set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i    // guarantee that interruption collection is on
	;;
	(p15) ssm psr.i               // restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	KVM_SAVE_REST
	mov rp=r14
	;;
	br.call.sptk.many b6=vmm_panic_handler;
END(kvm_vmm_panic)

    .section .text..ivt,"ax"

    .align 32768    // align on 32KB boundary
    .global kvm_ia64_ivt
kvm_ia64_ivt:
///////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
ENTRY(kvm_vhpt_miss)
	KVM_FAULT(0)
END(kvm_vhpt_miss)

    .org kvm_ia64_ivt+0x400
////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
ENTRY(kvm_itlb_miss)
	mov r31 = pr
	mov r29=cr.ipsr;
	;;
	tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
(p6)	br.sptk kvm_alt_itlb_miss
	mov r19 = 1
	br.sptk kvm_itlb_miss_dispatch
	KVM_FAULT(1);
END(kvm_itlb_miss)

    .org kvm_ia64_ivt+0x0800
//////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
ENTRY(kvm_dtlb_miss)
	mov r31 = pr
	mov r29=cr.ipsr;
	;;
	tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
(p6)	br.sptk kvm_alt_dtlb_miss
	br.sptk kvm_dtlb_miss_dispatch
END(kvm_dtlb_miss)
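
/*
 * Note on the two TLB-miss flavors above: a miss taken with psr.vm
 * clear happened while the VMM itself was running and is served inline
 * by the "alt" handlers below, which install an identity mapping.  A
 * miss with psr.vm set belongs to the guest and is routed through
 * kvm_{i,d}tlb_miss_dispatch to the C-level kvm_page_fault path.
 */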

    .org kvm_ia64_ivt+0x0c00
////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
ENTRY(kvm_alt_itlb_miss)
	mov r16=cr.ifa    // get address that caused the TLB miss
	;;
	movl r17=PAGE_KERNEL
	mov r24=cr.ipsr
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	;;
	and r19=r19,r16     // clear ed, reserved bits, and PTE control bits
	;;
	or r19=r17,r19      // insert PTE control bits into r19
	;;
	movl r20=IA64_GRANULE_SHIFT<<2
	;;
	mov cr.itir=r20
	;;
	itc.i r19		// insert the TLB entry
	mov pr=r31,-1
	rfi
END(kvm_alt_itlb_miss)
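
/*
 * The insertion sequence above, for reference: cr.ifa holds the
 * faulting address; masking with ((1 << IA64_MAX_PHYS_BITS) - 1) &
 * ~0xfff keeps only the physical page-frame bits, PAGE_KERNEL supplies
 * the attribute bits, cr.itir selects a granule-sized page
 * (IA64_GRANULE_SHIFT), and itc.i (itc.d below) installs the
 * translation before rfi resumes the interrupted VMM code.
 */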

    .org kvm_ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
ENTRY(kvm_alt_dtlb_miss)
	mov r16=cr.ifa		// get address that caused the TLB miss
	;;
	movl r17=PAGE_KERNEL
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r24=cr.ipsr
	;;
	and r19=r19,r16     // clear ed, reserved bits, and PTE control bits
	;;
	or r19=r19,r17	// insert PTE control bits into r19
	;;
	movl r20=IA64_GRANULE_SHIFT<<2
	;;
	mov cr.itir=r20
	;;
	itc.d r19		// insert the TLB entry
	mov pr=r31,-1
	rfi
END(kvm_alt_dtlb_miss)

    .org kvm_ia64_ivt+0x1400
//////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
ENTRY(kvm_nested_dtlb_miss)
	KVM_FAULT(5)
END(kvm_nested_dtlb_miss)

    .org kvm_ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
ENTRY(kvm_ikey_miss)
	KVM_REFLECT(6)
END(kvm_ikey_miss)

    .org kvm_ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(kvm_dkey_miss)
	KVM_REFLECT(7)
END(kvm_dkey_miss)

    .org kvm_ia64_ivt+0x2000
////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(kvm_dirty_bit)
	KVM_REFLECT(8)
END(kvm_dirty_bit)

    .org kvm_ia64_ivt+0x2400
////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(kvm_iaccess_bit)
	KVM_REFLECT(9)
END(kvm_iaccess_bit)

    .org kvm_ia64_ivt+0x2800
///////////////////////////////////////////////////////////////////
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(kvm_daccess_bit)
	KVM_REFLECT(10)
END(kvm_daccess_bit)

    .org kvm_ia64_ivt+0x2c00
/////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
ENTRY(kvm_break_fault)
	mov r31=pr
	mov r19=11
	mov r29=cr.ipsr
	;;
	KVM_SAVE_MIN_WITH_COVER_R19
	;;
	alloc r14=ar.pfs,0,0,4,0 //(must be first in insn group!)
	mov out0=cr.ifa
	mov out2=cr.isr
	mov out3=cr.iim
	adds r3=8,r2                // set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i         // guarantee that interruption collection is on
	;;
	(p15) ssm psr.i               // restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	KVM_SAVE_REST
	mov rp=r14
	;;
	adds out1=16,sp
	br.call.sptk.many b6=kvm_ia64_handle_break
	;;
END(kvm_break_fault)
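
/*
 * kvm_ia64_handle_break receives cr.ifa, a pointer to the saved
 * pt_regs (out1 = 16 bytes above the memory stack pointer), cr.isr,
 * and cr.iim.  cr.iim carries the break immediate, which is presumably
 * how the handler tells breaks to be emulated as guest services apart
 * from ones that must be reflected back into the guest.
 */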

    .org kvm_ia64_ivt+0x3000
/////////////////////////////////////////////////////////////////
// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
ENTRY(kvm_interrupt)
	mov r31=pr		// prepare to save predicates
	mov r19=12
	mov r29=cr.ipsr
	;;
	tbit.z p6,p7=r29,IA64_PSR_VM_BIT
	tbit.z p0,p15=r29,IA64_PSR_I_BIT
	;;
(p7)	br.sptk kvm_dispatch_interrupt
	;;
	mov r27=ar.rsc		/* M */
	mov r20=r1			/* A */
	mov r25=ar.unat		/* M */
	mov r26=ar.pfs		/* I */
	mov r28=cr.iip		/* M */
	cover			/* B (or nothing) */
	;;
	mov r1=sp
	;;
	invala			/* M */
	mov r30=cr.ifs
	;;
	addl r1=-VMM_PT_REGS_SIZE,r1
	;;
	adds r17=2*L1_CACHE_BYTES,r1	/* really: biggest cache-line size */
	adds r16=PT(CR_IPSR),r1
	;;
	lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
	st8 [r16]=r29			/* save cr.ipsr */
	;;
	lfetch.fault.excl.nt1 [r17]
	mov r29=b0
	;;
	adds r16=PT(R8),r1  	/* initialize first base pointer */
	adds r17=PT(R9),r1  	/* initialize second base pointer */
	mov r18=r0      		/* make sure r18 isn't NaT */
	;;
.mem.offset 0,0; st8.spill [r16]=r8,16
.mem.offset 8,0; st8.spill [r17]=r9,16
	;;
.mem.offset 0,0; st8.spill [r16]=r10,24
.mem.offset 8,0; st8.spill [r17]=r11,24
	;;
	st8 [r16]=r28,16		/* save cr.iip */
	st8 [r17]=r30,16		/* save cr.ifs */
	mov r8=ar.fpsr		/* M */
	mov r9=ar.csd
	mov r10=ar.ssd
	movl r11=FPSR_DEFAULT	/* L-unit */
	;;
	st8 [r16]=r25,16		/* save ar.unat */
	st8 [r17]=r26,16		/* save ar.pfs */
	shl r18=r18,16		/* compute ar.rsc to be used for "loadrs" */
	;;
	st8 [r16]=r27,16		/* save ar.rsc */
	adds r17=16,r17		/* skip over ar_rnat field */
	;;
	st8 [r17]=r31,16		/* save predicates */
	adds r16=16,r16		/* skip over ar_bspstore field */
	;;
	st8 [r16]=r29,16		/* save b0 */
	st8 [r17]=r18,16		/* save ar.rsc value for "loadrs" */
	;;
.mem.offset 0,0; st8.spill [r16]=r20,16    /* save original r1 */
.mem.offset 8,0; st8.spill [r17]=r12,16
	adds r12=-16,r1
	/* switch to kernel memory stack (with 16 bytes of scratch) */
	;;
.mem.offset 0,0; st8.spill [r16]=r13,16
.mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */
	;;
.mem.offset 0,0; st8.spill [r16]=r15,16
.mem.offset 8,0; st8.spill [r17]=r14,16
	dep r14=-1,r0,60,4
	;;
.mem.offset 0,0; st8.spill [r16]=r2,16
.mem.offset 8,0; st8.spill [r17]=r3,16
	adds r2=VMM_PT_REGS_R16_OFFSET,r1
	adds r14 = VMM_VCPU_GP_OFFSET,r13
	;;
	mov r8=ar.ccv
	ld8 r14 = [r14]
	;;
	mov r1=r14       /* establish kernel global pointer */
	;;
	bsw.1
	;;
	alloc r14=ar.pfs,0,0,1,0	// must be first in an insn group
	mov out0=r13
	;;
	ssm psr.ic
	;;
	srlz.i
	;;
	//(p15) ssm psr.i
	adds r3=8,r2		// set up second base pointer for SAVE_REST
	srlz.i			// ensure everybody knows psr.ic is back on
	;;
.mem.offset 0,0; st8.spill [r2]=r16,16
.mem.offset 8,0; st8.spill [r3]=r17,16
	;;
.mem.offset 0,0; st8.spill [r2]=r18,16
.mem.offset 8,0; st8.spill [r3]=r19,16
	;;
.mem.offset 0,0; st8.spill [r2]=r20,16
.mem.offset 8,0; st8.spill [r3]=r21,16
	mov r18=b6
	;;
.mem.offset 0,0; st8.spill [r2]=r22,16
.mem.offset 8,0; st8.spill [r3]=r23,16
	mov r19=b7
	;;
.mem.offset 0,0; st8.spill [r2]=r24,16
.mem.offset 8,0; st8.spill [r3]=r25,16
	;;
.mem.offset 0,0; st8.spill [r2]=r26,16
.mem.offset 8,0; st8.spill [r3]=r27,16
	;;
.mem.offset 0,0; st8.spill [r2]=r28,16
.mem.offset 8,0; st8.spill [r3]=r29,16
	;;
.mem.offset 0,0; st8.spill [r2]=r30,16
.mem.offset 8,0; st8.spill [r3]=r31,32
	;;
	mov ar.fpsr=r11       /* M-unit */
	st8 [r2]=r8,8         /* ar.ccv */
	adds r24=PT(B6)-PT(F7),r3
	;;
	stf.spill [r2]=f6,32
	stf.spill [r3]=f7,32
	;;
	stf.spill [r2]=f8,32
	stf.spill [r3]=f9,32
	;;
	stf.spill [r2]=f10
	stf.spill [r3]=f11
	adds r25=PT(B7)-PT(F11),r3
	;;
	st8 [r24]=r18,16       /* b6 */
	st8 [r25]=r19,16       /* b7 */
	;;
	st8 [r24]=r9           /* ar.csd */
	st8 [r25]=r10          /* ar.ssd */
	;;
	srlz.d		// make sure we see the effect of cr.ivr
	addl r14=@gprel(ia64_leave_nested),gp
	;;
	mov rp=r14
	br.call.sptk.many b6=kvm_ia64_handle_irq
	;;
END(kvm_interrupt)
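
/*
 * kvm_interrupt hand-inlines the minimal state save for the psr.vm-
 * clear case (the VMM itself was interrupted): it builds a pt_regs
 * frame on the current stack, switches to bank 1, and returns through
 * ia64_leave_nested rather than the full ia64_leave_hypervisor path.
 * Interrupts arriving during guest execution take the (p7) branch to
 * kvm_dispatch_interrupt instead.
 */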

    .global kvm_dispatch_vexirq
    .org kvm_ia64_ivt+0x3400
//////////////////////////////////////////////////////////////////////
// 0x3400 Entry 13 (size 64 bundles) Reserved
ENTRY(kvm_virtual_exirq)
	mov r31=pr
	mov r19=13
	mov r30=r0
	;;
kvm_dispatch_vexirq:
	cmp.eq p6,p0=1,r30
	;;
(p6)	add r29=VMM_VCPU_SAVED_GP_OFFSET,r21
	;;
(p6)	ld8 r1=[r29]
	;;
	KVM_SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,1,0
	mov out0=r13

	ssm psr.ic
	;;
	srlz.i // guarantee that interruption collection is on
	;;
	(p15) ssm psr.i               // restore psr.i
	adds r3=8,r2                // set up second base pointer
	;;
	KVM_SAVE_REST
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	mov rp=r14
	br.call.sptk.many b6=kvm_vexirq
END(kvm_virtual_exirq)
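
/*
 * Entry 13 is architecturally reserved; KVM appears to reuse it as an
 * internal vector for virtual external interrupts.  kvm_dispatch_vexirq
 * is also reachable as a global label: when entered with r30 == 1, the
 * (p6) loads first restore the VMM gp from VMM_VCPU_SAVED_GP_OFFSET
 * before the state save, whereas the vector entry itself clears r30.
 */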

    .org kvm_ia64_ivt+0x3800
/////////////////////////////////////////////////////////////////////
// 0x3800 Entry 14 (size 64 bundles) Reserved
	KVM_FAULT(14)
	// this code segment is from 2.6.16.13

    .org kvm_ia64_ivt+0x3c00
///////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
	KVM_FAULT(15)

    .org kvm_ia64_ivt+0x4000
///////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
	KVM_FAULT(16)

    .org kvm_ia64_ivt+0x4400
//////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
	KVM_FAULT(17)

    .org kvm_ia64_ivt+0x4800
//////////////////////////////////////////////////////////////////////
// 0x4800 Entry 18 (size 64 bundles) Reserved
	KVM_FAULT(18)

    .org kvm_ia64_ivt+0x4c00
//////////////////////////////////////////////////////////////////////
// 0x4c00 Entry 19 (size 64 bundles) Reserved
	KVM_FAULT(19)

    .org kvm_ia64_ivt+0x5000
//////////////////////////////////////////////////////////////////////
// 0x5000 Entry 20 (size 16 bundles) Page Not Present
ENTRY(kvm_page_not_present)
	KVM_REFLECT(20)
END(kvm_page_not_present)

    .org kvm_ia64_ivt+0x5100
///////////////////////////////////////////////////////////////////////
// 0x5100 Entry 21 (size 16 bundles) Key Permission vector
ENTRY(kvm_key_permission)
	KVM_REFLECT(21)
END(kvm_key_permission)

    .org kvm_ia64_ivt+0x5200
//////////////////////////////////////////////////////////////////////
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(kvm_iaccess_rights)
	KVM_REFLECT(22)
END(kvm_iaccess_rights)

    .org kvm_ia64_ivt+0x5300
//////////////////////////////////////////////////////////////////////
// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
ENTRY(kvm_daccess_rights)
	KVM_REFLECT(23)
END(kvm_daccess_rights)

    .org kvm_ia64_ivt+0x5400
/////////////////////////////////////////////////////////////////////
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
ENTRY(kvm_general_exception)
	KVM_REFLECT(24)
	KVM_FAULT(24)
END(kvm_general_exception)

    .org kvm_ia64_ivt+0x5500
//////////////////////////////////////////////////////////////////////
// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
ENTRY(kvm_disabled_fp_reg)
	KVM_REFLECT(25)
END(kvm_disabled_fp_reg)

    .org kvm_ia64_ivt+0x5600
////////////////////////////////////////////////////////////////////
// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
ENTRY(kvm_nat_consumption)
	KVM_REFLECT(26)
END(kvm_nat_consumption)

    .org kvm_ia64_ivt+0x5700
/////////////////////////////////////////////////////////////////////
// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
ENTRY(kvm_speculation_vector)
	KVM_REFLECT(27)
END(kvm_speculation_vector)

    .org kvm_ia64_ivt+0x5800
/////////////////////////////////////////////////////////////////////
// 0x5800 Entry 28 (size 16 bundles) Reserved
	KVM_FAULT(28)

    .org kvm_ia64_ivt+0x5900
///////////////////////////////////////////////////////////////////
// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
ENTRY(kvm_debug_vector)
	KVM_FAULT(29)
END(kvm_debug_vector)

    .org kvm_ia64_ivt+0x5a00
///////////////////////////////////////////////////////////////
// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
ENTRY(kvm_unaligned_access)
	KVM_REFLECT(30)
END(kvm_unaligned_access)

    .org kvm_ia64_ivt+0x5b00
//////////////////////////////////////////////////////////////////////
// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
ENTRY(kvm_unsupported_data_reference)
	KVM_REFLECT(31)
END(kvm_unsupported_data_reference)

    .org kvm_ia64_ivt+0x5c00
////////////////////////////////////////////////////////////////////
// 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65)
ENTRY(kvm_floating_point_fault)
	KVM_REFLECT(32)
END(kvm_floating_point_fault)

    .org kvm_ia64_ivt+0x5d00
/////////////////////////////////////////////////////////////////////
// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
ENTRY(kvm_floating_point_trap)
	KVM_REFLECT(33)
END(kvm_floating_point_trap)

    .org kvm_ia64_ivt+0x5e00
//////////////////////////////////////////////////////////////////////
// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
ENTRY(kvm_lower_privilege_trap)
	KVM_REFLECT(34)
END(kvm_lower_privilege_trap)

    .org kvm_ia64_ivt+0x5f00
//////////////////////////////////////////////////////////////////////
// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
ENTRY(kvm_taken_branch_trap)
	KVM_REFLECT(35)
END(kvm_taken_branch_trap)

    .org kvm_ia64_ivt+0x6000
////////////////////////////////////////////////////////////////////
// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
ENTRY(kvm_single_step_trap)
	KVM_REFLECT(36)
END(kvm_single_step_trap)
    .global kvm_virtualization_fault_back
    .org kvm_ia64_ivt+0x6100
/////////////////////////////////////////////////////////////////////
// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
ENTRY(kvm_virtualization_fault)
	mov r31=pr
	adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
	;;
	st8 [r16] = r1
	adds r17 = VMM_VCPU_GP_OFFSET, r21
	;;
	ld8 r1 = [r17]
	cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24
	cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24
	cmp.eq p8,p0=EVENT_MOV_TO_RR,r24
	cmp.eq p9,p0=EVENT_RSM,r24
	cmp.eq p10,p0=EVENT_SSM,r24
	cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
	cmp.eq p12,p0=EVENT_THASH,r24
(p6)	br.dptk.many kvm_asm_mov_from_ar
(p7)	br.dptk.many kvm_asm_mov_from_rr
(p8)	br.dptk.many kvm_asm_mov_to_rr
(p9)	br.dptk.many kvm_asm_rsm
(p10)	br.dptk.many kvm_asm_ssm
(p11)	br.dptk.many kvm_asm_mov_to_psr
(p12)	br.dptk.many kvm_asm_thash
	;;
kvm_virtualization_fault_back:
	adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
	;;
	ld8 r1 = [r16]
	;;
	mov r19=37
	adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
	adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
	;;
	st8 [r16] = r24
	st8 [r17] = r25
	;;
	cmp.ne p6,p0=EVENT_RFI, r24
(p6)	br.sptk kvm_dispatch_virtualization_fault
	;;
	adds r18=VMM_VPD_BASE_OFFSET,r21
	;;
	ld8 r18=[r18]
	;;
	adds r18=VMM_VPD_VIFS_OFFSET,r18
	;;
	ld8 r18=[r18]
	;;
	tbit.z p6,p0=r18,63
(p6)	br.sptk kvm_dispatch_virtualization_fault
	;;
// if vifs.v=1, discard the current register frame
	alloc r18=ar.pfs,0,0,0,0
	br.sptk kvm_dispatch_virtualization_fault
END(kvm_virtualization_fault)
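
/*
 * Fast-path dispatch, as read from the code above: on a virtualization
 * fault the pre-decoded cause arrives in r24 (the EVENT_* constants)
 * and the opcode in r25.  The most common privileged-operation
 * emulations branch straight to hand-written handlers (kvm_asm_*),
 * which can resume the guest without a full state save; everything
 * else stores cause/opcode into the vcpu and takes the slow path
 * through kvm_dispatch_virtualization_fault into the C emulator.  For
 * EVENT_RFI with vifs.v set, the current register frame is discarded
 * with an alloc of zero before dispatching.
 */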

    .org kvm_ia64_ivt+0x6200
//////////////////////////////////////////////////////////////
// 0x6200 Entry 38 (size 16 bundles) Reserved
	KVM_FAULT(38)

    .org kvm_ia64_ivt+0x6300
/////////////////////////////////////////////////////////////////
// 0x6300 Entry 39 (size 16 bundles) Reserved
	KVM_FAULT(39)

    .org kvm_ia64_ivt+0x6400
/////////////////////////////////////////////////////////////////
// 0x6400 Entry 40 (size 16 bundles) Reserved
	KVM_FAULT(40)

    .org kvm_ia64_ivt+0x6500
//////////////////////////////////////////////////////////////////
// 0x6500 Entry 41 (size 16 bundles) Reserved
	KVM_FAULT(41)

    .org kvm_ia64_ivt+0x6600
//////////////////////////////////////////////////////////////////
// 0x6600 Entry 42 (size 16 bundles) Reserved
	KVM_FAULT(42)

    .org kvm_ia64_ivt+0x6700
//////////////////////////////////////////////////////////////////
// 0x6700 Entry 43 (size 16 bundles) Reserved
	KVM_FAULT(43)

    .org kvm_ia64_ivt+0x6800
//////////////////////////////////////////////////////////////////
// 0x6800 Entry 44 (size 16 bundles) Reserved
	KVM_FAULT(44)

    .org kvm_ia64_ivt+0x6900
///////////////////////////////////////////////////////////////////
// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception
//(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
ENTRY(kvm_ia32_exception)
	KVM_FAULT(45)
END(kvm_ia32_exception)

    .org kvm_ia64_ivt+0x6a00
////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept  (30,31,59,70,71)
ENTRY(kvm_ia32_intercept)
	KVM_FAULT(47)
END(kvm_ia32_intercept)

    .org kvm_ia64_ivt+0x6c00
/////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
	KVM_FAULT(48)

    .org kvm_ia64_ivt+0x6d00
//////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
	KVM_FAULT(49)

    .org kvm_ia64_ivt+0x6e00
//////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
	KVM_FAULT(50)

    .org kvm_ia64_ivt+0x6f00
/////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
	KVM_FAULT(52)

    .org kvm_ia64_ivt+0x7100
////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
	KVM_FAULT(53)

    .org kvm_ia64_ivt+0x7200
/////////////////////////////////////////////////////////////////////
// 0x7200 Entry 54 (size 16 bundles) Reserved
	KVM_FAULT(54)

    .org kvm_ia64_ivt+0x7300
////////////////////////////////////////////////////////////////////
// 0x7300 Entry 55 (size 16 bundles) Reserved
	KVM_FAULT(55)

    .org kvm_ia64_ivt+0x7400
////////////////////////////////////////////////////////////////////
// 0x7400 Entry 56 (size 16 bundles) Reserved
	KVM_FAULT(56)

    .org kvm_ia64_ivt+0x7500
/////////////////////////////////////////////////////////////////////
// 0x7500 Entry 57 (size 16 bundles) Reserved
	KVM_FAULT(57)

    .org kvm_ia64_ivt+0x7600
/////////////////////////////////////////////////////////////////////
// 0x7600 Entry 58 (size 16 bundles) Reserved
	KVM_FAULT(58)

    .org kvm_ia64_ivt+0x7700
////////////////////////////////////////////////////////////////////
// 0x7700 Entry 59 (size 16 bundles) Reserved
	KVM_FAULT(59)

    .org kvm_ia64_ivt+0x7800
////////////////////////////////////////////////////////////////////
// 0x7800 Entry 60 (size 16 bundles) Reserved
	KVM_FAULT(60)

    .org kvm_ia64_ivt+0x7900
/////////////////////////////////////////////////////////////////////
// 0x7900 Entry 61 (size 16 bundles) Reserved
	KVM_FAULT(61)

    .org kvm_ia64_ivt+0x7a00
/////////////////////////////////////////////////////////////////////
// 0x7a00 Entry 62 (size 16 bundles) Reserved
	KVM_FAULT(62)

    .org kvm_ia64_ivt+0x7b00
/////////////////////////////////////////////////////////////////////
// 0x7b00 Entry 63 (size 16 bundles) Reserved
	KVM_FAULT(63)

    .org kvm_ia64_ivt+0x7c00
////////////////////////////////////////////////////////////////////
// 0x7c00 Entry 64 (size 16 bundles) Reserved
	KVM_FAULT(64)

    .org kvm_ia64_ivt+0x7d00
/////////////////////////////////////////////////////////////////////
// 0x7d00 Entry 65 (size 16 bundles) Reserved
	KVM_FAULT(65)

    .org kvm_ia64_ivt+0x7e00
/////////////////////////////////////////////////////////////////////
// 0x7e00 Entry 66 (size 16 bundles) Reserved
	KVM_FAULT(66)

    .org kvm_ia64_ivt+0x7f00
////////////////////////////////////////////////////////////////////
// 0x7f00 Entry 67 (size 16 bundles) Reserved
	KVM_FAULT(67)

    .org kvm_ia64_ivt+0x8000
// There is no particular reason for this code to be here, other than that
// there happens to be space here that would go unused otherwise.  If this
// fault ever gets "unreserved", simply move the following code to a more
// suitable spot...


ENTRY(kvm_dtlb_miss_dispatch)
	mov r19 = 2
	KVM_SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,3,0
	mov out0=cr.ifa
	mov out1=r15
	adds r3=8,r2                // set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i     // guarantee that interruption collection is on
	;;
	(p15) ssm psr.i               // restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
	;;
	KVM_SAVE_REST
	KVM_SAVE_EXTRA
	mov rp=r14
	;;
	adds out2=16,r12
	br.call.sptk.many b6=kvm_page_fault
END(kvm_dtlb_miss_dispatch)

ENTRY(kvm_itlb_miss_dispatch)

	KVM_SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,3,0
	mov out0=cr.ifa
	mov out1=r15
	adds r3=8,r2                // set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i   // guarantee that interruption collection is on
	;;
	(p15) ssm psr.i               // restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	KVM_SAVE_REST
	mov rp=r14
	;;
	adds out2=16,r12
	br.call.sptk.many b6=kvm_page_fault
END(kvm_itlb_miss_dispatch)

ENTRY(kvm_dispatch_reflection)
/*
 * Input:
 *  psr.ic: off
 *  r19:    intr type (offset into ivt, see ia64_int.h)
 *  r31:    contains saved predicates (pr)
 */
	KVM_SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,5,0
	mov out0=cr.ifa
	mov out1=cr.isr
	mov out2=cr.iim
	mov out3=r15
	adds r3=8,r2                // set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i   // guarantee that interruption collection is on
	;;
	(p15) ssm psr.i               // restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	KVM_SAVE_REST
	mov rp=r14
	;;
	adds out4=16,r12
	br.call.sptk.many b6=reflect_interruption
END(kvm_dispatch_reflection)
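
/*
 * Argument setup for reflect_interruption above: out0..out2 are the
 * control registers captured at fault time (cr.ifa, cr.isr, cr.iim),
 * out3 is r15 as prepared by the faulting vector, and out4 points at
 * the saved pt_regs area 16 bytes above the memory stack pointer.
 */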

ENTRY(kvm_dispatch_virtualization_fault)
	adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
	adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
	;;
	st8 [r16] = r24
	st8 [r17] = r25
	;;
	KVM_SAVE_MIN_WITH_COVER_R19
	;;
	alloc r14=ar.pfs,0,0,2,0 // (must be first in insn group!)
	mov out0=r13        //vcpu
	adds r3=8,r2                // set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i    // guarantee that interruption collection is on
	;;
	(p15) ssm psr.i               // restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
	;;
	KVM_SAVE_REST
	KVM_SAVE_EXTRA
	mov rp=r14
	;;
	adds out1=16,sp         //regs
	br.call.sptk.many b6=kvm_emulate
END(kvm_dispatch_virtualization_fault)


ENTRY(kvm_dispatch_interrupt)
	KVM_SAVE_MIN_WITH_COVER_R19	// uses r31; defines r2 and r3
	;;
	alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
	adds r3=8,r2		// set up second base pointer for SAVE_REST
	;;
	ssm psr.ic
	;;
	srlz.i
	;;
	(p15) ssm psr.i
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	KVM_SAVE_REST
	mov rp=r14
	;;
	mov out0=r13		// vcpu
	br.call.sptk.many b6=kvm_ia64_handle_irq
END(kvm_dispatch_interrupt)

GLOBAL_ENTRY(ia64_leave_nested)
	rsm psr.i
	;;
	adds r21=PT(PR)+16,r12
	;;
	lfetch [r21],PT(CR_IPSR)-PT(PR)
	adds r2=PT(B6)+16,r12
	adds r3=PT(R16)+16,r12
	;;
	lfetch [r21]
	ld8 r28=[r2],8		// load b6
	adds r29=PT(R24)+16,r12

	ld8.fill r16=[r3]
	adds r3=PT(AR_CSD)-PT(R16),r3
	adds r30=PT(AR_CCV)+16,r12
	;;
	ld8.fill r24=[r29]
	ld8 r15=[r30]		// load ar.ccv
	;;
	ld8 r29=[r2],16		// load b7
	ld8 r30=[r3],16		// load ar.csd
	;;
	ld8 r31=[r2],16		// load ar.ssd
	ld8.fill r8=[r3],16
	;;
	ld8.fill r9=[r2],16
	ld8.fill r10=[r3],PT(R17)-PT(R10)
	;;
	ld8.fill r11=[r2],PT(R18)-PT(R11)
	ld8.fill r17=[r3],16
	;;
	ld8.fill r18=[r2],16
	ld8.fill r19=[r3],16
	;;
	ld8.fill r20=[r2],16
	ld8.fill r21=[r3],16
	mov ar.csd=r30
	mov ar.ssd=r31
	;;
	rsm psr.i | psr.ic
	// initiate turning off of interrupt and interruption collection
	invala			// invalidate ALAT
	;;
	srlz.i
	;;
	ld8.fill r22=[r2],24
	ld8.fill r23=[r3],24
	mov b6=r28
	;;
	ld8.fill r25=[r2],16
	ld8.fill r26=[r3],16
	mov b7=r29
	;;
	ld8.fill r27=[r2],16
	ld8.fill r28=[r3],16
	;;
	ld8.fill r29=[r2],16
	ld8.fill r30=[r3],24
	;;
	ld8.fill r31=[r2],PT(F9)-PT(R31)
	adds r3=PT(F10)-PT(F6),r3
	;;
	ldf.fill f9=[r2],PT(F6)-PT(F9)
	ldf.fill f10=[r3],PT(F8)-PT(F10)
	;;
	ldf.fill f6=[r2],PT(F7)-PT(F6)
	;;
	ldf.fill f7=[r2],PT(F11)-PT(F7)
	ldf.fill f8=[r3],32
	;;
	srlz.i			// ensure interruption collection is off
	mov ar.ccv=r15
	;;
	bsw.0	// switch back to bank 0 (no stop bit required beforehand...)
	;;
	ldf.fill f11=[r2]
//	mov r18=r13
//	mov r21=r13
	adds r16=PT(CR_IPSR)+16,r12
	adds r17=PT(CR_IIP)+16,r12
	;;
	ld8 r29=[r16],16	// load cr.ipsr
	ld8 r28=[r17],16	// load cr.iip
	;;
	ld8 r30=[r16],16	// load cr.ifs
	ld8 r25=[r17],16	// load ar.unat
	;;
	ld8 r26=[r16],16	// load ar.pfs
	ld8 r27=[r17],16	// load ar.rsc
	cmp.eq p9,p0=r0,r0
	// set p9 to indicate that we should restore cr.ifs
	;;
	ld8 r24=[r16],16	// load ar.rnat (may be garbage)
	ld8 r23=[r17],16	// load ar.bspstore (may be garbage)
	;;
	ld8 r31=[r16],16	// load predicates
	ld8 r22=[r17],16	// load b0
	;;
	ld8 r19=[r16],16	// load ar.rsc value for "loadrs"
	ld8.fill r1=[r17],16	// load r1
	;;
	ld8.fill r12=[r16],16
	ld8.fill r13=[r17],16
	;;
	ld8 r20=[r16],16	// ar.fpsr
	ld8.fill r15=[r17],16
	;;
	ld8.fill r14=[r16],16
	ld8.fill r2=[r17]
	;;
	ld8.fill r3=[r16]
	;;
	mov r16=ar.bsp		// get existing backing store pointer
	;;
	mov b0=r22
	mov ar.pfs=r26
	mov cr.ifs=r30
	mov cr.ipsr=r29
	mov ar.fpsr=r20
	mov cr.iip=r28
	;;
	mov ar.rsc=r27
	mov ar.unat=r25
	mov pr=r31,-1
	rfi
END(ia64_leave_nested)
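
/*
 * ia64_leave_nested is the return path for interruptions taken while
 * the VMM itself was running (see kvm_interrupt above): it reloads
 * exactly the pt_regs frame built by the inline save there, switches
 * back to bank 0, restores cr.ipsr/cr.iip/cr.ifs and the ar registers,
 * and rfi's back into the interrupted VMM code.
 */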

GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
/*
 * work.need_resched etc. mustn't get changed by this CPU before it
 * returns to user- or fsys-mode, hence we disable interrupts early on:
 */
	adds r2 = PT(R4)+16,r12
	adds r3 = PT(R5)+16,r12
	adds r8 = PT(EML_UNAT)+16,r12
	;;
	ld8 r8 = [r8]
	;;
	mov ar.unat=r8
	;;
	ld8.fill r4=[r2],16    //load r4
	ld8.fill r5=[r3],16    //load r5
	;;
	ld8.fill r6=[r2]    //load r6
	ld8.fill r7=[r3]    //load r7
	;;
END(ia64_leave_hypervisor_prepare)
// fall through
GLOBAL_ENTRY(ia64_leave_hypervisor)
	rsm psr.i
	;;
	br.call.sptk.many b0=leave_hypervisor_tail
	;;
	adds r20=PT(PR)+16,r12
	adds r8=PT(EML_UNAT)+16,r12
	;;
	ld8 r8=[r8]
	;;
	mov ar.unat=r8
	;;
	lfetch [r20],PT(CR_IPSR)-PT(PR)
	adds r2 = PT(B6)+16,r12
	adds r3 = PT(B7)+16,r12
	;;
	lfetch [r20]
	;;
	ld8 r24=[r2],16        /* B6 */
	ld8 r25=[r3],16        /* B7 */
	;;
	ld8 r26=[r2],16        /* ar_csd */
	ld8 r27=[r3],16        /* ar_ssd */
	mov b6 = r24
	;;
	ld8.fill r8=[r2],16
	ld8.fill r9=[r3],16
	mov b7 = r25
	;;
	mov ar.csd = r26
	mov ar.ssd = r27
	;;
	ld8.fill r10=[r2],PT(R15)-PT(R10)
	ld8.fill r11=[r3],PT(R14)-PT(R11)
	;;
	ld8.fill r15=[r2],PT(R16)-PT(R15)
	ld8.fill r14=[r3],PT(R17)-PT(R14)
	;;
	ld8.fill r16=[r2],16
	ld8.fill r17=[r3],16
	;;
	ld8.fill r18=[r2],16
	ld8.fill r19=[r3],16
	;;
	ld8.fill r20=[r2],16
	ld8.fill r21=[r3],16
	;;
	ld8.fill r22=[r2],16
	ld8.fill r23=[r3],16
	;;
	ld8.fill r24=[r2],16
	ld8.fill r25=[r3],16
	;;
	ld8.fill r26=[r2],16
	ld8.fill r27=[r3],16
	;;
	ld8.fill r28=[r2],16
	ld8.fill r29=[r3],16
	;;
	ld8.fill r30=[r2],PT(F6)-PT(R30)
	ld8.fill r31=[r3],PT(F7)-PT(R31)
	;;
	rsm psr.i | psr.ic
	// initiate turning off of interrupt and interruption collection
	invala          // invalidate ALAT
	;;
	srlz.i          // ensure interruption collection is off
	;;
	bsw.0
	;;
	adds r16 = PT(CR_IPSR)+16,r12
	adds r17 = PT(CR_IIP)+16,r12
	mov r21=r13		// get current
	;;
	ld8 r31=[r16],16    // load cr.ipsr
	ld8 r30=[r17],16    // load cr.iip
	;;
	ld8 r29=[r16],16    // load cr.ifs
	ld8 r28=[r17],16    // load ar.unat
	;;
	ld8 r27=[r16],16    // load ar.pfs
	ld8 r26=[r17],16    // load ar.rsc
	;;
	ld8 r25=[r16],16    // load ar.rnat
	ld8 r24=[r17],16    // load ar.bspstore
	;;
	ld8 r23=[r16],16    // load predicates
	ld8 r22=[r17],16    // load b0
	;;
	ld8 r20=[r16],16    // load ar.rsc value for "loadrs"
	ld8.fill r1=[r17],16    //load r1
	;;
	ld8.fill r12=[r16],16    //load r12
	ld8.fill r13=[r17],PT(R2)-PT(R13)    //load r13
	;;
	ld8 r19=[r16],PT(R3)-PT(AR_FPSR)    //load ar_fpsr
	ld8.fill r2=[r17],PT(AR_CCV)-PT(R2)    //load r2
	;;
	ld8.fill r3=[r16]	//load r3
	ld8 r18=[r17]	//load ar_ccv
	;;
	mov ar.fpsr=r19
	mov ar.ccv=r18
	shr.u r18=r20,16
	;;
kvm_rbs_switch:
	mov r19=96

kvm_dont_preserve_current_frame:
/*
 * To prevent leaking bits between the hypervisor and guest domain,
 * we must clear the stacked registers in the "invalid" partition here.
 * (Clearing runs at roughly 5 registers/cycle on McKinley.)
 */
#   define pRecurse	p6
#   define pReturn	p7
#   define Nregs	14

	alloc loc0=ar.pfs,2,Nregs-2,2,0
	shr.u loc1=r18,9	// RNaTslots <= floor(dirtySize / (64*8))
	sub r19=r19,r18		// r19 = (physStackedSize + 8) - dirtySize
	;;
	mov ar.rsc=r20		// load ar.rsc to be used for "loadrs"
	shladd in0=loc1,3,r19
	mov in1=0
	;;
	TEXT_ALIGN(32)
kvm_rse_clear_invalid:
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	cmp.lt pRecurse,p0=Nregs*8,in0
	// if more than Nregs regs left to clear, (re)curse
	add out0=-Nregs*8,in0
	add out1=1,in1		// increment recursion count
	mov loc1=0
	mov loc2=0
	;;
	mov loc3=0
	mov loc4=0
	mov loc5=0
	mov loc6=0
	mov loc7=0
(pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid
	;;
	mov loc8=0
	mov loc9=0
	cmp.ne pReturn,p0=r0,in1
	// if recursion count != 0, we need to do a br.ret
	mov loc10=0
	mov loc11=0
(pReturn) br.ret.dptk.many b0

#	undef pRecurse
#	undef pReturn
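
// How the loop above clears the invalid RSE partition: each recursive
// alloc creates a fresh register frame and zeroes its locals, so every
// nesting level scrubs another Nregs-worth of stacked registers (the
// shr.u by 9 above accounts for the interleaved RNaT collection
// slots).  in0 tracks the bytes left to clear and in1 the recursion
// depth, which the predicated br.ret chain then unwinds.  The loadrs
// below reloads the guest's dirty partition from the backing store.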

// loadrs has already been shifted
	alloc r16=ar.pfs,0,0,0,0    // drop current register frame
	;;
	loadrs
	;;
	mov ar.bspstore=r24
	;;
	mov ar.unat=r28
	mov ar.rnat=r25
	mov ar.rsc=r26
	;;
	mov cr.ipsr=r31
	mov cr.iip=r30
	mov cr.ifs=r29
	mov ar.pfs=r27
	adds r18=VMM_VPD_BASE_OFFSET,r21
	;;
	ld8 r18=[r18]   //vpd
	adds r17=VMM_VCPU_ISR_OFFSET,r21
	;;
	ld8 r17=[r17]
	adds r19=VMM_VPD_VPSR_OFFSET,r18
	;;
	ld8 r19=[r19]        //vpsr
	mov r25=r18
	adds r16=VMM_VCPU_GP_OFFSET,r21
	;;
	ld8 r16=[r16]	// load the vcpu's gp
	movl r24=@gprel(ia64_vmm_entry)  // calculate return address
	;;
	add  r24=r24,r16
	;;
	br.sptk.many  kvm_vps_sync_write       // call the service
	;;
END(ia64_leave_hypervisor)
// fall through
GLOBAL_ENTRY(ia64_vmm_entry)
/*
 *  must be at bank 0
 *  parameter:
 *  r17:cr.isr
 *  r18:vpd
 *  r19:vpsr
 *  r22:b0
 *  r23:predicate
 */
	mov r24=r22
	mov r25=r18
	tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT        // p1=vpsr.ic
(p1) 	br.cond.sptk.few kvm_vps_resume_normal
(p2)	br.cond.sptk.many kvm_vps_resume_handler
	;;
END(ia64_vmm_entry)
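
/*
 * ia64_vmm_entry selects the resume service from vpsr.ic: with guest
 * interruption collection enabled (p1) it branches to
 * kvm_vps_resume_normal, otherwise (p2) to kvm_vps_resume_handler;
 * both complete the world switch back into the guest.
 */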

GLOBAL_ENTRY(ia64_call_vsa)
    .regstk 4,4,0,0

rpsave  =   loc0
pfssave =   loc1
psrsave =   loc2
entry   =   loc3
hostret =   r24

	alloc   pfssave=ar.pfs,4,4,0,0
	mov rpsave=rp
	adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13
	;;
	ld8 entry=[entry]
1:	mov hostret=ip
	mov r25=in1         // copy arguments
	mov r26=in2
	mov r27=in3
	mov psrsave=psr
	;;
	tbit.nz p6,p0=psrsave,14    // IA64_PSR_I
	tbit.nz p7,p0=psrsave,13    // IA64_PSR_IC
	;;
	add hostret=2f-1b,hostret   // calculate return address
	add entry=entry,in0
	;;
	rsm psr.i | psr.ic
	;;
	srlz.i
	mov b6=entry
	br.cond.sptk b6         // call the service
2:
// Architectural sequence for enabling interrupts if necessary
(p7)    ssm psr.ic
	;;
(p7)    srlz.i
	;;
(p6)    ssm psr.i
	;;
	mov rp=rpsave
	mov ar.pfs=pfssave
	mov r8=r31
	;;
	srlz.d
	br.ret.sptk rp

END(ia64_call_vsa)
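
/*
 * ia64_call_vsa(proc_offset, arg1, arg2, arg3) calls the VPS service
 * at VMM_VCPU_VSA_BASE_OFFSET + in0.  The 1b/2f arithmetic on ip
 * builds the return address in hostret (r24) by hand, since the
 * service is entered with a plain branch rather than br.call; psr.i
 * and psr.ic are masked around the call and conditionally re-enabled
 * afterwards from the saved psr bits.  The service's result is assumed
 * to come back in r31, which is copied to r8 as the C-visible return
 * value.
 */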

#define  INIT_BSPSTORE  ((4<<30)-(12<<20)-0x100)

GLOBAL_ENTRY(vmm_reset_entry)
	//set up ipsr, iip, vpd.vpsr, dcr
	// For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1
	// For DCR: all bits 0
	bsw.0
	;;
	mov r21=r13
	adds r14=-VMM_PT_REGS_SIZE, r12
	;;
	movl r6=0x501008826000      // IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1
	movl r10=0x8000000000000000
	adds r16=PT(CR_IIP), r14
	adds r20=PT(R1), r14
	;;
	rsm psr.ic | psr.i
	;;
	srlz.i
	;;
	mov ar.rsc = 0
	;;
	flushrs
	;;
	mov ar.bspstore = 0
	// clear BSPSTORE
	;;
	mov cr.ipsr=r6
	mov cr.ifs=r10
	ld8 r4 = [r16] // Set init iip for first run.
	ld8 r1 = [r20]
	;;
	mov cr.iip=r4
	adds r16=VMM_VPD_BASE_OFFSET,r13
	;;
	ld8 r18=[r16]
	;;
	adds r19=VMM_VPD_VPSR_OFFSET,r18
	;;
	ld8 r19=[r19]
	mov r17=r0
	mov r22=r0
	mov r23=r0
	br.cond.sptk ia64_vmm_entry
	br.ret.sptk  b0
END(vmm_reset_entry)