• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/arch/ia64/xen/
1/******************************************************************************
2 * arch/ia64/xen/xen_pv_ops.c
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 *                    VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
20 *
21 */
22
23#include <linux/console.h>
24#include <linux/irq.h>
25#include <linux/kernel.h>
26#include <linux/pm.h>
27#include <linux/unistd.h>
28
29#include <asm/xen/hypervisor.h>
30#include <asm/xen/xencomm.h>
31#include <asm/xen/privop.h>
32
33#include "irq_xen.h"
34#include "time.h"
35
36/***************************************************************************
37 * general info
38 */
/* Identity of this paravirt environment; kernel_rpl is fixed up at boot. */
static struct pv_info xen_info __initdata = {
	.kernel_rpl = 2,	/* or 1: determined at runtime (xen_info_init) */
	.paravirt_enabled = 1,
	.name = "Xen/ia64",
};

/* ar.rsc bits [3:2] hold the privilege level the kernel executes at. */
#define IA64_RSC_PL_SHIFT	2
#define IA64_RSC_PL_BIT_SIZE	2
#define IA64_RSC_PL_MASK	\
	(((1UL << IA64_RSC_PL_BIT_SIZE) - 1) << IA64_RSC_PL_SHIFT)
49
50static void __init
51xen_info_init(void)
52{
53	/* Xenified Linux/ia64 may run on pl = 1 or 2.
54	 * determin at run time. */
55	unsigned long rsc = ia64_getreg(_IA64_REG_AR_RSC);
56	unsigned int rpl = (rsc & IA64_RSC_PL_MASK) >> IA64_RSC_PL_SHIFT;
57	xen_info.kernel_rpl = rpl;
58}
59
60/***************************************************************************
61 * pv_init_ops
62 * initialization hooks.
63 */
64
/*
 * Unwinder callback run on panic: record the kernel stack pointer for
 * the crash image, then ask the hypervisor to take the domain down.
 */
static void
xen_panic_hypercall(struct unw_frame_info *info, void *arg)
{
	current->thread.ksp = (__u64)info->sw - 16;
	HYPERVISOR_shutdown(SHUTDOWN_crash);
	/* we're never actually going to get here... */
}

/* Panic-notifier entry: unwind to the current frame, then crash via Xen. */
static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	unw_init_running(xen_panic_hypercall, NULL);
	/* we're never actually going to get here... */
	return NOTIFY_DONE;
}
80
81static struct notifier_block xen_panic_block = {
82	xen_panic_event, NULL, 0 /* try to go last */
83};
84
/* pm_power_off hook: quiesce interrupts and power off via hypercall. */
static void xen_pm_power_off(void)
{
	local_irq_disable();
	HYPERVISOR_shutdown(SHUTDOWN_poweroff);
}
90
/* Boot banner: announce we run on Xen and dump the basic start info. */
static void __init
xen_banner(void)
{
	printk(KERN_INFO
	       "Running on Xen! pl = %d start_info_pfn=0x%lx nr_pages=%ld "
	       "flags=0x%x\n",
	       xen_info.kernel_rpl,
	       HYPERVISOR_shared_info->arch.start_info_pfn,
	       xen_start_info->nr_pages, xen_start_info->flags);
}

/*
 * Reserve the single page holding the Xen start_info so the kernel
 * never hands it out as free memory.  Returns the number of regions
 * filled in (always 1).
 */
static int __init
xen_reserve_memory(struct rsvd_region *region)
{
	region->start = (unsigned long)__va(
		(HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT));
	region->end   = region->start + PAGE_SIZE;
	return 1;
}
110
/*
 * Earliest Xen-specific setup: locate start_info, bring up xencomm
 * (required before any hypercall), query features, and install the
 * panic/power-off hooks.
 */
static void __init
xen_arch_setup_early(void)
{
	struct shared_info *s;
	BUG_ON(!xen_pv_domain());

	s = HYPERVISOR_shared_info;
	xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT);

	/* Must be done before any hypercall.  */
	xencomm_initialize();

	xen_setup_features();
	/* Register a call for panic conditions. */
	atomic_notifier_chain_register(&panic_notifier_list,
				       &xen_panic_block);
	pm_power_off = xen_pm_power_off;

	xen_ia64_enable_opt_feature();
}

/* Route console output through the Xen console (hvc) by preference. */
static void __init
xen_arch_setup_console(char **cmdline_p)
{
	add_preferred_console("xenboot", 0, NULL);
	add_preferred_console("tty", 0, NULL);
	/* use hvc_xen */
	add_preferred_console("hvc", 0, NULL);

#if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = NULL;
#endif
}

/* Non-zero return tells the caller to skip native MCA initialization. */
static int __init
xen_arch_setup_nomca(void)
{
	return 1;
}

/* After the boot CPU is prepared, switch to per-vcpu info placement. */
static void __init
xen_post_smp_prepare_boot_cpu(void)
{
	xen_setup_vcpu_info_placement();
}
156
#ifdef ASM_SUPPORTED
/* Forward declarations; definitions live in the patching section below. */
static unsigned long __init_or_module
xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
#endif
static void __init
xen_patch_branch(unsigned long tag, unsigned long type);

/* Boot-time paravirt hooks installed by xen_setup_pv_ops(). */
static const struct pv_init_ops xen_init_ops __initconst = {
	.banner = xen_banner,

	.reserve_memory = xen_reserve_memory,

	.arch_setup_early = xen_arch_setup_early,
	.arch_setup_console = xen_arch_setup_console,
	.arch_setup_nomca = xen_arch_setup_nomca,

	.post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu,
#ifdef ASM_SUPPORTED
	.patch_bundle = xen_patch_bundle,
#endif
	.patch_branch = xen_patch_branch,
};
179
180/***************************************************************************
181 * pv_fsys_data
182 * addresses for fsys
183 */
184
/* fsyscall entry points provided by the Xen gate page assembly. */
extern unsigned long xen_fsyscall_table[NR_syscalls];
extern char xen_fsys_bubble_down[];
struct pv_fsys_data xen_fsys_data __initdata = {
	.fsyscall_table = (unsigned long *)xen_fsyscall_table,
	.fsys_bubble_down = (void *)xen_fsys_bubble_down,
};

/***************************************************************************
 * pv_patchdata
 * patchdata addresses
 */

/* Declare the start/end markers of one gate-page patch list. */
#define DECLARE(name)							\
	extern unsigned long __xen_start_gate_##name##_patchlist[];	\
	extern unsigned long __xen_end_gate_##name##_patchlist[]

DECLARE(fsyscall);
DECLARE(brl_fsys_bubble_down);
DECLARE(vtop);
DECLARE(mckinley_e9);

extern unsigned long __xen_start_gate_section[];

/* Fill in the start/end fields of pv_patchdata for one patch list. */
#define ASSIGN(name)							\
	.start_##name##_patchlist =					\
		(unsigned long)__xen_start_gate_##name##_patchlist,	\
	.end_##name##_patchlist =					\
		(unsigned long)__xen_end_gate_##name##_patchlist

static struct pv_patchdata xen_patchdata __initdata = {
	ASSIGN(fsyscall),
	ASSIGN(brl_fsys_bubble_down),
	ASSIGN(vtop),
	ASSIGN(mckinley_e9),

	.gate_section = (void*)__xen_start_gate_section,
};
222
223/***************************************************************************
224 * pv_cpu_ops
225 * intrinsics hooks.
226 */
227
228#ifndef ASM_SUPPORTED
/* Convert a guest itm value to the host timebase before programming cr.itm. */
static void
xen_set_itm_with_offset(unsigned long val)
{
	/* ia64_cpu_local_tick() calls this with interrupt enabled. */
	/* WARN_ON(!irqs_disabled()); */
	xen_set_itm(val - XEN_MAPPEDREGS->itc_offset);
}

/* Read cr.itm and translate it back into the guest timebase. */
static unsigned long
xen_get_itm_with_offset(void)
{
	/* unused at this moment */
	printk(KERN_DEBUG "%s is called.\n", __func__);

	WARN_ON(!irqs_disabled());
	return ia64_native_getreg(_IA64_REG_CR_ITM) +
		XEN_MAPPEDREGS->itc_offset;
}
247
/* ia64_set_itc() is only called by
 * cpu_init() with ia64_set_itc(0) and ia64_sync_itc().
 * So XEN_MAPPEDREGS->itc_offset can be considered as almost constant.
 */
static void
xen_set_itc(unsigned long val)
{
	unsigned long mitc;

	WARN_ON(!irqs_disabled());
	/* Remember the delta between the requested guest itc and the
	 * machine itc; readers add it back in xen_get_itc(). */
	mitc = ia64_native_getreg(_IA64_REG_AR_ITC);
	XEN_MAPPEDREGS->itc_offset = val - mitc;
	XEN_MAPPEDREGS->itc_last = val;
}

/*
 * Return a monotonically increasing guest itc.  itc_last records the
 * last value handed out; the cmpxchg loop guarantees no two readers
 * ever observe the clock going backwards.
 */
static unsigned long
xen_get_itc(void)
{
	unsigned long res;
	unsigned long itc_offset;
	unsigned long itc_last;
	unsigned long ret_itc_last;

	itc_offset = XEN_MAPPEDREGS->itc_offset;
	do {
		itc_last = XEN_MAPPEDREGS->itc_last;
		res = ia64_native_getreg(_IA64_REG_AR_ITC);
		res += itc_offset;
		/* never return a value <= one already handed out */
		if (itc_last >= res)
			res = itc_last + 1;
		ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last,
				       itc_last, res);
	} while (unlikely(ret_itc_last != itc_last));
	return res;

}
284
285static void xen_setreg(int regnum, unsigned long val)
286{
287	switch (regnum) {
288	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:
289		xen_set_kr(regnum - _IA64_REG_AR_KR0, val);
290		break;
291	case _IA64_REG_AR_ITC:
292		xen_set_itc(val);
293		break;
294	case _IA64_REG_CR_TPR:
295		xen_set_tpr(val);
296		break;
297	case _IA64_REG_CR_ITM:
298		xen_set_itm_with_offset(val);
299		break;
300	case _IA64_REG_CR_EOI:
301		xen_eoi(val);
302		break;
303	default:
304		ia64_native_setreg_func(regnum, val);
305		break;
306	}
307}
308
309static unsigned long xen_getreg(int regnum)
310{
311	unsigned long res;
312
313	switch (regnum) {
314	case _IA64_REG_PSR:
315		res = xen_get_psr();
316		break;
317	case _IA64_REG_AR_ITC:
318		res = xen_get_itc();
319		break;
320	case _IA64_REG_CR_ITM:
321		res = xen_get_itm_with_offset();
322		break;
323	case _IA64_REG_CR_IVR:
324		res = xen_get_ivr();
325		break;
326	case _IA64_REG_CR_TPR:
327		res = xen_get_tpr();
328		break;
329	default:
330		res = ia64_native_getreg_func(regnum);
331		break;
332	}
333	return res;
334}
335
/* turning on interrupts is a bit more complicated.. write to the
 * memory-mapped virtual psr.i bit first (to avoid race condition),
 * then if any interrupts were pending, we have to execute a hyperprivop
 * to ensure the pending interrupt gets delivered; else we're done! */
static void
xen_ssm_i(void)
{
	int old = xen_get_virtual_psr_i();
	xen_set_virtual_psr_i(1);
	barrier();	/* the virtual bit must be visible before the check */
	if (!old && xen_get_virtual_pend())
		xen_hyper_ssm_i();
}

/* turning off interrupts can be paravirtualized simply by writing
 * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */
static void
xen_rsm_i(void)
{
	xen_set_virtual_psr_i(0);
	barrier();
}

/* Report the virtual psr.i state in the same bit position as real psr.i. */
static unsigned long
xen_get_psr_i(void)
{
	return xen_get_virtual_psr_i() ? IA64_PSR_I : 0;
}

/* Restore interrupt state from a mask previously saved via psr. */
static void
xen_intrin_local_irq_restore(unsigned long mask)
{
	if (mask & IA64_PSR_I)
		xen_ssm_i();
	else
		xen_rsm_i();
}
373#else
/*
 * Emit a hand-written assembly stub "xen_<name>" whose body (between the
 * _direct_start/_direct_end markers) can be copied over a paravirt call
 * site by the binary patcher; when called as a function it returns via b6.
 */
#define __DEFINE_FUNC(name, code)					\
	extern const char xen_ ## name ## _direct_start[];		\
	extern const char xen_ ## name ## _direct_end[];		\
	asm (".align 32\n"						\
	     ".proc xen_" #name "\n"					\
	     "xen_" #name ":\n"						\
	     "xen_" #name "_direct_start:\n"				\
	     code							\
	     "xen_" #name "_direct_end:\n"				\
	     "br.cond.sptk.many b6\n"					\
	     ".endp xen_" #name "\n")

/* Stub taking no arguments, returning nothing. */
#define DEFINE_VOID_FUNC0(name, code)		\
	extern void				\
	xen_ ## name (void);			\
	__DEFINE_FUNC(name, code)

/* Stub taking one unsigned long, returning nothing. */
#define DEFINE_VOID_FUNC1(name, code)		\
	extern void				\
	xen_ ## name (unsigned long arg);	\
	__DEFINE_FUNC(name, code)

/* Stub taking one pointer, returning nothing. */
#define DEFINE_VOID_FUNC1_VOID(name, code)	\
	extern void				\
	xen_ ## name (void *arg);		\
	__DEFINE_FUNC(name, code)

/* Stub taking two unsigned longs, returning nothing. */
#define DEFINE_VOID_FUNC2(name, code)		\
	extern void				\
	xen_ ## name (unsigned long arg0,	\
		      unsigned long arg1);	\
	__DEFINE_FUNC(name, code)

/* Stub taking no arguments, returning an unsigned long in r8. */
#define DEFINE_FUNC0(name, code)		\
	extern unsigned long			\
	xen_ ## name (void);			\
	__DEFINE_FUNC(name, code)

/* Stub taking one typed argument, returning an unsigned long in r8. */
#define DEFINE_FUNC1(name, type, code)		\
	extern unsigned long			\
	xen_ ## name (type arg);		\
	__DEFINE_FUNC(name, code)

/* XSI slot holding the address of the vcpu's interrupt-mask byte. */
#define XEN_PSR_I_ADDR_ADDR     (XSI_BASE + XSI_PSR_I_ADDR_OFS)
418
/*
 * static void xen_set_itm_with_offset(unsigned long val)
 *        xen_set_itm(val - XEN_MAPPEDREGS->itc_offset);
 */
/* 2 bundles */
DEFINE_VOID_FUNC1(set_itm_with_offset,
		  "mov r2 = " __stringify(XSI_BASE) " + "
		  __stringify(XSI_ITC_OFFSET_OFS) "\n"
		  ";;\n"
		  "ld8 r3 = [r2]\n"	/* r3 = itc_offset */
		  ";;\n"
		  "sub r8 = r8, r3\n"	/* host itm = guest itm - offset */
		  "break " __stringify(HYPERPRIVOP_SET_ITM) "\n");
432
433/*
434 * static unsigned long xen_get_itm_with_offset(void)
435 *    return ia64_native_getreg(_IA64_REG_CR_ITM) + XEN_MAPPEDREGS->itc_offset;
436 */
437/* 2 bundles */
438DEFINE_FUNC0(get_itm_with_offset,
439	     "mov r2 = " __stringify(XSI_BASE) " + "
440	     __stringify(XSI_ITC_OFFSET_OFS) "\n"
441	     ";;\n"
442	     "ld8 r3 = [r2]\n"
443	     "mov r8 = cr.itm\n"
444	     ";;\n"
445	     "add r8 = r8, r2\n");
446
/*
 * static void xen_set_itc(unsigned long val)
 *	unsigned long mitc;
 *
 *	WARN_ON(!irqs_disabled());
 *	mitc = ia64_native_getreg(_IA64_REG_AR_ITC);
 *	XEN_MAPPEDREGS->itc_offset = val - mitc;
 *	XEN_MAPPEDREGS->itc_last = val;
 */
/* 2 bundles */
DEFINE_VOID_FUNC1(set_itc,
		  "mov r2 = " __stringify(XSI_BASE) " + "
		  __stringify(XSI_ITC_LAST_OFS) "\n"
		  "mov r3 = ar.itc\n"		/* r3 = machine itc */
		  ";;\n"
		  "sub r3 = r8, r3\n"		/* r3 = val - mitc = offset */
		  "st8 [r2] = r8, "		/* itc_last = val; r2 -> offset slot */
		  __stringify(XSI_ITC_LAST_OFS) " - "
		  __stringify(XSI_ITC_OFFSET_OFS) "\n"
		  ";;\n"
		  "st8 [r2] = r3\n");		/* itc_offset = val - mitc */
468
/*
 * static unsigned long xen_get_itc(void)
 *	unsigned long res;
 *	unsigned long itc_offset;
 *	unsigned long itc_last;
 *	unsigned long ret_itc_last;
 *
 *	itc_offset = XEN_MAPPEDREGS->itc_offset;
 *	do {
 *		itc_last = XEN_MAPPEDREGS->itc_last;
 *		res = ia64_native_getreg(_IA64_REG_AR_ITC);
 *		res += itc_offset;
 *		if (itc_last >= res)
 *			res = itc_last + 1;
 *		ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last,
 *				       itc_last, res);
 *	} while (unlikely(ret_itc_last != itc_last));
 *	return res;
 */
/* 5 bundles */
DEFINE_FUNC0(get_itc,
	     "mov r2 = " __stringify(XSI_BASE) " + "
	     __stringify(XSI_ITC_OFFSET_OFS) "\n"
	     ";;\n"
	     "ld8 r9 = [r2], " __stringify(XSI_ITC_LAST_OFS) " - "
	     __stringify(XSI_ITC_OFFSET_OFS) "\n"
					/* r9 = itc_offset */
					/* r2 = XSI_ITC_OFFSET */
	     "888:\n"
	     "mov r8 = ar.itc\n"	/* res = ar.itc */
	     ";;\n"
	     "ld8 r3 = [r2]\n"		/* r3 = itc_last */
	     "add r8 = r8, r9\n"	/* res = ar.itc + itc_offset */
	     ";;\n"
	     "cmp.gtu p6, p0 = r3, r8\n"
	     ";;\n"
	     "(p6) add r8 = 1, r3\n"	/* if (itc_last > res) itc_last + 1 */
	     ";;\n"
	     "mov ar.ccv = r8\n"
	     ";;\n"
	     /* publish the new itc_last; retry if another vcpu beat us */
	     "cmpxchg8.acq r10 = [r2], r8, ar.ccv\n"
	     ";;\n"
	     "cmp.ne p6, p0 = r10, r3\n"
	     "(p6) hint @pause\n"
	     "(p6) br.cond.spnt 888b\n");

/* fc (flush cache) is a privileged operation: forward to the hypervisor. */
DEFINE_VOID_FUNC1_VOID(fc,
		       "break " __stringify(HYPERPRIVOP_FC) "\n");
517
/*
 * psr_i_addr_addr = XEN_PSR_I_ADDR_ADDR
 * masked_addr = *psr_i_addr_addr
 * pending_intr_addr = masked_addr - 1
 * if (val & IA64_PSR_I) {
 *   masked = *masked_addr
 *   *masked_addr = 0:xen_set_virtual_psr_i(1)
 *   compiler barrier
 *   if (masked) {
 *      uint8_t pending = *pending_intr_addr;
 *      if (pending)
 *              XEN_HYPER_SSM_I
 *   }
 * } else {
 *   *masked_addr = 1:xen_set_virtual_psr_i(0)
 * }
 */
/* 6 bundles */
DEFINE_VOID_FUNC1(intrin_local_irq_restore,
		  /* r8 = input value: 0 or IA64_PSR_I
		   * p6 =  (flags & IA64_PSR_I)
		   *    = if clause
		   * p7 = !(flags & IA64_PSR_I)
		   *    = else clause
		   */
		  "cmp.ne p6, p7 = r8, r0\n"
		  "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
		  ";;\n"
		  /* r9 = XEN_PSR_I_ADDR */
		  "ld8 r9 = [r9]\n"
		  ";;\n"

		  /* r10 = masked previous value */
		  "(p6)	ld1.acq r10 = [r9]\n"
		  ";;\n"

		  /* p8 = !masked interrupt masked previously? */
		  "(p6)	cmp.ne.unc p8, p0 = r10, r0\n"

		  /* p7 = else clause */
		  "(p7)	mov r11 = 1\n"
		  ";;\n"
		  /* masked = 1 */
		  "(p7)	st1.rel [r9] = r11\n"

		  /* p6 = if clause */
		  /* masked = 0
		   * r9 = masked_addr - 1
		   *    = pending_intr_addr
		   */
		  "(p8)	st1.rel [r9] = r0, -1\n"
		  ";;\n"
		  /* r8 = pending_intr */
		  "(p8)	ld1.acq r11 = [r9]\n"
		  ";;\n"
		  /* p9 = interrupt pending? */
		  "(p8)	cmp.ne.unc p9, p10 = r11, r0\n"
		  ";;\n"
		  "(p10) mf\n"
		  /* issue hypercall to trigger interrupt */
		  "(p9)	break " __stringify(HYPERPRIVOP_SSM_I) "\n");

/* ptc.ga (global TLB purge) must go through the hypervisor. */
DEFINE_VOID_FUNC2(ptcga,
		  "break " __stringify(HYPERPRIVOP_PTC_GA) "\n");
/* region-register write is privileged: forward to the hypervisor. */
DEFINE_VOID_FUNC2(set_rr,
		  "break " __stringify(HYPERPRIVOP_SET_RR) "\n");
584
/*
 * tmp = XEN_MAPPEDREGS->interrupt_mask_addr = XEN_PSR_I_ADDR_ADDR;
 * tmp = *tmp
 * tmp = *tmp;
 * psr_i = tmp? 0: IA64_PSR_I;
 */
/* 4 bundles */
DEFINE_FUNC0(get_psr_i,
	     "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
	     ";;\n"
	     "ld8 r9 = [r9]\n"			/* r9 = XEN_PSR_I_ADDR */
	     "mov r8 = 0\n"			/* psr_i = 0 */
	     ";;\n"
	     "ld1.acq r9 = [r9]\n"		/* r9 = XEN_PSR_I */
	     ";;\n"
	     "cmp.eq.unc p6, p0 = r9, r0\n"	/* p6 = (XEN_PSR_I == 0),
						 * i.e. not masked */
	     ";;\n"
	     "(p6) mov r8 = " __stringify(1 << IA64_PSR_I_BIT) "\n");

/* the remaining privileged reads are simple hyperprivop forwards */
DEFINE_FUNC1(thash, unsigned long,
	     "break " __stringify(HYPERPRIVOP_THASH) "\n");
DEFINE_FUNC1(get_cpuid, int,
	     "break " __stringify(HYPERPRIVOP_GET_CPUID) "\n");
DEFINE_FUNC1(get_pmd, int,
	     "break " __stringify(HYPERPRIVOP_GET_PMD) "\n");
DEFINE_FUNC1(get_rr, unsigned long,
	     "break " __stringify(HYPERPRIVOP_GET_RR) "\n");
612
/*
 * void xen_privop_ssm_i(void)
 *
 * int masked = !xen_get_virtual_psr_i();
 *	// masked = *(*XEN_MAPPEDREGS->interrupt_mask_addr)
 * xen_set_virtual_psr_i(1)
 *	// *(*XEN_MAPPEDREGS->interrupt_mask_addr) = 0
 * // compiler barrier
 * if (masked) {
 *	uint8_t* pend_int_addr =
 *		(uint8_t*)(*XEN_MAPPEDREGS->interrupt_mask_addr) - 1;
 *	uint8_t pending = *pend_int_addr;
 *	if (pending)
 *		XEN_HYPER_SSM_I
 * }
 */
/* 4 bundles */
DEFINE_VOID_FUNC0(ssm_i,
		  "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
		  ";;\n"
		  "ld8 r8 = [r8]\n"		/* r8 = XEN_PSR_I_ADDR */
		  ";;\n"
		  "ld1.acq r9 = [r8]\n"		/* r9 = XEN_PSR_I */
		  ";;\n"
		  "st1.rel [r8] = r0, -1\n"	/* psr_i = 0. enable interrupt
						 * r8 = XEN_PSR_I_ADDR - 1
						 *    = pend_int_addr
						 */
		  "cmp.eq.unc p0, p6 = r9, r0\n"/* p6 = !XEN_PSR_I
						 * previously interrupt
						 * masked?
						 */
		  ";;\n"
		  "(p6) ld1.acq r8 = [r8]\n"	/* r8 = xen_pend_int */
		  ";;\n"
		  "(p6) cmp.eq.unc p6, p7 = r8, r0\n"	/*interrupt pending?*/
		  ";;\n"
		  /* issue hypercall to get interrupt */
		  "(p7) break " __stringify(HYPERPRIVOP_SSM_I) "\n"
		  ";;\n");
653
/*
 * psr_i_addr_addr = XEN_MAPPEDREGS->interrupt_mask_addr
 *		   = XEN_PSR_I_ADDR_ADDR;
 * psr_i_addr = *psr_i_addr_addr;
 * *psr_i_addr = 1;
 */
/* 2 bundles */
DEFINE_VOID_FUNC0(rsm_i,
		  "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
						/* r8 = XEN_PSR_I_ADDR */
		  "mov r9 = 1\n"
		  ";;\n"
		  "ld8 r8 = [r8]\n"		/* r8 = XEN_PSR_I */
		  ";;\n"
		  "st1.rel [r8] = r9\n");	/* XEN_PSR_I = 1 */

/* five-argument stub; declared by hand since no DEFINE_* macro fits */
extern void
xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
		   unsigned long val2, unsigned long val3,
		   unsigned long val4);
__DEFINE_FUNC(set_rr0_to_rr4,
	      "break " __stringify(HYPERPRIVOP_SET_RR0_TO_RR4) "\n");
676
677
extern unsigned long xen_getreg(int regnum);
/* If r8 holds register id 'id', issue the matching GET hyperprivop
 * and return through b6; otherwise fall through to the next test. */
#define __DEFINE_GET_REG(id, privop)					\
	"mov r2 = " __stringify(_IA64_REG_ ## id) "\n"			\
	";;\n"								\
	"cmp.eq p6, p0 = r2, r8\n"					\
	";;\n"								\
	"(p6) break " __stringify(HYPERPRIVOP_GET_ ## privop) "\n"	\
	"(p6) br.cond.sptk.many b6\n"					\
	";;\n"

/* assembly dispatcher for the paravirt getreg entry point */
__DEFINE_FUNC(getreg,
	      __DEFINE_GET_REG(PSR, PSR)

	      /* get_itc */
	      "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
	      ";;\n"
	      "cmp.eq p6, p0 = r2, r8\n"
	      ";;\n"
	      "(p6) br.cond.spnt xen_get_itc\n"
	      ";;\n"

	      /* get itm */
	      "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n"
	      ";;\n"
	      "cmp.eq p6, p0 = r2, r8\n"
	      ";;\n"
	      "(p6) br.cond.spnt xen_get_itm_with_offset\n"
	      ";;\n"

	      __DEFINE_GET_REG(CR_IVR, IVR)
	      __DEFINE_GET_REG(CR_TPR, TPR)

	      /* fall back */
	      "movl r2 = ia64_native_getreg_func\n"
	      ";;\n"
	      "mov b7 = r2\n"
	      ";;\n"
	      "br.cond.sptk.many b7\n");
716
extern void xen_setreg(int regnum, unsigned long val);
/* If r9 holds register id 'id', issue the matching SET hyperprivop
 * and return through b6; otherwise fall through to the next test.
 * NOTE(review): the comparison registers differ between this macro (r9)
 * and the itm/itc tests below (r8) -- confirm against the paravirt stub
 * calling convention in paravirt_privop.h. */
#define __DEFINE_SET_REG(id, privop)					\
	"mov r2 = " __stringify(_IA64_REG_ ## id) "\n"			\
	";;\n"								\
	"cmp.eq p6, p0 = r2, r9\n"					\
	";;\n"								\
	"(p6) break " __stringify(HYPERPRIVOP_ ## privop) "\n"		\
	"(p6) br.cond.sptk.many b6\n"					\
	";;\n"

/* assembly dispatcher for the paravirt setreg entry point */
__DEFINE_FUNC(setreg,
	      /* kr0 .. kr 7*/
	      /*
	       * if (_IA64_REG_AR_KR0 <= regnum &&
	       *     regnum <= _IA64_REG_AR_KR7) {
	       *     register __index asm ("r8") = regnum - _IA64_REG_AR_KR0
	       *     register __val asm ("r9") = val
	       *    "break HYPERPRIVOP_SET_KR"
	       * }
	       */
	      "mov r17 = r9\n"
	      "mov r2 = " __stringify(_IA64_REG_AR_KR0) "\n"
	      ";;\n"
	      "cmp.ge p6, p0 = r9, r2\n"
	      "sub r17 = r17, r2\n"
	      ";;\n"
	      "(p6) cmp.ge.unc p7, p0 = "
	      __stringify(_IA64_REG_AR_KR7) " - " __stringify(_IA64_REG_AR_KR0)
	      ", r17\n"
	      ";;\n"
	      "(p7) mov r9 = r8\n"
	      ";;\n"
	      "(p7) mov r8 = r17\n"
	      "(p7) break " __stringify(HYPERPRIVOP_SET_KR) "\n"

	      /* set itm */
	      "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n"
	      ";;\n"
	      "cmp.eq p6, p0 = r2, r8\n"
	      ";;\n"
	      "(p6) br.cond.spnt xen_set_itm_with_offset\n"

	      /* set itc */
	      "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
	      ";;\n"
	      "cmp.eq p6, p0 = r2, r8\n"
	      ";;\n"
	      "(p6) br.cond.spnt xen_set_itc\n"

	      __DEFINE_SET_REG(CR_TPR, SET_TPR)
	      __DEFINE_SET_REG(CR_EOI, EOI)

	      /* fall back */
	      "movl r2 = ia64_native_setreg_func\n"
	      ";;\n"
	      "mov b7 = r2\n"
	      ";;\n"
	      "br.cond.sptk.many b7\n");
775#endif
776
/* Intrinsic hooks; the xen_* symbols resolve to either the C versions
 * or the assembly stubs depending on ASM_SUPPORTED. */
static const struct pv_cpu_ops xen_cpu_ops __initconst = {
	.fc		= xen_fc,
	.thash		= xen_thash,
	.get_cpuid	= xen_get_cpuid,
	.get_pmd	= xen_get_pmd,
	.getreg		= xen_getreg,
	.setreg		= xen_setreg,
	.ptcga		= xen_ptcga,
	.get_rr		= xen_get_rr,
	.set_rr		= xen_set_rr,
	.set_rr0_to_rr4	= xen_set_rr0_to_rr4,
	.ssm_i		= xen_ssm_i,
	.rsm_i		= xen_rsm_i,
	.get_psr_i	= xen_get_psr_i,
	.intrin_local_irq_restore
			= xen_intrin_local_irq_restore,
};
794
/******************************************************************************
 * replacement of hand written assembly codes.
 */

/* entry labels defined in the Xen ivt/entry assembly */
extern char xen_switch_to;
extern char xen_leave_syscall;
extern char xen_work_processed_syscall;
extern char xen_leave_kernel;

/* table consumed by paravirt_cpu_asm_init() to redirect asm entry points */
const struct pv_cpu_asm_switch xen_cpu_asm_switch = {
	.switch_to		= (unsigned long)&xen_switch_to,
	.leave_syscall		= (unsigned long)&xen_leave_syscall,
	.work_processed_syscall	= (unsigned long)&xen_work_processed_syscall,
	.leave_kernel		= (unsigned long)&xen_leave_kernel,
};
810
811/***************************************************************************
812 * pv_iosapic_ops
813 * iosapic read/write hooks.
814 */
/* PC-AT compatibility setup is not needed under Xen. */
static void
xen_pcat_compat_init(void)
{
	/* nothing */
}

/* Xen handles interrupt delivery itself, so no irq_chip is exposed. */
static struct irq_chip*
xen_iosapic_get_irq_chip(unsigned long trigger)
{
	return NULL;
}
826
827static unsigned int
828xen_iosapic_read(char __iomem *iosapic, unsigned int reg)
829{
830	struct physdev_apic apic_op;
831	int ret;
832
833	apic_op.apic_physbase = (unsigned long)iosapic -
834					__IA64_UNCACHED_OFFSET;
835	apic_op.reg = reg;
836	ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
837	if (ret)
838		return ret;
839	return apic_op.value;
840}
841
/* Write an IOSAPIC register through the hypervisor (best-effort: the
 * hypercall result is deliberately ignored, as the hook returns void). */
static void
xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
{
	struct physdev_apic apic_op;

	apic_op.apic_physbase = (unsigned long)iosapic -
					__IA64_UNCACHED_OFFSET;
	apic_op.reg = reg;
	apic_op.value = val;
	HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
}

/* IOSAPIC access hooks installed into pv_iosapic_ops at boot. */
static struct pv_iosapic_ops xen_iosapic_ops __initdata = {
	.pcat_compat_init = xen_pcat_compat_init,
	.__get_irq_chip = xen_iosapic_get_irq_chip,

	.__read = xen_iosapic_read,
	.__write = xen_iosapic_write,
};
861
862/***************************************************************************
863 * pv_ops initialization
864 */
865
/*
 * Install all Xen paravirt operation tables.  Called once during early
 * boot; must run before any of the pv_* hooks are used.
 */
void __init
xen_setup_pv_ops(void)
{
	xen_info_init();	/* fix up kernel_rpl before copying */
	pv_info = xen_info;
	pv_init_ops = xen_init_ops;
	pv_fsys_data = xen_fsys_data;
	pv_patchdata = xen_patchdata;
	pv_cpu_ops = xen_cpu_ops;
	pv_iosapic_ops = xen_iosapic_ops;
	pv_irq_ops = xen_irq_ops;
	pv_time_ops = xen_time_ops;

	paravirt_cpu_asm_init(&xen_cpu_asm_switch);
}
881
882#ifdef ASM_SUPPORTED
883/***************************************************************************
884 * binary pacthing
885 * pv_init_ops.patch_bundle
886 */
887
/* one-bundle stub reading a virtualized register via GET_<privop> */
#define DEFINE_FUNC_GETREG(name, privop)				\
	DEFINE_FUNC0(get_ ## name,					\
		     "break "__stringify(HYPERPRIVOP_GET_ ## privop) "\n")

DEFINE_FUNC_GETREG(psr, PSR);
DEFINE_FUNC_GETREG(eflag, EFLAG);
DEFINE_FUNC_GETREG(ivr, IVR);
DEFINE_FUNC_GETREG(tpr, TPR);

/* stub writing ar.k<n>: index in r8, value in r9, per SET_KR convention */
#define DEFINE_FUNC_SET_KR(n)						\
	DEFINE_VOID_FUNC0(set_kr ## n,					\
			  ";;\n"					\
			  "mov r9 = r8\n"				\
			  "mov r8 = " #n "\n"				\
			  "break " __stringify(HYPERPRIVOP_SET_KR) "\n")

DEFINE_FUNC_SET_KR(0);
DEFINE_FUNC_SET_KR(1);
DEFINE_FUNC_SET_KR(2);
DEFINE_FUNC_SET_KR(3);
DEFINE_FUNC_SET_KR(4);
DEFINE_FUNC_SET_KR(5);
DEFINE_FUNC_SET_KR(6);
DEFINE_FUNC_SET_KR(7);

/* one-bundle stub writing a virtualized register via <privop> */
#define __DEFINE_FUNC_SETREG(name, privop)				\
	DEFINE_VOID_FUNC0(name,						\
			  "break "__stringify(HYPERPRIVOP_ ## privop) "\n")

#define DEFINE_FUNC_SETREG(name, privop)			\
	__DEFINE_FUNC_SETREG(set_ ## name, SET_ ## privop)

DEFINE_FUNC_SETREG(eflag, EFLAG);
DEFINE_FUNC_SETREG(tpr, TPR);
__DEFINE_FUNC_SETREG(eoi, EOI);
923
extern const char xen_check_events[];
extern const char __xen_intrin_local_irq_restore_direct_start[];
extern const char __xen_intrin_local_irq_restore_direct_end[];
extern const unsigned long __xen_intrin_local_irq_restore_direct_reloc;

/*
 * Out-of-line slow path (xen_check_events) plus the patchable fast path
 * for intrin_local_irq_restore.  The fast path is copied over call sites
 * by xen_patch_bundle(); the brl at label 99 is relocated afterwards via
 * __xen_intrin_local_irq_restore_direct_reloc.
 */
asm (
	".align 32\n"
	".proc xen_check_events\n"
	"xen_check_events:\n"
	/* masked = 0
	 * r9 = masked_addr - 1
	 *    = pending_intr_addr
	 */
	"st1.rel [r9] = r0, -1\n"
	";;\n"
	/* r8 = pending_intr */
	"ld1.acq r11 = [r9]\n"
	";;\n"
	/* p9 = interrupt pending? */
	"cmp.ne p9, p10 = r11, r0\n"
	";;\n"
	"(p10) mf\n"
	/* issue hypercall to trigger interrupt */
	"(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n"
	"br.cond.sptk.many b6\n"
	".endp xen_check_events\n"
	"\n"
	".align 32\n"
	".proc __xen_intrin_local_irq_restore_direct\n"
	"__xen_intrin_local_irq_restore_direct:\n"
	"__xen_intrin_local_irq_restore_direct_start:\n"
	"1:\n"
	"{\n"
	"cmp.ne p6, p7 = r8, r0\n"
	"mov r17 = ip\n" /* get ip to calc return address */
	"mov r9 = "__stringify(XEN_PSR_I_ADDR_ADDR) "\n"
	";;\n"
	"}\n"
	"{\n"
	/* r9 = XEN_PSR_I_ADDR */
	"ld8 r9 = [r9]\n"
	";;\n"
	/* r10 = masked previous value */
	"(p6) ld1.acq r10 = [r9]\n"
	"adds r17 =  1f - 1b, r17\n" /* calculate return address */
	";;\n"
	"}\n"
	"{\n"
	/* p8 = !masked interrupt masked previously? */
	"(p6) cmp.ne.unc p8, p0 = r10, r0\n"
	"\n"
	/* p7 = else clause */
	"(p7) mov r11 = 1\n"
	";;\n"
	"(p8) mov b6 = r17\n" /* set return address */
	"}\n"
	"{\n"
	/* masked = 1 */
	"(p7) st1.rel [r9] = r11\n"
	"\n"
	"[99:]\n"
	"(p8) brl.cond.dptk.few xen_check_events\n"
	"}\n"
	/* pv calling stub is 5 bundles. fill nop to adjust return address */
	"{\n"
	"nop 0\n"
	"nop 0\n"
	"nop 0\n"
	"}\n"
	"1:\n"
	"__xen_intrin_local_irq_restore_direct_end:\n"
	".endp __xen_intrin_local_irq_restore_direct\n"
	"\n"
	".align 8\n"
	"__xen_intrin_local_irq_restore_direct_reloc:\n"
	"data8 99b\n"
);
1001
/* Table mapping paravirt patch types to the asm stub bodies that the
 * binary patcher copies over call sites (see xen_patch_bundle()). */
static struct paravirt_patch_bundle_elem xen_patch_bundle_elems[]
__initdata_or_module =
{
#define XEN_PATCH_BUNDLE_ELEM(name, type)		\
	{						\
		(void*)xen_ ## name ## _direct_start,	\
		(void*)xen_ ## name ## _direct_end,	\
		PARAVIRT_PATCH_TYPE_ ## type,		\
	}

	XEN_PATCH_BUNDLE_ELEM(fc, FC),
	XEN_PATCH_BUNDLE_ELEM(thash, THASH),
	XEN_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID),
	XEN_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD),
	XEN_PATCH_BUNDLE_ELEM(ptcga, PTCGA),
	XEN_PATCH_BUNDLE_ELEM(get_rr, GET_RR),
	XEN_PATCH_BUNDLE_ELEM(set_rr, SET_RR),
	XEN_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4),
	XEN_PATCH_BUNDLE_ELEM(ssm_i, SSM_I),
	XEN_PATCH_BUNDLE_ELEM(rsm_i, RSM_I),
	XEN_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I),
	{
		(void*)__xen_intrin_local_irq_restore_direct_start,
		(void*)__xen_intrin_local_irq_restore_direct_end,
		PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE,
	},

#define XEN_PATCH_BUNDLE_ELEM_GETREG(name, reg)			\
	{							\
		xen_get_ ## name ## _direct_start,		\
		xen_get_ ## name ## _direct_end,		\
		PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \
	}

	XEN_PATCH_BUNDLE_ELEM_GETREG(psr, PSR),
	XEN_PATCH_BUNDLE_ELEM_GETREG(eflag, AR_EFLAG),

	XEN_PATCH_BUNDLE_ELEM_GETREG(ivr, CR_IVR),
	XEN_PATCH_BUNDLE_ELEM_GETREG(tpr, CR_TPR),

	XEN_PATCH_BUNDLE_ELEM_GETREG(itc, AR_ITC),
	XEN_PATCH_BUNDLE_ELEM_GETREG(itm_with_offset, CR_ITM),


#define __XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg)		\
	{							\
		xen_ ## name ## _direct_start,			\
		xen_ ## name ## _direct_end,			\
		PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \
	}

#define XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg)			\
	__XEN_PATCH_BUNDLE_ELEM_SETREG(set_ ## name, reg)

	XEN_PATCH_BUNDLE_ELEM_SETREG(kr0, AR_KR0),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr1, AR_KR1),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr2, AR_KR2),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr3, AR_KR3),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr4, AR_KR4),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr5, AR_KR5),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr6, AR_KR6),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr7, AR_KR7),

	XEN_PATCH_BUNDLE_ELEM_SETREG(eflag, AR_EFLAG),
	XEN_PATCH_BUNDLE_ELEM_SETREG(tpr, CR_TPR),
	__XEN_PATCH_BUNDLE_ELEM_SETREG(eoi, CR_EOI),

	XEN_PATCH_BUNDLE_ELEM_SETREG(itc, AR_ITC),
	XEN_PATCH_BUNDLE_ELEM_SETREG(itm_with_offset, CR_ITM),
};
1072
1073static unsigned long __init_or_module
1074xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type)
1075{
1076	const unsigned long nelems = sizeof(xen_patch_bundle_elems) /
1077		sizeof(xen_patch_bundle_elems[0]);
1078	unsigned long used;
1079	const struct paravirt_patch_bundle_elem *found;
1080
1081	used = __paravirt_patch_apply_bundle(sbundle, ebundle, type,
1082					     xen_patch_bundle_elems, nelems,
1083					     &found);
1084
1085	if (found == NULL)
1086		/* fallback */
1087		return ia64_native_patch_bundle(sbundle, ebundle, type);
1088	if (used == 0)
1089		return used;
1090
1091	/* relocation */
1092	switch (type) {
1093	case PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE: {
1094		unsigned long reloc =
1095			__xen_intrin_local_irq_restore_direct_reloc;
1096		unsigned long reloc_offset = reloc - (unsigned long)
1097			__xen_intrin_local_irq_restore_direct_start;
1098		unsigned long tag = (unsigned long)sbundle + reloc_offset;
1099		paravirt_patch_reloc_brl(tag, xen_check_events);
1100		break;
1101	}
1102	default:
1103		/* nothing */
1104		break;
1105	}
1106	return used;
1107}
#endif /* ASM_SUPPORTED */
1109
/* Branch-patch targets: redirect hand-written asm call sites to the
 * matching Xen entry points (see xen_patch_branch()). */
const struct paravirt_patch_branch_target xen_branch_target[]
__initconst = {
#define PARAVIRT_BR_TARGET(name, type)			\
	{						\
		&xen_ ## name,				\
		PARAVIRT_PATCH_TYPE_BR_ ## type,	\
	}
	PARAVIRT_BR_TARGET(switch_to, SWITCH_TO),
	PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL),
	PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL),
	PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL),
};
1122
1123static void __init
1124xen_patch_branch(unsigned long tag, unsigned long type)
1125{
1126	const unsigned long nelem =
1127		sizeof(xen_branch_target) / sizeof(xen_branch_target[0]);
1128	__paravirt_patch_apply_branch(tag, type, xen_branch_target, nelem);
1129}
1130