/* $NetBSD$ */

/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Cherry G. Mathew <cherry@zyx.in>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>			/* RCS ID macro */

/*
 * Based on: x86/ipi.c
 * __KERNEL_RCSID(0, "$NetBSD$");
 */

__KERNEL_RCSID(0, "$NetBSD$");

#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/mutex.h>
#include <sys/device.h>
#include <sys/xcall.h>
#include <sys/errno.h>
#include <sys/systm.h>

#ifdef __x86_64__
#include <machine/fpu.h>
#else
#include <machine/npx.h>
#endif /* __x86_64__ */
#include <machine/frame.h>
#include <machine/segments.h>

#include <xen/intr.h>
#include <xen/intrdefs.h>
#include <xen/hypervisor.h>
#include <xen/xen-public/vcpu.h>

#ifdef __x86_64__
extern void ddb_ipi(struct trapframe);
#else
extern void ddb_ipi(int, struct trapframe);
#endif /* __x86_64__ */

static void xen_ipi_halt(struct cpu_info *, struct intrframe *);
static void xen_ipi_synch_fpu(struct cpu_info *, struct intrframe *);
static void xen_ipi_ddb(struct cpu_info *, struct intrframe *);
static void xen_ipi_xcall(struct cpu_info *, struct intrframe *);
static void xen_ipi_hvcb(struct cpu_info *, struct intrframe *);

static void (*ipifunc[XEN_NIPIS])(struct cpu_info *, struct intrframe *) =
{	/* In order of priority (see: xen/include/intrdefs.h) */
	xen_ipi_halt,
	xen_ipi_synch_fpu,
	xen_ipi_ddb,
	xen_ipi_xcall,
	xen_ipi_hvcb
};
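
/*
 * Dispatch sketch (editor note, hedged): this table assumes the
 * XEN_IPI_* constants in xen/include/intrdefs.h are single-bit masks
 * allocated in the same order, so ffs() on ci_ipis maps bit N to
 * ipifunc[N].  For example, assuming XEN_IPI_HALT == (1 << 0):
 *
 *	pending = XEN_IPI_HALT;
 *	bit = ffs(pending) - 1;		// 0
 *	(*ipifunc[bit])(ci, regs);	// calls xen_ipi_halt(ci, regs)
 */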

static void
xen_ipi_handler(struct cpu_info *ci, struct intrframe *regs)
{
	uint32_t pending;
	int bit;

	pending = atomic_swap_32(&ci->ci_ipis, 0);

	KDASSERT((pending >> XEN_NIPIS) == 0);
	while ((bit = ffs(pending)) != 0) {
		bit--;
		pending &= ~(1U << bit);	/* unsigned: no UB if bit 31 */
		ci->ci_ipi_events[bit].ev_count++;
		if (ipifunc[bit] != NULL) {
			(*ipifunc[bit])(ci, regs);
		} else {
			panic("ipifunc[%d] unsupported!", bit);
			/* NOTREACHED */
		}
	}
}
106
107/* Must be called once for every cpu that expects to send/recv ipis */
108void
109xen_ipi_init(void)
110{
111	cpuid_t vcpu;
112	evtchn_port_t evtchn;
113	struct cpu_info *ci;
114
115	ci = curcpu();
116
117	vcpu = ci->ci_cpuid;
118	KASSERT(vcpu < XEN_LEGACY_MAX_VCPUS);
119
120	evtchn = bind_vcpu_to_evtch(vcpu);
121	ci->ci_ipi_evtchn = evtchn;
122
123	KASSERT(evtchn != -1 && evtchn < NR_EVENT_CHANNELS);
124
125	if (0 != event_set_handler(evtchn, (int (*)(void *))xen_ipi_handler,
126				   ci, IPL_HIGH, "ipi")) {
127		panic("event_set_handler(...) KPI violation\n");
128		/* NOTREACHED */
129	}
130
131	hypervisor_enable_event(evtchn);
132}
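
/*
 * Call-site sketch (editor note, not from this file): each vcpu is
 * expected to run xen_ipi_init() once during its MD bringup path
 * (e.g. its cpu attach/hatch code) before any other cpu targets it:
 *
 *	xen_ipi_init();			binds the evtchn, installs handler
 *	...
 *	xen_send_ipi(ci, XEN_IPI_KICK);	now legal from a remote cpu
 */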

/* Prefer this accessor over the global variable. */
static inline u_int
max_cpus(void)
{
	return maxcpus;
}

static inline bool /* helper */
valid_ipimask(uint32_t ipimask)
{
	uint32_t masks = XEN_IPI_HVCB | XEN_IPI_XCALL |
	    XEN_IPI_DDB | XEN_IPI_SYNCH_FPU |
	    XEN_IPI_HALT | XEN_IPI_KICK;

	return (ipimask & ~masks) == 0;
}

int
xen_send_ipi(struct cpu_info *ci, uint32_t ipimask)
{
	evtchn_port_t evtchn;

	KASSERT(ci != NULL && ci != curcpu());

	if ((ci->ci_flags & CPUF_RUNNING) == 0) {
		return ENOENT;
	}

	evtchn = ci->ci_ipi_evtchn;

	KASSERTMSG(valid_ipimask(ipimask),
	    "xen_send_ipi() called with invalid ipimask");

	atomic_or_32(&ci->ci_ipis, ipimask);
	hypervisor_notify_via_evtchn(evtchn);

	return 0;
}
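
/*
 * Usage sketch (editor addition): ask a remote running cpu to save
 * its FPU state; the mask must satisfy valid_ipimask():
 *
 *	if (xen_send_ipi(ci, XEN_IPI_SYNCH_FPU) != 0)
 *		;	// ENOENT: target cpu not marked CPUF_RUNNING
 */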

void
xen_broadcast_ipi(uint32_t ipimask)
{
	struct cpu_info *ci, *self = curcpu();
	CPU_INFO_ITERATOR cii;

	KASSERTMSG(valid_ipimask(ipimask),
	    "xen_broadcast_ipi() called with invalid ipimask");

	/*
	 * XXX-cherry: there's an implicit broadcast sending order
	 * which I dislike. Randomise this ? :-)
	 */

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci == NULL)
			continue;
		if (ci == self)
			continue;
		if (ci->ci_data.cpu_idlelwp == NULL)
			continue;
		if ((ci->ci_flags & CPUF_PRESENT) == 0)
			continue;
		if (ci->ci_flags & CPUF_RUNNING) {
			if (0 != xen_send_ipi(ci, ipimask)) {
				panic("xen_ipi of %x from %s to %s failed",
				      ipimask, cpu_name(curcpu()),
				      cpu_name(ci));
			}
		}
	}
}
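
/*
 * Usage sketch (editor addition): pull all other running cpus into
 * the kernel debugger, e.g. on DDB entry:
 *
 *	xen_broadcast_ipi(XEN_IPI_DDB);
 */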

#define PRIuCPUID	"lu" /* XXX: move this somewhere more appropriate */

static void
xen_ipi_halt(struct cpu_info *ci, struct intrframe *intrf)
{
	KASSERT(ci != NULL);
	KASSERT(ci == curcpu());
	if (HYPERVISOR_vcpu_op(VCPUOP_down, ci->ci_cpuid, NULL)) {
		panic("vcpu%" PRIuCPUID " shutdown failed", ci->ci_cpuid);
	}
}

static void
xen_ipi_synch_fpu(struct cpu_info *ci, struct intrframe *intrf)
{
	KASSERT(ci != NULL);
	KASSERT(intrf != NULL);

#ifdef __x86_64__
	fpusave_cpu(true);
#else
	npxsave_cpu(true);
#endif /* __x86_64__ */
}

static void
xen_ipi_ddb(struct cpu_info *ci, struct intrframe *intrf)
{
	KASSERT(ci != NULL);
	KASSERT(intrf != NULL);

#ifdef __x86_64__
	ddb_ipi(intrf->if_tf);
#else
	struct trapframe tf;
	tf.tf_gs = intrf->if_gs;
	tf.tf_fs = intrf->if_fs;
	tf.tf_es = intrf->if_es;
	tf.tf_ds = intrf->if_ds;
	tf.tf_edi = intrf->if_edi;
	tf.tf_esi = intrf->if_esi;
	tf.tf_ebp = intrf->if_ebp;
	tf.tf_ebx = intrf->if_ebx;
	tf.tf_ecx = intrf->if_ecx;
	tf.tf_eax = intrf->if_eax;
	tf.tf_trapno = intrf->__if_trapno;
	tf.tf_err = intrf->__if_err;
	tf.tf_eip = intrf->if_eip;
	tf.tf_cs = intrf->if_cs;
	tf.tf_eflags = intrf->if_eflags;
	tf.tf_esp = intrf->if_esp;
	tf.tf_ss = intrf->if_ss;

	/* XXX: does i386/Xen have vm86 support ?
	tf.tf_vm86_es;
	tf.tf_vm86_ds;
	tf.tf_vm86_fs;
	tf.tf_vm86_gs;
	   :XXX */

	ddb_ipi(SEL_KPL, tf);
#endif
}

static void
xen_ipi_xcall(struct cpu_info *ci, struct intrframe *intrf)
{
	KASSERT(ci != NULL);
	KASSERT(intrf != NULL);

	xc_ipi_handler();
}

/* MD wrapper for the xcall(9) callback. */
void
xc_send_ipi(struct cpu_info *ci)
{

	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);
	if (ci) {
		if (0 != xen_send_ipi(ci, XEN_IPI_XCALL)) {
			panic("xen_send_ipi(XEN_IPI_XCALL) failed");
		}
	} else {
		xen_broadcast_ipi(XEN_IPI_XCALL);
	}
}
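
/*
 * Context sketch (editor note): xc_send_ipi() is the MD hook used by
 * the MI xcall(9) machinery; callers go through the MI interface
 * rather than invoking it directly, roughly:
 *
 *	where = xc_broadcast(0, func, arg1, arg2);
 *	xc_wait(where);		// func has now run on every cpu
 */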

static void
xen_ipi_hvcb(struct cpu_info *ci, struct intrframe *intrf)
{
	KASSERT(ci != NULL);
	KASSERT(intrf != NULL);
	KASSERT(ci == curcpu());
	KASSERT(!ci->ci_vcpu->evtchn_upcall_mask);

	hypervisor_force_callback();
}