/*
 * Xen hypercall batching.
 *
 * Xen allows multiple hypercalls to be issued at once, using the
 * multicall interface.  This allows the cost of trapping into the
 * hypervisor to be amortized over several calls.
 *
 * This file implements a simple interface for multicalls.  There's a
 * per-cpu buffer of outstanding multicalls.  When you want to queue a
 * multicall for issuing, you can allocate a slot for the call and its
 * arguments, along with space for any data the arguments point to
 * (for passing pointers to structures, etc).  When the multicall is
 * actually issued, all the space for the commands and their data is
 * freed for reuse.
 *
 * Multicalls are flushed whenever any of the buffers gets full, or
 * when explicitly requested.  There's no way to get per-multicall
 * return results back; the flush BUGs if the multicall hypercall
 * itself fails, and warns if any individual call returns an error.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
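/*
 * Typical caller pattern (a minimal sketch; the real callers live
 * elsewhere in the Xen paravirt code, and xen_mc_entry() and
 * xen_mc_issue() are the inline wrappers declared in multicalls.h):
 *
 *	struct multicall_space mcs = xen_mc_entry(0);
 *
 *	MULTI_update_va_mapping(mcs.mc, va, pte, flags);
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);
 *
 * xen_mc_issue() flushes the batch immediately unless we're inside a
 * matching lazy-mode section, in which case the call stays queued
 * until the batch is flushed later.
 */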
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/debugfs.h>

#include <asm/xen/hypercall.h>

#include "multicalls.h"
#include "debugfs.h"

#define MC_BATCH	32

#define MC_DEBUG	1

#define MC_ARGS		(MC_BATCH * 16)


struct mc_buffer {
	struct multicall_entry entries[MC_BATCH];
#if MC_DEBUG
	struct multicall_entry debug[MC_BATCH];
	void *caller[MC_BATCH];
#endif
	unsigned char args[MC_ARGS];
	struct callback {
		void (*fn)(void *);
		void *data;
	} callbacks[MC_BATCH];
	unsigned mcidx, argidx, cbidx;
};

static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);

/* flush reasons 0- slots, 1- args, 2- callbacks */
enum flush_reasons
{
	FL_SLOTS,
	FL_ARGS,
	FL_CALLBACKS,

	FL_N_REASONS
};

#ifdef CONFIG_XEN_DEBUG_FS
#define NHYPERCALLS	40		/* not really */

static struct {
	unsigned histo[MC_BATCH+1];

	unsigned issued;
	unsigned arg_total;
	unsigned hypercalls;
	unsigned histo_hypercalls[NHYPERCALLS];

	unsigned flush[FL_N_REASONS];
} mc_stats;

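/*
 * Writing a non-zero value to the "zero_stats" debugfs file causes the
 * counters above to be cleared on the next stats update.
 */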
static u8 zero_stats;

static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&mc_stats, 0, sizeof(mc_stats));
		zero_stats = 0;
	}
}

static void mc_add_stats(const struct mc_buffer *mc)
{
	int i;

	check_zero();

	mc_stats.issued++;
	mc_stats.hypercalls += mc->mcidx;
	mc_stats.arg_total += mc->argidx;

	mc_stats.histo[mc->mcidx]++;
	for (i = 0; i < mc->mcidx; i++) {
		unsigned op = mc->entries[i].op;
		if (op < NHYPERCALLS)
			mc_stats.histo_hypercalls[op]++;
	}
}

static void mc_stats_flush(enum flush_reasons idx)
{
	check_zero();

	mc_stats.flush[idx]++;
}

#else  /* !CONFIG_XEN_DEBUG_FS */

static inline void mc_add_stats(const struct mc_buffer *mc)
{
}

static inline void mc_stats_flush(enum flush_reasons idx)
{
}
#endif	/* CONFIG_XEN_DEBUG_FS */

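/*
 * Issue everything queued in this CPU's buffer, then run and clear the
 * queued callbacks.  Must be called with preemption disabled.  BUGs if
 * the multicall hypercall itself fails; warns if any individual call
 * returned an error.
 */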
void xen_mc_flush(void)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	int ret = 0;
	unsigned long flags;
	int i;

	BUG_ON(preemptible());

	/* Disable interrupts in case someone comes in and queues
	   something in the middle */
	local_irq_save(flags);

	mc_add_stats(b);

	if (b->mcidx) {
#if MC_DEBUG
		memcpy(b->debug, b->entries,
		       b->mcidx * sizeof(struct multicall_entry));
#endif

		if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
			BUG();
		for (i = 0; i < b->mcidx; i++)
			if (b->entries[i].result < 0)
				ret++;

#if MC_DEBUG
		if (ret) {
			printk(KERN_ERR "%d multicall(s) failed: cpu %d\n",
			       ret, smp_processor_id());
			dump_stack();
			for (i = 0; i < b->mcidx; i++) {
				printk(KERN_DEBUG "  call %2d/%d: op=%lu arg=[%lx] result=%ld\t%pF\n",
				       i+1, b->mcidx,
				       b->debug[i].op,
				       b->debug[i].args[0],
				       b->entries[i].result,
				       b->caller[i]);
			}
		}
#endif

		b->mcidx = 0;
		b->argidx = 0;
	} else
		BUG_ON(b->argidx != 0);

	for (i = 0; i < b->cbidx; i++) {
		struct callback *cb = &b->callbacks[i];

		(*cb->fn)(cb->data);
	}
	b->cbidx = 0;

	local_irq_restore(flags);

	WARN_ON(ret);
}

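/*
 * Reserve the next multicall slot together with @args bytes of
 * (u64-aligned) argument space in this CPU's buffer, flushing the
 * buffer first if either the entries or the argument space would
 * overflow.
 */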
struct multicall_space __xen_mc_entry(size_t args)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	struct multicall_space ret;
	unsigned argidx = roundup(b->argidx, sizeof(u64));

	BUG_ON(preemptible());
	BUG_ON(b->argidx > MC_ARGS);

	if (b->mcidx == MC_BATCH ||
	    (argidx + args) > MC_ARGS) {
		mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS);
		xen_mc_flush();
		argidx = roundup(b->argidx, sizeof(u64));
	}

	ret.mc = &b->entries[b->mcidx];
#if MC_DEBUG
	b->caller[b->mcidx] = __builtin_return_address(0);
#endif
	b->mcidx++;
	ret.args = &b->args[argidx];
	b->argidx = argidx + args;

	BUG_ON(b->argidx > MC_ARGS);
	return ret;
}

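/*
 * Try to reserve @size more bytes of argument space for the most
 * recently queued multicall.  Returns { NULL, NULL } if the buffer is
 * empty, the last entry's op doesn't match @op, or there isn't enough
 * argument space left.
 */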
struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	struct multicall_space ret = { NULL, NULL };

	BUG_ON(preemptible());
	BUG_ON(b->argidx > MC_ARGS);

	if (b->mcidx == 0)
		return ret;

	if (b->entries[b->mcidx - 1].op != op)
		return ret;

	if ((b->argidx + size) > MC_ARGS)
		return ret;

	ret.mc = &b->entries[b->mcidx - 1];
	ret.args = &b->args[b->argidx];
	b->argidx += size;

	BUG_ON(b->argidx > MC_ARGS);
	return ret;
}

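/*
 * Queue a callback to be run when the current batch is flushed; the
 * callbacks run with interrupts disabled, after the hypercalls have
 * been issued.  Flushes immediately if all callback slots are in use.
 */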
void xen_mc_callback(void (*fn)(void *), void *data)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	struct callback *cb;

	if (b->cbidx == MC_BATCH) {
		mc_stats_flush(FL_CALLBACKS);
		xen_mc_flush();
	}

	cb = &b->callbacks[b->cbidx++];
	cb->fn = fn;
	cb->data = data;
}

#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_mc_debug;

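/*
 * Expose the statistics above through debugfs, in a "multicalls"
 * directory under the Xen debugfs root.
 */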
static int __init xen_mc_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_mc_debug = debugfs_create_dir("multicalls", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_mc_debug, &zero_stats);

	debugfs_create_u32("batches", 0444, d_mc_debug, &mc_stats.issued);
	debugfs_create_u32("hypercalls", 0444, d_mc_debug, &mc_stats.hypercalls);
	debugfs_create_u32("arg_total", 0444, d_mc_debug, &mc_stats.arg_total);

	xen_debugfs_create_u32_array("batch_histo", 0444, d_mc_debug,
				     mc_stats.histo, MC_BATCH);
	xen_debugfs_create_u32_array("hypercall_histo", 0444, d_mc_debug,
				     mc_stats.histo_hypercalls, NHYPERCALLS);
	xen_debugfs_create_u32_array("flush_reasons", 0444, d_mc_debug,
				     mc_stats.flush, FL_N_REASONS);

	return 0;
}
fs_initcall(xen_mc_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */