• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/lguest/
1/*P:200 This contains all the /dev/lguest code, whereby the userspace launcher
2 * controls and communicates with the Guest.  For example, the first write will
3 * tell us the Guest's memory layout and entry point.  A read will run the
4 * Guest until something happens, such as a signal or the Guest doing a NOTIFY
5 * out to the Launcher.
6:*/
7#include <linux/uaccess.h>
8#include <linux/miscdevice.h>
9#include <linux/fs.h>
10#include <linux/sched.h>
11#include <linux/eventfd.h>
12#include <linux/file.h>
13#include <linux/slab.h>
14#include "lg.h"
15
16/*L:056
17 * Before we move on, let's jump ahead and look at what the kernel does when
18 * it needs to look up the eventfds.  That will complete our picture of how we
19 * use RCU.
20 *
21 * The notification value is in cpu->pending_notify: we return true if it went
22 * to an eventfd.
23 */
24bool send_notify_to_eventfd(struct lg_cpu *cpu)
25{
26	unsigned int i;
27	struct lg_eventfd_map *map;
28
29	/*
30	 * This "rcu_read_lock()" helps track when someone is still looking at
31	 * the (RCU-using) eventfds array.  It's not actually a lock at all;
32	 * indeed it's a noop in many configurations.  (You didn't expect me to
33	 * explain all the RCU secrets here, did you?)
34	 */
35	rcu_read_lock();
36	/*
37	 * rcu_dereference is the counter-side of rcu_assign_pointer(); it
38	 * makes sure we don't access the memory pointed to by
39	 * cpu->lg->eventfds before cpu->lg->eventfds is set.  Sounds crazy,
40	 * but Alpha allows this!  Paul McKenney points out that a really
41	 * aggressive compiler could have the same effect:
42	 *   http://lists.ozlabs.org/pipermail/lguest/2009-July/001560.html
43	 *
44	 * So play safe, use rcu_dereference to get the rcu-protected pointer:
45	 */
46	map = rcu_dereference(cpu->lg->eventfds);
47	/*
48	 * Simple array search: even if they add an eventfd while we do this,
49	 * we'll continue to use the old array and just won't see the new one.
50	 */
51	for (i = 0; i < map->num; i++) {
52		if (map->map[i].addr == cpu->pending_notify) {
53			eventfd_signal(map->map[i].event, 1);
54			cpu->pending_notify = 0;
55			break;
56		}
57	}
58	/* We're done with the rcu-protected variable cpu->lg->eventfds. */
59	rcu_read_unlock();
60
61	/* If we cleared the notification, it's because we found a match. */
62	return cpu->pending_notify == 0;
63}
64
65/*L:055
66 * One of the more tricksy tricks in the Linux Kernel is a technique called
67 * Read Copy Update.  Since one point of lguest is to teach lguest journeyers
68 * about kernel coding, I use it here.  (In case you're curious, other purposes
69 * include learning about virtualization and instilling a deep appreciation for
70 * simplicity and puppies).
71 *
72 * We keep a simple array which maps LHCALL_NOTIFY values to eventfds, but we
73 * add new eventfds without ever blocking readers from accessing the array.
74 * The current Launcher only does this during boot, so that never happens.  But
75 * Read Copy Update is cool, and adding a lock risks damaging even more puppies
76 * than this code does.
77 *
78 * We allocate a brand new one-larger array, copy the old one and add our new
79 * element.  Then we make the lg eventfd pointer point to the new array.
80 * That's the easy part: now we need to free the old one, but we need to make
81 * sure no slow CPU somewhere is still looking at it.  That's what
82 * synchronize_rcu does for us: waits until every CPU has indicated that it has
83 * moved on to know it's no longer using the old one.
84 *
85 * If that's unclear, see http://en.wikipedia.org/wiki/Read-copy-update.
86 */
87static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
88{
89	struct lg_eventfd_map *new, *old = lg->eventfds;
90
91	/*
92	 * We don't allow notifications on value 0 anyway (pending_notify of
93	 * 0 means "nothing pending").
94	 */
95	if (!addr)
96		return -EINVAL;
97
98	/*
99	 * Replace the old array with the new one, carefully: others can
100	 * be accessing it at the same time.
101	 */
102	new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1),
103		      GFP_KERNEL);
104	if (!new)
105		return -ENOMEM;
106
107	/* First make identical copy. */
108	memcpy(new->map, old->map, sizeof(old->map[0]) * old->num);
109	new->num = old->num;
110
111	/* Now append new entry. */
112	new->map[new->num].addr = addr;
113	new->map[new->num].event = eventfd_ctx_fdget(fd);
114	if (IS_ERR(new->map[new->num].event)) {
115		int err =  PTR_ERR(new->map[new->num].event);
116		kfree(new);
117		return err;
118	}
119	new->num++;
120
121	/*
122	 * Now put new one in place: rcu_assign_pointer() is a fancy way of
123	 * doing "lg->eventfds = new", but it uses memory barriers to make
124	 * absolutely sure that the contents of "new" written above is nailed
125	 * down before we actually do the assignment.
126	 *
127	 * We have to think about these kinds of things when we're operating on
128	 * live data without locks.
129	 */
130	rcu_assign_pointer(lg->eventfds, new);
131
132	/*
133	 * We're not in a big hurry.  Wait until noone's looking at old
134	 * version, then free it.
135	 */
136	synchronize_rcu();
137	kfree(old);
138
139	return 0;
140}
141
142/*L:052
143 * Receiving notifications from the Guest is usually done by attaching a
144 * particular LHCALL_NOTIFY value to an event filedescriptor.  The eventfd will
145 * become readable when the Guest does an LHCALL_NOTIFY with that value.
146 *
147 * This is really convenient for processing each virtqueue in a separate
148 * thread.
149 */
150static int attach_eventfd(struct lguest *lg, const unsigned long __user *input)
151{
152	unsigned long addr, fd;
153	int err;
154
155	if (get_user(addr, input) != 0)
156		return -EFAULT;
157	input++;
158	if (get_user(fd, input) != 0)
159		return -EFAULT;
160
161	/*
162	 * Just make sure two callers don't add eventfds at once.  We really
163	 * only need to lock against callers adding to the same Guest, so using
164	 * the Big Lguest Lock is overkill.  But this is setup, not a fast path.
165	 */
166	mutex_lock(&lguest_lock);
167	err = add_eventfd(lg, addr, fd);
168	mutex_unlock(&lguest_lock);
169
170	return err;
171}
172
173/*L:050
174 * Sending an interrupt is done by writing LHREQ_IRQ and an interrupt
175 * number to /dev/lguest.
176 */
177static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
178{
179	unsigned long irq;
180
181	if (get_user(irq, input) != 0)
182		return -EFAULT;
183	if (irq >= LGUEST_IRQS)
184		return -EINVAL;
185
186	/*
187	 * Next time the Guest runs, the core code will see if it can deliver
188	 * this interrupt.
189	 */
190	set_interrupt(cpu, irq);
191	return 0;
192}
193
194/*L:040
195 * Once our Guest is initialized, the Launcher makes it run by reading
196 * from /dev/lguest.
197 */
198static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
199{
200	struct lguest *lg = file->private_data;
201	struct lg_cpu *cpu;
202	unsigned int cpu_id = *o;
203
204	/* You must write LHREQ_INITIALIZE first! */
205	if (!lg)
206		return -EINVAL;
207
208	/* Watch out for arbitrary vcpu indexes! */
209	if (cpu_id >= lg->nr_cpus)
210		return -EINVAL;
211
212	cpu = &lg->cpus[cpu_id];
213
214	/* If you're not the task which owns the Guest, go away. */
215	if (current != cpu->tsk)
216		return -EPERM;
217
218	/* If the Guest is already dead, we indicate why */
219	if (lg->dead) {
220		size_t len;
221
222		/* lg->dead either contains an error code, or a string. */
223		if (IS_ERR(lg->dead))
224			return PTR_ERR(lg->dead);
225
226		/* We can only return as much as the buffer they read with. */
227		len = min(size, strlen(lg->dead)+1);
228		if (copy_to_user(user, lg->dead, len) != 0)
229			return -EFAULT;
230		return len;
231	}
232
233	/*
234	 * If we returned from read() last time because the Guest sent I/O,
235	 * clear the flag.
236	 */
237	if (cpu->pending_notify)
238		cpu->pending_notify = 0;
239
240	/* Run the Guest until something interesting happens. */
241	return run_guest(cpu, (unsigned long __user *)user);
242}
243
244/*L:025
245 * This actually initializes a CPU.  For the moment, a Guest is only
246 * uniprocessor, so "id" is always 0.
247 */
static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
{
	/* The lguest struct has a fixed-size cpus[] array: bounds-check id. */
	if (id >= ARRAY_SIZE(cpu->lg->cpus))
		return -EINVAL;

	/*
	 * Set up this CPU's id, and pointer back to the lguest struct: the
	 * container_of() arithmetic works because "cpu" is element "id" of
	 * the embedded cpus[] array.
	 */
	cpu->id = id;
	cpu->lg = container_of((cpu - id), struct lguest, cpus[0]);
	cpu->lg->nr_cpus++;

	/* Each CPU has a timer it can set. */
	init_clockdev(cpu);

	/*
	 * We need a complete page for the Guest registers: they are accessible
	 * to the Guest and we can only grant it access to whole pages.
	 */
	cpu->regs_page = get_zeroed_page(GFP_KERNEL);
	if (!cpu->regs_page)
		return -ENOMEM;

	/* We actually put the registers at the bottom of the page. */
	cpu->regs = (void *)cpu->regs_page + PAGE_SIZE - sizeof(*cpu->regs);

	/*
	 * Now we initialize the Guest's registers, handing it the start
	 * address.
	 */
	lguest_arch_setup_regs(cpu, start_ip);

	/*
	 * We keep a pointer to the Launcher task (ie. current task) for when
	 * other Guests want to wake this one (eg. console input).  Note:
	 * cpu->tsk must be set before the get_task_mm(cpu->tsk) call below.
	 */
	cpu->tsk = current;

	/*
	 * We need to keep a pointer to the Launcher's memory map, because if
	 * the Launcher dies we need to clean it up.  If we don't keep a
	 * reference, it is destroyed before close() is called.  (get_task_mm()
	 * takes a reference which close() later drops with mmput().)
	 */
	cpu->mm = get_task_mm(cpu->tsk);

	/*
	 * We remember which CPU's pages this Guest used last, for optimization
	 * when the same Guest runs on the same CPU twice.
	 */
	cpu->last_pages = NULL;

	/* No error == success. */
	return 0;
}
301
302/*L:020
303 * The initialization write supplies 3 pointer sized (32 or 64 bit) values (in
304 * addition to the LHREQ_INITIALIZE value).  These are:
305 *
306 * base: The start of the Guest-physical memory inside the Launcher memory.
307 *
308 * pfnlimit: The highest (Guest-physical) page number the Guest should be
309 * allowed to access.  The Guest memory lives inside the Launcher, so it sets
310 * this to ensure the Guest can only reach its own memory.
311 *
312 * start: The first instruction to execute ("eip" in x86-speak).
313 */
314static int initialize(struct file *file, const unsigned long __user *input)
315{
316	/* "struct lguest" contains all we (the Host) know about a Guest. */
317	struct lguest *lg;
318	int err;
319	unsigned long args[3];
320
321	/*
322	 * We grab the Big Lguest lock, which protects against multiple
323	 * simultaneous initializations.
324	 */
325	mutex_lock(&lguest_lock);
326	/* You can't initialize twice!  Close the device and start again... */
327	if (file->private_data) {
328		err = -EBUSY;
329		goto unlock;
330	}
331
332	if (copy_from_user(args, input, sizeof(args)) != 0) {
333		err = -EFAULT;
334		goto unlock;
335	}
336
337	lg = kzalloc(sizeof(*lg), GFP_KERNEL);
338	if (!lg) {
339		err = -ENOMEM;
340		goto unlock;
341	}
342
343	lg->eventfds = kmalloc(sizeof(*lg->eventfds), GFP_KERNEL);
344	if (!lg->eventfds) {
345		err = -ENOMEM;
346		goto free_lg;
347	}
348	lg->eventfds->num = 0;
349
350	/* Populate the easy fields of our "struct lguest" */
351	lg->mem_base = (void __user *)args[0];
352	lg->pfn_limit = args[1];
353
354	/* This is the first cpu (cpu 0) and it will start booting at args[2] */
355	err = lg_cpu_start(&lg->cpus[0], 0, args[2]);
356	if (err)
357		goto free_eventfds;
358
359	/*
360	 * Initialize the Guest's shadow page tables, using the toplevel
361	 * address the Launcher gave us.  This allocates memory, so can fail.
362	 */
363	err = init_guest_pagetable(lg);
364	if (err)
365		goto free_regs;
366
367	/* We keep our "struct lguest" in the file's private_data. */
368	file->private_data = lg;
369
370	mutex_unlock(&lguest_lock);
371
372	/* And because this is a write() call, we return the length used. */
373	return sizeof(args);
374
375free_regs:
376	free_page(lg->cpus[0].regs_page);
377free_eventfds:
378	kfree(lg->eventfds);
379free_lg:
380	kfree(lg);
381unlock:
382	mutex_unlock(&lguest_lock);
383	return err;
384}
385
386/*L:010
387 * The first operation the Launcher does must be a write.  All writes
388 * start with an unsigned long number: for the first write this must be
389 * LHREQ_INITIALIZE to set up the Guest.  After that the Launcher can use
390 * writes of other values to send interrupts or set up receipt of notifications.
391 *
392 * Note that we overload the "offset" in the /dev/lguest file to indicate what
393 * CPU number we're dealing with.  Currently this is always 0 since we only
394 * support uniprocessor Guests, but you can see the beginnings of SMP support
395 * here.
396 */
397static ssize_t write(struct file *file, const char __user *in,
398		     size_t size, loff_t *off)
399{
400	/*
401	 * Once the Guest is initialized, we hold the "struct lguest" in the
402	 * file private data.
403	 */
404	struct lguest *lg = file->private_data;
405	const unsigned long __user *input = (const unsigned long __user *)in;
406	unsigned long req;
407	struct lg_cpu *uninitialized_var(cpu);
408	unsigned int cpu_id = *off;
409
410	/* The first value tells us what this request is. */
411	if (get_user(req, input) != 0)
412		return -EFAULT;
413	input++;
414
415	/* If you haven't initialized, you must do that first. */
416	if (req != LHREQ_INITIALIZE) {
417		if (!lg || (cpu_id >= lg->nr_cpus))
418			return -EINVAL;
419		cpu = &lg->cpus[cpu_id];
420
421		/* Once the Guest is dead, you can only read() why it died. */
422		if (lg->dead)
423			return -ENOENT;
424	}
425
426	switch (req) {
427	case LHREQ_INITIALIZE:
428		return initialize(file, input);
429	case LHREQ_IRQ:
430		return user_send_irq(cpu, input);
431	case LHREQ_EVENTFD:
432		return attach_eventfd(lg, input);
433	default:
434		return -EINVAL;
435	}
436}
437
438/*L:060
439 * The final piece of interface code is the close() routine.  It reverses
440 * everything done in initialize().  This is usually called because the
441 * Launcher exited.
442 *
443 * Note that the close routine returns 0 or a negative error number: it can't
444 * really fail, but it can whine.  I blame Sun for this wart, and K&R C for
445 * letting them do it.
446:*/
447static int close(struct inode *inode, struct file *file)
448{
449	struct lguest *lg = file->private_data;
450	unsigned int i;
451
452	/* If we never successfully initialized, there's nothing to clean up */
453	if (!lg)
454		return 0;
455
456	/*
457	 * We need the big lock, to protect from inter-guest I/O and other
458	 * Launchers initializing guests.
459	 */
460	mutex_lock(&lguest_lock);
461
462	/* Free up the shadow page tables for the Guest. */
463	free_guest_pagetable(lg);
464
465	for (i = 0; i < lg->nr_cpus; i++) {
466		/* Cancels the hrtimer set via LHCALL_SET_CLOCKEVENT. */
467		hrtimer_cancel(&lg->cpus[i].hrt);
468		/* We can free up the register page we allocated. */
469		free_page(lg->cpus[i].regs_page);
470		/*
471		 * Now all the memory cleanups are done, it's safe to release
472		 * the Launcher's memory management structure.
473		 */
474		mmput(lg->cpus[i].mm);
475	}
476
477	/* Release any eventfds they registered. */
478	for (i = 0; i < lg->eventfds->num; i++)
479		eventfd_ctx_put(lg->eventfds->map[i].event);
480	kfree(lg->eventfds);
481
482	/*
483	 * If lg->dead doesn't contain an error code it will be NULL or a
484	 * kmalloc()ed string, either of which is ok to hand to kfree().
485	 */
486	if (!IS_ERR(lg->dead))
487		kfree(lg->dead);
488	/* Free the memory allocated to the lguest_struct */
489	kfree(lg);
490	/* Release lock and exit. */
491	mutex_unlock(&lguest_lock);
492
493	return 0;
494}
495
496/*L:000
497 * Welcome to our journey through the Launcher!
498 *
499 * The Launcher is the Host userspace program which sets up, runs and services
500 * the Guest.  In fact, many comments in the Drivers which refer to "the Host"
501 * doing things are inaccurate: the Launcher does all the device handling for
502 * the Guest, but the Guest can't know that.
503 *
504 * Just to confuse you: to the Host kernel, the Launcher *is* the Guest and we
505 * shall see more of that later.
506 *
507 * We begin our understanding with the Host kernel interface which the Launcher
508 * uses: reading and writing a character device called /dev/lguest.  All the
509 * work happens in the read(), write() and close() routines:
510 */
static const struct file_operations lguest_fops = {
	.owner	 = THIS_MODULE,
	.release = close,	/* Launcher exited: tear the Guest down. */
	.write	 = write,	/* Control: initialize, send irq, eventfd. */
	.read	 = read,	/* Run the Guest until something happens. */
};
517
518/*
519 * This is a textbook example of a "misc" character device.  Populate a "struct
520 * miscdevice" and register it with misc_register().
521 */
static struct miscdevice lguest_dev = {
	.minor	= MISC_DYNAMIC_MINOR,	/* Let the kernel pick a minor. */
	.name	= "lguest",		/* Appears as /dev/lguest. */
	.fops	= &lguest_fops,
};
527
/* Module init: register the /dev/lguest misc device. */
int __init lguest_device_init(void)
{
	return misc_register(&lguest_dev);
}
532
/* Module exit: unregister the misc device again. */
void __exit lguest_device_remove(void)
{
	misc_deregister(&lguest_dev);
}
537