/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>

/* virtio guest is communicating with a virtual "device" that actually runs on
 * a host processor.  Memory barriers are used to control SMP effects. */
#ifdef CONFIG_SMP
/* Where possible, use SMP barriers which are more lightweight than mandatory
 * barriers, because mandatory barriers control MMIO effects on accesses
 * through relaxed memory I/O windows (which virtio does not use). */
#define virtio_mb() smp_mb()
#define virtio_rmb() smp_rmb()
#define virtio_wmb() smp_wmb()
#else
/* We must force memory ordering even if guest is UP since host could be
 * running on another CPU, but SMP barriers are defined to barrier() in that
 * configuration. So fall back to mandatory barriers instead. */
#define virtio_mb() mb()
#define virtio_rmb() rmb()
#define virtio_wmb() wmb()
#endif

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Number of free buffers */
	unsigned int num_free;
	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
			      struct scatterlist sg[],
			      unsigned int out,
			      unsigned int in,
			      gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	int i;

	desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg list into the indirect page */
	for (i = 0; i < out; i++) {
		desc[i].flags = VRING_DESC_F_NEXT;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}
	for (; i < (out + in); i++) {
		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}

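/* Expose a scatterlist to the other side: 'out' device-readable entries
 * followed by 'in' device-writable entries.  'data' is an opaque token that
 * virtqueue_get_buf() returns once the host has consumed the buffer.  Falls
 * back to a direct descriptor chain if the indirect table cannot be built.
 * Returns -ENOSPC when the ring is full, otherwise the remaining capacity. */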
int virtqueue_add_buf_gfp(struct virtqueue *_vq,
			  struct scatterlist sg[],
			  unsigned int out,
			  unsigned int in,
			  void *data,
			  gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i, avail, uninitialized_var(prev);
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

	if (vq->indirect && (out + in) > 1 && vq->num_free) {
		head = vring_add_indirect(vq, sg, out, in, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(out + in > vq->vring.num);
	BUG_ON(out + in == 0);

	if (vq->num_free < out + in) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 out + in, vq->num_free);
		if (out)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->num_free -= out + in;

	head = vq->free_head;
	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	for (; in; i = vq->vring.desc[i].next, in--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
	vq->vring.avail->ring[avail] = head;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* If we're indirect, we can fit many (assuming not OOM). */
	if (vq->indirect)
		return vq->num_free ? vq->vring.num : 0;
	return vq->num_free;
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);

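/* Publish the descriptors added since the last kick by advancing avail->idx,
 * then notify the host unless it has asked not to be notified. */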
void virtqueue_kick(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	START_USE(vq);
	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb();

	vq->vring.avail->idx += vq->num_added;
	vq->num_added = 0;

	/* Need to update avail index before checking if we should notify */
	virtio_mb();

	if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
		/* Prod other side to tell it about changes. */
		vq->notify(&vq->vq);

	END_USE(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

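/* Return a descriptor chain (and any indirect table it points to) to the
 * free list, starting at 'head'. */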
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

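/* Fetch the next buffer the host has marked used.  Stores the number of
 * bytes the host wrote in *len and returns the token that was passed to
 * virtqueue_add_buf_gfp(), or NULL if the ring is broken or nothing is
 * pending. */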
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb();

	i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
	*len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);

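/* Ask the host not to interrupt us; this is advisory only, so callbacks may
 * still arrive afterwards. */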
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

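/* Re-enable callbacks.  Returns false if more buffers are already pending,
 * in which case the caller should process them before waiting again. */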
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	virtio_mb();
	if (unlikely(more_used(vq))) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

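/* During device teardown, pull back a buffer that was added but never
 * consumed by the host.  Returns its token, or NULL when none remain. */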
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

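/* Called by the transport when the device signals this ring: runs the
 * virtqueue's callback if the host has consumed buffers since we last
 * looked. */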
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

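/* Create a virtqueue over a caller-allocated, physically contiguous 'pages'
 * area laid out as described by vring_init().  'num' must be a power of 2.
 * Returns NULL on bad parameters or allocation failure. */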
struct virtqueue *vring_new_virtqueue(unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->notify = notify;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->num_free = num;
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

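/* Tear down a virtqueue created by vring_new_virtqueue(); the ring memory
 * ('pages') itself remains owned and freed by the caller. */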
void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

MODULE_LICENSE("GPL");