/*
 * Copyright 2013, 2018, Jérôme Duval, jerome.duval@gmail.com.
 * Distributed under the terms of the MIT License.
 */


#include "VirtioPrivate.h"


static inline uint32
round_to_pagesize(uint32 size)
{
	return (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
}


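// Allocate a physically contiguous area and return both its kernel virtual
// address and its physical address.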
area_id
alloc_mem(void** virt, phys_addr_t* phy, size_t size, uint32 protection,
	const char* name)
{
	physical_entry pe;
	void* virtadr;
	area_id areaid;
	status_t rv;

	TRACE("allocating %" B_PRIuSIZE " bytes for %s\n", size, name);

	size = round_to_pagesize(size);
	areaid = create_area(name, &virtadr, B_ANY_KERNEL_ADDRESS, size,
		B_CONTIGUOUS, protection);
	if (areaid < B_OK) {
		ERROR("couldn't allocate area %s\n", name);
		return B_ERROR;
	}
	rv = get_memory_map(virtadr, size, &pe, 1);
	if (rv < B_OK) {
		delete_area(areaid);
		ERROR("couldn't get mapping for %s\n", name);
		return B_ERROR;
	}
	if (virt != NULL)
		*virt = virtadr;
	if (phy != NULL)
		*phy = pe.address;
	TRACE("area = %" B_PRId32 ", size = %" B_PRIuSIZE ", virt = %p, phy = %#"
		B_PRIxPHYSADDR "\n", areaid, size, virtadr, pe.address);
	return areaid;
}


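// Bookkeeping for one in-flight request: the cookie and descriptor count of
// the request occupying a ring slot, plus an optionally preallocated indirect
// descriptor table.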
class TransferDescriptor {
public:
								TransferDescriptor(VirtioQueue* queue,
									uint16 indirectMaxSize);
								~TransferDescriptor();

			status_t			InitCheck() { return fStatus; }

			uint16				Size() { return fDescriptorCount; }
			void				SetTo(uint16 size, void* cookie);
			void*				Cookie() { return fCookie; }
			void				Unset();
			struct vring_desc*	Indirect() { return fIndirect; }
			phys_addr_t			PhysAddr() { return fPhysAddr; }

private:
			status_t			fStatus;
			VirtioQueue*		fQueue;
			void*				fCookie;

			struct vring_desc*	fIndirect;
			size_t				fAreaSize;
			area_id				fArea;
			phys_addr_t			fPhysAddr;
			uint16				fDescriptorCount;
};


TransferDescriptor::TransferDescriptor(VirtioQueue* queue,
	uint16 indirectMaxSize)
	:
	fStatus(B_OK),
	fQueue(queue),
	fCookie(NULL),
	fIndirect(NULL),
	fAreaSize(0),
	fArea(-1),
	fPhysAddr(0),
	fDescriptorCount(0)
{
	struct vring_desc* virtAddr;
	phys_addr_t physAddr;

	if (indirectMaxSize > 0) {
		fAreaSize = indirectMaxSize * sizeof(struct vring_desc);
		fArea = alloc_mem((void**)&virtAddr, &physAddr, fAreaSize,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, "virtqueue");
		if (fArea < B_OK) {
			fStatus = fArea;
			return;
		}
		memset(virtAddr, 0, fAreaSize);
		fIndirect = virtAddr;
		fPhysAddr = physAddr;

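		// Pre-link the table into one chain so QueueVector() can walk it
		// through the next fields; UINT16_MAX marks the end of the chain.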
		for (uint16 i = 0; i < indirectMaxSize - 1; i++)
			fIndirect[i].next = i + 1;
		fIndirect[indirectMaxSize - 1].next = UINT16_MAX;
	}
}


TransferDescriptor::~TransferDescriptor()
{
	if (fArea > B_OK)
		delete_area(fArea);
}


void
TransferDescriptor::SetTo(uint16 size, void* cookie)
{
	fCookie = cookie;
	fDescriptorCount = size;
}


void
TransferDescriptor::Unset()
{
	fCookie = NULL;
	fDescriptorCount = 0;
}


//	#pragma mark -


VirtioQueue::VirtioQueue(VirtioDevice* device, uint16 queueNumber,
	uint16 ringSize)
	:
	fDevice(device),
	fQueueNumber(queueNumber),
	fRingSize(ringSize),
	fRingFree(ringSize),
	fRingHeadIndex(0),
	fRingUsedIndex(0),
	fStatus(B_OK),
	fAreaSize(0),
	fArea(-1),
	fIndirectMaxSize(0),
	fCallback(NULL),
	fCookie(NULL)
{
	// Value-initialize the array so the destructor can safely delete the
	// entries even when construction bails out early.
	fDescriptors = new(std::nothrow) TransferDescriptor*[fRingSize]();
	if (fDescriptors == NULL) {
		fStatus = B_NO_MEMORY;
		return;
	}

	uint8* virtAddr;
	phys_addr_t physAddr;
	fAreaSize = vring_size(fRingSize, device->Alignment());
	fArea = alloc_mem((void**)&virtAddr, &physAddr, fAreaSize,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, "virtqueue");
	if (fArea < B_OK) {
		fStatus = fArea;
		return;
	}
	memset(virtAddr, 0, fAreaSize);
	vring_init(&fRing, fRingSize, virtAddr, device->Alignment());

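	// Chain every ring descriptor into a single free list, terminated by
	// UINT16_MAX; fRingHeadIndex points at the first free descriptor.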
	for (uint16 i = 0; i < fRingSize - 1; i++)
		fRing.desc[i].next = i + 1;
	fRing.desc[fRingSize - 1].next = UINT16_MAX;

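	// When the device supports indirect descriptors, preallocate a table of
	// up to 128 of them for each ring slot.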
	if ((fDevice->Features() & VIRTIO_FEATURE_RING_INDIRECT_DESC) != 0)
		fIndirectMaxSize = 128;

	for (uint16 i = 0; i < fRingSize; i++) {
		fDescriptors[i] = new(std::nothrow) TransferDescriptor(this,
			fIndirectMaxSize);
		if (fDescriptors[i] == NULL || fDescriptors[i]->InitCheck() != B_OK) {
			fStatus = B_NO_MEMORY;
			return;
		}
	}

	DisableInterrupt();

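	// Hand the device the physical addresses of the descriptor table, the
	// available ring and the used ring.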
	device->SetupQueue(fQueueNumber, physAddr,
		physAddr + ((addr_t)fRing.avail - (addr_t)fRing.desc),
		physAddr + ((addr_t)fRing.used - (addr_t)fRing.desc));
}


VirtioQueue::~VirtioQueue()
{
	if (fArea > B_OK)
		delete_area(fArea);
	if (fDescriptors != NULL) {
		for (uint16 i = 0; i < fRingSize; i++)
			delete fDescriptors[i];
		delete[] fDescriptors;
	}
}


status_t
VirtioQueue::SetupInterrupt(virtio_callback_func handler, void* cookie)
{
	fCallback = handler;
	fCookie = cookie;

	return B_OK;
}


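// VRING_AVAIL_F_NO_INTERRUPT is only a hint. When
// VIRTIO_FEATURE_RING_EVENT_IDX has been negotiated, the used_event index is
// supposed to throttle interrupts instead, so the flag is left untouched.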
void
VirtioQueue::DisableInterrupt()
{
	if ((fDevice->Features() & VIRTIO_FEATURE_RING_EVENT_IDX) == 0)
		fRing.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}


void
VirtioQueue::EnableInterrupt()
{
	if ((fDevice->Features() & VIRTIO_FEATURE_RING_EVENT_IDX) == 0)
		fRing.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
}


void
VirtioQueue::NotifyHost()
{
	fDevice->NotifyQueue(fQueueNumber);
}


status_t
VirtioQueue::Interrupt()
{
	CALLED();

	DisableInterrupt();

	if (fCallback != NULL)
		fCallback(Device()->DriverCookie(), fCookie);

	EnableInterrupt();
	return B_OK;
}


bool
VirtioQueue::Dequeue(void** _cookie, uint32* _usedLength)
{
	TRACE("Dequeue() fRingUsedIndex: %u\n", fRingUsedIndex);

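	// Nothing new has been consumed by the device if our index has caught up
	// with the device's used index.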
	if (fRingUsedIndex == fRing.used->idx)
		return false;

	uint16 usedIndex = fRingUsedIndex++ & (fRingSize - 1);
	TRACE("Dequeue() usedIndex: %u\n", usedIndex);
	struct vring_used_elem* element = &fRing.used->ring[usedIndex];
	uint16 descriptorIndex = element->id;
	if (_usedLength != NULL)
		*_usedLength = element->len;

	void* cookie = fDescriptors[descriptorIndex]->Cookie();
	if (_cookie != NULL)
		*_cookie = cookie;

	uint16 size = fDescriptors[descriptorIndex]->Size();
	if (size == 0)
		panic("VirtioQueue::Dequeue() size is zero\n");
	fDescriptors[descriptorIndex]->Unset();
	fRingFree += size;
	size--;

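	// For a direct request, walk the chain to its tail; an indirect request
	// occupies only its single head descriptor.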
	uint16 index = descriptorIndex;
	if ((fRing.desc[index].flags & VRING_DESC_F_INDIRECT) == 0) {
		while ((fRing.desc[index].flags & VRING_DESC_F_NEXT) != 0) {
			index = fRing.desc[index].next;
			size--;
		}
	}

	if (size > 0)
		panic("VirtioQueue::Dequeue() %d descriptors left\n", size);

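	// Return the reclaimed chain to the head of the free list.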
	fRing.desc[index].next = fRingHeadIndex;
	fRingHeadIndex = descriptorIndex;
	TRACE("Dequeue() fRingHeadIndex: %u\n", fRingHeadIndex);

	return true;
}


status_t
VirtioQueue::QueueRequest(const physical_entry* vector, size_t readVectorCount,
	size_t writtenVectorCount, void* cookie)
{
	CALLED();
	size_t count = readVectorCount + writtenVectorCount;
	if (count < 1)
		return B_BAD_VALUE;
	if ((fDevice->Features() & VIRTIO_FEATURE_RING_INDIRECT_DESC) != 0) {
		return QueueRequestIndirect(vector, readVectorCount,
			writtenVectorCount, cookie);
	}

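	// Direct descriptors: the whole chain has to fit into the free list.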
	if (count > fRingFree)
		return B_BUSY;

	uint16 insertIndex = fRingHeadIndex;
	fDescriptors[insertIndex]->SetTo(count, cookie);

	// enqueue
	uint16 index = QueueVector(insertIndex, fRing.desc, vector,
		readVectorCount, writtenVectorCount);

	fRingHeadIndex = index;
	fRingFree -= count;

	UpdateAvailable(insertIndex);

	NotifyHost();

	return B_OK;
}


status_t
VirtioQueue::QueueRequestIndirect(const physical_entry* vector,
	size_t readVectorCount, size_t writtenVectorCount,
	void* cookie)
{
	CALLED();
	size_t count = readVectorCount + writtenVectorCount;
	if (count > fRingFree || count > fIndirectMaxSize)
		return B_BUSY;

	uint16 insertIndex = fRingHeadIndex;
	fDescriptors[insertIndex]->SetTo(1, cookie);

	// enqueue
	uint16 index = QueueVector(0, fDescriptors[insertIndex]->Indirect(),
		vector, readVectorCount, writtenVectorCount);

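	// A single ring descriptor refers to the indirect table; its length tells
	// the device how many table entries are in use.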
	fRing.desc[insertIndex].addr = fDescriptors[insertIndex]->PhysAddr();
	fRing.desc[insertIndex].len = index * sizeof(struct vring_desc);
	fRing.desc[insertIndex].flags = VRING_DESC_F_INDIRECT;
	fRingHeadIndex = fRing.desc[insertIndex].next;
	fRingFree--;

	UpdateAvailable(insertIndex);

	NotifyHost();

	return B_OK;
}


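// Publish the head of a new chain in the next available ring slot and advance
// the index the device watches.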
void
VirtioQueue::UpdateAvailable(uint16 index)
{
	CALLED();
	uint16 available = fRing.avail->idx & (fRingSize - 1);
	fRing.avail->ring[available] = index;
	fRing.avail->idx++;
}


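// Fill the descriptor chain starting at insertIndex from the scatter/gather
// vector: all entries but the last are linked with VRING_DESC_F_NEXT, and
// device-writable buffers are flagged VRING_DESC_F_WRITE. Returns the index
// that follows the end of the chain.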
uint16
VirtioQueue::QueueVector(uint16 insertIndex, struct vring_desc* desc,
	const physical_entry* vector, size_t readVectorCount,
	size_t writtenVectorCount)
{
	CALLED();
	uint16 index = insertIndex;
	size_t total = readVectorCount + writtenVectorCount;
	for (size_t i = 0; i < total; i++, index = desc[index].next) {
		desc[index].addr = vector[i].address;
		desc[index].len = vector[i].size;
		desc[index].flags = 0;
		if (i < total - 1)
			desc[index].flags |= VRING_DESC_F_NEXT;
		if (i >= readVectorCount)
			desc[index].flags |= VRING_DESC_F_WRITE;
	}

	return index;
}