/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include "dma_resources.h"

#include <device_manager.h>

#include <kernel.h>
#include <util/AutoLock.h>
#include <vm/vm.h>

#include "IORequest.h"


//#define TRACE_DMA_RESOURCE
#ifdef TRACE_DMA_RESOURCE
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


extern device_manager_info gDeviceManagerModule;

const phys_size_t kMaxBounceBufferSize = 4 * B_PAGE_SIZE;


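/*!	Allocates a DMABuffer with inline storage for up to \a count I/O vecs.
	The vec array is embedded at the end of the structure, hence the
	count - 1 in the size computation (accounting for the vec the structure
	itself already declares).
*/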
DMABuffer*
DMABuffer::Create(size_t count)
{
	DMABuffer* buffer = (DMABuffer*)malloc(
		sizeof(DMABuffer) + sizeof(generic_io_vec) * (count - 1));
	if (buffer == NULL)
		return NULL;

	buffer->fVecCount = count;
	buffer->fBounceBuffer = NULL;
		// make sure UsesBounceBufferAt() is well-defined before a bounce
		// buffer has been attached

	return buffer;
}


void
DMABuffer::SetVecCount(uint32 count)
{
	fVecCount = count;
}


void
DMABuffer::AddVec(generic_addr_t base, generic_size_t size)
{
	generic_io_vec& vec = fVecs[fVecCount++];
	vec.base = base;
	vec.length = size;
}


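/*!	Returns whether the vec at \a index lies within the attached bounce
	buffer's physical address range.
*/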
bool
DMABuffer::UsesBounceBufferAt(uint32 index)
{
	if (index >= fVecCount || fBounceBuffer == NULL)
		return false;

	return fVecs[index].base >= fBounceBuffer->physical_address
		&& fVecs[index].base
				< fBounceBuffer->physical_address + fBounceBuffer->size;
}


void
DMABuffer::Dump() const
{
	kprintf("DMABuffer at %p\n", this);

	if (fBounceBuffer != NULL) {
		kprintf("  bounce buffer:      %p (physical %#" B_PRIxPHYSADDR ")\n",
			fBounceBuffer->address, fBounceBuffer->physical_address);
		kprintf("  bounce buffer size: %" B_PRIxPHYSADDR "\n",
			fBounceBuffer->size);
	}
	kprintf("  vecs:               %" B_PRIu32 "\n", fVecCount);

	for (uint32 i = 0; i < fVecCount; i++) {
		kprintf("    [%" B_PRIu32 "] %#" B_PRIxGENADDR ", %" B_PRIuGENADDR "\n",
			i, fVecs[i].base, fVecs[i].length);
	}
}


//	#pragma mark -


DMAResource::DMAResource()
{
	mutex_init(&fLock, "dma resource");
}


DMAResource::~DMAResource()
{
	mutex_destroy(&fLock);
	free(fScratchVecs);

// TODO: Delete DMABuffers and BounceBuffers!
}


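/*!	Initializes the resource from the B_DMA_* attributes attached to
	\a node (alignment, boundary, segment and transfer limits, address
	range), then delegates to the restrictions-based Init().
*/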
status_t
DMAResource::Init(device_node* node, generic_size_t blockSize,
	uint32 bufferCount, uint32 bounceBufferCount)
{
	dma_restrictions restrictions;
	memset(&restrictions, 0, sizeof(dma_restrictions));

	// TODO: add DMA attributes instead of reusing block_io's

	uint32 value;
	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_ALIGNMENT, &value, true) == B_OK)
		restrictions.alignment = (generic_size_t)value + 1;

	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_BOUNDARY, &value, true) == B_OK)
		restrictions.boundary = (generic_size_t)value + 1;

	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_MAX_SEGMENT_BLOCKS, &value, true) == B_OK)
		restrictions.max_segment_size = (generic_size_t)value * blockSize;

	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_MAX_TRANSFER_BLOCKS, &value, true) == B_OK)
		restrictions.max_transfer_size = (generic_size_t)value * blockSize;

	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_MAX_SEGMENT_COUNT, &value, true) == B_OK)
		restrictions.max_segment_count = value;

	uint64 value64;
	if (gDeviceManagerModule.get_attr_uint64(node,
			B_DMA_LOW_ADDRESS, &value64, true) == B_OK) {
		restrictions.low_address = value64;
	}

	if (gDeviceManagerModule.get_attr_uint64(node,
			B_DMA_HIGH_ADDRESS, &value64, true) == B_OK) {
		restrictions.high_address = value64;
	}

	return Init(restrictions, blockSize, bufferCount, bounceBufferCount);
}


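/*!	Fills in defaults for unset restrictions (open address range, 16
	segments, no alignment, unlimited sizes), picks a bounce buffer size if
	bounce buffers may be needed, and preallocates \a bufferCount DMABuffers
	and \a bounceBufferCount bounce buffers.
*/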
status_t
DMAResource::Init(const dma_restrictions& restrictions,
	generic_size_t blockSize, uint32 bufferCount, uint32 bounceBufferCount)
{
	fRestrictions = restrictions;
	fBlockSize = blockSize == 0 ? 1 : blockSize;
	fBufferCount = bufferCount;
	fBounceBufferCount = bounceBufferCount;
	fBounceBufferSize = 0;

	if (fRestrictions.high_address == 0)
		fRestrictions.high_address = ~(generic_addr_t)0;
	if (fRestrictions.max_segment_count == 0)
		fRestrictions.max_segment_count = 16;
	if (fRestrictions.alignment == 0)
		fRestrictions.alignment = 1;
	if (fRestrictions.max_transfer_size == 0)
		fRestrictions.max_transfer_size = ~(generic_size_t)0;
	if (fRestrictions.max_segment_size == 0)
		fRestrictions.max_segment_size = ~(generic_size_t)0;

	if (_NeedsBoundsBuffers()) {
		fBounceBufferSize = fRestrictions.max_segment_size
			* min_c(fRestrictions.max_segment_count, 4);
		if (fBounceBufferSize > kMaxBounceBufferSize)
			fBounceBufferSize = kMaxBounceBufferSize;
		TRACE("DMAResource::Init(): chose bounce buffer size %lu\n",
			fBounceBufferSize);
	}

	dprintf("DMAResource@%p: low/high %" B_PRIxGENADDR "/%" B_PRIxGENADDR
		", max segment count %" B_PRIu32 ", align %" B_PRIuGENADDR ", "
		"boundary %" B_PRIuGENADDR ", max transfer %" B_PRIuGENADDR
		", max segment size %" B_PRIuGENADDR "\n", this,
		fRestrictions.low_address, fRestrictions.high_address,
		fRestrictions.max_segment_count, fRestrictions.alignment,
		fRestrictions.boundary, fRestrictions.max_transfer_size,
		fRestrictions.max_segment_size);

	fScratchVecs = (generic_io_vec*)malloc(
		sizeof(generic_io_vec) * fRestrictions.max_segment_count);
	if (fScratchVecs == NULL)
		return B_NO_MEMORY;

	for (size_t i = 0; i < fBufferCount; i++) {
		DMABuffer* buffer;
		status_t error = CreateBuffer(&buffer);
		if (error != B_OK)
			return error;

		fDMABuffers.Add(buffer);
	}

	// TODO: create bounce buffers in as few areas as feasible
	for (size_t i = 0; i < fBounceBufferCount; i++) {
		DMABounceBuffer* buffer;
		status_t error = CreateBounceBuffer(&buffer);
		if (error != B_OK)
			return error;

		fBounceBuffers.Add(buffer);
	}

	return B_OK;
}


status_t
DMAResource::CreateBuffer(DMABuffer** _buffer)
{
	DMABuffer* buffer = DMABuffer::Create(fRestrictions.max_segment_count);
	if (buffer == NULL)
		return B_NO_MEMORY;

	*_buffer = buffer;
	return B_OK;
}


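/*!	Allocates one physically contiguous bounce buffer that honors the
	resource's physical address, alignment, and boundary restrictions, and
	wraps it in a DMABounceBuffer.
*/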
status_t
DMAResource::CreateBounceBuffer(DMABounceBuffer** _buffer)
{
	void* bounceBuffer = NULL;
	phys_addr_t physicalBase = 0;
	area_id area = -1;
	phys_size_t size = ROUNDUP(fBounceBufferSize, B_PAGE_SIZE);

	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	physicalRestrictions.low_address = fRestrictions.low_address;
	physicalRestrictions.high_address = fRestrictions.high_address;
	physicalRestrictions.alignment = fRestrictions.alignment;
	physicalRestrictions.boundary = fRestrictions.boundary;
	area = create_area_etc(B_SYSTEM_TEAM, "dma buffer", size, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, 0, &virtualRestrictions,
		&physicalRestrictions, &bounceBuffer);
	if (area < B_OK)
		return area;

	physical_entry entry;
	if (get_memory_map(bounceBuffer, size, &entry, 1) != B_OK) {
		panic("get_memory_map() failed.");
		delete_area(area);
		return B_ERROR;
	}

	physicalBase = entry.address;

	ASSERT(fRestrictions.high_address >= physicalBase + size);

	DMABounceBuffer* buffer = new(std::nothrow) DMABounceBuffer;
	if (buffer == NULL) {
		delete_area(area);
		return B_NO_MEMORY;
	}

	buffer->address = bounceBuffer;
	buffer->physical_address = physicalBase;
	buffer->size = size;

	*_buffer = buffer;
	return B_OK;
}


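/*!	Shortens \a length so that the segment starting at \a base neither
	exceeds the maximum segment size nor crosses a boundary, if one is set.
*/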
inline void
DMAResource::_RestrictBoundaryAndSegmentSize(generic_addr_t base,
	generic_addr_t& length)
{
	if (length > fRestrictions.max_segment_size)
		length = fRestrictions.max_segment_size;
	if (fRestrictions.boundary > 0) {
		generic_addr_t baseBoundary = base / fRestrictions.boundary;
		if (baseBoundary
				!= (base + (length - 1)) / fRestrictions.boundary) {
			length = (baseBoundary + 1) * fRestrictions.boundary - base;
		}
	}
}


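/*!	Cuts \a toCut bytes off the end of \a buffer's vec list, dropping or
	shortening vecs as necessary. Bytes that were backed by the bounce
	buffer are handed back: \a bounceLeft grows and \a physicalBounceBuffer
	is wound back by the amount cut.
*/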
void
DMAResource::_CutBuffer(DMABuffer& buffer, phys_addr_t& physicalBounceBuffer,
	phys_size_t& bounceLeft, generic_size_t toCut)
{
	int32 vecCount = buffer.VecCount();
	for (int32 i = vecCount - 1; toCut > 0 && i >= 0; i--) {
		generic_io_vec& vec = buffer.VecAt(i);
		generic_size_t length = vec.length;
		bool inBounceBuffer = buffer.UsesBounceBufferAt(i);

		if (length <= toCut) {
			vecCount--;
			toCut -= length;

			if (inBounceBuffer) {
				bounceLeft += length;
				physicalBounceBuffer -= length;
			}
		} else {
			vec.length -= toCut;

			if (inBounceBuffer) {
				bounceLeft += toCut;
				physicalBounceBuffer -= toCut;
			}
			break;
		}
	}

	buffer.SetVecCount(vecCount);
}


/*!	Adds \a length bytes from the bounce buffer to the DMABuffer \a buffer.
	Takes care of boundary and segment restrictions. \a length must be
	aligned. If \a fixedLength is requested, this function will fail if it
	cannot satisfy the request.

	\return 0 if the request cannot be satisfied. There could have been some
		additions to the DMA buffer, and you will need to cut them back.
	TODO: is that what we want here?
	\return >0 the number of bytes added to the buffer.
*/
phys_size_t
DMAResource::_AddBounceBuffer(DMABuffer& buffer,
	phys_addr_t& physicalBounceBuffer, phys_size_t& bounceLeft,
	generic_size_t length, bool fixedLength)
{
	if (bounceLeft < length) {
		if (fixedLength)
			return 0;

		length = bounceLeft;
	}

	phys_size_t bounceUsed = 0;

	uint32 vecCount = buffer.VecCount();
	if (vecCount > 0) {
		// see if we can join the bounce buffer with the previously last vec
		generic_io_vec& vec = buffer.VecAt(vecCount - 1);
		generic_addr_t vecBase = vec.base;
		generic_size_t vecLength = vec.length;

		if (vecBase + vecLength == physicalBounceBuffer) {
			vecLength += length;
			_RestrictBoundaryAndSegmentSize(vecBase, vecLength);

			generic_size_t lengthDiff = vecLength - vec.length;
			length -= lengthDiff;

			physicalBounceBuffer += lengthDiff;
			bounceLeft -= lengthDiff;
			bounceUsed += lengthDiff;

			vec.length = vecLength;
		}
	}

	while (length > 0) {
		// We need to add another bounce vec

		if (vecCount == fRestrictions.max_segment_count)
			return fixedLength ? 0 : bounceUsed;

		generic_addr_t vecLength = length;
		_RestrictBoundaryAndSegmentSize(physicalBounceBuffer, vecLength);

		buffer.AddVec(physicalBounceBuffer, vecLength);
		vecCount++;

		physicalBounceBuffer += vecLength;
		bounceLeft -= vecLength;
		bounceUsed += vecLength;
		length -= vecLength;
	}

	return bounceUsed;
}


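/*!	Builds the next IOOperation for \a request: translates the request's
	remaining I/O vecs into a DMABuffer that satisfies all DMA restrictions,
	falling back to bounce buffer segments wherever the hardware cannot
	access the memory directly (misaligned or out-of-range addresses,
	partial blocks), and advances the request by the number of original
	bytes the operation covers.
*/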
status_t
DMAResource::TranslateNext(IORequest* request, IOOperation* operation,
	generic_size_t maxOperationLength)
{
	IOBuffer* buffer = request->Buffer();
	off_t originalOffset = request->Offset() + request->Length()
		- request->RemainingBytes();
	off_t offset = originalOffset;
	generic_size_t partialBegin = offset & (fBlockSize - 1);

	// current iteration state
	uint32 vecIndex = request->VecIndex();
	uint32 vecOffset = request->VecOffset();
	generic_size_t totalLength = min_c(request->RemainingBytes(),
		fRestrictions.max_transfer_size);

	if (maxOperationLength > 0
		&& maxOperationLength < totalLength + partialBegin) {
		totalLength = maxOperationLength - partialBegin;
	}

	MutexLocker locker(fLock);

	DMABuffer* dmaBuffer = fDMABuffers.RemoveHead();
	if (dmaBuffer == NULL)
		return B_BUSY;

	dmaBuffer->SetVecCount(0);

	generic_io_vec* vecs = NULL;
	uint32 segmentCount = 0;

	TRACE("  offset %Ld, remaining size: %lu, block size %lu -> partial: %lu\n",
		offset, request->RemainingBytes(), fBlockSize, partialBegin);

	if (buffer->IsVirtual()) {
		// Unless we need the bounce buffer anyway, we have to translate the
		// virtual addresses to physical addresses, so we can check the DMA
		// restrictions.
		TRACE("  buffer is virtual %s\n", buffer->IsUser() ? "user" : "kernel");
		// TODO: !partialOperation || totalLength >= fBlockSize
		// TODO: Maybe enforce fBounceBufferSize >= 2 * fBlockSize.
		if (true) {
			generic_size_t transferLeft = totalLength;
			vecs = fScratchVecs;

			TRACE("  create physical map (for %ld vecs)\n", buffer->VecCount());
			for (uint32 i = vecIndex; i < buffer->VecCount(); i++) {
				generic_io_vec& vec = buffer->VecAt(i);
				generic_addr_t base = vec.base + vecOffset;
				generic_size_t size = vec.length - vecOffset;
				vecOffset = 0;
				if (size > transferLeft)
					size = transferLeft;

				while (size > 0 && segmentCount
						< fRestrictions.max_segment_count) {
					physical_entry entry;
					uint32 count = 1;
					get_memory_map_etc(request->TeamID(), (void*)base, size,
						&entry, &count);

					vecs[segmentCount].base = entry.address;
					vecs[segmentCount].length = entry.size;

					transferLeft -= entry.size;
					base += entry.size;
					size -= entry.size;
					segmentCount++;
				}

				if (transferLeft == 0)
					break;
			}

			totalLength -= transferLeft;
		}

		vecIndex = 0;
		vecOffset = 0;
	} else {
		// We do already have physical addresses.
		locker.Unlock();
		vecs = buffer->Vecs();
		segmentCount = min_c(buffer->VecCount() - vecIndex,
			fRestrictions.max_segment_count);
	}

#ifdef TRACE_DMA_RESOURCE
	TRACE("  physical count %lu\n", segmentCount);
	for (uint32 i = 0; i < segmentCount; i++) {
		TRACE("    [%" B_PRIu32 "] %#" B_PRIxGENADDR ", %" B_PRIxGENADDR "\n",
			i, vecs[vecIndex + i].base, vecs[vecIndex + i].length);
	}
#endif

	// check alignment, boundaries, etc. and set vecs in DMA buffer

	// Fetch a bounce buffer we can use for the DMABuffer.
	// TODO: We should do that lazily when needed!
	DMABounceBuffer* bounceBuffer = NULL;
	if (_NeedsBoundsBuffers()) {
		bounceBuffer = fBounceBuffers.Head();
		if (bounceBuffer == NULL)
			return B_BUSY;
	}
	dmaBuffer->SetBounceBuffer(bounceBuffer);

	generic_size_t dmaLength = 0;
	phys_addr_t physicalBounceBuffer = dmaBuffer->PhysicalBounceBufferAddress();
	phys_size_t bounceLeft = fBounceBufferSize;
	generic_size_t transferLeft = totalLength;

	// If the offset isn't block-aligned, use the bounce buffer to bridge the
	// gap to the start of the vec.
	if (partialBegin > 0) {
		generic_size_t length;
		if (request->IsWrite()) {
			// we always need to read in a whole block for the partial write
			length = fBlockSize;
		} else {
			length = (partialBegin + fRestrictions.alignment - 1)
				& ~(fRestrictions.alignment - 1);
		}

		if (_AddBounceBuffer(*dmaBuffer, physicalBounceBuffer, bounceLeft,
				length, true) == 0) {
			TRACE("  adding partial begin failed, length %lu!\n", length);
			return B_BAD_VALUE;
		}

		dmaLength += length;

		generic_size_t transferred = length - partialBegin;
		vecOffset += transferred;
		offset -= partialBegin;

		if (transferLeft > transferred)
			transferLeft -= transferred;
		else
			transferLeft = 0;

		TRACE("  partial begin, using bounce buffer: offset: %lld, length: "
			"%lu\n", offset, length);
	}

	for (uint32 i = vecIndex;
			i < vecIndex + segmentCount && transferLeft > 0;) {
		if (dmaBuffer->VecCount() >= fRestrictions.max_segment_count)
			break;

		const generic_io_vec& vec = vecs[i];
		if (vec.length <= vecOffset) {
			vecOffset -= vec.length;
			i++;
			continue;
		}

		generic_addr_t base = vec.base + vecOffset;
		generic_size_t maxLength = vec.length - vecOffset;
		if (maxLength > transferLeft)
			maxLength = transferLeft;
		generic_size_t length = maxLength;

		// Cut the vec according to transfer size, segment size, and boundary.

		if (dmaLength + length > fRestrictions.max_transfer_size) {
			length = fRestrictions.max_transfer_size - dmaLength;
			TRACE("  vec %lu: restricting length to %lu due to transfer size "
				"limit\n", i, length);
		}
		_RestrictBoundaryAndSegmentSize(base, length);

		phys_size_t useBounceBufferSize = 0;

		// Check low address: use bounce buffer for range to low address.
		// Check alignment: if not aligned, use bounce buffer for complete vec.
		if (base < fRestrictions.low_address) {
			useBounceBufferSize = fRestrictions.low_address - base;
			TRACE("  vec %lu: below low address, using bounce buffer: %lu\n", i,
				useBounceBufferSize);
		} else if (base & (fRestrictions.alignment - 1)) {
			useBounceBufferSize = length;
			TRACE("  vec %lu: misalignment, using bounce buffer: %lu\n", i,
				useBounceBufferSize);
		}

		// Enforce high address restriction
		if (base > fRestrictions.high_address)
			useBounceBufferSize = length;
		else if (base + length > fRestrictions.high_address)
			length = fRestrictions.high_address - base;

		// Align length as well
		if (useBounceBufferSize == 0)
			length &= ~(fRestrictions.alignment - 1);

		// If length is 0, use bounce buffer for complete vec.
		if (length == 0) {
			length = maxLength;
			useBounceBufferSize = length;
			TRACE("  vec %lu: 0 length, using bounce buffer: %lu\n", i,
				useBounceBufferSize);
		}

		if (useBounceBufferSize > 0) {
			// alignment could still be wrong (we round up here)
			useBounceBufferSize = (useBounceBufferSize
				+ fRestrictions.alignment - 1) & ~(fRestrictions.alignment - 1);

			length = _AddBounceBuffer(*dmaBuffer, physicalBounceBuffer,
				bounceLeft, useBounceBufferSize, false);
			if (length == 0) {
				TRACE("  vec %lu: out of bounce buffer space\n", i);
				// We don't have any bounce buffer space left, we need to move
				// this request to the next I/O operation.
				break;
			}
			TRACE("  vec %lu: final bounce length: %lu\n", i, length);
		} else {
			TRACE("  vec %lu: final length restriction: %lu\n", i, length);
			dmaBuffer->AddVec(base, length);
		}

		dmaLength += length;
		vecOffset += length;
		transferLeft -= min_c(length, transferLeft);
	}

	// If we're writing partially, we always need to have a block sized bounce
	// buffer (or else we would overwrite memory to be written on the read in
	// the first phase).
	off_t requestEnd = request->Offset() + request->Length();
	if (request->IsWrite()) {
		generic_size_t diff = dmaLength & (fBlockSize - 1);

		// If the transfer length is block aligned and we're writing past the
		// end of the given data, we still have to check whether the last vec
		// is a bounce buffer segment shorter than the block size. If so, we
		// have to cut back the complete block and use a bounce buffer for it
		// entirely.
		if (diff == 0 && offset + (off_t)dmaLength > requestEnd) {
			const generic_io_vec& dmaVec
				= dmaBuffer->VecAt(dmaBuffer->VecCount() - 1);
			ASSERT(dmaVec.base >= dmaBuffer->PhysicalBounceBufferAddress()
				&& dmaVec.base
					< dmaBuffer->PhysicalBounceBufferAddress()
						+ fBounceBufferSize);
				// We can be certain that the last vec is a bounce buffer vec,
				// since otherwise the DMA buffer couldn't exceed the end of the
				// request data.
			if (dmaVec.length < fBlockSize)
				diff = fBlockSize;
		}

		if (diff != 0) {
			// Not yet block aligned -- cut back to the previous block and add
			// a block-sized bounce buffer segment.
			TRACE("  partial end write: %lu, diff %lu\n", dmaLength, diff);

			_CutBuffer(*dmaBuffer, physicalBounceBuffer, bounceLeft, diff);
			dmaLength -= diff;

			if (_AddBounceBuffer(*dmaBuffer, physicalBounceBuffer,
					bounceLeft, fBlockSize, true) == 0) {
				// If we cannot write anything, we can't process the request at
				// all.
				TRACE("  adding bounce buffer failed!!!\n");
				if (dmaLength == 0)
					return B_BAD_VALUE;
			} else
				dmaLength += fBlockSize;
		}
	}

	// If total length not block aligned, use bounce buffer for padding (read
	// case only).
	while ((dmaLength & (fBlockSize - 1)) != 0) {
		TRACE("  dmaLength not block aligned: %lu\n", dmaLength);
		generic_size_t length
			= (dmaLength + fBlockSize - 1) & ~(fBlockSize - 1);

		// If total length > max transfer size, segment count > max segment
		// count, truncate.
		// TODO: sometimes we can replace the last vec with the bounce buffer
		// to let it match the restrictions.
		if (length > fRestrictions.max_transfer_size
			|| dmaBuffer->VecCount() == fRestrictions.max_segment_count
			|| bounceLeft < length - dmaLength) {
			// cut back the unaligned part of the DMA length
			TRACE("  can't align length due to max transfer size, segment "
				"count restrictions, or lacking bounce buffer space\n");
			generic_size_t toCut = dmaLength
				& (max_c(fBlockSize, fRestrictions.alignment) - 1);
			dmaLength -= toCut;
			if (dmaLength == 0) {
				// This can only happen when we have too many small segments
				// and hit the max segment count. In this case we just use the
				// bounce buffer for as much as possible of the total length.
				dmaBuffer->SetVecCount(0);
				generic_addr_t base = dmaBuffer->PhysicalBounceBufferAddress();
				dmaLength = min_c(totalLength, fBounceBufferSize)
					& ~(max_c(fBlockSize, fRestrictions.alignment) - 1);
				_RestrictBoundaryAndSegmentSize(base, dmaLength);
				dmaBuffer->AddVec(base, dmaLength);

				physicalBounceBuffer = base + dmaLength;
				bounceLeft = fBounceBufferSize - dmaLength;
			} else {
				_CutBuffer(*dmaBuffer, physicalBounceBuffer, bounceLeft, toCut);
			}
		} else {
			TRACE("  adding %lu bytes final bounce buffer\n",
				length - dmaLength);
			length -= dmaLength;
			length = _AddBounceBuffer(*dmaBuffer, physicalBounceBuffer,
				bounceLeft, length, true);
			if (length == 0)
				panic("don't do this to me!");
			dmaLength += length;
		}
	}

	operation->SetBuffer(dmaBuffer);
	operation->SetBlockSize(fBlockSize);
	operation->SetOriginalRange(originalOffset,
		min_c(offset + (off_t)dmaLength, requestEnd) - originalOffset);
	operation->SetRange(offset, dmaLength);
	operation->SetPartial(partialBegin != 0,
		offset + (off_t)dmaLength > requestEnd);

	// If we don't need the bounce buffer, we put it back; otherwise we take
	// it off the free list.
	operation->SetUsesBounceBuffer(bounceLeft < fBounceBufferSize);
	if (operation->UsesBounceBuffer())
		fBounceBuffers.RemoveHead();
	else
		dmaBuffer->SetBounceBuffer(NULL);

	status_t error = operation->Prepare(request);
	if (error != B_OK)
		return error;

	request->Advance(operation->OriginalLength());

	return B_OK;
}


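/*!	Returns \a buffer (and its bounce buffer, if it still holds one) to the
	free lists, making them available to subsequent TranslateNext() calls.
*/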
void
DMAResource::RecycleBuffer(DMABuffer* buffer)
{
	if (buffer == NULL)
		return;

	MutexLocker _(fLock);
	fDMABuffers.Add(buffer);
	if (buffer->BounceBuffer() != NULL) {
		fBounceBuffers.Add(buffer->BounceBuffer());
		buffer->SetBounceBuffer(NULL);
	}
}


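/*!	Returns whether the restrictions can force the use of bounce buffers:
	any alignment, address range, or block size constraint that arbitrary
	request memory might violate.
*/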
bool
DMAResource::_NeedsBoundsBuffers() const
{
	return fRestrictions.alignment > 1
		|| fRestrictions.low_address != 0
		|| fRestrictions.high_address != ~(generic_addr_t)0
		|| fBlockSize > 1;
}


#if 0


status_t
create_dma_resource(restrictions)
{
	// Restrictions are: transfer size, address space, alignment
	// segment min/max size, num segments
}


void
delete_dma_resource(resource)
{
}


dma_buffer_alloc(resource, size)
{
}


dma_buffer_free(buffer)
{
//	Allocates or frees memory in that DMA buffer.
}

#endif	// 0