/*
 * Copyright 2010-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include "IOCache.h"

#include <algorithm>

#include <condition_variable.h>
#include <heap.h>
#include <low_resource_manager.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>
#include <vm/VMTranslationMap.h>


//#define TRACE_IO_CACHE 1
#ifdef TRACE_IO_CACHE
#	define TRACE(format...)	dprintf(format)
#else
#	define TRACE(format...)	do {} while (false)
#endif


static inline bool
page_physical_number_less(const vm_page* a, const vm_page* b)
{
	return a->physical_page_number < b->physical_page_number;
}


struct IOCache::Operation : IOOperation {
	ConditionVariable	finishedCondition;
};


IOCache::IOCache(DMAResource* resource, size_t cacheLineSize)
	:
	IOScheduler(resource),
	fDeviceCapacity(0),
	fLineSize(cacheLineSize),
	fPagesPerLine(cacheLineSize / B_PAGE_SIZE),
	fArea(-1),
	fCache(NULL),
	fPages(NULL),
	fVecs(NULL)
{
	ASSERT(resource != NULL);
	TRACE("%p->IOCache::IOCache(%p, %" B_PRIuSIZE ")\n", this, resource,
		cacheLineSize);

	if (cacheLineSize < B_PAGE_SIZE
		|| (cacheLineSize & (cacheLineSize - 1)) != 0) {
		panic("Invalid cache line size (%" B_PRIuSIZE "). Must be a power of 2 "
			"multiple of the page size.", cacheLineSize);
	}

	mutex_init(&fSerializationLock, "I/O cache request serialization");

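	// compute the base-2 logarithm of the line size; since it is a power of
	// 2, shifting by it is equivalent to dividing/multiplying by the line size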
	fLineSizeShift = 0;
	while (cacheLineSize != 1) {
		fLineSizeShift++;
		cacheLineSize >>= 1;
	}
}


IOCache::~IOCache()
{
	if (fArea >= 0) {
		vm_page_unreserve_pages(&fMappingReservation);
		delete_area(fArea);
	}

	delete[] fPages;
	delete[] fVecs;

	mutex_destroy(&fSerializationLock);
}


status_t
IOCache::Init(const char* name)
{
	TRACE("%p->IOCache::Init(\"%s\")\n", this, name);

	status_t error = IOScheduler::Init(name);
	if (error != B_OK)
		return error;

	// create the area for mapping cache lines
	fArea = vm_create_null_area(B_SYSTEM_TEAM, "I/O cache line", &fAreaBase,
		B_ANY_KERNEL_ADDRESS, fLineSize, 0);
	if (fArea < 0)
		return fArea;

	// reserve pages for mapping a complete cache line
	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
	VMTranslationMap* translationMap = addressSpace->TranslationMap();
	size_t pagesNeeded = translationMap->MaxPagesNeededToMap((addr_t)fAreaBase,
		(addr_t)fAreaBase + fLineSize - 1);
	vm_page_reserve_pages(&fMappingReservation, pagesNeeded,
		VM_PRIORITY_SYSTEM);

	// get the area's cache
	VMArea* area = VMAreaHash::Lookup(fArea);
	if (area == NULL) {
		panic("IOCache::Init(): Where's our area (id: %" B_PRId32 ")?!", fArea);
		return B_ERROR;
	}
	fCache = area->cache;

	// allocate arrays for pages and io vecs
	fPages = new(std::nothrow) vm_page*[fPagesPerLine];
	fVecs = new(std::nothrow) generic_io_vec[fPagesPerLine];
	if (fPages == NULL || fVecs == NULL)
		return B_NO_MEMORY;

	return B_OK;
}


void
IOCache::SetDeviceCapacity(off_t deviceCapacity)
{
	TRACE("%p->IOCache::SetDeviceCapacity(%" B_PRIdOFF ")\n", this,
		deviceCapacity);

	MutexLocker serializationLocker(fSerializationLock);
	AutoLocker<VMCache> cacheLocker(fCache);

	fDeviceCapacity = deviceCapacity;
}


void
IOCache::MediaChanged()
{
	TRACE("%p->IOCache::MediaChanged()\n", this);

	MutexLocker serializationLocker(fSerializationLock);
	AutoLocker<VMCache> cacheLocker(fCache);

	// new media -- burn all cached data
	while (vm_page* page = fCache->pages.Root()) {
		DEBUG_PAGE_ACCESS_START(page);
		fCache->RemovePage(page);
		vm_page_free(NULL, page);
	}
}


status_t
IOCache::ScheduleRequest(IORequest* request)
{
	TRACE("%p->IOCache::ScheduleRequest(%p)\n", this, request);

	// lock the request's memory
	status_t error;
	IOBuffer* buffer = request->Buffer();
	if (buffer->IsVirtual()) {
		error = buffer->LockMemory(request->TeamID(), request->IsWrite());
		if (error != B_OK) {
			request->SetStatusAndNotify(error);
			return error;
		}
	}

	// we completely serialize all I/O in FIFO order
	MutexLocker serializationLocker(fSerializationLock);
	generic_size_t bytesTransferred = 0;
	error = _DoRequest(request, bytesTransferred);
	serializationLocker.Unlock();

	// unlock memory
	if (buffer->IsVirtual())
		buffer->UnlockMemory(request->TeamID(), request->IsWrite());

	// set status and notify
	if (error == B_OK) {
		request->SetTransferredBytes(bytesTransferred < request->Length(),
			bytesTransferred);
		request->SetStatusAndNotify(B_OK);
	} else
		request->SetStatusAndNotify(error);

	return error;
}


void
IOCache::AbortRequest(IORequest* request, status_t status)
{
	// TODO:...
}


void
IOCache::OperationCompleted(IOOperation* operation, status_t status,
	generic_size_t transferredBytes)
{
	if (status == B_OK) {
		// always fail in case of partial transfers
		((Operation*)operation)->finishedCondition.NotifyAll(false,
			transferredBytes == operation->Length() ? B_OK : B_ERROR);
	} else
		((Operation*)operation)->finishedCondition.NotifyAll(false, status);
}


void
IOCache::Dump() const
{
	kprintf("IOCache at %p\n", this);
	kprintf("  DMA resource:   %p\n", fDMAResource);
}


status_t
IOCache::_DoRequest(IORequest* request, generic_size_t& _bytesTransferred)
{
	off_t offset = request->Offset();
	generic_size_t length = request->Length();

	TRACE("%p->IOCache::_DoRequest(%p): offset: %" B_PRIdOFF
		", length: %" B_PRIuSIZE "\n", this, request, offset, length);

	if (offset < 0 || offset > fDeviceCapacity)
		return B_BAD_VALUE;

	// truncate the request to the device capacity
	if (fDeviceCapacity - offset < (off_t)length)
		length = fDeviceCapacity - offset;

	_bytesTransferred = 0;

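	// process the request one cache line at a time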
	while (length > 0) {
		// the start of the current cache line
		off_t lineOffset = (offset >> fLineSizeShift) << fLineSizeShift;

		// intersection of request and cache line
		off_t cacheLineEnd = std::min(lineOffset + (off_t)fLineSize,
			fDeviceCapacity);
		size_t requestLineLength
			= std::min(cacheLineEnd - offset, (off_t)length);

		// transfer the data of the cache line
		status_t error = _TransferRequestLine(request, lineOffset,
			cacheLineEnd - lineOffset, offset, requestLineLength);
		if (error != B_OK)
			return error;

		offset = cacheLineEnd;
		length -= requestLineLength;
		_bytesTransferred += requestLineLength;
	}

	return B_OK;
}


status_t
IOCache::_TransferRequestLine(IORequest* request, off_t lineOffset,
	size_t lineSize, off_t requestOffset, size_t requestLength)
{
	TRACE("%p->IOCache::_TransferRequestLine(%p, %" B_PRIdOFF
		", %" B_PRIdOFF  ", %" B_PRIuSIZE ")\n", this, request, lineOffset,
		requestOffset, requestLength);

	// check which pages of the cache line are already present and mark them
	// unused
	page_num_t firstPageOffset = lineOffset / B_PAGE_SIZE;
	page_num_t linePageCount = (lineSize + B_PAGE_SIZE - 1) / B_PAGE_SIZE;

	AutoLocker<VMCache> cacheLocker(fCache);

	page_num_t firstMissing = 0;
	page_num_t lastMissing = 0;
	page_num_t missingPages = 0;
	page_num_t pageOffset = firstPageOffset;

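	// walk the cache's page tree over the line's range, recording which pages
	// are present and the bounds of the missing range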
	VMCachePagesTree::Iterator it = fCache->pages.GetIterator(pageOffset, true,
		true);
	while (pageOffset < firstPageOffset + linePageCount) {
		vm_page* page = it.Next();
		page_num_t currentPageOffset;
		if (page == NULL
			|| page->cache_offset >= firstPageOffset + linePageCount) {
			page = NULL;
			currentPageOffset = firstPageOffset + linePageCount;
		} else
			currentPageOffset = page->cache_offset;

		if (pageOffset < currentPageOffset) {
			// pages are missing
			if (missingPages == 0)
				firstMissing = pageOffset;
			lastMissing = currentPageOffset - 1;
			missingPages += currentPageOffset - pageOffset;

			for (; pageOffset < currentPageOffset; pageOffset++)
				fPages[pageOffset - firstPageOffset] = NULL;
		}

		if (page != NULL) {
			fPages[pageOffset++ - firstPageOffset] = page;
			DEBUG_PAGE_ACCESS_START(page);
			vm_page_set_state(page, PAGE_STATE_UNUSED);
			DEBUG_PAGE_ACCESS_END(page);
		}
	}

	cacheLocker.Unlock();

	bool isVIP = (request->Flags() & B_VIP_IO_REQUEST) != 0;

	if (missingPages > 0) {
// TODO: If this is a read request and the missing pages range doesn't intersect
// with the request, just satisfy the request and don't read anything at all.
		// There are pages of the cache line missing. We have to allocate fresh
		// ones.

		// reserve
		vm_page_reservation reservation;
		if (!vm_page_try_reserve_pages(&reservation, missingPages,
				VM_PRIORITY_SYSTEM)) {
			_DiscardPages(firstMissing - firstPageOffset, missingPages);

			// fall back to uncached transfer
			return _TransferRequestLineUncached(request, lineOffset,
				requestOffset, requestLength);
		}

		// Allocate the missing pages and remove the already existing pages in
		// the range from the cache. We're going to read/write the whole range
		// anyway and this way we can sort it, possibly improving the physical
		// vecs.
// TODO: When memory is low, we should consider cannibalizing ourselves or
// simply transferring past the cache!
		for (pageOffset = firstMissing; pageOffset <= lastMissing;
				pageOffset++) {
			page_num_t index = pageOffset - firstPageOffset;
			if (fPages[index] == NULL) {
				fPages[index] = vm_page_allocate_page(&reservation,
					PAGE_STATE_UNUSED);
				DEBUG_PAGE_ACCESS_END(fPages[index]);
			} else {
				cacheLocker.Lock();
				fCache->RemovePage(fPages[index]);
				cacheLocker.Unlock();
			}
		}

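		// From here on we treat the entire range [firstMissing, lastMissing]
		// as missing -- the previously cached pages in it have been removed
		// and will be reinserted below -- so recompute the page count.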
		missingPages = lastMissing - firstMissing + 1;

		// sort the page array by physical page number
		std::sort(fPages + firstMissing - firstPageOffset,
			fPages + lastMissing - firstPageOffset + 1,
			page_physical_number_less);

		// add the pages to the cache
		cacheLocker.Lock();

		for (pageOffset = firstMissing; pageOffset <= lastMissing;
				pageOffset++) {
			page_num_t index = pageOffset - firstPageOffset;
			fCache->InsertPage(fPages[index], (off_t)pageOffset * B_PAGE_SIZE);
		}

		cacheLocker.Unlock();

		// Read in the missing pages, if this is a read request or a write
		// request that doesn't cover the complete missing range.
		if (request->IsRead()
			|| requestOffset < (off_t)firstMissing * B_PAGE_SIZE
			|| requestOffset + (off_t)requestLength
				> (off_t)(lastMissing + 1) * B_PAGE_SIZE) {
			status_t error = _TransferPages(firstMissing - firstPageOffset,
				missingPages, false, isVIP);
			if (error != B_OK) {
				dprintf("IOCache::_TransferRequestLine(): Failed to read into "
					"cache (offset: %" B_PRIdOFF ", length: %" B_PRIuSIZE "), "
					"trying uncached read (offset: %" B_PRIdOFF ", length: %"
					B_PRIuSIZE ")\n", (off_t)firstMissing * B_PAGE_SIZE,
					(size_t)missingPages * B_PAGE_SIZE, requestOffset,
					requestLength);

				_DiscardPages(firstMissing - firstPageOffset, missingPages);

				// Try again using an uncached transfer
				return _TransferRequestLineUncached(request, lineOffset,
					requestOffset, requestLength);
			}
		}
	}

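	// all pages of the line are now in the cache -- copy the data between the
	// cache pages and the request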
	if (request->IsRead()) {
		// copy data to request
		status_t error = _CopyPages(request, requestOffset - lineOffset,
			requestOffset, requestLength, true);
		_CachePages(0, linePageCount);
		return error;
	}

	// copy data from request
	status_t error = _CopyPages(request, requestOffset - lineOffset,
		requestOffset, requestLength, false);
	if (error != B_OK) {
		_DiscardPages(0, linePageCount);
		return error;
	}

	// write the pages to disk
	page_num_t firstPage = (requestOffset - lineOffset) / B_PAGE_SIZE;
	page_num_t endPage = (requestOffset + requestLength - lineOffset
		+ B_PAGE_SIZE - 1) / B_PAGE_SIZE;
	error = _TransferPages(firstPage, endPage - firstPage, true, isVIP);

	if (error != B_OK) {
		_DiscardPages(firstPage, endPage - firstPage);
		return error;
	}

	_CachePages(0, linePageCount);
	return error;
}


status_t
IOCache::_TransferRequestLineUncached(IORequest* request, off_t lineOffset,
	off_t requestOffset, size_t requestLength)
{
	TRACE("%p->IOCache::_TransferRequestLineUncached(%p, %" B_PRIdOFF
		", %" B_PRIdOFF  ", %" B_PRIuSIZE ")\n", this, request, lineOffset,
		requestOffset, requestLength);

	// Advance the request to the interesting offset, so the DMAResource can
	// provide us with fitting operations.
	off_t actualRequestOffset
		= request->Offset() + request->Length() - request->RemainingBytes();
	if (actualRequestOffset > requestOffset) {
		dprintf("IOCache::_TransferRequestLineUncached(): Request %p advanced "
			"beyond current cache line (%" B_PRIdOFF " vs. %" B_PRIdOFF ")\n",
			request, actualRequestOffset, requestOffset);
		return B_BAD_VALUE;
	}

	if (actualRequestOffset < requestOffset)
		request->Advance(requestOffset - actualRequestOffset);

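	// remember how many bytes of the request must still remain after this
	// line, so we know when the line has been processed completely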
	generic_size_t requestRemaining = request->RemainingBytes() - requestLength;

	// Process single operations until the specified part of the request is
	// finished or until an error occurs.
	Operation operation;
	operation.finishedCondition.Init(this, "I/O cache operation finished");

	while (request->RemainingBytes() > requestRemaining
		&& request->Status() > 0) {
		status_t error = fDMAResource->TranslateNext(request, &operation,
			request->RemainingBytes() - requestRemaining);
		if (error != B_OK)
			return error;

		error = _DoOperation(operation);

		request->OperationFinished(&operation, error, false,
			error == B_OK ? operation.OriginalLength() : 0);
		request->SetUnfinished();
			// Keep the request in unfinished state. ScheduleRequest() will set
			// the final status and notify.

		fDMAResource->RecycleBuffer(operation.Buffer());

		if (error != B_OK) {
			TRACE("%p->IOCache::_TransferRequestLineUncached(): operation at "
				"%" B_PRIdOFF " failed: %s\n", this, operation.Offset(),
				strerror(error));
			return error;
		}
	}

	return B_OK;
}


status_t
IOCache::_DoOperation(Operation& operation)
{
	TRACE("%p->IOCache::_DoOperation(%" B_PRIdOFF ", %" B_PRIuSIZE ")\n", this,
		operation.Offset(), operation.Length());

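	// Start the operation and wait for it to complete. If Finish() reports
	// that the operation isn't done yet (e.g. it needs another pass), issue
	// it again.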
	while (true) {
		ConditionVariableEntry waitEntry;
		operation.finishedCondition.Add(&waitEntry);

		status_t error = fIOCallback(fIOCallbackData, &operation);
		if (error != B_OK) {
			operation.finishedCondition.NotifyAll(false, error);
				// removes the entry from the variable
			return error;
		}

		// wait for the operation to finish
		error = waitEntry.Wait();
		if (error != B_OK)
			return error;

		if (operation.Finish())
			return B_OK;
	}
}


status_t
IOCache::_TransferPages(size_t firstPage, size_t pageCount, bool isWrite,
	bool isVIP)
{
	TRACE("%p->IOCache::_TransferPages(%" B_PRIuSIZE ", %" B_PRIuSIZE
		", write: %d, vip: %d)\n", this, firstPage, pageCount, isWrite, isVIP);

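	// Determine the transfer offset and length; the last cache line may
	// extend beyond the device capacity, so clamp the length accordingly.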
	off_t firstPageOffset = (off_t)fPages[firstPage]->cache_offset
		* B_PAGE_SIZE;
	generic_size_t requestLength = std::min(
			firstPageOffset + (off_t)pageCount * B_PAGE_SIZE, fDeviceCapacity)
		- firstPageOffset;

	// prepare the I/O vecs
	size_t vecCount = 0;
	size_t endPage = firstPage + pageCount;
	phys_addr_t vecsEndAddress = 0;
	for (size_t i = firstPage; i < endPage; i++) {
		phys_addr_t pageAddress
			= (phys_addr_t)fPages[i]->physical_page_number * B_PAGE_SIZE;
		if (vecCount == 0 || pageAddress != vecsEndAddress) {
			fVecs[vecCount].base = pageAddress;
			fVecs[vecCount++].length = B_PAGE_SIZE;
			vecsEndAddress = pageAddress + B_PAGE_SIZE;
		} else {
			// extend the previous vec
			fVecs[vecCount - 1].length += B_PAGE_SIZE;
			vecsEndAddress += B_PAGE_SIZE;
		}
	}

	// create a request for the transfer
	IORequest request;
	status_t error = request.Init(firstPageOffset, fVecs, vecCount,
		requestLength, isWrite,
		B_PHYSICAL_IO_REQUEST | (isVIP ? B_VIP_IO_REQUEST : 0));
	if (error != B_OK)
		return error;

	// Process single operations until the complete request is finished or
	// until an error occurs.
	Operation operation;
	operation.finishedCondition.Init(this, "I/O cache operation finished");

	while (request.RemainingBytes() > 0 && request.Status() > 0) {
		error = fDMAResource->TranslateNext(&request, &operation,
			requestLength);
		if (error != B_OK)
			return error;

		error = _DoOperation(operation);

		request.RemoveOperation(&operation);

		fDMAResource->RecycleBuffer(operation.Buffer());

		if (error != B_OK) {
			TRACE("%p->IOCache::_TransferPages(): operation at %" B_PRIdOFF
				" failed: %s\n", this, operation.Offset(), strerror(error));
			return error;
		}
	}

	return B_OK;
}


/*!	Frees all pages in the given range of the \c fPages array.
	\c NULL entries in the range are OK. All non \c NULL entries must refer
	to pages with \c PAGE_STATE_UNUSED. The pages may belong to \c fCache or
	may not have a cache.
	\c fCache must not be locked.
*/
void
IOCache::_DiscardPages(size_t firstPage, size_t pageCount)
{
	TRACE("%p->IOCache::_DiscardPages(%" B_PRIuSIZE ", %" B_PRIuSIZE ")\n",
		this, firstPage, pageCount);

	AutoLocker<VMCache> cacheLocker(fCache);

	for (size_t i = firstPage; i < firstPage + pageCount; i++) {
		vm_page* page = fPages[i];
		if (page == NULL)
			continue;

		DEBUG_PAGE_ACCESS_START(page);

		ASSERT_PRINT(page->State() == PAGE_STATE_UNUSED,
			"page: %p @! page -m %p", page, page);

		if (page->Cache() != NULL)
			fCache->RemovePage(page);

		vm_page_free(NULL, page);
	}
}


/*!	Marks all pages in the given range of the \c fPages array cached.
	There must not be any \c NULL entries in the given array range. All pages
	must belong to \c fCache and have state \c PAGE_STATE_UNUSED.
	\c fCache must not be locked.
*/
void
IOCache::_CachePages(size_t firstPage, size_t pageCount)
{
	TRACE("%p->IOCache::_CachePages(%" B_PRIuSIZE ", %" B_PRIuSIZE ")\n",
		this, firstPage, pageCount);

	AutoLocker<VMCache> cacheLocker(fCache);

	for (size_t i = firstPage; i < firstPage + pageCount; i++) {
		vm_page* page = fPages[i];
		ASSERT(page != NULL);
		ASSERT_PRINT(page->State() == PAGE_STATE_UNUSED
				&& page->Cache() == fCache,
			"page: %p @! page -m %p", page, page);

		DEBUG_PAGE_ACCESS_START(page);
		vm_page_set_state(page, PAGE_STATE_CACHED);
		DEBUG_PAGE_ACCESS_END(page);
	}
}


/*!	Copies the contents of pages in \c fPages to \a request, or vice versa.
	\param request The request.
	\param pagesRelativeOffset The offset relative to \c fPages[0] where to
		start copying.
	\param requestOffset The request offset where to start copying.
	\param requestLength The number of bytes to copy.
	\param toRequest If \c true the copy direction is from \c fPages to
		\a request, otherwise the other way around.
	\return \c B_OK, if copying went fine, another error code otherwise.
*/
status_t
IOCache::_CopyPages(IORequest* request, size_t pagesRelativeOffset,
	off_t requestOffset, size_t requestLength, bool toRequest)
{
	TRACE("%p->IOCache::_CopyPages(%p, %" B_PRIuSIZE ", %" B_PRIdOFF
		", %" B_PRIuSIZE ", %d)\n", this, request, pagesRelativeOffset,
		requestOffset, requestLength, toRequest);

	size_t firstPage = pagesRelativeOffset / B_PAGE_SIZE;
	size_t endPage = (pagesRelativeOffset + requestLength + B_PAGE_SIZE - 1)
		/ B_PAGE_SIZE;

	// map the pages
	status_t error = _MapPages(firstPage, endPage);
// TODO: _MapPages() cannot fail, so the fallback is never needed. Test which
// method is faster (probably the active one)!
#if 0
	if (error != B_OK) {
		// fallback to copying individual pages
		size_t inPageOffset = pagesRelativeOffset % B_PAGE_SIZE;
		for (size_t i = firstPage; i < endPage; i++) {
			// map the page
			void* handle;
			addr_t address;
			error = vm_get_physical_page(
				fPages[i]->physical_page_number * B_PAGE_SIZE, &address,
				&handle);
			if (error != B_OK)
				return error;

			// copy the page's data
			size_t toCopy = std::min(B_PAGE_SIZE - inPageOffset, requestLength);

			if (toRequest) {
				error = request->CopyData((uint8*)(address + inPageOffset),
					requestOffset, toCopy);
			} else {
				error = request->CopyData(requestOffset,
					(uint8*)(address + inPageOffset), toCopy);
			}

			// unmap the page
			vm_put_physical_page(address, handle);

			if (error != B_OK)
				return error;

			inPageOffset = 0;
			requestOffset += toCopy;
			requestLength -= toCopy;
		}

		return B_OK;
	}
#endif	// 0

	// copy
	if (toRequest) {
		error = request->CopyData((uint8*)fAreaBase + pagesRelativeOffset,
			requestOffset, requestLength);
	} else {
		error = request->CopyData(requestOffset,
			(uint8*)fAreaBase + pagesRelativeOffset, requestLength);
	}

	// unmap the pages
	_UnmapPages(firstPage, endPage);

	return error;
}


/*!	Maps a range of pages in \c fPages into \c fArea.

	If successful, it must be balanced by a call to _UnmapPages().

	\param firstPage The \c fPages relative index of the first page to map.
	\param endPage The \c fPages relative index of the page after the last page
		to map.
	\return \c B_OK, if mapping went fine, another error code otherwise.
*/
status_t
IOCache::_MapPages(size_t firstPage, size_t endPage)
{
	VMTranslationMap* translationMap
		= VMAddressSpace::Kernel()->TranslationMap();

	translationMap->Lock();

	for (size_t i = firstPage; i < endPage; i++) {
		vm_page* page = fPages[i];

		ASSERT_PRINT(page->State() == PAGE_STATE_UNUSED,
			"page: %p @! page -m %p", page, page);

		translationMap->Map((addr_t)fAreaBase + i * B_PAGE_SIZE,
			page->physical_page_number * B_PAGE_SIZE,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, &fMappingReservation);
		// NOTE: We don't increment gMappedPagesCount. Our pages have state
		// PAGE_STATE_UNUSED anyway and we map them only for a short time.
	}

	translationMap->Unlock();

	return B_OK;
}


/*!	Unmaps a range of pages in \c fPages from \c fArea.

	Must balance a call to _MapPages().

	\param firstPage The \c fPages relative index of the first page to unmap.
	\param endPage The \c fPages relative index of the page after the last page
		to unmap.
*/
void
IOCache::_UnmapPages(size_t firstPage, size_t endPage)
{
	VMTranslationMap* translationMap
		= VMAddressSpace::Kernel()->TranslationMap();

	translationMap->Lock();

	translationMap->Unmap((addr_t)fAreaBase + firstPage * B_PAGE_SIZE,
		(addr_t)fAreaBase + endPage * B_PAGE_SIZE - 1);

	translationMap->Unlock();
}
