/*
 * Copyright 2010-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include "IOCache.h"

#include <algorithm>

#include <condition_variable.h>
#include <heap.h>
#include <low_resource_manager.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>
#include <vm/VMTranslationMap.h>


//#define TRACE_IO_CACHE 1
#ifdef TRACE_IO_CACHE
#	define TRACE(format...)	dprintf(format)
#else
#	define TRACE(format...)	do {} while (false)
#endif


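// Orders pages by ascending physical page number; used with std::sort() in
// _TransferRequestLine() to improve the physical I/O vecs.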
static inline bool
page_physical_number_less(const vm_page* a, const vm_page* b)
{
	return a->physical_page_number < b->physical_page_number;
}


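// An IOOperation with a condition variable attached, so the cache can wait
// synchronously for the operation to be finished.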
struct IOCache::Operation : IOOperation {
	ConditionVariable	finishedCondition;
};


IOCache::IOCache(DMAResource* resource, size_t cacheLineSize)
	:
	IOScheduler(resource),
	fDeviceCapacity(0),
	fLineSize(cacheLineSize),
	fPagesPerLine(cacheLineSize / B_PAGE_SIZE),
	fArea(-1),
	fCache(NULL),
	fPages(NULL),
	fVecs(NULL)
{
	ASSERT(resource != NULL);
	TRACE("%p->IOCache::IOCache(%p, %" B_PRIuSIZE ")\n", this, resource,
		cacheLineSize);

	if (cacheLineSize < B_PAGE_SIZE
		|| (cacheLineSize & (cacheLineSize - 1)) != 0) {
		panic("Invalid cache line size (%" B_PRIuSIZE "). Must be a power of 2 "
			"multiple of the page size.", cacheLineSize);
	}

	mutex_init(&fSerializationLock, "I/O cache request serialization");

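	// compute the base-2 logarithm of the line size (verified above to be a
	// power of 2), e.g. a 64 KiB line size yields fLineSizeShift == 16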
	fLineSizeShift = 0;
	while (cacheLineSize != 1) {
		fLineSizeShift++;
		cacheLineSize >>= 1;
	}
}


IOCache::~IOCache()
{
	if (fArea >= 0) {
		vm_page_unreserve_pages(&fMappingReservation);
		delete_area(fArea);
	}

	delete[] fPages;
	delete[] fVecs;

	mutex_destroy(&fSerializationLock);
}


status_t
IOCache::Init(const char* name)
{
	TRACE("%p->IOCache::Init(\"%s\")\n", this, name);

	status_t error = IOScheduler::Init(name);
	if (error != B_OK)
		return error;

	// create the area for mapping cache lines
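	// (a null area only reserves kernel address space; the pages of a cache
	// line are mapped into it temporarily by _MapPages())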
	fArea = vm_create_null_area(B_SYSTEM_TEAM, "I/O cache line", &fAreaBase,
		B_ANY_KERNEL_ADDRESS, fLineSize, 0);
	if (fArea < 0)
		return fArea;

	// reserve pages for mapping a complete cache line
	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
	VMTranslationMap* translationMap = addressSpace->TranslationMap();
	size_t pagesNeeded = translationMap->MaxPagesNeededToMap((addr_t)fAreaBase,
		(addr_t)fAreaBase + fLineSize - 1);
	vm_page_reserve_pages(&fMappingReservation, pagesNeeded,
		VM_PRIORITY_SYSTEM);

	// get the area's cache
	VMArea* area = VMAreas::Lookup(fArea);
	if (area == NULL) {
		panic("IOCache::Init(): Where's our area (id: %" B_PRId32 ")?!", fArea);
		return B_ERROR;
	}
	fCache = area->cache;

	// allocate arrays for pages and io vecs
	fPages = new(std::nothrow) vm_page*[fPagesPerLine];
	fVecs = new(std::nothrow) generic_io_vec[fPagesPerLine];
	if (fPages == NULL || fVecs == NULL)
		return B_NO_MEMORY;

	return B_OK;
}


void
IOCache::SetDeviceCapacity(off_t deviceCapacity)
{
	TRACE("%p->IOCache::SetDeviceCapacity(%" B_PRIdOFF ")\n", this,
		deviceCapacity);

	MutexLocker serializationLocker(fSerializationLock);
	AutoLocker<VMCache> cacheLocker(fCache);

	fDeviceCapacity = deviceCapacity;
}


void
IOCache::MediaChanged()
{
	TRACE("%p->IOCache::MediaChanged()\n", this);

	MutexLocker serializationLocker(fSerializationLock);
	AutoLocker<VMCache> cacheLocker(fCache);

	// new media -- burn all cached data
	while (vm_page* page = fCache->pages.Root()) {
		DEBUG_PAGE_ACCESS_START(page);
		fCache->RemovePage(page);
		vm_page_free(NULL, page);
	}
}


status_t
IOCache::ScheduleRequest(IORequest* request)
{
	TRACE("%p->IOCache::ScheduleRequest(%p)\n", this, request);

	// lock the request's memory
	status_t error;
	IOBuffer* buffer = request->Buffer();
	if (buffer->IsVirtual()) {
		error = buffer->LockMemory(request->TeamID(), request->IsWrite());
		if (error != B_OK) {
			request->SetStatusAndNotify(error);
			return error;
		}
	}

	// we completely serialize all I/O in FIFO order
	MutexLocker serializationLocker(fSerializationLock);
	generic_size_t bytesTransferred = 0;
	error = _DoRequest(request, bytesTransferred);
	serializationLocker.Unlock();

	// unlock memory
	if (buffer->IsVirtual())
		buffer->UnlockMemory(request->TeamID(), request->IsWrite());

	// set status and notify
	if (error == B_OK) {
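		// the first argument indicates whether the transfer was only partial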
		request->SetTransferredBytes(bytesTransferred < request->Length(),
			bytesTransferred);
		request->SetStatusAndNotify(B_OK);
	} else
		request->SetStatusAndNotify(error);

	return error;
}


void
IOCache::AbortRequest(IORequest* request, status_t status)
{
	// TODO:...
}


void
IOCache::OperationCompleted(IOOperation* operation, status_t status,
	generic_size_t transferredBytes)
{
	operation->SetStatus(status, transferredBytes);

	if (status == B_OK) {
		// always fail in case of partial transfers
		((Operation*)operation)->finishedCondition.NotifyAll(
			transferredBytes == operation->Length() ? B_OK : B_ERROR);
	} else
		((Operation*)operation)->finishedCondition.NotifyAll(status);
}


void
IOCache::Dump() const
{
	kprintf("IOCache at %p\n", this);
	kprintf("  DMA resource:   %p\n", fDMAResource);
}


status_t
IOCache::_DoRequest(IORequest* request, generic_size_t& _bytesTransferred)
{
	off_t offset = request->Offset();
	generic_size_t length = request->Length();

	TRACE("%p->IOCache::_DoRequest(%p): offset: %" B_PRIdOFF
		", length: %" B_PRIuSIZE "\n", this, request, offset, length);

	if (offset < 0 || offset > fDeviceCapacity)
		return B_BAD_VALUE;

	// truncate the request to the device capacity
	if (fDeviceCapacity - offset < (off_t)length)
		length = fDeviceCapacity - offset;

	_bytesTransferred = 0;

	while (length > 0) {
		// the start of the current cache line
		off_t lineOffset = (offset >> fLineSizeShift) << fLineSizeShift;

		// intersection of request and cache line
		off_t cacheLineEnd = std::min(lineOffset + (off_t)fLineSize, fDeviceCapacity);
		size_t requestLineLength
			= std::min(cacheLineEnd - offset, (off_t)length);

		// transfer the data of the cache line
		status_t error = _TransferRequestLine(request, lineOffset,
			cacheLineEnd - lineOffset, offset, requestLineLength);
		if (error != B_OK)
			return error;

		offset = cacheLineEnd;
		length -= requestLineLength;
		_bytesTransferred += requestLineLength;
	}

	return B_OK;
}


status_t
IOCache::_TransferRequestLine(IORequest* request, off_t lineOffset,
	size_t lineSize, off_t requestOffset, size_t requestLength)
{
	TRACE("%p->IOCache::_TransferRequestLine(%p, %" B_PRIdOFF
		", %" B_PRIdOFF  ", %" B_PRIuSIZE ")\n", this, request, lineOffset,
		requestOffset, requestLength);

	// check which pages of the cache line are already present and mark them used
	page_num_t firstPageOffset = lineOffset / B_PAGE_SIZE;
	page_num_t linePageCount = (lineSize + B_PAGE_SIZE - 1) / B_PAGE_SIZE;

	AutoLocker<VMCache> cacheLocker(fCache);

	page_num_t firstMissing = 0;
	page_num_t lastMissing = 0;
	page_num_t missingPages = 0;
	page_num_t pageOffset = firstPageOffset;

	VMCachePagesTree::Iterator it = fCache->pages.GetIterator(pageOffset, true,
		true);
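	// Walk the line's page range: pages already in the cache are recorded in
	// fPages and set to PAGE_STATE_UNUSED, gaps are recorded as NULL entries
	// and tracked via firstMissing/lastMissing/missingPages.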
	while (pageOffset < firstPageOffset + linePageCount) {
		vm_page* page = it.Next();
		page_num_t currentPageOffset;
		if (page == NULL
			|| page->cache_offset >= firstPageOffset + linePageCount) {
			page = NULL;
			currentPageOffset = firstPageOffset + linePageCount;
		} else
			currentPageOffset = page->cache_offset;

		if (pageOffset < currentPageOffset) {
			// pages are missing
			if (missingPages == 0)
				firstMissing = pageOffset;
			lastMissing = currentPageOffset - 1;
			missingPages += currentPageOffset - pageOffset;

			for (; pageOffset < currentPageOffset; pageOffset++)
				fPages[pageOffset - firstPageOffset] = NULL;
		}

		if (page != NULL) {
			fPages[pageOffset++ - firstPageOffset] = page;
			DEBUG_PAGE_ACCESS_START(page);
			vm_page_set_state(page, PAGE_STATE_UNUSED);
			DEBUG_PAGE_ACCESS_END(page);
		}
	}

	cacheLocker.Unlock();

	bool isVIP = (request->Flags() & B_VIP_IO_REQUEST) != 0;

	if (missingPages > 0) {
// TODO: If this is a read request and the missing pages range doesn't intersect
// with the request, just satisfy the request and don't read anything at all.
		// There are pages of the cache line missing. We have to allocate fresh
		// ones.

		// reserve
		vm_page_reservation reservation;
		if (!vm_page_try_reserve_pages(&reservation, missingPages,
				VM_PRIORITY_SYSTEM)) {
			_DiscardPages(firstMissing - firstPageOffset, missingPages);

			// fall back to uncached transfer
			return _TransferRequestLineUncached(request, lineOffset,
				requestOffset, requestLength);
		}

		// Allocate the missing pages and remove the already existing pages in
		// the range from the cache. We're going to read/write the whole range
		// anyway and this way we can sort it, possibly improving the physical
		// vecs.
// TODO: When memory is low, we should consider cannibalizing ourselves or
// simply transferring past the cache!
		for (pageOffset = firstMissing; pageOffset <= lastMissing;
				pageOffset++) {
			page_num_t index = pageOffset - firstPageOffset;
			if (fPages[index] == NULL) {
				fPages[index] = vm_page_allocate_page(&reservation,
					PAGE_STATE_UNUSED);
				DEBUG_PAGE_ACCESS_END(fPages[index]);
			} else {
				cacheLocker.Lock();
				fCache->RemovePage(fPages[index]);
				cacheLocker.Unlock();
			}
		}

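		// From here on treat the whole range [firstMissing, lastMissing] as
		// missing -- any pages that did exist in it have just been removed
		// from the cache above.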
		missingPages = lastMissing - firstMissing + 1;

		// sort the page array by physical page number
		std::sort(fPages + firstMissing - firstPageOffset,
			fPages + lastMissing - firstPageOffset + 1,
			page_physical_number_less);

		// add the pages to the cache
		cacheLocker.Lock();

		for (pageOffset = firstMissing; pageOffset <= lastMissing;
				pageOffset++) {
			page_num_t index = pageOffset - firstPageOffset;
			fCache->InsertPage(fPages[index], (off_t)pageOffset * B_PAGE_SIZE);
		}

		cacheLocker.Unlock();

		// Read in the missing pages, if this is a read request or a write
		// request that doesn't cover the complete missing range.
		if (request->IsRead()
			|| requestOffset < (off_t)firstMissing * B_PAGE_SIZE
			|| requestOffset + (off_t)requestLength
				> (off_t)(lastMissing + 1) * B_PAGE_SIZE) {
			status_t error = _TransferPages(firstMissing - firstPageOffset,
				missingPages, false, isVIP);
			if (error != B_OK) {
				dprintf("IOCache::_TransferRequestLine(): Failed to read into "
					"cache (offset: %" B_PRIdOFF ", length: %" B_PRIuSIZE "), "
					"trying uncached read (offset: %" B_PRIdOFF ", length: %"
					B_PRIuSIZE ")\n", (off_t)firstMissing * B_PAGE_SIZE,
					(size_t)missingPages * B_PAGE_SIZE, requestOffset,
					requestLength);

				_DiscardPages(firstMissing - firstPageOffset, missingPages);

				// Try again using an uncached transfer
				return _TransferRequestLineUncached(request, lineOffset,
					requestOffset, requestLength);
			}
		}
	}

	if (request->IsRead()) {
		// copy data to request
		status_t error = _CopyPages(request, requestOffset - lineOffset,
			requestOffset, requestLength, true);
		_CachePages(0, linePageCount);
		return error;
	}

	// copy data from request
	status_t error = _CopyPages(request, requestOffset - lineOffset,
		requestOffset, requestLength, false);
	if (error != B_OK) {
		_DiscardPages(0, linePageCount);
		return error;
	}

	// write the pages to disk
	page_num_t firstPage = (requestOffset - lineOffset) / B_PAGE_SIZE;
	page_num_t endPage = (requestOffset + requestLength - lineOffset
		+ B_PAGE_SIZE - 1) / B_PAGE_SIZE;
	error = _TransferPages(firstPage, endPage - firstPage, true, isVIP);

	if (error != B_OK) {
		_DiscardPages(firstPage, endPage - firstPage);
		return error;
	}

	_CachePages(0, linePageCount);
	return error;
}


status_t
IOCache::_TransferRequestLineUncached(IORequest* request, off_t lineOffset,
	off_t requestOffset, size_t requestLength)
{
	TRACE("%p->IOCache::_TransferRequestLineUncached(%p, %" B_PRIdOFF
		", %" B_PRIdOFF  ", %" B_PRIuSIZE ")\n", this, request, lineOffset,
		requestOffset, requestLength);

	// Advance the request to the interesting offset, so the DMAResource can
	// provide us with fitting operations.
	off_t actualRequestOffset
		= request->Offset() + request->Length() - request->RemainingBytes();
	if (actualRequestOffset > requestOffset) {
		dprintf("IOCache::_TransferRequestLineUncached(): Request %p advanced "
			"beyond current cache line (%" B_PRIdOFF " vs. %" B_PRIdOFF ")\n",
			request, actualRequestOffset, requestOffset);
		return B_BAD_VALUE;
	}

	if (actualRequestOffset < requestOffset)
		request->Advance(requestOffset - actualRequestOffset);

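	// the number of bytes that shall still remain unprocessed once our part
	// of the request has been handled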
	generic_size_t requestRemaining = request->RemainingBytes() - requestLength;

	// Process single operations until the specified part of the request is
	// finished or until an error occurs.
	Operation operation;
	operation.finishedCondition.Init(this, "I/O cache operation finished");

	while (request->RemainingBytes() > requestRemaining
		&& request->Status() > 0) {
		status_t error = fDMAResource->TranslateNext(request, &operation,
			request->RemainingBytes() - requestRemaining);
		if (error != B_OK)
			return error;

		error = _DoOperation(operation);

		request->OperationFinished(&operation);
		request->SetUnfinished();
			// Keep the request in unfinished state. ScheduleRequest() will set
			// the final status and notify.

		fDMAResource->RecycleBuffer(operation.Buffer());

		if (error != B_OK) {
			TRACE("%p->IOCache::_TransferRequestLineUncached(): operation at "
				"%" B_PRIdOFF " failed: %s\n", this, operation.Offset(),
				strerror(error));
			return error;
		}
	}

	return B_OK;
}


status_t
IOCache::_DoOperation(Operation& operation)
{
	TRACE("%p->IOCache::_DoOperation(%" B_PRIdOFF ", %" B_PRIuSIZE ")\n", this,
		operation.Offset(), operation.Length());

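	// An operation may have to be executed more than once (Finish() returning
	// false in that case), so keep restarting it until it is really done.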
	while (true) {
		ConditionVariableEntry waitEntry;
		operation.finishedCondition.Add(&waitEntry);

		status_t error = fIOCallback(fIOCallbackData, &operation);
		if (error != B_OK) {
			operation.finishedCondition.NotifyAll(error);
				// removes the entry from the variable
			return error;
		}

		// wait for the operation to finish
		error = waitEntry.Wait();
		if (error != B_OK)
			return error;

		if (operation.Finish())
			return B_OK;
	}
}


status_t
IOCache::_TransferPages(size_t firstPage, size_t pageCount, bool isWrite,
	bool isVIP)
{
	TRACE("%p->IOCache::_TransferPages(%" B_PRIuSIZE ", %" B_PRIuSIZE
		", write: %d, vip: %d)\n", this, firstPage, pageCount, isWrite, isVIP);

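	// compute the byte range to transfer, clamped to the device capacity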
	off_t firstPageOffset = (off_t)fPages[firstPage]->cache_offset
		* B_PAGE_SIZE;
	generic_size_t requestLength = std::min(
			firstPageOffset + (off_t)pageCount * B_PAGE_SIZE, fDeviceCapacity)
		- firstPageOffset;

	// prepare the I/O vecs
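	// (runs of physically contiguous pages are merged into a single vec)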
	size_t vecCount = 0;
	size_t endPage = firstPage + pageCount;
	phys_addr_t vecsEndAddress = 0;
	for (size_t i = firstPage; i < endPage; i++) {
		phys_addr_t pageAddress
			= (phys_addr_t)fPages[i]->physical_page_number * B_PAGE_SIZE;
		if (vecCount == 0 || pageAddress != vecsEndAddress) {
			fVecs[vecCount].base = pageAddress;
			fVecs[vecCount++].length = B_PAGE_SIZE;
			vecsEndAddress = pageAddress + B_PAGE_SIZE;
		} else {
			// extend the previous vec
			fVecs[vecCount - 1].length += B_PAGE_SIZE;
			vecsEndAddress += B_PAGE_SIZE;
		}
	}

	// Don't try to read past the end of the device just to fill a page;
	// this makes sure that sum(fVecs[].length) == requestLength
	generic_size_t padLength = B_PAGE_SIZE - requestLength % B_PAGE_SIZE;
	if (vecCount > 0 && padLength != B_PAGE_SIZE)
		fVecs[vecCount - 1].length -= padLength;

	// create a request for the transfer
	IORequest request;
	status_t error = request.Init(firstPageOffset, fVecs, vecCount,
		requestLength, isWrite,
		B_PHYSICAL_IO_REQUEST | (isVIP ? B_VIP_IO_REQUEST : 0));
	if (error != B_OK)
		return error;

	// Process single operations until the complete request is finished or
	// until an error occurs.
	Operation operation;
	operation.finishedCondition.Init(this, "I/O cache operation finished");

	while (request.RemainingBytes() > 0 && request.Status() > 0) {
		error = fDMAResource->TranslateNext(&request, &operation,
			requestLength);
		if (error != B_OK)
			return error;

		error = _DoOperation(operation);

		request.RemoveOperation(&operation);

		fDMAResource->RecycleBuffer(operation.Buffer());

		if (error != B_OK) {
			TRACE("%p->IOCache::_TransferPages(): operation at %" B_PRIdOFF
				" failed: %s\n", this, operation.Offset(), strerror(error));
			return error;
		}
	}

	return B_OK;
}


/*!	Frees all pages in the given range of the \c fPages array.
	\c NULL entries in the range are OK. All non \c NULL entries must refer
	to pages with \c PAGE_STATE_UNUSED. The pages may belong to \c fCache or
	may not have a cache.
	\c fCache must not be locked.
*/
void
IOCache::_DiscardPages(size_t firstPage, size_t pageCount)
{
	TRACE("%p->IOCache::_DiscardPages(%" B_PRIuSIZE ", %" B_PRIuSIZE ")\n",
		this, firstPage, pageCount);

	AutoLocker<VMCache> cacheLocker(fCache);

	for (size_t i = firstPage; i < firstPage + pageCount; i++) {
		vm_page* page = fPages[i];
		if (page == NULL)
			continue;

		DEBUG_PAGE_ACCESS_START(page);

		ASSERT_PRINT(page->State() == PAGE_STATE_UNUSED,
			"page: %p @! page -m %p", page, page);

		if (page->Cache() != NULL)
			fCache->RemovePage(page);

		vm_page_free(NULL, page);
	}
}


/*!	Marks all pages in the given range of the \c fPages array cached.
	There must not be any \c NULL entries in the given array range. All pages
	must belong to \c fCache and have state \c PAGE_STATE_UNUSED.
	\c fCache must not be locked.
*/
void
IOCache::_CachePages(size_t firstPage, size_t pageCount)
{
	TRACE("%p->IOCache::_CachePages(%" B_PRIuSIZE ", %" B_PRIuSIZE ")\n",
		this, firstPage, pageCount);

	AutoLocker<VMCache> cacheLocker(fCache);

	for (size_t i = firstPage; i < firstPage + pageCount; i++) {
		vm_page* page = fPages[i];
		ASSERT(page != NULL);
		ASSERT_PRINT(page->State() == PAGE_STATE_UNUSED
				&& page->Cache() == fCache,
			"page: %p @! page -m %p", page, page);

		DEBUG_PAGE_ACCESS_START(page);
		vm_page_set_state(page, PAGE_STATE_CACHED);
		DEBUG_PAGE_ACCESS_END(page);
	}
}


/*!	Copies the contents of pages in \c fPages to \a request, or vice versa.
	\param request The request.
	\param pagesRelativeOffset The offset relative to \c fPages[0] where to
		start copying.
	\param requestOffset The request offset where to start copying.
	\param requestLength The number of bytes to copy.
	\param toRequest If \c true the copy direction is from \c fPages to
		\a request, otherwise the other way around.
	\return \c B_OK, if copying went fine, another error code otherwise.
*/
status_t
IOCache::_CopyPages(IORequest* request, size_t pagesRelativeOffset,
	off_t requestOffset, size_t requestLength, bool toRequest)
{
	TRACE("%p->IOCache::_CopyPages(%p, %" B_PRIuSIZE ", %" B_PRIdOFF
		", %" B_PRIuSIZE ", %d)\n", this, request, pagesRelativeOffset,
		requestOffset, requestLength, toRequest);

	size_t firstPage = pagesRelativeOffset / B_PAGE_SIZE;
	size_t endPage = (pagesRelativeOffset + requestLength + B_PAGE_SIZE - 1)
		/ B_PAGE_SIZE;

	// map the pages
	status_t error = _MapPages(firstPage, endPage);
// TODO: _MapPages() cannot fail, so the fallback is never needed. Test which
// method is faster (probably the active one)!
#if 0
	if (error != B_OK) {
		// fallback to copying individual pages
		size_t inPageOffset = pagesRelativeOffset % B_PAGE_SIZE;
		for (size_t i = firstPage; i < endPage; i++) {
			// map the page
			void* handle;
			addr_t address;
			error = vm_get_physical_page(
				fPages[i]->physical_page_number * B_PAGE_SIZE, &address,
				&handle);
			if (error != B_OK)
				return error;

			// copy the page's data
			size_t toCopy = std::min(B_PAGE_SIZE - inPageOffset, requestLength);

			if (toRequest) {
				error = request->CopyData((uint8*)(address + inPageOffset),
					requestOffset, toCopy);
			} else {
				error = request->CopyData(requestOffset,
					(uint8*)(address + inPageOffset), toCopy);
			}

			// unmap the page
			vm_put_physical_page(address, handle);

			if (error != B_OK)
				return error;

			inPageOffset = 0;
			requestOffset += toCopy;
			requestLength -= toCopy;
		}

		return B_OK;
	}
#endif	// 0

	// copy
	if (toRequest) {
		error = request->CopyData((uint8*)fAreaBase + pagesRelativeOffset,
			requestOffset, requestLength);
	} else {
		error = request->CopyData(requestOffset,
			(uint8*)fAreaBase + pagesRelativeOffset, requestLength);
	}

	// unmap the pages
	_UnmapPages(firstPage, endPage);

	return error;
}


/*!	Maps a range of pages in \c fPages into \c fArea.

	If successful, it must be balanced by a call to _UnmapPages().

	\param firstPage The \c fPages relative index of the first page to map.
	\param endPage The \c fPages relative index of the page after the last page
		to map.
	\return \c B_OK, if mapping went fine, another error code otherwise.
*/
status_t
IOCache::_MapPages(size_t firstPage, size_t endPage)
{
	VMTranslationMap* translationMap
		= VMAddressSpace::Kernel()->TranslationMap();

	translationMap->Lock();

	for (size_t i = firstPage; i < endPage; i++) {
		vm_page* page = fPages[i];

		ASSERT_PRINT(page->State() == PAGE_STATE_UNUSED,
			"page: %p @! page -m %p", page, page);

		translationMap->Map((addr_t)fAreaBase + i * B_PAGE_SIZE,
			page->physical_page_number * B_PAGE_SIZE,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, &fMappingReservation);
		// NOTE: We don't increment gMappedPagesCount. Our pages have state
		// PAGE_STATE_UNUSED anyway and we map them only for a short time.
	}

	translationMap->Unlock();

	return B_OK;
}


/*!	Unmaps a range of pages in \c fPages from \c fArea.

	Must balance a call to _MapPages().

	\param firstPage The \c fPages relative index of the first page to unmap.
	\param endPage The \c fPages relative index of the page after the last page
		to unmap.
*/
void
IOCache::_UnmapPages(size_t firstPage, size_t endPage)
{
	VMTranslationMap* translationMap
		= VMAddressSpace::Kernel()->TranslationMap();

	translationMap->Lock();

	translationMap->Unmap((addr_t)fAreaBase + firstPage * B_PAGE_SIZE,
		(addr_t)fAreaBase + endPage * B_PAGE_SIZE - 1);

	translationMap->Unlock();
}
