/*
 * Copyright 2020-2021, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *   X512 <danger_mail@list.ru>
 */


#include "RISCV64VMTranslationMap.h"

#include <kernel.h>
#include <vm/vm_priv.h>
#include <vm/vm_page.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>
#include <slab/Slab.h>
#include <platform/sbi/sbi_syscalls.h>

#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>


//#define DO_TRACE
#ifdef DO_TRACE
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif

#define NOT_IMPLEMENTED_PANIC() \
	panic("not implemented: %s\n", __PRETTY_FUNCTION__)

extern uint32 gPlatform;


static void
WriteVmPage(vm_page* page)
{
	dprintf("0x%08" B_PRIxADDR " ",
		(addr_t)(page->physical_page_number * B_PAGE_SIZE));
	switch (page->State()) {
		case PAGE_STATE_ACTIVE:
			dprintf("A");
			break;
		case PAGE_STATE_INACTIVE:
			dprintf("I");
			break;
		case PAGE_STATE_MODIFIED:
			dprintf("M");
			break;
		case PAGE_STATE_CACHED:
			dprintf("C");
			break;
		case PAGE_STATE_FREE:
			dprintf("F");
			break;
		case PAGE_STATE_CLEAR:
			dprintf("L");
			break;
		case PAGE_STATE_WIRED:
			dprintf("W");
			break;
		case PAGE_STATE_UNUSED:
			dprintf("-");
			break;
	}
	dprintf(" ");
	if (page->busy)
		dprintf("B");
	else
		dprintf("-");

	if (page->busy_writing)
		dprintf("W");
	else
		dprintf("-");

	if (page->accessed)
		dprintf("A");
	else
		dprintf("-");

	if (page->modified)
		dprintf("M");
	else
		dprintf("-");

	if (page->unused)
		dprintf("U");
	else
		dprintf("-");

	dprintf(" usage:%3u", page->usage_count);
	dprintf(" wired:%5u", page->WiredCount());

	bool first = true;
	vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
	vm_page_mapping* mapping;
	while ((mapping = iterator.Next()) != NULL) {
		if (first) {
			dprintf(": ");
			first = false;
		} else
			dprintf(", ");

		dprintf("%" B_PRId32 " (%s)", mapping->area->id, mapping->area->name);
	}
}


static void
FreePageTable(page_num_t ppn, bool isKernel, uint32 level = 2)
{
	if (level > 0) {
		Pte* pte = (Pte*)VirtFromPhys(ppn * B_PAGE_SIZE);
		uint64 beg = 0;
		uint64 end = pteCount - 1;
		if (level == 2 && !isKernel) {
			beg = VirtAdrPte(USER_BASE, 2);
			end = VirtAdrPte(USER_TOP, 2);
		}
		for (uint64 i = beg; i <= end; i++) {
			if (pte[i].isValid)
				FreePageTable(pte[i].ppn, isKernel, level - 1);
		}
	}
	vm_page* page = vm_lookup_page(ppn);
	DEBUG_PAGE_ACCESS_START(page);
	vm_page_set_state(page, PAGE_STATE_FREE);
}


static uint64
GetPageTableSize(page_num_t ppn, bool isKernel, uint32 level = 2)
{
	if (ppn == 0)
		return 0;

	if (level == 0)
		return 1;

	uint64 size = 1;
	Pte* pte = (Pte*)VirtFromPhys(ppn * B_PAGE_SIZE);
	uint64 beg = 0;
	uint64 end = pteCount - 1;
	if (level == 2 && !isKernel) {
		beg = VirtAdrPte(USER_BASE, 2);
		end = VirtAdrPte(USER_TOP, 2);
	}
	for (uint64 i = beg; i <= end; i++) {
		if (pte[i].isValid)
			size += GetPageTableSize(pte[i].ppn, isKernel, level - 1);
	}
	return size;
}


//#pragma mark RISCV64VMTranslationMap


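// Walk the page table to the level-0 PTE for virtAdr, allocating missing
// table pages from the reservation when alloc is true. A rough sketch of the
// address decomposition, assuming an Sv39-style layout with three levels of
// pteCount (512) entries each:
//
//   VPN[2] = VirtAdrPte(virtAdr, 2);  // index into the root table
//   VPN[1] = VirtAdrPte(virtAdr, 1);  // index into the mid-level table
//   VPN[0] = VirtAdrPte(virtAdr, 0);  // index into the leaf table
//
// Each non-leaf PTE holds the PPN of the next-level table page.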
std::atomic<Pte>*
RISCV64VMTranslationMap::LookupPte(addr_t virtAdr, bool alloc,
	vm_page_reservation* reservation)
{
	if (fPageTable == 0) {
		if (!alloc)
			return NULL;
		vm_page* page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
		fPageTable = page->physical_page_number * B_PAGE_SIZE;
		if (fPageTable == 0)
			return NULL;
		DEBUG_PAGE_ACCESS_END(page);
		fPageTableSize++;
		if (!fIsKernel) {
			// Map kernel address space into user address space. Preallocated
			// kernel level-2 PTEs are reused.
			RISCV64VMTranslationMap* kernelMap = (RISCV64VMTranslationMap*)
				VMAddressSpace::Kernel()->TranslationMap();
			Pte *kernelPageTable = (Pte*)VirtFromPhys(kernelMap->PageTable());
			Pte *userPageTable = (Pte*)VirtFromPhys(fPageTable);
			for (uint64 i = VirtAdrPte(KERNEL_BASE, 2);
				i <= VirtAdrPte(KERNEL_TOP, 2); i++) {
				Pte *pte = &userPageTable[i];
				pte->ppn = kernelPageTable[i].ppn;
				pte->isValid = true;
			}
		}
	}
	auto pte = (std::atomic<Pte>*)VirtFromPhys(fPageTable);
	for (int level = 2; level > 0; level--) {
		pte += VirtAdrPte(virtAdr, level);
		if (!pte->load().isValid) {
			if (!alloc)
				return NULL;
			vm_page* page = vm_page_allocate_page(reservation,
				PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
			page_num_t ppn = page->physical_page_number;
			if (ppn == 0)
				return NULL;
			DEBUG_PAGE_ACCESS_END(page);
			fPageTableSize++;
			Pte newPte {
				.isValid = true,
				.isGlobal = fIsKernel,
				.ppn = ppn
			};
			pte->store(newPte);
		}
		pte = (std::atomic<Pte>*)VirtFromPhys(B_PAGE_SIZE * pte->load().ppn);
	}
	pte += VirtAdrPte(virtAdr, 0);
	return pte;
}


phys_addr_t
RISCV64VMTranslationMap::LookupAddr(addr_t virtAdr)
{
	std::atomic<Pte>* pte = LookupPte(virtAdr, false, NULL);
	if (pte == NULL)
		return 0;
	Pte pteVal = pte->load();
	if (!pteVal.isValid)
		return 0;
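	// a kernel map must not resolve user pages, nor a user map kernel pages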
	if (fIsKernel != !pteVal.isUser)
		return 0;
	return pteVal.ppn * B_PAGE_SIZE;
}


RISCV64VMTranslationMap::RISCV64VMTranslationMap(bool kernel,
	phys_addr_t pageTable):
	fIsKernel(kernel),
	fPageTable(pageTable),
	fPageTableSize(GetPageTableSize(pageTable / B_PAGE_SIZE, kernel)),
	fInvalidPagesCount(0),
	fInvalidCode(false)
{
	TRACE("+RISCV64VMTranslationMap(%p, %d, 0x%" B_PRIxADDR ")\n", this,
		kernel, pageTable);
	TRACE("  pageTableSize: %" B_PRIu64 "\n", fPageTableSize);
}


RISCV64VMTranslationMap::~RISCV64VMTranslationMap()
{
	TRACE("-RISCV64VMTranslationMap(%p)\n", this);
	TRACE("  pageTableSize: %" B_PRIu64 "\n", fPageTableSize);
	TRACE("  GetPageTableSize(): %" B_PRIu64 "\n",
		GetPageTableSize(fPageTable / B_PAGE_SIZE, fIsKernel));

	ASSERT_ALWAYS(!fIsKernel);
	// Can't delete currently used page table
	ASSERT_ALWAYS(::Satp() != Satp());

	FreePageTable(fPageTable / B_PAGE_SIZE, fIsKernel);
}


bool
RISCV64VMTranslationMap::Lock()
{
	TRACE("RISCV64VMTranslationMap::Lock()\n");
	recursive_lock_lock(&fLock);
	return true;
}


void
RISCV64VMTranslationMap::Unlock()
{
	TRACE("RISCV64VMTranslationMap::Unlock()\n");
	if (recursive_lock_get_recursion(&fLock) == 1) {
		// we're about to release it for the last time
		Flush();
	}
	recursive_lock_unlock(&fLock);
}


addr_t
RISCV64VMTranslationMap::MappedSize() const
{
	NOT_IMPLEMENTED_PANIC();
	return 0;
}


size_t
RISCV64VMTranslationMap::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	enum {
		level0Range = (uint64_t)B_PAGE_SIZE * pteCount,
		level1Range = (uint64_t)level0Range * pteCount,
		level2Range = (uint64_t)level1Range * pteCount,
	};
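	// With 4 KiB pages and 512 PTEs per table (an Sv39-style layout), these
	// ranges are 2 MiB, 1 GiB and 512 GiB respectively. For example, mapping
	// a single page needs at most one new table page per level, i.e. three
	// pages in total.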

	if (start == 0) {
		start = (level2Range) - B_PAGE_SIZE;
		end += start;
	}

	size_t requiredLevel2 = end / level2Range + 1 - start / level2Range;
	size_t requiredLevel1 = end / level1Range + 1 - start / level1Range;
	size_t requiredLevel0 = end / level0Range + 1 - start / level0Range;

	return requiredLevel2 + requiredLevel1 + requiredLevel0;
}


status_t
RISCV64VMTranslationMap::Map(addr_t virtualAddress, phys_addr_t physicalAddress,
	uint32 attributes, uint32 memoryType,
	vm_page_reservation* reservation)
{
	TRACE("RISCV64VMTranslationMap::Map(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR
		")\n", virtualAddress, physicalAddress);

	ThreadCPUPinner pinner(thread_get_current_thread());

	std::atomic<Pte>* pte = LookupPte(virtualAddress, true, reservation);
	if (pte == NULL)
		panic("can't allocate page table");

	Pte newPte {
		.isValid = true,
		.isGlobal = fIsKernel,
		.ppn = physicalAddress / B_PAGE_SIZE
	};

	if ((attributes & B_USER_PROTECTION) != 0) {
		newPte.isUser = true;
		if ((attributes & B_READ_AREA) != 0)
			newPte.isRead = true;
		if ((attributes & B_WRITE_AREA) != 0)
			newPte.isWrite = true;
		if ((attributes & B_EXECUTE_AREA) != 0) {
			newPte.isExec = true;
			fInvalidCode = true;
		}
	} else {
		if ((attributes & B_KERNEL_READ_AREA) != 0)
			newPte.isRead = true;
		if ((attributes & B_KERNEL_WRITE_AREA) != 0)
			newPte.isWrite = true;
		if ((attributes & B_KERNEL_EXECUTE_AREA) != 0) {
			newPte.isExec = true;
			fInvalidCode = true;
		}
	}

	pte->store(newPte);

	// Note: We don't need to invalidate the TLB for this address, as previously
	// the entry was not present and the TLB doesn't cache those entries.

	fMapCount++;

	return B_OK;
}


status_t
RISCV64VMTranslationMap::Unmap(addr_t start, addr_t end)
{
	TRACE("RISCV64VMTranslationMap::Unmap(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR
		")\n", start, end);

	ThreadCPUPinner pinner(thread_get_current_thread());

	for (addr_t page = start; page < end; page += B_PAGE_SIZE) {
		std::atomic<Pte>* pte = LookupPte(page, false, NULL);
		if (pte != NULL) {
			fMapCount--;
			Pte oldPte = pte->exchange({});
			if (oldPte.isAccessed)
				InvalidatePage(page);
		}
	}
	return B_OK;
}


status_t
RISCV64VMTranslationMap::DebugMarkRangePresent(addr_t start, addr_t end,
	bool markPresent)
{
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}


/*
Things that need to be done when unmapping VMArea pages:
	update vm_page::accessed, modified
	MMIO pages:
		just unmap
	wired pages:
		decrement wired count
	non-wired pages:
		remove from VMArea and vm_page `mappings` list
	wired and non-wired pages:
		vm_page_set_state
*/
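
// For example, the hardware-maintained flags of a cleared PTE are carried
// over to the vm_page roughly like this (see UnmapPages() below):
//
//   Pte oldPte = pte->exchange({});
//   page->accessed = oldPte.isAccessed;
//   page->modified = oldPte.isDirty;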

status_t
RISCV64VMTranslationMap::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	TRACE("RISCV64VMTranslationMap::UnmapPage(0x%" B_PRIxADDR "(%s), 0x%"
		B_PRIxADDR ", %d)\n", (addr_t)area, area->name, address,
		updatePageQueue);

	ThreadCPUPinner pinner(thread_get_current_thread());

	std::atomic<Pte>* pte = LookupPte(address, false, NULL);
	if (pte == NULL || !pte->load().isValid)
		return B_ENTRY_NOT_FOUND;

	RecursiveLocker locker(fLock);

	Pte oldPte = pte->exchange({});
	fMapCount--;
	pinner.Unlock();

	if (oldPte.isAccessed)
		InvalidatePage(address);

	Flush();

	locker.Detach(); // PageUnmapped takes ownership
	PageUnmapped(area, oldPte.ppn, oldPte.isAccessed, oldPte.isDirty,
		updatePageQueue);
	return B_OK;
}


void
RISCV64VMTranslationMap::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	TRACE("RISCV64VMTranslationMap::UnmapPages(0x%" B_PRIxADDR "(%s), 0x%"
		B_PRIxADDR ", 0x%" B_PRIxSIZE ", %d)\n", (addr_t)area,
		area->name, base, size, updatePageQueue);

	if (size == 0)
		return;

	addr_t end = base + size - 1;

	VMAreaMappings queue;
	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	for (addr_t start = base; start < end; start += B_PAGE_SIZE) {
		std::atomic<Pte>* pte = LookupPte(start, false, NULL);
		if (pte == NULL)
			continue;

		Pte oldPte = pte->exchange({});
		if (!oldPte.isValid)
			continue;

		fMapCount--;

		if (oldPte.isAccessed)
			InvalidatePage(start);

		if (area->cache_type != CACHE_TYPE_DEVICE) {
			// get the page
			vm_page* page = vm_lookup_page(oldPte.ppn);
			ASSERT(page != NULL);
			if (false) {
				WriteVmPage(page); dprintf("\n");
			}

			DEBUG_PAGE_ACCESS_START(page);

			// transfer the accessed/dirty flags to the page
			page->accessed = oldPte.isAccessed;
			page->modified = oldPte.isDirty;

			// remove the mapping object/decrement the wired_count of the
			// page
			if (area->wiring == B_NO_LOCK) {
				vm_page_mapping* mapping = NULL;
				vm_page_mappings::Iterator iterator
					= page->mappings.GetIterator();
				while ((mapping = iterator.Next()) != NULL) {
					if (mapping->area == area)
						break;
				}

				ASSERT(mapping != NULL);

				area->mappings.Remove(mapping);
				page->mappings.Remove(mapping);
				queue.Add(mapping);
			} else
				page->DecrementWiredCount();

			if (!page->IsMapped()) {
				atomic_add(&gMappedPagesCount, -1);

				if (updatePageQueue) {
					if (page->Cache()->temporary)
						vm_page_set_state(page, PAGE_STATE_INACTIVE);
					else if (page->modified)
						vm_page_set_state(page, PAGE_STATE_MODIFIED);
					else
						vm_page_set_state(page, PAGE_STATE_CACHED);
				}
			}

			DEBUG_PAGE_ACCESS_END(page);
		}

		// flush explicitly, since we directly use the lock
		Flush();
	}

	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
	// really critical, as in all cases in which this method is used, the
	// unmapped address range is unmapped for good (resized/cut) and the pages
	// will likely be freed.

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);

	while (vm_page_mapping* mapping = queue.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


void
RISCV64VMTranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	TRACE("RISCV64VMTranslationMap::UnmapArea(0x%" B_PRIxADDR "(%s), 0x%"
		B_PRIxADDR ", 0x%" B_PRIxSIZE ", %d, %d)\n", (addr_t)area,
		area->name, area->Base(), area->Size(), deletingAddressSpace,
		ignoreTopCachePageFlags);

	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		UnmapPages(area, area->Base(), area->Size(), true);
		return;
	}

	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;

	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {

		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE)
				- area->cache_offset);

			std::atomic<Pte>* pte = LookupPte(address, false, NULL);
			if (pte == NULL || !pte->load().isValid) {
				panic("page %p has mapping for area %p "
					"(%#" B_PRIxADDR "), but has no "
					"page table", page, area, address);
				continue;
			}

			Pte oldPte = pte->exchange({});

			// transfer the accessed/dirty flags to the page and
			// invalidate the mapping, if necessary
			if (oldPte.isAccessed) {
				page->accessed = true;

				if (!deletingAddressSpace)
					InvalidatePage(address);
			}

			if (oldPte.isDirty)
				page->modified = true;

			if (pageFullyUnmapped) {
				DEBUG_PAGE_ACCESS_START(page);

				if (cache->temporary) {
					vm_page_set_state(page,
						PAGE_STATE_INACTIVE);
				} else if (page->modified) {
					vm_page_set_state(page,
						PAGE_STATE_MODIFIED);
				} else {
					vm_page_set_state(page,
						PAGE_STATE_CACHED);
				}

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		fMapCount--;
	}

	Flush();
		// flush explicitly, since we directly use the lock

	locker.Unlock();

	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);

	while (vm_page_mapping* mapping = mappings.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


status_t
RISCV64VMTranslationMap::Query(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	*_flags = 0;
	*_physicalAddress = 0;

	ThreadCPUPinner pinner(thread_get_current_thread());

	if (fPageTable == 0)
		return B_OK;

	std::atomic<Pte>* pte = LookupPte(virtualAddress, false, NULL);
	if (pte == NULL)
		return B_OK;

	Pte pteVal = pte->load();
	*_physicalAddress = pteVal.ppn * B_PAGE_SIZE;

	if (pteVal.isValid)
		*_flags |= PAGE_PRESENT;
	if (pteVal.isDirty)
		*_flags |= PAGE_MODIFIED;
	if (pteVal.isAccessed)
		*_flags |= PAGE_ACCESSED;
	if (pteVal.isUser) {
		if (pteVal.isRead)
			*_flags |= B_READ_AREA;
		if (pteVal.isWrite)
			*_flags |= B_WRITE_AREA;
		if (pteVal.isExec)
			*_flags |= B_EXECUTE_AREA;
	} else {
		if (pteVal.isRead)
			*_flags |= B_KERNEL_READ_AREA;
		if (pteVal.isWrite)
			*_flags |= B_KERNEL_WRITE_AREA;
		if (pteVal.isExec)
			*_flags |= B_KERNEL_EXECUTE_AREA;
	}

	return B_OK;
}


status_t
RISCV64VMTranslationMap::QueryInterrupt(addr_t virtualAddress,
	phys_addr_t* _physicalAddress, uint32* _flags)
{
	return Query(virtualAddress, _physicalAddress, _flags);
}


status_t
RISCV64VMTranslationMap::Protect(addr_t base, addr_t top, uint32 attributes,
	uint32 memoryType)
{
	TRACE("RISCV64VMTranslationMap::Protect(0x%" B_PRIxADDR ", 0x%"
		B_PRIxADDR ")\n", base, top);

	ThreadCPUPinner pinner(thread_get_current_thread());

	for (addr_t page = base; page < top; page += B_PAGE_SIZE) {

		std::atomic<Pte>* pte = LookupPte(page, false, NULL);
		if (pte == NULL || !pte->load().isValid) {
			TRACE("attempt to protect not mapped page: 0x%"
				B_PRIxADDR "\n", page);
			continue;
		}

		Pte oldPte {};
		Pte newPte {};
		while (true) {
			oldPte = pte->load();

			newPte = oldPte;
			if ((attributes & B_USER_PROTECTION) != 0) {
				newPte.isUser = true;
				newPte.isRead  = (attributes & B_READ_AREA)    != 0;
				newPte.isWrite = (attributes & B_WRITE_AREA)   != 0;
				newPte.isExec  = (attributes & B_EXECUTE_AREA) != 0;
			} else {
				newPte.isUser = false;
				newPte.isRead  = (attributes & B_KERNEL_READ_AREA)    != 0;
				newPte.isWrite = (attributes & B_KERNEL_WRITE_AREA)   != 0;
				newPte.isExec  = (attributes & B_KERNEL_EXECUTE_AREA) != 0;
			}

			if (pte->compare_exchange_strong(oldPte, newPte))
				break;
		}

		if (newPte.isExec)
			fInvalidCode = true;

		if (oldPte.isAccessed)
			InvalidatePage(page);
	}

	return B_OK;
}


status_t
RISCV64VMTranslationMap::ProtectPage(VMArea* area, addr_t address,
	uint32 attributes)
{
	NOT_IMPLEMENTED_PANIC();
	return B_OK;
}


status_t
RISCV64VMTranslationMap::ProtectArea(VMArea* area, uint32 attributes)
{
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}


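// Convert generic page flags to a raw PTE bit mask. A Pte built with
// designated initializers and read through its `val` member yields the raw
// bits, so callers can set or clear the accessed/dirty flags of a live PTE
// with a single atomic OR/AND, as SetFlags() and ClearFlags() below do.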
static inline uint64
ConvertAccessedFlags(uint32 flags)
{
	Pte pteFlags {
		.isAccessed = (flags & PAGE_ACCESSED) != 0,
		.isDirty = (flags & PAGE_MODIFIED) != 0
	};
	return pteFlags.val;
}


void
RISCV64VMTranslationMap::SetFlags(addr_t address, uint32 flags)
{
	// Only called from interrupt handler with interrupts disabled for CPUs
	// that don't support setting accessed/modified flags by hardware.

	std::atomic<Pte>* pte = LookupPte(address, false, NULL);
	if (pte == NULL || !pte->load().isValid)
		return;

	*(std::atomic<uint64>*)pte |= ConvertAccessedFlags(flags);

	if (IS_KERNEL_ADDRESS(address))
		FlushTlbPage(address);
	else
		FlushTlbPageAsid(address, 0);
}


status_t
RISCV64VMTranslationMap::ClearFlags(addr_t address, uint32 flags)
{
	ThreadCPUPinner pinner(thread_get_current_thread());

	std::atomic<Pte>* pte = LookupPte(address, false, NULL);
	if (pte == NULL || !pte->load().isValid)
		return B_OK;

	*(std::atomic<uint64>*)pte &= ~ConvertAccessedFlags(flags);
	InvalidatePage(address);
	return B_OK;
}


bool
RISCV64VMTranslationMap::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	TRACE("RISCV64VMTranslationMap::ClearAccessedAndModified(0x%"
		B_PRIxADDR "(%s), 0x%" B_PRIxADDR ", %d)\n", (addr_t)area,
		area->name, address, unmapIfUnaccessed);

	RecursiveLocker locker(fLock);
	ThreadCPUPinner pinner(thread_get_current_thread());

	std::atomic<Pte>* pte = LookupPte(address, false, NULL);
	if (pte == NULL || !pte->load().isValid)
		return false;

	Pte oldPte {};
	if (unmapIfUnaccessed) {
		for (;;) {
			oldPte = pte->load();
			if (!oldPte.isValid)
				return false;

			if (oldPte.isAccessed) {
				oldPte.val = ((std::atomic<uint64>*)pte)->fetch_and(
					~Pte {.isAccessed = true, .isDirty = true}.val);
				break;
			}
			if (pte->compare_exchange_strong(oldPte, {}))
				break;
		}
	} else {
		oldPte.val = ((std::atomic<uint64>*)pte)->fetch_and(
			~Pte {.isAccessed = true, .isDirty = true}.val);
	}

	pinner.Unlock();
	_modified = oldPte.isDirty;
	if (oldPte.isAccessed) {
		InvalidatePage(address);
		Flush();
		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	fMapCount--;

	locker.Detach(); // UnaccessedPageUnmapped takes ownership
	UnaccessedPageUnmapped(area, oldPte.ppn);
	return false;
}


void
RISCV64VMTranslationMap::Flush()
{
	// copy of X86VMTranslationMap::Flush
	// TODO: move to common VMTranslationMap class

	if (fInvalidPagesCount <= 0)
		return;

	ThreadCPUPinner pinner(thread_get_current_thread());

	if (fInvalidPagesCount > PAGE_INVALIDATE_CACHE_SIZE) {
		// invalidate all pages
		TRACE("flush_tmap: %d pages to invalidate, invalidate all\n",
			fInvalidPagesCount);

		if (fIsKernel) {
			arch_cpu_global_TLB_invalidate();

			smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVALIDATE_PAGES, 0, 0, 0,
				NULL, SMP_MSG_FLAG_SYNC);
		} else {
			cpu_status state = disable_interrupts();
			arch_cpu_user_TLB_invalidate();
			restore_interrupts(state);

			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = fActiveOnCpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_USER_INVALIDATE_PAGES,
					0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
			}
		}
	} else {
		TRACE("flush_tmap: %d pages to invalidate, invalidate list\n",
			fInvalidPagesCount);

		arch_cpu_invalidate_TLB_list(fInvalidPages, fInvalidPagesCount);

		if (fIsKernel) {
			smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST,
				(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
				SMP_MSG_FLAG_SYNC);
		} else {
			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = fActiveOnCpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_INVALIDATE_PAGE_LIST,
					(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
					SMP_MSG_FLAG_SYNC);
			}
		}
	}
	fInvalidPagesCount = 0;

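	// RISC-V requires an explicit FENCE.I before executing newly written
	// code: FenceI() below handles the local hart, and on SBI platforms the
	// other active harts are reached via sbi_remote_fence_i().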
	if (fInvalidCode) {
		FenceI();

		int cpu = smp_get_current_cpu();
		CPUSet cpuMask = fActiveOnCpus;
		cpuMask.ClearBit(cpu);

		if (!cpuMask.IsEmpty()) {
			switch (gPlatform) {
				case kPlatformSbi: {
					uint64 hartMask = 0;
					int32 cpuCount = smp_get_num_cpus();
					for (int32 i = 0; i < cpuCount; i++) {
						if (cpuMask.GetBit(i))
							hartMask |= (uint64)1 << gCPU[i].arch.hartId;
					}
					// TODO: handle hart ID >= 64
					memory_full_barrier();
					sbi_remote_fence_i(hartMask, 0);
					break;
				}
			}
		}
		fInvalidCode = false;
	}
}


void
RISCV64VMTranslationMap::DebugPrintMappingInfo(addr_t virtualAddress)
{
	NOT_IMPLEMENTED_PANIC();
}


bool
RISCV64VMTranslationMap::DebugGetReverseMappingInfo(phys_addr_t physicalAddress,
	ReverseMappingInfoCallback& callback)
{
	NOT_IMPLEMENTED_PANIC();
	return false;
}


status_t
RISCV64VMTranslationMap::MemcpyToMap(addr_t to, const char *from, size_t size)
{
	TRACE("RISCV64VMTranslationMap::MemcpyToMap(0x%" B_PRIxADDR ", 0x%"
		B_PRIxADDR ", %" B_PRIuSIZE ")\n", to, (addr_t)from, size);

	while (size > 0) {
		uint64 va0 = ROUNDDOWN(to, B_PAGE_SIZE);
		uint64 pa0 = LookupAddr(va0);
		TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%" B_PRIxADDR "\n",
			va0, pa0);

		if (pa0 == 0) {
			TRACE("[!] not mapped: 0x%" B_PRIxADDR "\n", va0);
			return B_BAD_ADDRESS;
		}

		uint64 n = B_PAGE_SIZE - (to - va0);
		if (n > size)
			n = size;

		memcpy(VirtFromPhys(pa0 + (to - va0)), from, n);

		size -= n;
		from += n;
		to = va0 + B_PAGE_SIZE;
	}
	return B_OK;
}


status_t
RISCV64VMTranslationMap::MemcpyFromMap(char *to, addr_t from, size_t size)
{
	TRACE("RISCV64VMTranslationMap::MemcpyFromMap(0x%" B_PRIxADDR
		", 0x%" B_PRIxADDR ", %" B_PRIuSIZE ")\n",
		(addr_t)to, from, size);

	while (size > 0) {
		uint64 va0 = ROUNDDOWN(from, B_PAGE_SIZE);
		uint64 pa0 = LookupAddr(va0);
		if (pa0 == 0) {
			TRACE("[!] not mapped: 0x%" B_PRIxADDR
				", calling page fault handler\n", va0);

			addr_t newIP;
			vm_page_fault(va0, Ra(), true, false, true, &newIP);

			pa0 = LookupAddr(va0);
			TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%"
				B_PRIxADDR "\n", va0, pa0);

			if (pa0 == 0)
				return B_BAD_ADDRESS;
		}
		uint64 n = B_PAGE_SIZE - (from - va0);
		if (n > size)
			n = size;

		memcpy(to, VirtFromPhys(pa0 + (from - va0)), n);

		size -= n;
		to += n;
		from = va0 + B_PAGE_SIZE;
	}

	return B_OK;
}


status_t
RISCV64VMTranslationMap::MemsetToMap(addr_t to, char c, size_t count)
{
	TRACE("RISCV64VMTranslationMap::MemsetToMap(0x%" B_PRIxADDR
		", %d, %" B_PRIuSIZE ")\n", to, c, count);

	while (count > 0) {
		uint64 va0 = ROUNDDOWN(to, B_PAGE_SIZE);
		uint64 pa0 = LookupAddr(va0);
		TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%" B_PRIxADDR "\n",
			va0, pa0);

		if (pa0 == 0) {
			TRACE("[!] not mapped: 0x%" B_PRIxADDR
				", calling page fault handler\n", va0);
			addr_t newIP;
			vm_page_fault(va0, Ra(), true, false, true, &newIP);
			pa0 = LookupAddr(va0);
			TRACE("LookupAddr(0x%" B_PRIxADDR "): 0x%"
				B_PRIxADDR "\n", va0, pa0);

			if (pa0 == 0)
				return B_BAD_ADDRESS;
		}

		uint64 n = B_PAGE_SIZE - (to - va0);
		if (n > count)
			n = count;

		memset(VirtFromPhys(pa0 + (to - va0)), c, n);

		count -= n;
		to = va0 + B_PAGE_SIZE;
	}
	return B_OK;
}


ssize_t
RISCV64VMTranslationMap::StrlcpyFromMap(char *to, addr_t from, size_t size)
{
	// NOT_IMPLEMENTED_PANIC();
	return strlcpy(to, (const char*)from, size);
	// return 0;
}


ssize_t
RISCV64VMTranslationMap::StrlcpyToMap(addr_t to, const char *from, size_t size)
{
	ssize_t len = strlen(from) + 1;
	if ((size_t)len > size)
		len = size;

	if (MemcpyToMap(to, from, len) < B_OK)
		return 0;

	return len;
}


//#pragma mark RISCV64VMPhysicalPageMapper


RISCV64VMPhysicalPageMapper::RISCV64VMPhysicalPageMapper()
{
	TRACE("+RISCV64VMPhysicalPageMapper\n");
}


RISCV64VMPhysicalPageMapper::~RISCV64VMPhysicalPageMapper()
{
	TRACE("-RISCV64VMPhysicalPageMapper\n");
}


status_t
RISCV64VMPhysicalPageMapper::GetPage(phys_addr_t physicalAddress,
	addr_t* _virtualAddress, void** _handle)
{
	*_virtualAddress = (addr_t)VirtFromPhys(physicalAddress);
	*_handle = (void*)1;
	return B_OK;
}


status_t
RISCV64VMPhysicalPageMapper::PutPage(addr_t virtualAddress, void* handle)
{
	return B_OK;
}


status_t
RISCV64VMPhysicalPageMapper::GetPageCurrentCPU(phys_addr_t physicalAddress,
	addr_t* _virtualAddress, void** _handle)
{
	return GetPage(physicalAddress, _virtualAddress, _handle);
}


status_t
RISCV64VMPhysicalPageMapper::PutPageCurrentCPU(addr_t virtualAddress,
	void* _handle)
{
	return PutPage(virtualAddress, _handle);
}


status_t
RISCV64VMPhysicalPageMapper::GetPageDebug(phys_addr_t physicalAddress,
	addr_t* _virtualAddress, void** _handle)
{
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}


status_t
RISCV64VMPhysicalPageMapper::PutPageDebug(addr_t virtualAddress, void* handle)
{
	NOT_IMPLEMENTED_PANIC();
	return B_NOT_SUPPORTED;
}


status_t
RISCV64VMPhysicalPageMapper::MemsetPhysical(phys_addr_t address, int value,
	phys_size_t length)
{
	TRACE("RISCV64VMPhysicalPageMapper::MemsetPhysical(0x%" B_PRIxADDR
		", 0x%x, 0x%" B_PRIxADDR ")\n", address, value, length);
	return user_memset(VirtFromPhys(address), value, length);
}


status_t
RISCV64VMPhysicalPageMapper::MemcpyFromPhysical(void* to, phys_addr_t from,
	size_t length, bool user)
{
	TRACE("RISCV64VMPhysicalPageMapper::MemcpyFromPhysical(0x%" B_PRIxADDR
		", 0x%" B_PRIxADDR ", %" B_PRIuSIZE ")\n", (addr_t)to,
		from, length);
	return user_memcpy(to, VirtFromPhys(from), length);
}


status_t
RISCV64VMPhysicalPageMapper::MemcpyToPhysical(phys_addr_t to, const void* from,
	size_t length, bool user)
{
	TRACE("RISCV64VMPhysicalPageMapper::MemcpyToPhysical(0x%" B_PRIxADDR
		", 0x%" B_PRIxADDR ", %" B_PRIuSIZE ")\n", to, (addr_t)from,
		length);
	return user_memcpy(VirtFromPhys(to), from, length);
}


void
RISCV64VMPhysicalPageMapper::MemcpyPhysicalPage(phys_addr_t to,
	phys_addr_t from)
{
	TRACE("RISCV64VMPhysicalPageMapper::MemcpyPhysicalPage(0x%" B_PRIxADDR
		", 0x%" B_PRIxADDR ")\n", to, from);
	user_memcpy(VirtFromPhys(to), VirtFromPhys(from), B_PAGE_SIZE);
}