/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/M68KVMTranslationMap.h"

#include <thread.h>
#include <smp.h>

#include "paging/M68KPagingStructures.h"


//#define TRACE_M68K_VM_TRANSLATION_MAP
#ifdef TRACE_M68K_VM_TRANSLATION_MAP
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


M68KVMTranslationMap::M68KVMTranslationMap()
	:
	fPageMapper(NULL),
	fInvalidPagesCount(0)
{
}


M68KVMTranslationMap::~M68KVMTranslationMap()
{
}


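/*!	Initializes the translation map. \a kernel specifies whether the map is
	the kernel's address space map.
*/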
status_t
M68KVMTranslationMap::Init(bool kernel)
{
	fIsKernelMap = kernel;
	return B_OK;
}


/*!	Acquires the map's recursive lock, and resets the invalidated pages
	counter in case it's the first locking recursion.
*/
bool
M68KVMTranslationMap::Lock()
{
	TRACE("%p->M68KVMTranslationMap::Lock()\n", this);

	recursive_lock_lock(&fLock);
	if (recursive_lock_get_recursion(&fLock) == 1) {
		// we were the first one to grab the lock
		TRACE("clearing invalidated page count\n");
		fInvalidPagesCount = 0;
	}

	return true;
}


/*!	Unlocks the map and, if the recursive lock is actually being released,
	flushes all pending changes of this map (i.e. flushes TLB caches as
	needed).
*/
void
M68KVMTranslationMap::Unlock()
{
	TRACE("%p->M68KVMTranslationMap::Unlock()\n", this);

	if (recursive_lock_get_recursion(&fLock) == 1) {
		// we're about to release it for the last time
		Flush();
	}

	recursive_lock_unlock(&fLock);
}
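
/*	Typical usage of the locking protocol above (sketch only; the caller code
	is hypothetical, with "map" being a M68KVMTranslationMap*):

		map->Lock();
		// ... modify mappings; the paging method specific implementation
		// queues the affected pages in fInvalidPages ...
		map->Unlock();
			// the outermost Unlock() calls Flush(), which performs the
			// queued TLB invalidations

	Lock()/Unlock() pairs may nest; only the final Unlock() flushes.
*/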


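/*!	Returns the number of pages currently mapped by this translation map
	(\c fMapCount).
*/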
addr_t
M68KVMTranslationMap::MappedSize() const
{
	return fMapCount;
}


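/*!	Performs the pending TLB invalidations for this map: the pages queued in
	\c fInvalidPages are invalidated on the current CPU and, via ICIs, on all
	other CPUs the map is active on. If more than
	\c PAGE_INVALIDATE_CACHE_SIZE pages are pending, the entire TLB is
	invalidated instead of the individual pages.
*/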
void
M68KVMTranslationMap::Flush()
{
	if (fInvalidPagesCount <= 0)
		return;

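	// Pin the thread to the current CPU, so that smp_get_current_cpu() below
	// (and the exclusion of this CPU from the multicast ICIs) remains valid
	// until we are done.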
	Thread* thread = thread_get_current_thread();
	thread_pin_to_current_cpu(thread);

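	// With too many pages pending, invalidating the entire TLB is cheaper
	// than invalidating each page individually.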
	if (fInvalidPagesCount > PAGE_INVALIDATE_CACHE_SIZE) {
		// invalidate all pages
		TRACE("flush_tmap: %d pages to invalidate, invalidate all\n",
			fInvalidPagesCount);

		if (fIsKernelMap) {
			arch_cpu_global_TLB_invalidate();
			smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVALIDATE_PAGES, 0, 0, 0,
				NULL, SMP_MSG_FLAG_SYNC);
		} else {
			cpu_status state = disable_interrupts();
			arch_cpu_user_TLB_invalidate();
			restore_interrupts(state);

			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = PagingStructures()->active_on_cpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_USER_INVALIDATE_PAGES,
					0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
			}
		}
	} else {
		TRACE("flush_tmap: %d pages to invalidate, invalidate list\n",
			fInvalidPagesCount);

		arch_cpu_invalidate_TLB_list(fInvalidPages, fInvalidPagesCount);

		if (fIsKernelMap) {
			smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST,
				(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
				SMP_MSG_FLAG_SYNC);
		} else {
			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = PagingStructures()->active_on_cpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_INVALIDATE_PAGE_LIST,
					(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
					SMP_MSG_FLAG_SYNC);
			}
		}
	}
	fInvalidPagesCount = 0;

	thread_unpin_from_current_cpu(thread);
}