1/*
2 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
3 * Distributed under the terms of the MIT License.
4 */
5#ifndef KERNEL_ARCH_X86_PAGING_X86_PHYSICAL_PAGE_MAPPER_MAPPED_H
6#define KERNEL_ARCH_X86_PAGING_X86_PHYSICAL_PAGE_MAPPER_MAPPED_H
7
8
#include <OS.h>

#include <new>
#include <string.h>

#include <cpu.h>
#include <kernel.h>
#include <smp.h>
#include <vm/vm.h>
#include <vm/vm_types.h>
#include <vm/VMAddressSpace.h>

#include "paging/x86_physical_page_mapper.h"
#include "paging/X86PagingStructures.h"
#include "paging/X86VMTranslationMap.h"
21
22
23struct kernel_args;
24
25
26/*!	Physical page mapper implementation for use where the whole of physical
27	memory is permanently mapped into the kernel address space.
28
29	This is used on x86_64 where the virtual address space is likely a great
30	deal larger than the amount of physical memory in the machine, so it can
31	all be mapped in permanently, which is faster and makes life much easier.
32*/
33
34
35// #pragma mark - TranslationMapPhysicalPageMapper
36
37
inline void
TranslationMapPhysicalPageMapper::Delete()
{
	// Mappers are individually heap-allocated by
	// X86PhysicalPageMapper::CreateTranslationMapPhysicalPageMapper(), so
	// self-deletion is the matching release. Callers must not use the
	// object after calling Delete().
	delete this;
}
43
44
45inline void*
46TranslationMapPhysicalPageMapper::GetPageTableAt(
47	phys_addr_t physicalAddress)
48{
49	ASSERT(physicalAddress % B_PAGE_SIZE == 0);
50
51	return (void*)(physicalAddress + KERNEL_PMAP_BASE);
52}
53
54
55// #pragma mark - X86PhysicalPageMapper
56
57
58inline status_t
59X86PhysicalPageMapper::CreateTranslationMapPhysicalPageMapper(
60	TranslationMapPhysicalPageMapper** _mapper)
61{
62	auto mapper = new(std::nothrow) TranslationMapPhysicalPageMapper;
63	if (mapper == NULL)
64		return B_NO_MEMORY;
65
66	*_mapper = mapper;
67	return B_OK;
68}
69
70
71inline void*
72X86PhysicalPageMapper::InterruptGetPageTableAt(
73	phys_addr_t physicalAddress)
74{
75	ASSERT(physicalAddress % B_PAGE_SIZE == 0);
76
77	return (void*)(physicalAddress + KERNEL_PMAP_BASE);
78}
79
80
81inline status_t
82X86PhysicalPageMapper::GetPage(phys_addr_t physicalAddress,
83	addr_t* virtualAddress, void** handle)
84{
85	if (physicalAddress >= KERNEL_PMAP_BASE)
86		return B_BAD_ADDRESS;
87
88	*virtualAddress = physicalAddress + KERNEL_PMAP_BASE;
89	return B_OK;
90}
91
92
inline status_t
X86PhysicalPageMapper::PutPage(addr_t virtualAddress, void* handle)
{
	// Nothing to release: GetPage() hands out addresses inside the
	// permanent physical map and allocates no per-mapping resources.
	return B_OK;
}
98
99
100inline status_t
101X86PhysicalPageMapper::GetPageCurrentCPU(phys_addr_t physicalAddress,
102	addr_t* virtualAddress, void** handle)
103{
104	if (physicalAddress >= KERNEL_PMAP_BASE)
105		return B_BAD_ADDRESS;
106
107	*virtualAddress = physicalAddress + KERNEL_PMAP_BASE;
108	return B_OK;
109}
110
111
inline status_t
X86PhysicalPageMapper::PutPageCurrentCPU(addr_t virtualAddress,
	void* handle)
{
	// No-op: GetPageCurrentCPU() acquires no per-CPU mapping resources.
	return B_OK;
}
118
119
120inline status_t
121X86PhysicalPageMapper::GetPageDebug(phys_addr_t physicalAddress,
122	addr_t* virtualAddress, void** handle)
123{
124	if (physicalAddress >= KERNEL_PMAP_BASE)
125		return B_BAD_ADDRESS;
126
127	*virtualAddress = physicalAddress + KERNEL_PMAP_BASE;
128	return B_OK;
129}
130
131
inline status_t
X86PhysicalPageMapper::PutPageDebug(addr_t virtualAddress, void* handle)
{
	// No-op: GetPageDebug() acquires no resources to release.
	return B_OK;
}
137
138
139inline status_t
140X86PhysicalPageMapper::MemsetPhysical(phys_addr_t address, int value,
141	phys_size_t length)
142{
143	if (address >= KERNEL_PMAP_SIZE || address + length > KERNEL_PMAP_SIZE)
144		return B_BAD_ADDRESS;
145
146	memset((void*)(address + KERNEL_PMAP_BASE), value, length);
147	return B_OK;
148}
149
150
151inline status_t
152X86PhysicalPageMapper::MemcpyFromPhysical(void* to, phys_addr_t _from,
153	size_t length, bool user)
154{
155	if (_from >= KERNEL_PMAP_SIZE || _from + length > KERNEL_PMAP_SIZE)
156		return B_BAD_ADDRESS;
157
158	auto from = (void*)(_from + KERNEL_PMAP_BASE);
159
160	if (user)
161		return user_memcpy(to, from, length);
162	else
163		memcpy(to, from, length);
164
165	return B_OK;
166}
167
168
169inline status_t
170X86PhysicalPageMapper::MemcpyToPhysical(phys_addr_t _to, const void* from,
171	size_t length, bool user)
172{
173	if (_to >= KERNEL_PMAP_SIZE || _to + length > KERNEL_PMAP_SIZE)
174		return B_BAD_ADDRESS;
175
176	auto to = (void*)(_to + KERNEL_PMAP_BASE);
177
178	if (user)
179		return user_memcpy(to, from, length);
180
181	memcpy(to, from, length);
182	return B_OK;
183}
184
185
186inline void
187X86PhysicalPageMapper::MemcpyPhysicalPage(phys_addr_t to,
188	phys_addr_t from)
189{
190	memcpy((void*)(to + KERNEL_PMAP_BASE), (void*)(from + KERNEL_PMAP_BASE),
191		B_PAGE_SIZE);
192}
193
194
195status_t mapped_physical_page_ops_init(kernel_args* args,
196	X86PhysicalPageMapper*& _pageMapper,
197	TranslationMapPhysicalPageMapper*& _kernelPageMapper);
198
199
200#endif	// KERNEL_ARCH_X86_PAGING_X86_PHYSICAL_PAGE_MAPPER_MAPPED_H
201