//===- Win32/Memory.cpp - Win32 Memory Implementation -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides the Win32 specific implementation of various Memory
// management utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/WindowsError.h"

// The Windows.h header must be the last one included.
#include "llvm/Support/Windows/WindowsSupport.h"

static DWORD getWindowsProtectionFlags(unsigned Flags) {
  switch (Flags & llvm::sys::Memory::MF_RWE_MASK) {
  // Contrary to what you might expect, the Windows page protection flags
  // are not a bitwise combination of RWX values.
  case llvm::sys::Memory::MF_READ:
    return PAGE_READONLY;
  case llvm::sys::Memory::MF_WRITE:
    // Note: PAGE_WRITE is not supported by VirtualProtect.
    return PAGE_READWRITE;
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_WRITE:
    return PAGE_READWRITE;
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_EXEC:
    return PAGE_EXECUTE_READ;
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_WRITE |
      llvm::sys::Memory::MF_EXEC:
    return PAGE_EXECUTE_READWRITE;
  case llvm::sys::Memory::MF_EXEC:
    return PAGE_EXECUTE;
  default:
    llvm_unreachable("Illegal memory protection flag specified!");
  }
  // Provide a default return value as required by some compilers.
  return PAGE_NOACCESS;
}

// While we'd be happy to allocate single pages, the Windows allocation
// granularity may be larger than a single page (in practice, it is 64K),
// so mapping less than that will create an unreachable fragment of memory.
static size_t getAllocationGranularity() {
  SYSTEM_INFO Info;
  ::GetSystemInfo(&Info);
  if (Info.dwPageSize > Info.dwAllocationGranularity)
    return Info.dwPageSize;
  else
    return Info.dwAllocationGranularity;
}

// Large/huge memory pages need explicit process permissions in order to be
// used. See https://blogs.msdn.microsoft.com/oldnewthing/20110128-00/?p=11643
// Large pages also need to be manually enabled on the OS. If all of this is
// successful, we return the minimum large memory page size.
static size_t enableProcessLargePages() {
  HANDLE Token = 0;
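  // GetLargePageMinimum returns 0 when the processor (or OS) does not support
  // large pages, in which case we skip the privilege dance entirely.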
  size_t LargePageMin = GetLargePageMinimum();
  if (LargePageMin)
    OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY,
                     &Token);
  if (!Token)
    return 0;
  LUID Luid;
  if (!LookupPrivilegeValue(0, SE_LOCK_MEMORY_NAME, &Luid)) {
    CloseHandle(Token);
    return 0;
  }
  TOKEN_PRIVILEGES TP{};
  TP.PrivilegeCount = 1;
  TP.Privileges[0].Luid = Luid;
  TP.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
  if (!AdjustTokenPrivileges(Token, FALSE, &TP, 0, 0, 0)) {
    CloseHandle(Token);
    return 0;
  }
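  // AdjustTokenPrivileges can succeed even when the privilege was not actually
  // granted; in that case GetLastError() reports ERROR_NOT_ALL_ASSIGNED, so
  // only ERROR_SUCCESS means large pages are usable by this process.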
  DWORD E = GetLastError();
  CloseHandle(Token);
  if (E == ERROR_SUCCESS)
    return LargePageMin;
  return 0;
}

namespace llvm {
namespace sys {

//===----------------------------------------------------------------------===//
//=== WARNING: Implementation here must contain only Win32 specific code
//===          and must not be UNIX code
//===----------------------------------------------------------------------===//

MemoryBlock Memory::allocateMappedMemory(size_t NumBytes,
                                         const MemoryBlock *const NearBlock,
                                         unsigned Flags,
                                         std::error_code &EC) {
  EC = std::error_code();
  if (NumBytes == 0)
    return MemoryBlock();

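  // These are computed only once: the allocation granularity does not change
  // over the life of the process, and the lock-memory privilege only needs to
  // be acquired a single time.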
  static size_t DefaultGranularity = getAllocationGranularity();
  static size_t LargePageGranularity = enableProcessLargePages();

  DWORD AllocType = MEM_RESERVE | MEM_COMMIT;
  bool HugePages = false;
  size_t Granularity = DefaultGranularity;

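  // Large pages are attempted only when the caller hints for them and the
  // process was able to acquire the lock-memory privilege above; otherwise we
  // stay with the default granularity.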
  if ((Flags & MF_HUGE_HINT) && LargePageGranularity > 0) {
    AllocType |= MEM_LARGE_PAGES;
    HugePages = true;
    Granularity = LargePageGranularity;
  }

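  // Round the request up to a whole number of granularity-sized blocks.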
  size_t NumBlocks = (NumBytes + Granularity - 1) / Granularity;

  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
                                    NearBlock->allocatedSize()
                              : 0;

  // If the requested address is not aligned to the allocation granularity,
  // round up to get beyond NearBlock. VirtualAlloc would have rounded down.
  if (Start && Start % Granularity != 0)
    Start += Granularity - Start % Granularity;

  DWORD Protect = getWindowsProtectionFlags(Flags);

  size_t AllocSize = NumBlocks * Granularity;
  void *PA = ::VirtualAlloc(reinterpret_cast<void *>(Start), AllocSize,
                            AllocType, Protect);
  if (PA == NULL) {
    if (NearBlock || HugePages) {
      // Try again without the NearBlock hint and without large memory pages.
      return allocateMappedMemory(NumBytes, NULL, Flags & ~MF_HUGE_HINT, EC);
    }
    EC = mapWindowsError(::GetLastError());
    return MemoryBlock();
  }

  MemoryBlock Result;
  Result.Address = PA;
  Result.AllocatedSize = AllocSize;
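  // Record whether large pages were actually used, not merely requested, so
  // the flags on the returned block reflect what was allocated.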
  Result.Flags = (Flags & ~MF_HUGE_HINT) | (HugePages ? MF_HUGE_HINT : 0);

  if (Flags & MF_EXEC)
    Memory::InvalidateInstructionCache(Result.Address, AllocSize);

  return Result;
}

std::error_code Memory::releaseMappedMemory(MemoryBlock &M) {
  if (M.Address == 0 || M.AllocatedSize == 0)
    return std::error_code();

  if (!VirtualFree(M.Address, 0, MEM_RELEASE))
    return mapWindowsError(::GetLastError());

  M.Address = 0;
  M.AllocatedSize = 0;

  return std::error_code();
}

std::error_code Memory::protectMappedMemory(const MemoryBlock &M,
                                            unsigned Flags) {
  if (M.Address == 0 || M.AllocatedSize == 0)
    return std::error_code();

  DWORD Protect = getWindowsProtectionFlags(Flags);

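  // VirtualProtect requires a valid pointer to receive the previous
  // protection flags; passing a null pointer would make the call fail.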
  DWORD OldFlags;
  if (!VirtualProtect(M.Address, M.AllocatedSize, Protect, &OldFlags))
    return mapWindowsError(::GetLastError());

  if (Flags & MF_EXEC)
    Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize);

  return std::error_code();
}

/// InvalidateInstructionCache - Before the JIT can run a block of code that
/// has been emitted, it must invalidate the instruction cache on some
/// platforms.
void Memory::InvalidateInstructionCache(const void *Addr, size_t Len) {
  FlushInstructionCache(GetCurrentProcess(), Addr, Len);
}

} // namespace sys
} // namespace llvm