// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT
#include "pmm_arena.h"

#include <err.h>
#include <inttypes.h>
#include <pretty/sizes.h>
#include <string.h>
#include <trace.h>
#include <vm/bootalloc.h>
#include <vm/bootreserve.h>
#include <vm/physmap.h>
#include <zircon/types.h>

#include "pmm_node.h"
#include "vm_priv.h"

#define LOCAL_TRACE MAX(VM_GLOBAL_TRACE, 0)

zx_status_t PmmArena::Init(const pmm_arena_info_t* info, PmmNode* node) {
    // TODO: validate that info is sane (page aligned, etc)
    info_ = *info;

    // compute the size of the vm_page array needed to cover this arena
    size_t page_count = size() / PAGE_SIZE;
    size_t page_array_size = ROUNDUP_PAGE_SIZE(page_count * sizeof(vm_page));

    // if the arena is too small to be useful, bail
    if (page_array_size >= size()) {
        printf("PMM: arena too small to be useful (size %zu)\n", size());
        return ZX_ERR_BUFFER_TOO_SMALL;
    }

    // allocate a chunk to back the page array out of the arena itself, near the top of memory
    reserve_range_t range;
    auto status = boot_reserve_range_search(base(), size(), page_array_size, &range);
    if (status != ZX_OK) {
        printf("PMM: arena intersects with reserved memory in unresolvable way\n");
        return ZX_ERR_NO_MEMORY;
    }

    DEBUG_ASSERT(range.pa >= base() && range.len <= page_array_size);

    // get the kernel pointer
    void* raw_page_array = paddr_to_physmap(range.pa);
    LTRACEF("arena for base %#" PRIxPTR " size %#zx page array at %p size %#zx\n", base(), size(),
            raw_page_array, page_array_size);

    memset(raw_page_array, 0, page_array_size);

    page_array_ = (vm_page_t*)raw_page_array;

    // compute the range of page indices that back the page array itself
    size_t array_start_index = (PAGE_ALIGN(range.pa) - info_.base) / PAGE_SIZE;
    size_t array_end_index = array_start_index + page_array_size / PAGE_SIZE;
    LTRACEF("array_start_index %zu, array_end_index %zu, page_count %zu\n",
            array_start_index, array_end_index, page_count);

    DEBUG_ASSERT(array_start_index < page_count && array_end_index <= page_count);

    // add all pages that aren't part of the page array to the free list;
    // pages that are part of the page array go to the WIRED state
    list_node list;
    list_initialize(&list);
    for (size_t i = 0; i < page_count; i++) {
        auto& p = page_array_[i];

        p.paddr_priv = base() + i * PAGE_SIZE;
        if (i >= array_start_index && i < array_end_index) {
            p.state = VM_PAGE_STATE_WIRED;
        } else {
            p.state = VM_PAGE_STATE_FREE;
            list_add_tail(&list, &p.queue_node);
        }
    }

    node->AddFreePages(&list);

    return ZX_OK;
}

vm_page_t* PmmArena::FindSpecific(paddr_t pa) {
    if (!address_in_arena(pa)) {
        return nullptr;
    }

    size_t index = (pa - base()) / PAGE_SIZE;

    DEBUG_ASSERT(index < size() / PAGE_SIZE);

    return get_page(index);
}

vm_page_t* PmmArena::FindFreeContiguous(size_t count, uint8_t alignment_log2) {
    // walk the list starting at alignment boundaries.
    // calculate the starting offset into this arena, based on the
    // base address of the arena, to handle the case where the arena
    // is not aligned on the same boundary requested.
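    //
    // illustrative example (values assumed here, not taken from the code):
    // with an arena based at 0x1000, a 4KiB PAGE_SIZE (PAGE_SIZE_SHIFT of 12),
    // and alignment_log2 = 16 (64KiB), rounded_base becomes 0x10000, so the
    // scan starts at page offset (0x10000 - 0x1000) / PAGE_SIZE = 15 and, when
    // a run is broken, restarts at the next 1 << (16 - 12) = 16-page stride
    // past that offset.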
    // the page-index stride computed below assumes at least page alignment
    DEBUG_ASSERT(alignment_log2 >= PAGE_SIZE_SHIFT);

    paddr_t rounded_base = ROUNDUP(base(), 1UL << alignment_log2);
    if (rounded_base < base() || rounded_base > base() + size() - 1) {
        return nullptr;
    }

    paddr_t aligned_offset = (rounded_base - base()) / PAGE_SIZE;
    paddr_t start = aligned_offset;
    LTRACEF("starting search at aligned offset %#" PRIxPTR "\n", start);
    LTRACEF("arena base %#" PRIxPTR " size %zu\n", base(), size());

retry:
    // search while we're still within the arena and have a chance of finding a slot
    // (start + count <= end of arena)
    while ((start < size() / PAGE_SIZE) && ((start + count) <= size() / PAGE_SIZE)) {
        vm_page_t* p = &page_array_[start];
        for (uint i = 0; i < count; i++) {
            if (!p->is_free()) {
                // this run is broken, break out of the inner loop.
                // start over at the next alignment boundary
                start = ROUNDUP(start - aligned_offset + i + 1, 1UL << (alignment_log2 - PAGE_SIZE_SHIFT)) +
                        aligned_offset;
                goto retry;
            }
            p++;
        }

        // we found a run
        p = &page_array_[start];
        LTRACEF("found run from pa %#" PRIxPTR " to %#" PRIxPTR "\n", p->paddr(), p->paddr() + count * PAGE_SIZE);

        return p;
    }

    return nullptr;
}

void PmmArena::CountStates(size_t state_count[VM_PAGE_STATE_COUNT_]) const {
    for (size_t i = 0; i < size() / PAGE_SIZE; i++) {
        state_count[page_array_[i].state]++;
    }
}

void PmmArena::Dump(bool dump_pages, bool dump_free_ranges) const {
    char pbuf[16];
    printf(" arena %p: name '%s' base %#" PRIxPTR " size %s (0x%zx) priority %u flags 0x%x\n",
           this, name(), base(), format_size(pbuf, sizeof(pbuf), size()), size(), priority(), flags());
    printf("\tpage_array %p\n", page_array_);

    // dump all of the pages
    if (dump_pages) {
        for (size_t i = 0; i < size() / PAGE_SIZE; i++) {
            page_array_[i].dump();
        }
    }

    // count the number of pages in every state
    size_t state_count[VM_PAGE_STATE_COUNT_] = {};
    CountStates(state_count);

    printf("\tpage states:\n");
    for (unsigned int i = 0; i < VM_PAGE_STATE_COUNT_; i++) {
        printf("\t\t%-12s %-16zu (%zu bytes)\n", page_state_to_string(i), state_count[i],
               state_count[i] * PAGE_SIZE);
    }

    // dump the free page ranges
    if (dump_free_ranges) {
        printf("\tfree ranges:\n");
        ssize_t last = -1;
        for (size_t i = 0; i < size() / PAGE_SIZE; i++) {
            if (page_array_[i].is_free()) {
                if (last == -1) {
                    last = i;
                }
            } else {
                if (last != -1) {
                    printf("\t\t%#" PRIxPTR " - %#" PRIxPTR "\n", base() + last * PAGE_SIZE,
                           base() + i * PAGE_SIZE);
                }
                last = -1;
            }
        }

        if (last != -1) {
            printf("\t\t%#" PRIxPTR " - %#" PRIxPTR "\n", base() + last * PAGE_SIZE, base() + size());
        }
    }
}