// Copyright 2018 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT
#pragma once

#include <fbl/canary.h>
#include <fbl/intrusive_double_list.h>

#include <kernel/lockdep.h>
#include <kernel/mutex.h>
#include <vm/pmm.h>

#include "pmm_arena.h"

// Compile-time debug aid: when PMM_ENABLE_FREE_FILL is nonzero, freed pages are
// filled with PMM_FREE_FILL_BYTE so that writes to free memory are easier to catch.
#define PMM_ENABLE_FREE_FILL 0
#define PMM_FREE_FILL_BYTE 0x42

// per-NUMA-node collection of pmm arenas and worker threads
class PmmNode {
public:
    PmmNode();
    ~PmmNode();

    DISALLOW_COPY_ASSIGN_AND_MOVE(PmmNode);

    paddr_t PageToPaddr(const vm_page_t* page) TA_NO_THREAD_SAFETY_ANALYSIS;
    vm_page_t* PaddrToPage(paddr_t addr) TA_NO_THREAD_SAFETY_ANALYSIS;

    // main allocator routines
    zx_status_t AllocPage(uint alloc_flags, vm_page_t** page, paddr_t* pa);
    zx_status_t AllocPages(size_t count, uint alloc_flags, list_node* list);
    zx_status_t AllocRange(paddr_t address, size_t count, list_node* list);
    zx_status_t AllocContiguous(size_t count, uint alloc_flags, uint8_t alignment_log2, paddr_t* pa, list_node* list);
    void FreePage(vm_page* page);
    void FreeList(list_node* list);
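
    // Illustrative usage sketch, not part of this interface: it assumes a PmmNode
    // instance |node| and the PMM_ALLOC_FLAG_ANY flag (taken here from <vm/pmm.h>);
    // error handling is elided. Single-page allocations pair AllocPage() with
    // FreePage(); batched allocations pair AllocPages() with FreeList().
    //
    //   vm_page_t* page;
    //   paddr_t pa;
    //   if (node.AllocPage(PMM_ALLOC_FLAG_ANY, &page, &pa) == ZX_OK) {
    //       // use the page, then hand it back to the free queue
    //       node.FreePage(page);
    //   }
    //
    //   list_node pages = LIST_INITIAL_VALUE(pages);
    //   if (node.AllocPages(16, PMM_ALLOC_FLAG_ANY, &pages) == ZX_OK) {
    //       node.FreeList(&pages);
    //   }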

    uint64_t CountFreePages() const;
    uint64_t CountTotalBytes() const;
    void CountTotalStates(uint64_t state_count[VM_PAGE_STATE_COUNT_]) const;

    // print free and overall state of the internal arenas
    // NOTE: both functions skip mutexes and can be called inside timer or crash context,
    // though the data they return may be questionable
    void DumpFree() const TA_NO_THREAD_SAFETY_ANALYSIS;
    void Dump(bool is_panic) const TA_NO_THREAD_SAFETY_ANALYSIS;

#if PMM_ENABLE_FREE_FILL
    void EnforceFill() TA_NO_THREAD_SAFETY_ANALYSIS;
#endif

    zx_status_t AddArena(const pmm_arena_info_t* info);

    // add new pages to the free queue. used when bootstrapping a PmmArena
    void AddFreePages(list_node* list);
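
    // Illustrative bring-up sketch (hypothetical names and values): early boot
    // code could describe a RAM region and register it with this node via
    // AddArena(), assuming pmm_arena_info_t carries base/size fields matching
    // the region covered by the resulting PmmArena.
    //
    //   pmm_arena_info_t info = {};     // zero-init, then describe the region
    //   info.base = ram_base;           // hypothetical values from boot data
    //   info.size = ram_size;
    //   zx_status_t status = node.AddArena(&info);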

private:
    void FreePageLocked(vm_page* page) TA_REQ(lock_);
    void FreeListLocked(list_node* list) TA_REQ(lock_);

    fbl::Canary<fbl::magic("PNOD")> canary_;

    mutable DECLARE_MUTEX(PmmNode) lock_;

    uint64_t arena_cumulative_size_ TA_GUARDED(lock_) = 0;
    uint64_t free_count_ TA_GUARDED(lock_) = 0;

    fbl::DoublyLinkedList<PmmArena*> arena_list_ TA_GUARDED(lock_);

    // page queues
    list_node free_list_ TA_GUARDED(lock_) = LIST_INITIAL_VALUE(free_list_);
    list_node inactive_list_ TA_GUARDED(lock_) = LIST_INITIAL_VALUE(inactive_list_);
    list_node active_list_ TA_GUARDED(lock_) = LIST_INITIAL_VALUE(active_list_);
    list_node modified_list_ TA_GUARDED(lock_) = LIST_INITIAL_VALUE(modified_list_);
    list_node wired_list_ TA_GUARDED(lock_) = LIST_INITIAL_VALUE(wired_list_);

#if PMM_ENABLE_FREE_FILL
    void FreeFill(vm_page_t* page);
    void CheckFreeFill(vm_page_t* page);

    bool enforce_fill_ = false;
#endif
};

// We don't need to hold the arena lock while executing this, since it
// only accesses values that are set once during system initialization.
inline vm_page_t* PmmNode::PaddrToPage(paddr_t addr) TA_NO_THREAD_SAFETY_ANALYSIS {
    for (auto& a : arena_list_) {
        if (a.address_in_arena(addr)) {
            size_t index = (addr - a.base()) / PAGE_SIZE;
            return a.get_page(index);
        }
    }
    return nullptr;
}
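
// Illustrative sketch (assumes a PmmNode instance |node| and a page-aligned
// physical address |pa|): translating a raw paddr back to its vm_page_t and
// round-tripping it through PageToPaddr(). A null result means the address
// falls outside every arena owned by this node.
//
//   vm_page_t* page = node.PaddrToPage(pa);
//   if (page != nullptr) {
//       DEBUG_ASSERT(node.PageToPaddr(page) == pa);
//   }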