// Copyright 2017 The Fuchsia Authors
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <object/pinned_memory_token_dispatcher.h>

#include <assert.h>
#include <err.h>
#include <string.h>
#include <fbl/algorithm.h>
#include <fbl/auto_call.h>
#include <fbl/auto_lock.h>
#include <object/bus_transaction_initiator_dispatcher.h>
#include <trace.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <zxcpp/new.h>

#define LOCAL_TRACE 0

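// Creates a PinnedMemoryTokenDispatcher for the [offset, offset + size) range
// of |vmo|, pinning the underlying pages (for paged VMOs) and mapping them
// through the IOMMU of |bti|.  Must be called with the BTI's lock held.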
zx_status_t PinnedMemoryTokenDispatcher::Create(fbl::RefPtr<BusTransactionInitiatorDispatcher> bti,
                                                fbl::RefPtr<VmObject> vmo, size_t offset,
                                                size_t size, uint32_t perms,
                                                fbl::RefPtr<Dispatcher>* dispatcher,
                                                zx_rights_t* rights) {
    LTRACE_ENTRY;
    DEBUG_ASSERT(IS_PAGE_ALIGNED(offset) && IS_PAGE_ALIGNED(size));

    if (vmo->is_paged()) {
        // Commit the VMO range, in case it's not already committed.
        zx_status_t status = vmo->CommitRange(offset, size, nullptr);
        if (status != ZX_OK) {
            LTRACEF("vmo->CommitRange failed: %d\n", status);
            return status;
        }

        // Pin the memory to make sure it doesn't change from underneath us for the
        // lifetime of the created PMT.
        status = vmo->Pin(offset, size);
        if (status != ZX_OK) {
            LTRACEF("vmo->Pin failed: %d\n", status);
            return status;
        }
    }

    // Set up a cleanup function to undo the pin if we need to fail this
    // operation.
    auto unpin_vmo = fbl::MakeAutoCall([vmo, offset, size]() {
        if (vmo->is_paged()) {
            vmo->Unpin(offset, size);
        }
    });

    const size_t min_contig = bti->minimum_contiguity();
    DEBUG_ASSERT(fbl::is_pow2(min_contig));

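    // The IOMMU hands back mappings in chunks of at least |min_contig| bytes,
    // so one device address per chunk is enough to describe the whole pinned
    // range.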
    fbl::AllocChecker ac;
    const size_t num_addrs = ROUNDUP(size, min_contig) / min_contig;
    fbl::Array<dev_vaddr_t> addr_array(new (&ac) dev_vaddr_t[num_addrs], num_addrs);
    if (!ac.check()) {
        return ZX_ERR_NO_MEMORY;
    }

    bool is_contiguous = vmo->is_contiguous();
    auto pmt = fbl::AdoptRef(new (&ac) PinnedMemoryTokenDispatcher(fbl::move(bti), fbl::move(vmo),
                                                                   offset, size, is_contiguous,
                                                                   fbl::move(addr_array)));
    if (!ac.check()) {
        return ZX_ERR_NO_MEMORY;
    }

    // Now that the pmt object has been created, it is responsible for
    // unpinning.
    unpin_vmo.cancel();

    zx_status_t status = pmt->MapIntoIommu(perms);
    if (status != ZX_OK) {
        LTRACEF("MapIntoIommu failed: %d\n", status);
        return status;
    }

    // Create must be called with the BTI's lock held, so this is safe to
    // invoke.
    [&]() TA_NO_THREAD_SAFETY_ANALYSIS {
        pmt->bti_->AddPmoLocked(pmt.get());
    }();

    *dispatcher = fbl::move(pmt);
    *rights = ZX_DEFAULT_PMT_RIGHTS;
    return ZX_OK;
}

// Used during initialization to set up the IOMMU state for this PMT.
//
// We disable thread-safety analysis here, because this is part of the
// initialization routine before other threads have access to this dispatcher.
zx_status_t PinnedMemoryTokenDispatcher::MapIntoIommu(uint32_t perms) TA_NO_THREAD_SAFETY_ANALYSIS {
    const uint64_t bti_id = bti_->bti_id();
    const size_t min_contig = bti_->minimum_contiguity();
    if (is_contiguous_) {
        dev_vaddr_t vaddr;
        size_t mapped_len;

        // Usermode drivers assume that if they requested a contiguous buffer in
        // memory, then the physical addresses will be contiguous.  Return an
        // error if we can't actually map the address contiguously.
        zx_status_t status = bti_->iommu()->MapContiguous(bti_id, vmo_, offset_, size_, perms,
                                                          &vaddr, &mapped_len);
        if (status != ZX_OK) {
            return status;
        }

        DEBUG_ASSERT(vaddr % min_contig == 0);
        mapped_addrs_[0] = vaddr;
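        // The mapping is contiguous, so the base of each subsequent
        // |min_contig| chunk follows directly after the previous one.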
        for (size_t i = 1; i < mapped_addrs_.size(); ++i) {
            mapped_addrs_[i] = mapped_addrs_[i - 1] + min_contig;
        }
        return ZX_OK;
    }

    size_t remaining = size_;
    uint64_t curr_offset = offset_;
    size_t next_addr_idx = 0;
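    // The IOMMU may satisfy each Map() call with less than the requested
    // length, so keep mapping until the whole range is covered.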
    while (remaining > 0) {
        dev_vaddr_t vaddr;
        size_t mapped_len;
        zx_status_t status = bti_->iommu()->Map(bti_id, vmo_, curr_offset, remaining, perms,
                                                &vaddr, &mapped_len);
        if (status != ZX_OK) {
            zx_status_t err = UnmapFromIommuLocked();
            ASSERT(err == ZX_OK);
            return status;
        }

        // Ensure we don't end up with any non-terminal chunks that are not
        // |min_contig| in length.
        DEBUG_ASSERT(mapped_len % min_contig == 0 || remaining == mapped_len);

        // Break the range up into chunks of length |min_contig|.
        size_t mapped_remaining = mapped_len;
        while (mapped_remaining > 0) {
            size_t chunk_len = fbl::min<size_t>(mapped_remaining, min_contig);
            mapped_addrs_[next_addr_idx] = vaddr;
            next_addr_idx++;
            vaddr += chunk_len;
            mapped_remaining -= chunk_len;
        }

        curr_offset += mapped_len;
        remaining -= fbl::min(mapped_len, remaining);
    }
    DEBUG_ASSERT(next_addr_idx == mapped_addrs_.size());

    return ZX_OK;
}

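// Tears down the IOMMU mappings for this PMT.  Safe to call multiple times:
// after the first call the addresses are marked invalid, and subsequent calls
// return ZX_OK without doing any work.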
zx_status_t PinnedMemoryTokenDispatcher::UnmapFromIommuLocked() {
    auto iommu = bti_->iommu();
    const uint64_t bus_txn_id = bti_->bti_id();

    if (mapped_addrs_[0] == UINT64_MAX) {
        // No work to do, nothing is mapped.
        return ZX_OK;
    }

    zx_status_t status = ZX_OK;
    if (is_contiguous_) {
        status = iommu->Unmap(bus_txn_id, mapped_addrs_[0], size_);
    } else {
        const size_t min_contig = bti_->minimum_contiguity();
        size_t remaining = size_;
        for (size_t i = 0; i < mapped_addrs_.size(); ++i) {
            dev_vaddr_t addr = mapped_addrs_[i];
            if (addr == UINT64_MAX) {
                break;
            }

            size_t size = fbl::min(remaining, min_contig);
            DEBUG_ASSERT(size == min_contig || i == mapped_addrs_.size() - 1);
            // Try to unmap all chunks even if we hit an error, and return the
            // first error encountered.
            zx_status_t err = iommu->Unmap(bus_txn_id, addr, size);
            DEBUG_ASSERT(err == ZX_OK);
            if (err != ZX_OK && status == ZX_OK) {
                status = err;
            }
            remaining -= size;
        }
    }

    // Invalidate the addresses so that a second call (e.g. from the
    // destructor) becomes a no-op.
    InvalidateMappedAddrsLocked();
    return status;
}

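// Records that usermode explicitly released the pin (via zx_pmt_unpin()).  An
// explicitly unpinned PMT is destroyed once its last reference goes away
// instead of being quarantined by the BTI.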
void PinnedMemoryTokenDispatcher::MarkUnpinned() {
    Guard<fbl::Mutex> guard{get_lock()};
    explicitly_unpinned_ = true;
}

void PinnedMemoryTokenDispatcher::InvalidateMappedAddrsLocked() {
    // Fill with a known invalid address to simplify cleanup of errors during
    // mapping.
    for (size_t i = 0; i < mapped_addrs_.size(); ++i) {
        mapped_addrs_[i] = UINT64_MAX;
    }
}

void PinnedMemoryTokenDispatcher::on_zero_handles() {
    Guard<fbl::Mutex> guard{get_lock()};

    // Once usermode has dropped the handle, whether through zx_handle_close(),
    // zx_pmt_unpin(), or process crash, prevent access to the pinned memory.
    //
    // We do not unpin the VMO until this object is destroyed, to allow usermode
    // to protect against stray DMA via the quarantining mechanism.
    zx_status_t status = UnmapFromIommuLocked();
    ASSERT(status == ZX_OK);

    if (explicitly_unpinned_) {
        // The cleanup will happen when the reference that on_zero_handles()
        // was called on goes away.
    } else {
        // Add to the quarantine list to prevent the underlying VMO from being
        // unpinned.
        bti_->Quarantine(fbl::WrapRefPtr(this));
    }
}

PinnedMemoryTokenDispatcher::~PinnedMemoryTokenDispatcher() {
    // In most cases the Unmap will already have run via on_zero_handles(), but
    // it is possible for that to never run if an error occurs between the
    // creation of the PinnedMemoryTokenDispatcher and the completion of the
    // zx_bti_pin() syscall.
    zx_status_t status = UnmapFromIommuLocked();
    ASSERT(status == ZX_OK);

    if (vmo_->is_paged()) {
        vmo_->Unpin(offset_, size_);
    }

    // RemovePmo is the only method that will remove dll_pmt_ from a list, and
    // it's only called here.  dll_pmt_ is only added to a list at the end of
    // Create, before any reference to the pmt has been given out.
    // Because of this, it's safe to check InContainer without holding a lock.
    if (dll_pmt_.InContainer()) {
        bti_->RemovePmo(this);
    }
}

PinnedMemoryTokenDispatcher::PinnedMemoryTokenDispatcher(
        fbl::RefPtr<BusTransactionInitiatorDispatcher> bti,
        fbl::RefPtr<VmObject> vmo, size_t offset, size_t size,
        bool is_contiguous,
        fbl::Array<dev_vaddr_t> mapped_addrs)
    : vmo_(fbl::move(vmo)), offset_(offset), size_(size), is_contiguous_(is_contiguous),
      bti_(fbl::move(bti)), mapped_addrs_(fbl::move(mapped_addrs)) {

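    // No lock is needed here: the constructor runs before any other thread
    // can hold a reference to this dispatcher.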
    InvalidateMappedAddrsLocked();
}

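// Copies the device addresses of the pinned range into |mapped_addrs|.  The
// caller selects one of three encodings: |compress_results| returns one
// address per minimum-contiguity chunk, |contiguous| returns a single base
// address for the whole range, and otherwise one address is returned per page.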
zx_status_t PinnedMemoryTokenDispatcher::EncodeAddrs(bool compress_results,
                                                     bool contiguous,
                                                     dev_vaddr_t* mapped_addrs,
                                                     size_t mapped_addrs_count) {
    Guard<fbl::Mutex> guard{get_lock()};

    const fbl::Array<dev_vaddr_t>& pmt_addrs = mapped_addrs_;
    const size_t found_addrs = pmt_addrs.size();
    if (compress_results) {
        if (found_addrs != mapped_addrs_count) {
            return ZX_ERR_INVALID_ARGS;
        }
        memcpy(mapped_addrs, pmt_addrs.get(), found_addrs * sizeof(dev_vaddr_t));
    } else if (contiguous) {
        if (mapped_addrs_count != 1 || !is_contiguous_) {
            return ZX_ERR_INVALID_ARGS;
        }
        *mapped_addrs = pmt_addrs[0];
    } else {
        const size_t num_pages = size_ / PAGE_SIZE;
        if (num_pages != mapped_addrs_count) {
            return ZX_ERR_INVALID_ARGS;
        }
        const size_t min_contig = bti_->minimum_contiguity();
        size_t next_idx = 0;
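        // Expand each |min_contig| extent into individual per-page addresses.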
        for (size_t i = 0; i < found_addrs; ++i) {
            dev_vaddr_t extent_base = pmt_addrs[i];
            for (dev_vaddr_t addr = extent_base;
                 addr < extent_base + min_contig && next_idx < num_pages;
                 addr += PAGE_SIZE, ++next_idx) {
                mapped_addrs[next_idx] = addr;
            }
        }
    }
    return ZX_OK;
}