1// Copyright 2017 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include <ddk/debug.h>
6#include <fbl/algorithm.h>
7#include <fbl/auto_lock.h>
8#include <hw/reg.h>
9#include <inttypes.h>
10
11#include "pci.h"
12
13namespace {
14
15// For reading the virtio specific vendor capabilities that can be PIO or MMIO space
16#define cap_field(offset, field) static_cast<uint8_t>(offset + offsetof(virtio_pci_cap_t, field))
17static void ReadVirtioCap(pci_protocol_t* pci, uint8_t offset, virtio_pci_cap& cap) {
18    pci_config_read8(pci, cap_field(offset, cap_vndr), &cap.cap_vndr);
19    pci_config_read8(pci, cap_field(offset, cap_next), &cap.cap_next);
20    pci_config_read8(pci, cap_field(offset, cap_len), &cap.cap_len);
21    pci_config_read8(pci, cap_field(offset, cfg_type), &cap.cfg_type);
22    pci_config_read8(pci, cap_field(offset, bar), &cap.bar);
23    pci_config_read32(pci, cap_field(offset, offset), &cap.offset);
24    pci_config_read32(pci, cap_field(offset, length), &cap.length);
25}
26#undef cap_field
27
// MMIO reads and writes are abstracted out into template methods that
// ensure fields are only accessed with the right size. Only the explicit
// specializations below are valid; instantiating the primary templates is a
// compile-time error. A dependent static_assert gives a readable diagnostic,
// unlike the old T::bad_instantiation() trick whose error message was opaque.
template <typename T>
constexpr bool kMmioWidthSupported = false;

template <typename T>
void MmioWrite(volatile T* addr, T value) {
    static_assert(kMmioWidthSupported<T>,
                  "MmioWrite only supports uint8_t/uint16_t/uint32_t/uint64_t fields");
}

template <typename T>
void MmioRead(const volatile T* addr, T* value) {
    static_assert(kMmioWidthSupported<T>,
                  "MmioRead only supports uint8_t/uint16_t/uint32_t/uint64_t fields");
}
39
// 32-bit accessors delegate to the hw/reg.h register helpers.
template <>
void MmioWrite<uint32_t>(volatile uint32_t* addr, uint32_t value) {
    writel(value, addr);
}

template <>
void MmioRead<uint32_t>(const volatile uint32_t* addr, uint32_t* value) {
    *value = readl(addr);
}
49
// 16-bit accessors delegate to the hw/reg.h register helpers.
template <>
void MmioWrite<uint16_t>(volatile uint16_t* addr, uint16_t value) {
    writew(value, addr);
}

template <>
void MmioRead<uint16_t>(const volatile uint16_t* addr, uint16_t* value) {
    *value = readw(addr);
}
59
// 8-bit accessors delegate to the hw/reg.h register helpers.
template <>
void MmioWrite<uint8_t>(volatile uint8_t* addr, uint8_t value) {
    writeb(value, addr);
}

template <>
void MmioRead<uint8_t>(const volatile uint8_t* addr, uint8_t* value) {
    *value = readb(addr);
}
69
70// Virtio 1.0 Section 4.1.3:
71// 64-bit fields are to be treated as two 32-bit fields, with low 32 bit
72// part followed by the high 32 bit part.
73template <>
74void MmioWrite<uint64_t>(volatile uint64_t* addr, uint64_t value) {
75    auto words = reinterpret_cast<volatile uint32_t*>(addr);
76    MmioWrite(&words[0], static_cast<uint32_t>(value));
77    MmioWrite(&words[1], static_cast<uint32_t>(value >> 32));
78}
79
80template <>
81void MmioRead<uint64_t>(const volatile uint64_t* addr, uint64_t* value) {
82    auto words = reinterpret_cast<const volatile uint32_t*>(addr);
83    uint32_t lo, hi;
84    MmioRead(&words[0], &lo);
85    MmioRead(&words[1], &hi);
86    *value = static_cast<uint64_t>(lo) | (static_cast<uint64_t>(hi) << 32);
87}
88
89} // anonymous namespace
90
91namespace virtio {
92
93zx_status_t PciModernBackend::Init() {
94    fbl::AutoLock lock(&lock_);
95
96    // try to parse capabilities
97    for (uint8_t off = pci_get_first_capability(&pci_, kPciCapIdVendor);
98         off != 0;
99         off = pci_get_next_capability(&pci_, off, kPciCapIdVendor)) {
100        virtio_pci_cap_t cap;
101
102        ReadVirtioCap(&pci_, off, cap);
103        switch (cap.cfg_type) {
104        case VIRTIO_PCI_CAP_COMMON_CFG:
105            CommonCfgCallbackLocked(cap);
106            break;
107        case VIRTIO_PCI_CAP_NOTIFY_CFG:
108            // Virtio 1.0 section 4.1.4.4
109            // notify_off_multiplier is a 32bit field following this capability
110            pci_config_read32(&pci_, static_cast<uint8_t>(off + sizeof(virtio_pci_cap_t)),
111                    &notify_off_mul_);
112            NotifyCfgCallbackLocked(cap);
113            break;
114        case VIRTIO_PCI_CAP_ISR_CFG:
115            IsrCfgCallbackLocked(cap);
116            break;
117        case VIRTIO_PCI_CAP_DEVICE_CFG:
118            DeviceCfgCallbackLocked(cap);
119            break;
120        case VIRTIO_PCI_CAP_PCI_CFG:
121            PciCfgCallbackLocked(cap);
122            break;
123        }
124    }
125
126    // Ensure we found needed capabilities during parsing
127    if (common_cfg_ == nullptr || isr_status_ == nullptr || device_cfg_ == 0 || notify_base_ == 0) {
128        zxlogf(ERROR, "%s: failed to bind, missing capabilities\n", tag());
129        return ZX_ERR_BAD_STATE;
130    }
131
132    zxlogf(SPEW, "virtio: modern pci backend successfully initialized\n");
133    return ZX_OK;
134}
135
136// value pointers are used to maintain type safety with field width
137void PciModernBackend::DeviceConfigRead(uint16_t offset, uint8_t* value) {
138    fbl::AutoLock lock(&lock_);
139    MmioRead(reinterpret_cast<volatile uint8_t*>(device_cfg_ + offset), value);
140}
141
142void PciModernBackend::DeviceConfigRead(uint16_t offset, uint16_t* value) {
143    fbl::AutoLock lock(&lock_);
144    MmioRead(reinterpret_cast<volatile uint16_t*>(device_cfg_ + offset), value);
145}
146
147void PciModernBackend::DeviceConfigRead(uint16_t offset, uint32_t* value) {
148    fbl::AutoLock lock(&lock_);
149    MmioRead(reinterpret_cast<volatile uint32_t*>(device_cfg_ + offset), value);
150}
151
152void PciModernBackend::DeviceConfigRead(uint16_t offset, uint64_t* value) {
153    fbl::AutoLock lock(&lock_);
154    MmioRead(reinterpret_cast<volatile uint64_t*>(device_cfg_ + offset), value);
155}
156
157void PciModernBackend::DeviceConfigWrite(uint16_t offset, uint8_t value) {
158    fbl::AutoLock lock(&lock_);
159    MmioWrite(reinterpret_cast<volatile uint8_t*>(device_cfg_ + offset), value);
160}
161
162void PciModernBackend::DeviceConfigWrite(uint16_t offset, uint16_t value) {
163    fbl::AutoLock lock(&lock_);
164    MmioWrite(reinterpret_cast<volatile uint16_t*>(device_cfg_ + offset), value);
165}
166
167void PciModernBackend::DeviceConfigWrite(uint16_t offset, uint32_t value) {
168    fbl::AutoLock lock(&lock_);
169    MmioWrite(reinterpret_cast<volatile uint32_t*>(device_cfg_ + offset), value);
170}
171
172void PciModernBackend::DeviceConfigWrite(uint16_t offset, uint64_t value) {
173    fbl::AutoLock lock(&lock_);
174    MmioWrite(reinterpret_cast<volatile uint64_t*>(device_cfg_ + offset), value);
175}
176
177// Attempt to map a bar found in a capability structure. If it has already been
178// mapped and we have stored a valid handle in the structure then just return
179// ZX_OK.
180zx_status_t PciModernBackend::MapBar(uint8_t bar) {
181    if (bar >= fbl::count_of(bar_)) {
182        return ZX_ERR_INVALID_ARGS;
183    }
184
185    if (bar_[bar].mmio_handle != ZX_HANDLE_INVALID) {
186        return ZX_OK;
187    }
188
189    size_t size;
190    zx_handle_t handle;
191    void* base;
192    zx_status_t s = pci_map_bar(&pci_, bar, ZX_CACHE_POLICY_UNCACHED_DEVICE, &base, &size, &handle);
193    if (s != ZX_OK) {
194        zxlogf(ERROR, "%s: Failed to map bar %u: %d\n", tag(), bar, s);
195        return s;
196    }
197
198    // Store the base as a uintptr_t due to the amount of math done on it later
199    bar_[bar].mmio_base = reinterpret_cast<uintptr_t>(base);
200    bar_[bar].mmio_handle.reset(handle);
201    zxlogf(TRACE, "%s: bar %u mapped to %#" PRIxPTR "\n", tag(), bar, bar_[bar].mmio_base);
202    return ZX_OK;
203}
204
205void PciModernBackend::CommonCfgCallbackLocked(const virtio_pci_cap_t& cap) {
206    zxlogf(TRACE, "%s: common cfg found in bar %u offset %#x\n", tag(), cap.bar, cap.offset);
207    if (MapBar(cap.bar) != ZX_OK) {
208        return;
209    }
210
211    // Common config is a structure of type virtio_pci_common_cfg_t located at an
212    // the bar and offset specified by the capability.
213    auto addr = bar_[cap.bar].mmio_base + cap.offset;
214    common_cfg_ = reinterpret_cast<volatile virtio_pci_common_cfg_t*>(addr);
215
216    // Cache this when we find the config for kicking the queues later
217}
218
219void PciModernBackend::NotifyCfgCallbackLocked(const virtio_pci_cap_t& cap) {
220    zxlogf(TRACE, "%s: notify cfg found in bar %u offset %#x\n", tag(), cap.bar, cap.offset);
221    if (MapBar(cap.bar) != ZX_OK) {
222        return;
223    }
224
225    notify_base_ = bar_[cap.bar].mmio_base + cap.offset;
226}
227
228void PciModernBackend::IsrCfgCallbackLocked(const virtio_pci_cap_t& cap) {
229    zxlogf(TRACE, "%s: isr cfg found in bar %u offset %#x\n", tag(), cap.bar, cap.offset);
230    if (MapBar(cap.bar) != ZX_OK) {
231        return;
232    }
233
234    // interrupt status is directly read from the register at this address
235    isr_status_ = reinterpret_cast<volatile uint32_t*>(bar_[cap.bar].mmio_base + cap.offset);
236}
237
238void PciModernBackend::DeviceCfgCallbackLocked(const virtio_pci_cap_t& cap) {
239    zxlogf(TRACE, "%s: device cfg found in bar %u offset %#x\n", tag(), cap.bar, cap.offset);
240    if (MapBar(cap.bar) != ZX_OK) {
241        return;
242    }
243
244    device_cfg_ = bar_[cap.bar].mmio_base + cap.offset;
245}
246
// Handler for VIRTIO_PCI_CAP_PCI_CFG capabilities. Intentionally a no-op:
// We are not using this capability presently since we can map the
// bars for direct memory access.
void PciModernBackend::PciCfgCallbackLocked(const virtio_pci_cap_t& cap) {
    // We are not using this capability presently since we can map the
    // bars for direct memory access.
}
251
252// Get the ring size of a specific index
253uint16_t PciModernBackend::GetRingSize(uint16_t index) {
254    fbl::AutoLock lock(&lock_);
255
256    uint16_t queue_size = 0;
257    MmioWrite(&common_cfg_->queue_select, index);
258    MmioRead(&common_cfg_->queue_size, &queue_size);
259    return queue_size;
260}
261
// Set up ring descriptors with the backend.
// Selects ring |index|, programs its size and the physical addresses of the
// descriptor, available, and used rings, then enables the queue.
void PciModernBackend::SetRing(uint16_t index, uint16_t count, zx_paddr_t pa_desc, zx_paddr_t pa_avail, zx_paddr_t pa_used) {
    fbl::AutoLock lock(&lock_);

    // queue_select must be written first so the queue_* accesses below target
    // the ring at |index|.
    // TODO(review): original note read "These offsets are wrong and this
    // should be changed" — verify the queue_* field offsets in
    // virtio_pci_common_cfg_t against Virtio 1.0 section 4.1.4.3.
    MmioWrite(&common_cfg_->queue_select, index);
    MmioWrite(&common_cfg_->queue_size, count);
    MmioWrite(&common_cfg_->queue_desc, pa_desc);
    MmioWrite(&common_cfg_->queue_avail, pa_avail);
    MmioWrite(&common_cfg_->queue_used, pa_used);
    MmioWrite<uint16_t>(&common_cfg_->queue_enable, 1);

    // Assert that queue_notify_off is equal to the ring index.
    // RingKick relies on this equality when computing doorbell addresses.
    uint16_t queue_notify_off;
    MmioRead(&common_cfg_->queue_notify_off, &queue_notify_off);
    ZX_ASSERT(queue_notify_off == index);
}
279
280void PciModernBackend::RingKick(uint16_t ring_index) {
281    fbl::AutoLock lock(&lock_);
282
283    // Virtio 1.0 Section 4.1.4.4
284    // The address to notify for a queue is calculated using information from
285    // the notify_off_multiplier, the capability's base + offset, and the
286    // selected queue's offset.
287    //
288    // For performance reasons, we assume that the selected queue's offset is
289    // equal to the ring index.
290    auto addr = notify_base_ + ring_index * notify_off_mul_;
291    auto ptr = reinterpret_cast<volatile uint16_t*>(addr);
292    zxlogf(SPEW, "%s: kick %u addr %p\n", tag(), ring_index, ptr);
293    *ptr = ring_index;
294}
295
296bool PciModernBackend::ReadFeature(uint32_t feature) {
297    fbl::AutoLock lock(&lock_);
298    uint32_t select = feature / 32;
299    uint32_t bit = feature % 32;
300    uint32_t val;
301
302    MmioWrite(&common_cfg_->device_feature_select, select);
303    MmioRead(&common_cfg_->device_feature, &val);
304    bool is_set = (val & (1u << bit)) != 0;
305    zxlogf(TRACE, "%s: read feature bit %u = %u\n", tag(), feature, is_set);
306    return is_set;
307}
308
309void PciModernBackend::SetFeature(uint32_t feature) {
310    fbl::AutoLock lock(&lock_);
311    uint32_t select = feature / 32;
312    uint32_t bit = feature % 32;
313    uint32_t val;
314
315    MmioWrite(&common_cfg_->driver_feature_select, select);
316    MmioRead(&common_cfg_->driver_feature, &val);
317    MmioWrite(&common_cfg_->driver_feature, val | (1u << bit));
318    zxlogf(TRACE, "%s: feature bit %u now set\n", tag(), feature);
319}
320
321zx_status_t PciModernBackend::ConfirmFeatures() {
322    fbl::AutoLock lock(&lock_);
323    uint8_t val;
324
325    MmioRead(&common_cfg_->device_status, &val);
326    val |= VIRTIO_STATUS_FEATURES_OK;
327    MmioWrite(&common_cfg_->device_status, val);
328
329    // Check that the device confirmed our feature choices were valid
330    MmioRead(&common_cfg_->device_status, &val);
331    if ((val & VIRTIO_STATUS_FEATURES_OK) == 0) {
332        return ZX_ERR_NOT_SUPPORTED;
333    }
334
335    return ZX_OK;
336}
337
338void PciModernBackend::DeviceReset() {
339    fbl::AutoLock lock(&lock_);
340
341    MmioWrite<uint8_t>(&common_cfg_->device_status, 0u);
342}
343
344void PciModernBackend::DriverStatusOk() {
345    fbl::AutoLock lock(&lock_);
346
347    uint8_t device_status;
348    MmioRead(&common_cfg_->device_status, &device_status);
349    device_status |= VIRTIO_STATUS_DRIVER_OK;
350    MmioWrite(&common_cfg_->device_status, device_status);
351}
352
353void PciModernBackend::DriverStatusAck() {
354    fbl::AutoLock lock(&lock_);
355
356    uint8_t device_status;
357    MmioRead(&common_cfg_->device_status, &device_status);
358    device_status |= VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER;
359    MmioWrite(&common_cfg_->device_status, device_status);
360}
361
362uint32_t PciModernBackend::IsrStatus() {
363    return (*isr_status_ & (VIRTIO_ISR_QUEUE_INT | VIRTIO_ISR_DEV_CFG_INT));
364}
365
366} // namespace virtio
367