1// Copyright 2016 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include <ddk/binding.h>
6#include <ddk/debug.h>
7#include <ddk/device.h>
8#include <ddk/driver.h>
9#include <ddk/io-buffer.h>
10#include <ddk/protocol/ethernet.h>
11#include <ddk/protocol/pci.h>
12#include <hw/pci.h>
13
14#include <zircon/assert.h>
15#include <zircon/device/ethernet.h>
16#include <zircon/syscalls.h>
17#include <zircon/types.h>
18#include <stdio.h>
19#include <stdlib.h>
20#include <string.h>
21#include <threads.h>
22
// The shared intel-ethernet core ("ie.h") uses the older status_t name;
// alias it to zx_status_t so that header compiles unchanged.
typedef zx_status_t status_t;
#include "ie.h"

// Device lifecycle state; transitions are made under ethernet_device_t.lock.
typedef enum {
    ETH_RUNNING = 0,
    ETH_SUSPENDING,
    ETH_SUSPENDED,
} eth_state;
31
// Per-device driver state. `lock` guards the mutable fields (state, online,
// ifc, cookie) and serializes irq handling against suspend/resume.
typedef struct ethernet_device {
    ethdev_t eth;        // shared intel-ethernet core state (see ie.h)
    mtx_t lock;          // protects state/online/ifc/cookie
    eth_state state;     // ETH_RUNNING / ETH_SUSPENDING / ETH_SUSPENDED
    zx_device_t* zxdev;  // device node published via device_add()
    pci_protocol_t pci;  // parent PCI protocol ops
    zx_handle_t ioh;     // handle returned by pci_map_bar() for BAR 0
    zx_handle_t irqh;    // interrupt handle from pci_map_interrupt()
    thrd_t thread;       // detached irq_thread servicing interrupts
    zx_handle_t btih;    // bus transaction initiator handle for DMA
    io_buffer_t buffer;  // contiguous io-buffer passed to eth_setup_buffers()
    bool online;         // last observed link status (eth_status_online)

    // callback interface to attached ethernet layer
    ethmac_ifc_t* ifc;
    void* cookie;
} ethernet_device_t;
49
50static int irq_thread(void* arg) {
51    ethernet_device_t* edev = arg;
52    for (;;) {
53        zx_status_t r;
54        r = zx_interrupt_wait(edev->irqh, NULL);
55        if (r != ZX_OK) {
56            printf("eth: irq wait failed? %d\n", r);
57            break;
58        }
59        mtx_lock(&edev->lock);
60        unsigned irq = eth_handle_irq(&edev->eth);
61        if (irq & ETH_IRQ_RX) {
62            void* data;
63            size_t len;
64
65            while (eth_rx(&edev->eth, &data, &len) == ZX_OK) {
66                if (edev->ifc && (edev->state == ETH_RUNNING)) {
67                    edev->ifc->recv(edev->cookie, data, len, 0);
68                }
69                eth_rx_ack(&edev->eth);
70            }
71        }
72        if (irq & ETH_IRQ_LSC) {
73            bool was_online = edev->online;
74            bool online = eth_status_online(&edev->eth);
75            zxlogf(TRACE, "intel-eth: ETH_IRQ_LSC fired: %d->%d\n", was_online, online);
76            if (online != was_online) {
77                edev->online = online;
78                if (edev->ifc) {
79                    edev->ifc->status(edev->cookie, online ? ETH_STATUS_ONLINE : 0);
80                }
81            }
82        }
83        mtx_unlock(&edev->lock);
84    }
85    return 0;
86}
87
88static zx_status_t eth_query(void* ctx, uint32_t options, ethmac_info_t* info) {
89    ethernet_device_t* edev = ctx;
90
91    if (options) {
92        return ZX_ERR_INVALID_ARGS;
93    }
94
95    memset(info, 0, sizeof(*info));
96    ZX_DEBUG_ASSERT(ETH_TXBUF_SIZE >= ETH_MTU);
97    info->mtu = ETH_MTU;
98    memcpy(info->mac, edev->eth.mac, sizeof(edev->eth.mac));
99
100    return ZX_OK;
101}
102
103static void eth_stop(void* ctx) {
104    ethernet_device_t* edev = ctx;
105    mtx_lock(&edev->lock);
106    edev->ifc = NULL;
107    mtx_unlock(&edev->lock);
108}
109
110static zx_status_t eth_start(void* ctx, ethmac_ifc_t* ifc, void* cookie) {
111    ethernet_device_t* edev = ctx;
112    zx_status_t status = ZX_OK;
113
114    mtx_lock(&edev->lock);
115    if (edev->ifc) {
116        status = ZX_ERR_BAD_STATE;
117    } else {
118        edev->ifc = ifc;
119        edev->cookie = cookie;
120        edev->ifc->status(edev->cookie, edev->online ? ETH_STATUS_ONLINE : 0);
121    }
122    mtx_unlock(&edev->lock);
123
124    return status;
125}
126
127static zx_status_t eth_queue_tx(void* ctx, uint32_t options, ethmac_netbuf_t* netbuf) {
128    ethernet_device_t* edev = ctx;
129    if (edev->state != ETH_RUNNING) {
130        return ZX_ERR_BAD_STATE;
131    }
132    // TODO: Add support for DMA directly from netbuf
133    return eth_tx(&edev->eth, netbuf->data, netbuf->len);
134}
135
136static zx_status_t eth_set_param(void *ctx, uint32_t param, int32_t value, void* data) {
137    ethernet_device_t* edev = ctx;
138    zx_status_t status = ZX_OK;
139
140    mtx_lock(&edev->lock);
141
142    switch (param) {
143    case ETHMAC_SETPARAM_PROMISC:
144        if ((bool)value) {
145            eth_start_promisc(&edev->eth);
146        } else {
147            eth_stop_promisc(&edev->eth);
148        }
149        status = ZX_OK;
150        break;
151    default:
152        status = ZX_ERR_NOT_SUPPORTED;
153    }
154    mtx_unlock(&edev->lock);
155
156    return status;
157}
158
// ZX_PROTOCOL_ETHERNET_IMPL operation table, published via device_add_args_t
// in eth_bind().
static ethmac_protocol_ops_t ethmac_ops = {
    .query = eth_query,
    .stop = eth_stop,
    .start = eth_start,
    .queue_tx = eth_queue_tx,
    .set_param = eth_set_param,
};
166
// Device suspend hook: stop rx immediately, give queued tx packets up to
// ~10ms to drain, then disable tx and the PHY and mark the device suspended.
static zx_status_t eth_suspend(void* ctx, uint32_t flags) {
    ethernet_device_t* edev = ctx;
    mtx_lock(&edev->lock);
    edev->state = ETH_SUSPENDING;

    // Immediately disable the rx queue
    eth_disable_rx(&edev->eth);

    // Wait for queued tx packets to complete, polling once per millisecond.
    // The lock is dropped while sleeping so other paths can make progress.
    int iterations = 0;
    do {
        if (!eth_tx_queued(&edev->eth)) {
            goto tx_done;
        }
        mtx_unlock(&edev->lock);
        zx_nanosleep(zx_deadline_after(ZX_MSEC(1)));
        iterations++;
        mtx_lock(&edev->lock);
    } while (iterations < 10);
    // Timed out: proceed with the suspend anyway, just log the condition.
    printf("intel-eth: timed out waiting for tx queue to drain when suspending\n");

tx_done:
    eth_disable_tx(&edev->eth);
    eth_disable_phy(&edev->eth);
    edev->state = ETH_SUSPENDED;
    mtx_unlock(&edev->lock);
    return ZX_OK;
}
195
196static zx_status_t eth_resume(void* ctx, uint32_t flags) {
197    ethernet_device_t* edev = ctx;
198    mtx_lock(&edev->lock);
199    eth_enable_phy(&edev->eth);
200    eth_enable_rx(&edev->eth);
201    eth_enable_tx(&edev->eth);
202    edev->state = ETH_RUNNING;
203    mtx_unlock(&edev->lock);
204    return ZX_OK;
205}
206
207static void eth_release(void* ctx) {
208    ethernet_device_t* edev = ctx;
209    eth_reset_hw(&edev->eth);
210    pci_enable_bus_master(&edev->pci, false);
211
212    io_buffer_release(&edev->buffer);
213
214    zx_handle_close(edev->btih);
215    zx_handle_close(edev->irqh);
216    zx_handle_close(edev->ioh);
217    free(edev);
218}
219
// zx_protocol_device_t hooks for the published device node.
static zx_protocol_device_t device_ops = {
    .version = DEVICE_OPS_VERSION,
    .suspend = eth_suspend,
    .resume = eth_resume,
    .release = eth_release,
};
226
227static zx_status_t eth_bind(void* ctx, zx_device_t* dev) {
228    ethernet_device_t* edev;
229    if ((edev = calloc(1, sizeof(ethernet_device_t))) == NULL) {
230        return ZX_ERR_NO_MEMORY;
231    }
232    mtx_init(&edev->lock, mtx_plain);
233    mtx_init(&edev->eth.send_lock, mtx_plain);
234
235    if (device_get_protocol(dev, ZX_PROTOCOL_PCI, &edev->pci)) {
236        printf("no pci protocol\n");
237        goto fail;
238    }
239
240    zx_status_t status = pci_get_bti(&edev->pci, 0, &edev->btih);
241    if (status != ZX_OK) {
242        goto fail;
243    }
244
245    // Query whether we have MSI or Legacy interrupts.
246    uint32_t irq_cnt = 0;
247    if ((pci_query_irq_mode(&edev->pci, ZX_PCIE_IRQ_MODE_MSI, &irq_cnt) == ZX_OK) &&
248        (pci_set_irq_mode(&edev->pci, ZX_PCIE_IRQ_MODE_MSI, 1) == ZX_OK)) {
249        printf("eth: using MSI mode\n");
250    } else if ((pci_query_irq_mode(&edev->pci, ZX_PCIE_IRQ_MODE_LEGACY, &irq_cnt) == ZX_OK) &&
251               (pci_set_irq_mode(&edev->pci, ZX_PCIE_IRQ_MODE_LEGACY, 1) == ZX_OK)) {
252        printf("eth: using legacy irq mode\n");
253    } else {
254        printf("eth: failed to configure irqs\n");
255        goto fail;
256    }
257
258    zx_status_t r = pci_map_interrupt(&edev->pci, 0, &edev->irqh);
259    if (r != ZX_OK) {
260        printf("eth: failed to map irq\n");
261        goto fail;
262    }
263
264    // map iomem
265    uint64_t sz;
266    zx_handle_t h;
267    void* io;
268    r = pci_map_bar(&edev->pci, 0u, ZX_CACHE_POLICY_UNCACHED_DEVICE, &io, &sz, &h);
269    if (r != ZX_OK) {
270        printf("eth: cannot map io %d\n", h);
271        goto fail;
272    }
273    edev->eth.iobase = (uintptr_t)io;
274    edev->ioh = h;
275
276    zx_pcie_device_info_t pci_info;
277    status = pci_get_device_info(&edev->pci, &pci_info);
278    if (status != ZX_OK) {
279        goto fail;
280    }
281    edev->eth.pci_did = pci_info.device_id;
282
283    if ((r = pci_enable_bus_master(&edev->pci, true)) < 0) {
284        printf("eth: cannot enable bus master %d\n", r);
285        goto fail;
286    }
287
288    if (eth_enable_phy(&edev->eth) != ZX_OK) {
289        goto fail;
290    }
291
292    if (eth_reset_hw(&edev->eth)) {
293        goto fail;
294    }
295
296    r = io_buffer_init(&edev->buffer, edev->btih, ETH_ALLOC, IO_BUFFER_RW | IO_BUFFER_CONTIG);
297    if (r < 0) {
298        printf("eth: cannot alloc io-buffer %d\n", r);
299        goto fail;
300    }
301
302    eth_setup_buffers(&edev->eth, io_buffer_virt(&edev->buffer), io_buffer_phys(&edev->buffer));
303    eth_init_hw(&edev->eth);
304    edev->online = eth_status_online(&edev->eth);
305
306    device_add_args_t args = {
307        .version = DEVICE_ADD_ARGS_VERSION,
308        .name = "intel-ethernet",
309        .ctx = edev,
310        .ops = &device_ops,
311        .proto_id = ZX_PROTOCOL_ETHERNET_IMPL,
312        .proto_ops = &ethmac_ops,
313    };
314
315    if (device_add(dev, &args, &edev->zxdev)) {
316        goto fail;
317    }
318
319    thrd_create_with_name(&edev->thread, irq_thread, edev, "eth-irq-thread");
320    thrd_detach(edev->thread);
321
322    printf("eth: intel-ethernet online\n");
323
324    return ZX_OK;
325
326fail:
327    io_buffer_release(&edev->buffer);
328    if (edev->btih) {
329        zx_handle_close(edev->btih);
330    }
331    if (edev->ioh) {
332        pci_enable_bus_master(&edev->pci, false);
333        zx_handle_close(edev->irqh);
334        zx_handle_close(edev->ioh);
335    }
336    free(edev);
337    return ZX_ERR_NOT_SUPPORTED;
338}
339
// Driver entry points registered with the devhost via ZIRCON_DRIVER_BEGIN.
static zx_driver_ops_t intel_ethernet_driver_ops = {
    .version = DRIVER_OPS_VERSION,
    .bind = eth_bind,
};
344
// clang-format off
// Bind rules: match Intel (vendor 0x8086) PCI devices with one of the
// supported NIC device IDs below. The trailing 13 is the bind-instruction
// count and must match the number of BI_* entries.
ZIRCON_DRIVER_BEGIN(intel_ethernet, intel_ethernet_driver_ops, "zircon", "0.1", 13)
    BI_ABORT_IF(NE, BIND_PROTOCOL, ZX_PROTOCOL_PCI),
    BI_ABORT_IF(NE, BIND_PCI_VID, 0x8086),
    BI_MATCH_IF(EQ, BIND_PCI_DID, 0x100E), // Qemu
    BI_MATCH_IF(EQ, BIND_PCI_DID, 0x1f45), // Atom c2000 2.5Gbe backplane
    BI_MATCH_IF(EQ, BIND_PCI_DID, 0x1502), // Ivy Bridge
    BI_MATCH_IF(EQ, BIND_PCI_DID, 0x15A3), // Broadwell
    BI_MATCH_IF(EQ, BIND_PCI_DID, 0x1570), // Skylake
    BI_MATCH_IF(EQ, BIND_PCI_DID, 0x1533), // I210 standalone
    BI_MATCH_IF(EQ, BIND_PCI_DID, IE_DID_I211_AT),
    BI_MATCH_IF(EQ, BIND_PCI_DID, IE_DID_I219_LM),
    BI_MATCH_IF(EQ, BIND_PCI_DID, 0x15b7), // Skull Canyon NUC
    BI_MATCH_IF(EQ, BIND_PCI_DID, 0x15b8), // I219-V
    BI_MATCH_IF(EQ, BIND_PCI_DID, 0x15d8), // Kaby Lake NUC
ZIRCON_DRIVER_END(intel_ethernet)
361