/*
 * Copyright (c) 2014, ETH Zurich. All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */
#include <string.h>
#include <barrelfish/barrelfish.h>
#include <barrelfish/waitset.h>
#include <bench/bench.h>
#include <driverkit/iommu.h>
#include <dev/ioat_dma_dev.h>

#include <dma_mem_utils.h>

#include <ioat/ioat_dma_internal.h>
#include <ioat/ioat_dma_dca_internal.h>
#include <ioat/ioat_dma_device_internal.h>
#include <ioat/ioat_dma_channel_internal.h>

#include <debug.h>
#include "../include/dma_device_internal.h"
/**
 * IOAT DMA device representation
 */
struct ioat_dma_device
{
    struct dma_device common;

    ioat_dma_t device;                  ///< mackerel device base
    ioat_dma_cbver_t version;           ///< Crystal Beach version number

    struct dmem complstatus;            ///< memory region for the channels' CHANSTS writeback

    uint8_t irq_msix_vector;            ///< MSI-X interrupt vector
    uint16_t irq_msix_count;            ///< number of MSI-X vectors

    uint32_t flags;                     ///< device flags (IOAT_DMA_DEV_F_*)
};

/// counter for device ID enumeration
static dma_dev_id_t device_id = 1;

/*
 * ----------------------------------------------------------------------------
 * device initialization functions
 * ----------------------------------------------------------------------------
 */

static errval_t device_init_ioat_v1(struct ioat_dma_device *dev)
{
    IOATDEV_DEBUG("devices of Crystal Beach Version 1.xx are currently not supported.\n",
                  dev->common.id);
    return DMA_ERR_DEVICE_UNSUPPORTED;
}

static errval_t device_init_ioat_v2(struct ioat_dma_device *dev)
{
    IOATDEV_DEBUG("devices of Crystal Beach Version 2.xx are currently not supported.\n",
                  dev->common.id);
    return DMA_ERR_DEVICE_UNSUPPORTED;
}

static errval_t device_init_ioat_v3(struct ioat_dma_device *dev)
{
    errval_t err;

    IOATDEV_DEBUG("initialize Crystal Beach 3 DMA device\n", dev->common.id);

    ioat_dma_dmacapability_t cap = ioat_dma_dmacapability_rd(&dev->device);

    if (ioat_dma_cbver_minor_extract(dev->version) == 2) {
        IOATDEV_DEBUG("disabling XOR and PQ opcodes for Crystal Beach 3.2\n",
                      dev->common.id);
        cap = ioat_dma_dmacapability_xor_insert(cap, 0x0);
        cap = ioat_dma_dmacapability_pq_insert(cap, 0x0);
    } else if (ioat_dma_cbver_minor_extract(dev->version) == 3) {
        IOATDEV_DEBUG("devices of Crystal Beach Version 3.3 are not supported.\n",
                      dev->common.id);
        return DMA_ERR_DEVICE_UNSUPPORTED;
    }

    /* if DCA is enabled, we cannot support the RAID functions */
    if (ioat_dma_dca_is_enabled()) {
        IOATDEV_DEBUG("Disabling XOR and PQ while DCA is enabled\n", dev->common.id);
        cap = ioat_dma_dmacapability_xor_insert(cap, 0x0);
        cap = ioat_dma_dmacapability_pq_insert(cap, 0x0);
    }

    if (ioat_dma_dmacapability_xor_extract(cap)) {
        IOATDEV_DEBUG("device supports XOR RAID.\n", dev->common.id);

        dev->flags |= IOAT_DMA_DEV_F_RAID;

        /*
         * this may need some additional functions to prepare
         * the specific transfers...
         *
         * max_xor = 8;
         * prepare_xor, prepare_xor_val
         */
    }

    if (ioat_dma_dmacapability_pq_extract(cap)) {
        IOATDEV_DEBUG("device supports PQ RAID.\n", dev->common.id);

        dev->flags |= IOAT_DMA_DEV_F_RAID;

        /*
         * this may need some additional functions to prepare the
         * DMA descriptors
         *
         * max_xor = 8;
         * max_pq = 8;
         * prepare_pq, prepare_pq_val
         *
         * also set the prepare_xor pointers...
         */
    }

    /* set the interrupt type to disabled */
    dev->common.irq_type = DMA_IRQ_DISABLED;
    dev->common.type = DMA_DEV_TYPE_IOAT;

    /* allocate memory for completion status writeback */
    err = driverkit_iommu_mmap_cl(dev->common.iommu, IOAT_DMA_COMPLSTATUS_SIZE,
                                  IOAT_DMA_COMPLSTATUS_FLAGS, &dev->complstatus);
    if (err_is_fail(err)) {
        return err;
    }

    dev->common.channels.count = ioat_dma_chancnt_num_rdf(&dev->device);

    dev->common.channels.c = calloc(dev->common.channels.count,
                                    sizeof(*dev->common.channels.c));
    if (dev->common.channels.c == NULL) {
        driverkit_iommu_munmap(&dev->complstatus);
        return LIB_ERR_MALLOC_FAIL;
    }

    /* channel enumeration */

    IOATDEV_DEBUG("channel enumeration. discovered %u channels\n", dev->common.id,
                  dev->common.channels.count);

    /* XFERCAP encodes the maximum transfer size as a power of two */
    uint32_t max_xfer_size = (1 << ioat_dma_xfercap_max_rdf(&dev->device));

    for (uint8_t i = 0; i < dev->common.channels.count; ++i) {
        struct dma_channel **chan = &dev->common.channels.c[i];
        err = ioat_dma_channel_init(dev, i, max_xfer_size,
                                    (struct ioat_dma_channel **) chan);
        if (err_is_fail(err)) {
            /* TODO: also tear down the channels that were already initialized */
            free(dev->common.channels.c);
            driverkit_iommu_munmap(&dev->complstatus);
            return err;
        }
    }

    if (dev->flags & IOAT_DMA_DEV_F_DCA) {
        /*
         * TODO: DCA initialization
         * device->dca = ioat3_dca_init(pdev, device->reg_base);
         */
    }

    err = ioat_dma_device_irq_setup(dev, DMA_IRQ_MSIX);
    if (err_is_fail(err)) {
        return err;
    }

    return SYS_ERR_OK;
}

/*
 * ===========================================================================
 * Library Internal Interface
 * ===========================================================================
 */

void ioat_dma_device_get_complsts_addr(struct ioat_dma_device *dev,
                                       struct dmem *mem)
{
    if (dev->common.state != DMA_DEV_ST_CHAN_ENUM) {
        /* completion-status slices are only handed out during channel
         * enumeration; hand back an all-zero region otherwise */
        memset(mem, 0, sizeof(*mem));
        return;
    }

    assert(dev->complstatus.vbase);

    *mem = dev->complstatus;
    mem->size = IOAT_DMA_COMPLSTATUS_SIZE;
    mem->devaddr += (IOAT_DMA_COMPLSTATUS_SIZE * dev->common.channels.next);
    mem->mem = NULL_CAP;
    mem->vbase += (IOAT_DMA_COMPLSTATUS_SIZE * dev->common.channels.next++);
}
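
/*
 * A sketch of the slicing done above (illustrative): during enumeration,
 * channel i receives
 *
 *   devaddr_i = complstatus.devaddr + i * IOAT_DMA_COMPLSTATUS_SIZE
 *   vbase_i   = complstatus.vbase   + i * IOAT_DMA_COMPLSTATUS_SIZE
 *
 * so each channel's CHANSTS writeback lands in its own disjoint slice of
 * the single region allocated in device_init_ioat_v3().
 */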

#if IOAT_DEBUG_INTR_ENABLED
/// flag indicating that the interrupt has happened, for debugging purposes
static uint32_t msix_intr_happened = 0;

#include <dma/ioat/ioat_dma_request.h>

static void ioat_dma_device_irq_handler(void* arg)
{
    errval_t err;
    struct dma_device *dev = arg;

    IOATDEV_DEBUG("############ MSI-X INTERRUPT HAPPENED.\n", dev->id);

    msix_intr_happened = 1;

    err = ioat_dma_device_poll_channels(dev);
    if (err_is_fail(err)) {
        if (err_no(err) == DMA_ERR_DEVICE_IDLE) {
            IOATDEV_DEBUG("WARNING: MSI-X interrupt on idle device\n", dev->id);
            return;
        }
        USER_PANIC_ERR(err, "dma poll device returned an error\n");
    }
}
#endif

/**
 * \brief gets the local APIC ID of the calling core via CPUID
 *
 * \return local APIC ID
 */
static inline uint8_t get_local_apic_id(void)
{
    uint32_t eax, ebx;

    /* CPUID leaf 1 reports the initial local APIC ID in EBX[31:24] */
    cpuid(1, &eax, &ebx, NULL, NULL);
    return ebx >> 24;
}

/**
 * \brief globally enables the interrupts for the given device
 *
 * \param dev   IOAT DMA device
 * \param type  the interrupt type to enable
 */
errval_t ioat_dma_device_irq_setup(struct ioat_dma_device *dev,
                                   dma_irq_t type)
{
    errval_t err;

    /* XXX: interrupt setup is not yet functional; this early return leaves
     * interrupts disabled and skips the code below */
    return SYS_ERR_OK;

    ioat_dma_intrctrl_t intctrl = 0;
    intctrl = ioat_dma_intrctrl_intp_en_insert(intctrl, 1);

    dev->common.irq_type = type;
    switch (type) {
        case DMA_IRQ_MSIX:
            /* NB: the disabled block below still references a pci_addr field
             * that struct ioat_dma_device does not currently have */
            #if 0
            /* The number of MSI-X vectors should equal the number of channels */
            IOATDEV_DEBUG("MSI-X interrupt setup for device (%u, %u, %u)\n",
                          dev->common.id, dev->pci_addr.bus, dev->pci_addr.device,
                          dev->pci_addr.function);

            err = pci_msix_enable_addr(&dev->pci_addr, &dev->irq_msix_count);
            if (err_is_fail(err)) {
                return err;
            }

            assert(dev->irq_msix_count > 0);

            IOATDEV_DEBUG("MSI-X enabled #vecs=%d\n", dev->common.id,
                          dev->irq_msix_count);

            err = pci_setup_inthandler(ioat_dma_device_irq_handler, dev,
                                       &dev->irq_msix_vector);
            assert(err_is_ok(err));

            uint8_t dest = get_local_apic_id();

            IOATDEV_DEBUG("MSI-X routing to apic=%u\n", dev->common.id,
                          dest);

            err = pci_msix_vector_init_addr(&dev->pci_addr, 0, dest,
                                            dev->irq_msix_vector);
            assert(err_is_ok(err));

            /* enable the interrupts */
            intctrl = ioat_dma_intrctrl_msix_vec_insert(intctrl, 1);
            intctrl = ioat_dma_intrctrl_intp_en_insert(intctrl, 1);
            #endif
            assert(!"NYI");
            #if IOAT_DEBUG_INTR_ENABLED
            /* the debug handler only exists in debug-interrupt builds */
            ioat_dma_device_irq_handler(&dev->common);
            #endif
            break;
        case DMA_IRQ_MSI:
            IOATDEV_DEBUG("Initializing MSI interrupts\n", dev->common.id);
            assert(!"NYI");
            break;
        case DMA_IRQ_INTX:
            IOATDEV_DEBUG("Initializing INTx interrupts\n", dev->common.id);
            assert(!"NYI");
            break;
        default:
            /* disabled */
            intctrl = 0;
            IOATDEV_DEBUG("Disabling interrupts\n", dev->common.id);
            break;
    }

    ioat_dma_intrctrl_wr(&dev->device, intctrl);

#if IOAT_DEBUG_INTR_ENABLED
    /*
     * check if the interrupts are working: issue a NOP request and wait for
     * the completion interrupt to fire.
     */
    msix_intr_happened = 0;

    struct ioat_dma_channel *chan;
    chan = (struct ioat_dma_channel *) dev->common.channels.c[0];

    ioat_dma_request_nop_chan(chan);
    err = ioat_dma_channel_issue_pending(chan);
    if (err_is_fail(err)) {
        return err;
    }

    while (msix_intr_happened == 0) {
        uint64_t status = ioat_dma_channel_get_status(chan);
        err = event_dispatch_non_block(get_default_waitset());

        if (!ioat_dma_channel_is_active(status) && !ioat_dma_channel_is_idle(status)) {
            USER_PANIC("DMA request turned channel into erroneous state.");
        }

        switch (err_no(err)) {
            case LIB_ERR_NO_EVENT:
                thread_yield();
                break;
            case SYS_ERR_OK:
                continue;
            default:
                USER_PANIC_ERR(err, "dispatching event");
        }
    }
#endif

    return SYS_ERR_OK;
}

/*
 * ===========================================================================
 * Public Interface
 * ===========================================================================
 */

/*
 * ----------------------------------------------------------------------------
 * device initialization / termination
 * ----------------------------------------------------------------------------
 */

/**
 * \brief initializes an IOAT DMA device with the given MMIO capability
 *
 * \param mmio  capability representing the device's MMIO registers
 * \param cl    IOMMU client used to map DMA-capable memory for the device
 * \param dev   returns a pointer to the device structure
 *
 * \returns SYS_ERR_OK on success
 *          errval on error
 */
errval_t ioat_dma_device_init(struct capref mmio,
                              struct iommu_client *cl,
                              struct ioat_dma_device **dev)
{
    errval_t err;

    struct ioat_dma_device *ioat_device = calloc(1, sizeof(*ioat_device));
    if (ioat_device == NULL) {
        return LIB_ERR_MALLOC_FAIL;
    }

#if DMA_BENCH_ENABLED
    bench_init();
#endif

    struct dma_device *dma_dev = &ioat_device->common;

    struct frame_identity mmio_id;
    err = frame_identify(mmio, &mmio_id);
    if (err_is_fail(err)) {
        free(ioat_device);
        return err;
    }

    dma_dev->id = device_id++;
    dma_dev->mmio.paddr = mmio_id.base;
    dma_dev->mmio.bytes = mmio_id.bytes;
    dma_dev->mmio.frame = mmio;
    dma_dev->iommu = cl;

    IOATDEV_DEBUG("init device with mmio range: {paddr=0x%016lx, size=%zu kB}\n",
                  dma_dev->id, mmio_id.base, mmio_id.bytes / 1024);

    err = vspace_map_one_frame_attr((void**) &dma_dev->mmio.vaddr,
                                    dma_dev->mmio.bytes, dma_dev->mmio.frame,
                                    VREGION_FLAGS_READ_WRITE_NOCACHE,
                                    NULL, NULL);
    if (err_is_fail(err)) {
        free(ioat_device);
        return err;
    }

    ioat_dma_initialize(&ioat_device->device, NULL, (void *) dma_dev->mmio.vaddr);

    ioat_device->version = ioat_dma_cbver_rd(&ioat_device->device);

    IOATDEV_DEBUG("device registers mapped at 0x%016lx. IOAT version: %u.%u\n",
                  dma_dev->id, dma_dev->mmio.vaddr,
                  ioat_dma_cbver_major_extract(ioat_device->version),
                  ioat_dma_cbver_minor_extract(ioat_device->version));

    switch (ioat_dma_cbver_major_extract(ioat_device->version)) {
        case ioat_dma_cbver_1x:
            err = device_init_ioat_v1(ioat_device);
            break;
        case ioat_dma_cbver_2x:
            err = device_init_ioat_v2(ioat_device);
            break;
        case ioat_dma_cbver_3x:
            err = device_init_ioat_v3(ioat_device);
            break;
        default:
            err = DMA_ERR_DEVICE_UNSUPPORTED;
    }

    if (err_is_fail(err)) {
        vspace_unmap((void*) dma_dev->mmio.vaddr);
        free(ioat_device);
        return err;
    }

    dma_dev->f.deregister_memory = NULL;
    dma_dev->f.register_memory = NULL;
    dma_dev->f.poll = ioat_dma_device_poll_channels;

    *dev = ioat_device;

    return err;
}
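
/*
 * Example (sketch): bringing the device up from a driver's init path.
 * `mmio_cap` and `iommu` are placeholders for the MMIO BAR capability and
 * the IOMMU client obtained from driverkit.
 *
 *   struct ioat_dma_device *dma;
 *   errval_t err = ioat_dma_device_init(mmio_cap, iommu, &dma);
 *   if (err_is_fail(err)) {
 *       USER_PANIC_ERR(err, "initializing the IOAT DMA device failed");
 *   }
 */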

/**
 * \brief terminates the device operation and frees up the allocated resources
 *
 * \param dev IOAT DMA device to shut down
 *
 * \returns SYS_ERR_OK on success
 *          errval on error
 */
errval_t ioat_dma_device_shutdown(struct ioat_dma_device *dev)
{
    assert(!"NYI");
    return SYS_ERR_OK;
}

/**
 * \brief requests access to an IOAT DMA device from the IOAT device manager
 *        and initializes the device
 *
 * \param dev  returns a pointer to the device structure
 *
 * \returns SYS_ERR_OK on success
 *          errval on error
 */
errval_t ioat_dma_device_acquire(struct ioat_dma_device **dev)
{
    errval_t err;

    struct ioat_dma_device *ioat_device = calloc(1, sizeof(*ioat_device));
    if (ioat_device == NULL) {
        return LIB_ERR_MALLOC_FAIL;
    }
    assert(!"NYI");
    err = SYS_ERR_OK;
    return err;
}

/**
 * \brief terminates device operation, frees the allocated resources, and
 *        returns the device to the IOAT device manager
 *
 * \param dev IOAT DMA device to be released
 *
 * \returns SYS_ERR_OK on success
 *          errval on error
 */
errval_t ioat_dma_device_release(struct ioat_dma_device *dev)
{
    assert(!"NYI");
    return SYS_ERR_OK;
}

/*
 * ----------------------------------------------------------------------------
 * Interrupt management
 * ----------------------------------------------------------------------------
 */

/**
 * \brief enables the interrupts for the device
 *
 * \param dev   IOAT DMA device
 * \param type  interrupt type
 * \param fn    interrupt handler function
 * \param arg   argument supplied to the handler function
 *
 * \returns SYS_ERR_OK on success
 *          errval on error
 */
errval_t ioat_dma_device_intr_enable(struct ioat_dma_device *dev,
                                     dma_irq_t type,
                                     dma_irq_fn_t fn,
                                     void *arg)
{
    assert(!"NYI");
    return SYS_ERR_OK;
}

/**
 * \brief disables the interrupts for the device
 *
 * \param dev   IOAT DMA device
 */
void ioat_dma_device_intr_disable(struct ioat_dma_device *dev)
{
    assert(!"NYI");
}

/**
 * \brief sets the interrupt delay for the device
 *
 * \param dev   IOAT DMA device
 * \param usec  interrupt delay in microseconds
 */
void ioat_dma_device_set_intr_delay(struct ioat_dma_device *dev,
                                    uint16_t usec)
{
    ioat_dma_intrdelay_delay_us_wrf(&dev->device, usec);
}
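
/*
 * Example (sketch): enabling interrupt coalescing for a device `dev`; the
 * 100us delay is purely illustrative.
 *
 *   ioat_dma_device_set_intr_delay(dev, 100);
 */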

/*
 * ----------------------------------------------------------------------------
 * Device Operation Functions
 * ----------------------------------------------------------------------------
 */

/**
 * \brief polls the channels of the IOAT DMA device
 *
 * \param dev   IOAT DMA device
 *
 * \returns SYS_ERR_OK on success
 *          DMA_ERR_DEVICE_IDLE if there is nothing completed on the channels
 *          errval on error
 */
errval_t ioat_dma_device_poll_channels(struct dma_device *dev)
{
    errval_t err;

    uint8_t idle = 0x1;

    for (uint8_t i = 0; i < dev->channels.count; ++i) {
        err = ioat_dma_channel_poll(dev->channels.c[i]);
        switch (err_no(err)) {
            case DMA_ERR_CHAN_IDLE:
                break;
            case SYS_ERR_OK:
                idle = 0;
                break;
            default:
                return err;
        }
    }

    if (idle) {
        return DMA_ERR_DEVICE_IDLE;
    }

    return SYS_ERR_OK;
}
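
/*
 * Example (sketch): driving the device by polling until a request completes.
 * `dma` is an initialized struct ioat_dma_device and `done` a flag set from
 * a request's completion handler; both names are illustrative.
 *
 *   while (!done) {
 *       errval_t err = ioat_dma_device_poll_channels((struct dma_device *) dma);
 *       if (err_is_fail(err) && err_no(err) != DMA_ERR_DEVICE_IDLE) {
 *           USER_PANIC_ERR(err, "polling the IOAT DMA device failed");
 *       }
 *       thread_yield();
 *   }
 */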