/*
 * Copyright (c) 2014, ETH Zurich. All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <barrelfish/barrelfish.h>

#include <dev/ioat_dma_chan_dev.h>

#include <dma_mem_utils.h>

#include <dma_ring_internal.h>
#include <ioat/ioat_dma_internal.h>
#include <ioat/ioat_dma_device_internal.h>
#include <ioat/ioat_dma_channel_internal.h>
#include <ioat/ioat_dma_descriptors_internal.h>
#include <ioat/ioat_dma_request_internal.h>

#include <debug.h>
struct ioat_dma_channel
{
    struct dma_channel common;

    ioat_dma_chan_t channel;         ///< Mackerel address
    lpaddr_t last_completion;        ///< physical address of the last completed descriptor
    struct dma_mem completion;       ///< memory region for the completion status write-back
    struct dma_ring *ring;           ///< descriptor ring
    uint64_t status;                 ///< channel status
};

/**
 * \brief sets the start address of the descriptor chain of the DMA channel
 *
 * \param chan  IOAT DMA channel
 */
static inline void channel_set_chain_addr(struct ioat_dma_channel *chan)
{
    lpaddr_t chain_addr = dma_ring_get_chain_addr(chan->ring);

    IOATCHAN_DEBUG("setting chain addr to [%016lx]\n", chan->common.id,
                   chain_addr);

    ioat_dma_chan_chainaddr_lo_wr(&chan->channel, (uint32_t) chain_addr);
    ioat_dma_chan_chainaddr_hi_wr(&chan->channel, chain_addr >> 32);
}

/**
 * \brief reads the channel status and returns the physical address of the last
 *        completed descriptor
 *
 * \param chan IOAT DMA channel
 *
 * \returns physical address of last descriptor
 */
static inline lpaddr_t channel_get_completion_addr(struct ioat_dma_channel *chan)
{
    lpaddr_t compl_addr = *((lpaddr_t*) chan->completion.vaddr);

    return (compl_addr & (~ioat_dma_chan_status_mask));
}
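
/*
 * Note on the completion write-back format (a sketch of the decoding,
 * assuming the usual IOAT CHANSTS layout): the engine periodically writes
 * its 64-bit status word to the completion area set up in
 * ioat_dma_channel_init(). The low-order bits hold the DMA transfer status,
 * the remaining high-order bits the address of the last completed
 * descriptor, which is why the address is recovered by masking with
 * ~ioat_dma_chan_status_mask above. E.g., with a hypothetical mask of 0x3f,
 * a write-back value of 0x7fff2047 decodes to descriptor address
 * 0x7fff2040 and status bits 0x7.
 */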

/**
 * \brief checks if the channel has completed descriptors which can be processed
 *        and returns the physical address of the last one.
 *
 * \param chan IOAT DMA channel
 *
 * \returns physical address of last descriptor
 *          0 if there were no new descriptors to process
 */
static inline lpaddr_t channel_has_completed_descr(struct ioat_dma_channel *chan)
{
    lpaddr_t curr_compl = channel_get_completion_addr(chan);
    if (curr_compl != chan->last_completion) {
        return curr_compl;
    } else {
        return 0;
    }
}

/**
 * \brief processes the completed descriptors of a DMA channel and finishes
 *        the requests
 *
 * \param chan            IOAT DMA channel
 * \param compl_addr_phys physical address of the last completed descriptor
 *
 * \returns SYS_ERR_OK if the request was processed to completion
 *          DMA_ERR_CHAN_IDLE if there was no descriptor to process
 *          DMA_ERR_REQUEST_UNFINISHED if the request is still not finished
 *          errval on error
 */
static errval_t channel_process_descriptors(struct ioat_dma_channel *chan,
                                            lpaddr_t compl_addr_phys)
{
    errval_t err;

    if (!compl_addr_phys) {
        return DMA_ERR_CHAN_IDLE;
    }

    IOATCHAN_DEBUG("processing [%016lx] wrnxt: %u, tail: %u, issued: %u\n",
                   chan->common.id, compl_addr_phys,
                   dma_ring_get_write_next(chan->ring), dma_ring_get_tail(chan->ring),
                   dma_ring_get_issued(chan->ring));

    uint16_t active_count = dma_ring_get_active(chan->ring);

    struct dma_descriptor *desc;
    struct dma_request *req;
    struct dma_request *req_head;

    uint16_t processed = 0;
    uint8_t request_done = 0;

    for (uint16_t i = 0; i < active_count; i++) {
        desc = dma_ring_get_tail_desc(chan->ring);

        /*
         * check if there is a request associated with the descriptor
         * this indicates the last descriptor of a request
         */
        req = dma_desc_get_request(desc);
        if (req) {
            req_head = dma_channel_deq_request_head(&chan->common);
            assert(req_head == req);
            err = ioat_dma_request_process((struct ioat_dma_request *) req);
            if (err_is_fail(err)) {
                dma_channel_enq_request_head(&chan->common, req_head);
                return err;
            }
            request_done = 1;
        }

        /* this was the last completed descriptor */
        if (dma_desc_get_paddr(desc) == compl_addr_phys) {
            processed = i;
            break;
        }
    }

    chan->last_completion = compl_addr_phys;

    /* do a 5us delay per still-pending descriptor */
    ioat_dma_device_set_intr_delay((struct ioat_dma_device *) chan->common.device,
                                   (5 * (active_count - processed)));
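
    /*
     * Worked example (hypothetical numbers): with active_count = 8 and
     * processed = 3, five descriptors are still outstanding, so the
     * interrupt delay is set to 5 * (8 - 3) = 25 microseconds.
     */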

    if (request_done) {
        return SYS_ERR_OK;
    }

    return DMA_ERR_REQUEST_UNFINISHED;
}

/*
 * ============================================================================
 * Library Internal Interface
 * ============================================================================
 */

/**
 * \brief initializes and allocates resources for a new DMA channel belonging
 *        to a device
 *
 * \param dev       IOAT DMA device
 * \param id        id of this channel
 * \param max_xfer  maximum size in bytes for a transfer
 * \param ret_chan  returned channel pointer
 *
 * \returns SYS_ERR_OK on success
 */
errval_t ioat_dma_channel_init(struct ioat_dma_device *dev,
                               uint8_t id,
                               uint32_t max_xfer,
                               struct ioat_dma_channel **ret_chan)
{
    errval_t err;

    struct ioat_dma_channel *chan = calloc(1, sizeof(*chan));
    if (chan == NULL) {
        return LIB_ERR_MALLOC_FAIL;
    }

    struct dma_device *dma_dev = (struct dma_device *) dev;
    struct dma_channel *dma_chan = &chan->common;

    dma_chan->id = dma_channel_id_build(dma_device_get_id(dma_dev), id);
    dma_chan->device = dma_dev;
    dma_chan->max_xfer_size = max_xfer;

    IOATCHAN_DEBUG("initialize channel with max. xfer size of %u bytes\n",
                   dma_chan->id, max_xfer);

    mackerel_addr_t chan_base = dma_device_get_mmio_vbase(dma_dev);
    ioat_dma_chan_initialize(&chan->channel, chan_base + ((id + 1) * 0x80));

    ioat_dma_chan_dcactrl_target_cpu_wrf(&chan->channel,
                                         ioat_dma_chan_dca_ctr_target_any);

    err = ioat_dma_channel_reset(chan);
    if (err_is_fail(err)) {
        free(chan);
        return err;
    }

    ioat_dma_device_get_complsts_addr(dev, &chan->completion);

    /* write the completion address */
    ioat_dma_chan_cmpl_lo_wr(&chan->channel, chan->completion.paddr);
    ioat_dma_chan_cmpl_hi_wr(&chan->channel, chan->completion.paddr >> 32);

    err = dma_ring_alloc(IOAT_DMA_DESC_RING_SIZE, IOAT_DMA_DESC_ALIGN,
                         IOAT_DMA_DESC_SIZE, 0x0, dma_chan, &chan->ring);
    if (err_is_fail(err)) {
        dma_mem_free(&chan->completion);
        free(chan);
        return err;
    }

    /* we have to do the hardware linkage of the descriptors ourselves */
    struct dma_descriptor *dcurr, *dnext;
    for (uint32_t i = 0; i < (1 << IOAT_DMA_DESC_RING_SIZE); ++i) {
        dcurr = dma_ring_get_desc(chan->ring, i);
        dnext = dma_desc_get_next(dcurr);
        assert(dnext);
        ioat_dma_desc_next_insert(dma_desc_get_desc_handle(dcurr),
                                  dma_desc_get_paddr(dnext));
    }

    ioat_dma_chan_ctrl_t chan_ctrl = 0;
    chan_ctrl = ioat_dma_chan_ctrl_err_abort_insert(chan_ctrl, 0x1);
    chan_ctrl = ioat_dma_chan_ctrl_err_cmp_en_insert(chan_ctrl, 0x1);
    chan_ctrl = ioat_dma_chan_ctrl_err_int_en_insert(chan_ctrl, 0x1);
    chan_ctrl = ioat_dma_chan_ctrl_intp_dis_insert(chan_ctrl, 0x1);
    ioat_dma_chan_ctrl_wr(&chan->channel, chan_ctrl);

    dma_chan->state = DMA_CHAN_ST_PREPARED;
    dma_chan->f.memcpy = ioat_dma_request_memcpy_chan;
    dma_chan->f.memset = ioat_dma_request_memset_chan;
    dma_chan->f.poll = ioat_dma_channel_poll;

    *ret_chan = chan;

    /*
     * check that the channel operates correctly by issuing a NOP request
     */
    IOATCHAN_DEBUG("performing selftest on channel with NOP\n", dma_chan->id);

    ioat_dma_request_nop_chan(chan);
    if (ioat_dma_channel_issue_pending(chan) == 0) {
        /* the NOP descriptor was not handed to the hardware */
        dma_mem_free(&chan->completion);
        free(chan);
        *ret_chan = NULL;
        return DMA_ERR_CHAN_ERROR;
    }

    uint32_t j = 0xFFFF;
    uint64_t status;
    do {
        status = ioat_dma_channel_get_status(chan);
        thread_yield();
    } while (j-- && !ioat_dma_channel_is_active(status)
             && !ioat_dma_channel_is_idle(status));

    if (ioat_dma_channel_is_active(status) || ioat_dma_channel_is_idle(status)) {
        IOATCHAN_DEBUG("channel worked properly: %016lx\n", dma_chan->id,
                       *(uint64_t *) chan->completion.vaddr);
        return SYS_ERR_OK;
    } else {
        IOATCHAN_DEBUG("channel error: %08x\n", dma_chan->id,
                       ioat_dma_chan_err_rd(&chan->channel));
        dma_mem_free(&chan->completion);
        free(chan);
        *ret_chan = NULL;
        return DMA_ERR_CHAN_ERROR;
    }
}
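
/*
 * Usage sketch (hypothetical caller; 'chan_count' and 'max_xfer' are
 * assumptions standing in for values obtained from the device's channel
 * count and transfer capability registers during device initialization):
 *
 *   struct ioat_dma_channel *chan;
 *   for (uint8_t i = 0; i < chan_count; i++) {
 *       err = ioat_dma_channel_init(dev, i, max_xfer, &chan);
 *       if (err_is_fail(err)) {
 *           return err;
 *       }
 *   }
 */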

/**
 * \brief Submits the pending descriptors to the hardware queue
 *
 * \param chan  IOAT DMA channel
 *
 * \returns number of submitted descriptors
 */
uint16_t ioat_dma_channel_issue_pending(struct ioat_dma_channel *chan)
{
    errval_t err;

    uint16_t pending = dma_ring_get_pendig(chan->ring);

    IOATCHAN_DEBUG("issuing %u pending descriptors to hardware\n",
                   chan->common.id, pending);

    if (chan->common.state != DMA_CHAN_ST_RUNNING) {
        err = ioat_dma_channel_start(chan);
        if (err_is_fail(err)) {
            /* the channel could not be started, nothing was submitted */
            return 0;
        }
    }
    if (pending > 0) {
        uint16_t dmacnt = dma_ring_submit_pending(chan->ring);
        ioat_dma_chan_dmacount_wr(&chan->channel, dmacnt);

        IOATCHAN_DEBUG(" setting dma_count to [%u]\n", chan->common.id, dmacnt);
    }

    return pending;
}
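
/*
 * Submission path sketch (assuming the dma_ring counters behave as their
 * names suggest): descriptors prepared since the last submission sit
 * between the ring's 'issued' and 'write_next' positions;
 * dma_ring_submit_pending() advances 'issued' and returns the new count
 * for the DMACOUNT register, so a single MMIO write hands an entire batch
 * of queued requests to the engine at once.
 */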

/*
 * ============================================================================
 * Public Interface
 * ============================================================================
 */

/*
 * ----------------------------------------------------------------------------
 * Channel State Management
 * ----------------------------------------------------------------------------
 */

/**
 * \brief Resets an IOAT DMA channel
 *
 * \param chan  IOAT DMA channel to be reset
 *
 * \returns SYS_ERR_OK on success
 *          DMA_ERR_RESET_TIMEOUT on reset timeout
 */
errval_t ioat_dma_channel_reset(struct ioat_dma_channel *chan)
{
    struct dma_channel *dma_chan = &chan->common;

    IOATCHAN_DEBUG("reset channel.\n", dma_chan->id);

    if (dma_chan->state == DMA_CHAN_ST_ERROR) {
        ioat_dma_chan_err_t chanerr = ioat_dma_chan_err_rd(&chan->channel);
        ioat_dma_chan_err_wr(&chan->channel, chanerr);
        IOATCHAN_DEBUG("Resetting channel from error state: [%08x]\n",
                       dma_chan->id, chanerr);

        /*
         * TODO: clear the ioat_dma_pci_chanerr register in PCI config space
         *       (same approach as above, presumably via
         *       pci_read_conf_header(uint32_t dword, uint32_t *val) and
         *       pci_write_conf_header(uint32_t dword, uint32_t val))
         *       -> How do we access this?
         */
    }
    dma_chan->state = DMA_CHAN_ST_RESETTING;

    /* perform reset */
    ioat_dma_chan_cmd_reset_wrf(&chan->channel, 0x1);

    uint16_t reset_counter = 0xFFF;
    do {
        if (!ioat_dma_chan_cmd_reset_rdf(&chan->channel)) {
            break;
        }
        thread_yield();
    } while (reset_counter--);

    if (ioat_dma_chan_cmd_reset_rdf(&chan->channel)) {
        /* reset failed */
        return DMA_ERR_RESET_TIMEOUT;
    }

    /* XXX: Intel BD architecture will need some additional work here */

    dma_chan->state = DMA_CHAN_ST_UNINITIALEZED;

    return SYS_ERR_OK;
}
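
/*
 * Recovery sketch (hypothetical sequence; ioat_dma_channel_restart() below
 * is not yet implemented): a successful reset leaves the channel in the
 * uninitialized state, so a caller would re-arm it before reuse:
 *
 *   err = ioat_dma_channel_reset(chan);
 *   if (err_is_fail(err)) { ...bail out... }
 *   err = ioat_dma_channel_start(chan);   // reloads the chain address
 */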

/**
 * \brief restarts an IOAT DMA channel. This updates the chain address
 *        register and the DMA count register.
 *
 * \param chan  IOAT DMA channel
 *
 * \returns SYS_ERR_OK on success
 *          DMA_ERR_* on failure
 */
errval_t ioat_dma_channel_restart(struct ioat_dma_channel *chan)
{
    assert(!"NYI");
    return SYS_ERR_OK;
}

/**
 * \brief starts an IOAT DMA channel. This sets the chain address to the first
 *        entry of the ring and the DMA count to zero.
 *
 * \param chan  IOAT DMA channel
 *
 * \returns SYS_ERR_OK on success
 *          DMA_ERR_* on failure
 */
errval_t ioat_dma_channel_start(struct ioat_dma_channel *chan)
{
    if (chan->common.state == DMA_CHAN_ST_ERROR) {
        return ioat_dma_channel_restart(chan);
    }

    if (chan->common.state == DMA_CHAN_ST_RUNNING) {
        return SYS_ERR_OK;
    }

    IOATCHAN_DEBUG("starting channel.\n", chan->common.id);

    chan->common.state = DMA_CHAN_ST_RUNNING;
    channel_set_chain_addr(chan);

    return SYS_ERR_OK;
}

/**
 * \brief stops the processing of the descriptors.
 *
 * \param chan  IOAT DMA channel
 *
 * \returns SYS_ERR_OK on success
 *          DMA_ERR_* on failure
 */
errval_t ioat_dma_channel_stop(struct ioat_dma_channel *chan)
{
    assert(!"NYI");
    return SYS_ERR_OK;
}

/**
 * \brief Puts the IOAT DMA channel into the suspended state
 *
 * \param chan  IOAT DMA channel
 *
 * \returns SYS_ERR_OK on success
 *          DMA_ERR_* on failure
 */
errval_t ioat_dma_channel_suspend(struct ioat_dma_channel *chan)
{
    assert(!"NYI");
    return SYS_ERR_OK;
}

/**
 * \brief enqueues a request onto the IOAT DMA channel and submits it to the
 *        hardware
 *
 * \param chan  IOAT DMA channel
 * \param req   IOAT DMA request to be submitted
 *
 * \returns SYS_ERR_OK on success
 *          DMA_ERR_* on failure
 */
errval_t ioat_dma_channel_submit_request(struct ioat_dma_channel *chan,
                                         struct ioat_dma_request *req)
{
    IOATCHAN_DEBUG("submit request [%016lx]\n", chan->common.id,
                   dma_request_get_id((struct dma_request *) req));

    dma_channel_enq_request_tail(&chan->common, (struct dma_request *) req);

    ioat_dma_channel_issue_pending(chan);

    return SYS_ERR_OK;
}
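
/*
 * Call-path sketch (hypothetical, following the function table wired up in
 * ioat_dma_channel_init()): a client memcpy request reaches this function
 * roughly as
 *
 *   generic library memcpy entry (chan->f.memcpy)
 *     -> ioat_dma_request_memcpy_chan(...)     // builds the descriptors
 *        -> ioat_dma_channel_submit_request(chan, req);
 */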

/**
 * \brief polls the IOAT DMA channel for completed events
 *
 * \param chan  IOAT DMA channel
 *
 * \returns SYS_ERR_OK if there was something processed
 *          DMA_ERR_CHAN_IDLE if there was no request on the channel or the
 *                            current request has not been completed yet
 */
errval_t ioat_dma_channel_poll(struct dma_channel *chan)
{
    errval_t err;

    struct ioat_dma_channel *ioat_chan = (struct ioat_dma_channel *) chan;

    uint64_t status = ioat_dma_channel_get_status(ioat_chan);

    if (ioat_dma_channel_is_halted(status)) {
        IOATCHAN_DEBUG("channel is in error state\n", chan->id);
        char buf[512];
        ioat_dma_chan_err_pr(buf, 512, &ioat_chan->channel);
        printf("channel error: %s\n", buf);
        assert(!"NYI: error event handling");
    }

    /* check if there can be something to process */
    if (chan->req_list.head == NULL) {
        return DMA_ERR_CHAN_IDLE;
    }

    lpaddr_t compl_addr_phys = channel_has_completed_descr(ioat_chan);
    if (!compl_addr_phys) {
        return DMA_ERR_CHAN_IDLE;
    }

    err = channel_process_descriptors(ioat_chan, compl_addr_phys);
    switch (err_no(err)) {
        case SYS_ERR_OK:
            /* this means we processed a descriptor request */
            return SYS_ERR_OK;
        case DMA_ERR_REQUEST_UNFINISHED:
            return DMA_ERR_CHAN_IDLE;
        default:
            return err;
    }
}
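
/*
 * Polling sketch (hypothetical caller waiting for its submitted requests to
 * drain; assumes a generic dma_channel_poll() wrapper dispatches to this
 * function via the f.poll pointer set in ioat_dma_channel_init()):
 *
 *   errval_t err;
 *   do {
 *       err = dma_channel_poll(chan);
 *       thread_yield();
 *   } while (err_no(err) == DMA_ERR_CHAN_IDLE);
 *   // err is now SYS_ERR_OK or a hard error
 */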

/*
 * ----------------------------------------------------------------------------
 * Getter / Setter Functions
 * ----------------------------------------------------------------------------
 */

/**
 * \brief returns the associated IOAT DMA descriptor ring of a channel
 *
 * \param chan  IOAT DMA channel
 *
 * \returns IOAT DMA descriptor ring handle
 */
inline struct dma_ring *ioat_dma_channel_get_ring(struct ioat_dma_channel *chan)
{
    return chan->ring;
}

/**
 * \brief updates the cached channel status field by reading the CHANSTS
 *        register
 *
 * \param chan IOAT DMA channel
 *
 * \returns the current 64-bit channel status value
 */
inline uint64_t ioat_dma_channel_get_status(struct ioat_dma_channel *chan)
{
    uint32_t status_lo = ioat_dma_chan_sts_lo_rd(&chan->channel);
    chan->status = ioat_dma_chan_sts_hi_rd(&chan->channel);
    chan->status <<= 32;
    chan->status |= status_lo;

    return chan->status;
}
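
/*
 * Decoding sketch: the raw value returned above is meant to be tested with
 * the status helpers already used in the selftest and in the poll function,
 * e.g. (hypothetical caller):
 *
 *   uint64_t st = ioat_dma_channel_get_status(chan);
 *   if (ioat_dma_channel_is_halted(st)) {
 *       // channel hit an error; inspect ioat_dma_chan_err_rd()
 *   }
 */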