1/*
2 * Copyright (c) 2014, ETH Zurich. All rights reserved.
3 *
4 * This file is distributed under the terms in the attached LICENSE file.
5 * If you do not find this file, copies can be found by writing to:
6 * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
7 */
8
9#include<barrelfish/barrelfish.h>
10
11#include <ioat/ioat_dma_internal.h>
12#include <dma_ring_internal.h>
13#include <ioat/ioat_dma_device_internal.h>
14#include <ioat/ioat_dma_channel_internal.h>
15#include <ioat/ioat_dma_request_internal.h>
16#include <ioat/ioat_dma_descriptors_internal.h>
17
18#include <debug.h>
19
/**
 * represents the IOAT specific DMA requests
 */
struct ioat_dma_request
{
    struct dma_request common;          ///< generic DMA request part (must stay first: code casts between the two)
    struct dma_descriptor *desc_head;   ///< first descriptor belonging to this request
    struct dma_descriptor *desc_tail;   ///< last descriptor; carries the back-pointer to the request
};
29
30/*
31 * ---------------------------------------------------------------------------
32 * Request Management
33 * ---------------------------------------------------------------------------
34 */
35
/// caches allocated requests which are no longer used
/// (singly linked list, chained through the requests' common.next field)
static struct dma_request *req_free_list = NULL;
38
39/**
40 * \brief allocates a IOAT DMA request structure
41 *
42 * \returns IOAT DMA request
43 *          NULL on failure
44 */
45static struct ioat_dma_request *request_alloc(void)
46{
47    struct ioat_dma_request *ret;
48
49    if (req_free_list) {
50        ret = (struct ioat_dma_request *) req_free_list;
51        req_free_list = ret->common.next;
52
53        DMAREQ_DEBUG("meta: reusing request %p. freelist:%p\n", ret, req_free_list);
54
55        return ret;
56    }
57    return calloc(1, sizeof(*ret));
58}
59
60/**
61 * \brief frees up the used DMA request structure
62 *
63 * \param req   DMA request to be freed
64 */
65static void request_free(struct ioat_dma_request *req)
66{
67    DMAREQ_DEBUG("meta: freeing request %p.\n", req);
68    req->desc_head = NULL;
69    req->desc_tail = NULL;
70    req->common.next = req_free_list;
71    req_free_list = &req->common;
72}
73
74/*
75 * ---------------------------------------------------------------------------
76 * Helper Functions
77 * ---------------------------------------------------------------------------
78 */
79
/**
 * \brief computes how many descriptors a transfer of the given size needs
 *        on this channel (ceiling division by the channel's max transfer size)
 */
inline static uint32_t req_num_desc_needed(struct ioat_dma_channel *chan,
                                           size_t bytes)
{
    struct dma_channel *generic = (struct dma_channel *) chan;
    size_t chunk = dma_channel_get_max_xfer_size(generic);

    /* round up: one descriptor per max-transfer-size chunk */
    return (uint32_t) ((bytes + chunk - 1) / chunk);
}
88
89/*
90 * ===========================================================================
91 * Library Internal Interface
92 * ===========================================================================
93 */
94
95/**
96 * \brief handles the processing of completed DMA requests
97 *
98 * \param req   the DMA request to process
99 *
100 * \returns SYS_ERR_OK on sucess
101 *          errval on failure
102 */
103errval_t ioat_dma_request_process(struct ioat_dma_request *req)
104{
105    errval_t err;
106
107    req->common.state = DMA_REQ_ST_DONE;
108
109    err = dma_request_process(&req->common);
110    if (err_is_fail(err)) {
111        return err;
112    }
113
114    request_free(req);
115
116    return SYS_ERR_OK;
117}
118
119/*
120 * ===========================================================================
121 * Public Interface
122 * ===========================================================================
123 */
124
125/**
126 * \brief issues a memcpy request to the given channel
127 *
128 * \param chan  IOAT DMA channel
129 * \param setup request setup information
130 * \param id    returns the generated request id
131 *
132 * \returns SYS_ERR_OK on success
133 *          errval on failure
134 */
135errval_t ioat_dma_request_memcpy_chan(struct dma_channel *chan,
136                                      struct dma_req_setup *setup,
137                                      dma_req_id_t *id)
138{
139    assert(chan->device->type == DMA_DEV_TYPE_IOAT);
140
141    struct ioat_dma_channel *ioat_chan = (struct ioat_dma_channel *) chan;
142
143    uint32_t num_desc = req_num_desc_needed(ioat_chan, setup->args.memcpy.bytes);
144
145    IOATREQ_DEBUG("DMA Memcpy request: [0x%016lx]->[0x%016lx] of %lu bytes (%u desc)\n",
146                  setup->args.memcpy.src, setup->args.memcpy.dst,
147                  setup->args.memcpy.bytes, num_desc);
148
149    struct dma_ring *ring = ioat_dma_channel_get_ring(ioat_chan);
150
151    if (num_desc > dma_ring_get_space(ring)) {
152        IOATREQ_DEBUG("Too less space in ring: %u / %u\n", num_desc,
153                      dma_ring_get_space(ring));
154        return DMA_ERR_NO_DESCRIPTORS;
155    }
156
157    struct ioat_dma_request *req = request_alloc();
158    if (req == NULL) {
159        IOATREQ_DEBUG("No request descriptors for holding request data\n");
160        return DMA_ERR_NO_REQUESTS;
161    }
162
163    dma_request_common_init(&req->common, chan, setup->type);
164
165    ioat_dma_desc_ctrl_array_t ctrl = {
166        0
167    };
168
169    struct dma_descriptor *desc;
170    size_t length = setup->args.memcpy.bytes;
171    lpaddr_t src = setup->args.memcpy.src;
172    lpaddr_t dst = setup->args.memcpy.dst;
173    size_t bytes, max_xfer_size = dma_channel_get_max_xfer_size(chan);
174    do {
175        desc = dma_ring_get_next_desc(ring);
176
177        if (!req->desc_head) {
178            req->desc_head = desc;
179        }
180        if (length <= max_xfer_size) {
181            /* the last one */
182            bytes = length;
183            req->desc_tail = desc;
184
185            ioat_dma_desc_ctrl_fence_insert(ctrl, setup->args.memcpy.ctrl_fence);
186            ioat_dma_desc_ctrl_int_en_insert(ctrl, setup->args.memcpy.ctrl_intr);\
187            ioat_dma_desc_ctrl_compl_write_insert(ctrl, 0x1);
188        } else {
189            bytes = max_xfer_size;
190        }
191
192        ioat_dma_desc_fill_memcpy(desc, src, dst, bytes, ctrl);
193        dma_desc_set_request(desc, NULL);
194
195        length -= bytes;
196        src += bytes;
197        dst += bytes;
198    } while (length > 0);
199
200    req->common.setup = *setup;
201
202    if (id) {
203        *id = req->common.id;
204    }
205    /* set the request pointer in the last descriptor */
206    dma_desc_set_request(desc, &req->common);
207
208    assert(req->desc_tail);
209    assert(dma_desc_get_request(req->desc_tail));
210
211    return ioat_dma_channel_submit_request(ioat_chan, req);
212}
213
214/**
215 * \brief issues a memcpy request to a channel of the given device
216 *
217 * \param dev   IOAT DMA device
218 * \param setup request setup information
219 * \param id    returns the generated request id
220 *
221 * \returns SYS_ERR_OK on success
222 *          errval on failure
223 */
224errval_t ioat_dma_request_memcpy(struct dma_device *dev,
225                                 struct dma_req_setup *setup,
226                                 dma_req_id_t *id)
227{
228    struct dma_channel *chan = dma_device_get_channel(dev);
229    return ioat_dma_request_memcpy_chan(chan, setup, id);
230}
231
/**
 * \brief issues a memset request to the given channel
 *
 * \param chan  IOAT DMA channel
 * \param setup request setup information
 * \param id    returns the generated request id
 *
 * \returns SYS_ERR_OK on success
 *          errval on failure
 */
errval_t ioat_dma_request_memset_chan(struct dma_channel *chan,
                                      struct dma_req_setup *setup,
                                      dma_req_id_t *id)
{
    assert(chan->device->type == DMA_DEV_TYPE_IOAT);

    struct ioat_dma_channel *ioat_chan = (struct ioat_dma_channel *) chan;

    uint32_t num_desc = req_num_desc_needed(ioat_chan, setup->args.memset.bytes);

    IOATREQ_DEBUG("DMA Memset request: [0x%016lx]->[0x%016lx] of %lu bytes (%u desc)\n",
                  setup->args.memset.src, setup->args.memset.dst,
                  setup->args.memset.bytes, num_desc);

    struct dma_ring *ring = ioat_dma_channel_get_ring(ioat_chan);

    /* check ring capacity up front so we never build a partial request */
    if (num_desc > dma_ring_get_space(ring)) {
        IOATREQ_DEBUG("Too less space in ring: %u / %u\n", num_desc,
                      dma_ring_get_space(ring));
        return DMA_ERR_NO_DESCRIPTORS;
    }

    struct ioat_dma_request *req = request_alloc();
    if (req == NULL) {
        IOATREQ_DEBUG("No request descriptors for holding request data\n");
        return DMA_ERR_NO_REQUESTS;
    }

    dma_request_common_init(&req->common, chan, setup->type);

    ioat_dma_desc_ctrl_array_t ctrl = {
        0
    };

    /* split the fill into max_xfer_size chunks, one ring descriptor each;
     * the fill value stays constant while the destination advances */
    struct dma_descriptor *desc;
    size_t length = setup->args.memset.bytes;
    lpaddr_t src_data = setup->args.memset.val;
    lpaddr_t dst = setup->args.memset.dst;
    size_t bytes, max_xfer_size = dma_channel_get_max_xfer_size(chan);
    do {
        desc = dma_ring_get_next_desc(ring);

        if (!req->desc_head) {
            req->desc_head = desc;
        }
        if (length <= max_xfer_size) {
            /* the last descriptor: set fence/interrupt/completion-write bits */
            bytes = length;
            req->desc_tail = desc;

            ioat_dma_desc_ctrl_fence_insert(ctrl, setup->args.memset.ctrl_fence);
            ioat_dma_desc_ctrl_int_en_insert(ctrl, setup->args.memset.ctrl_intr);
            ioat_dma_desc_ctrl_compl_write_insert(ctrl, 0x1);
        } else {
            bytes = max_xfer_size;
        }

        ioat_dma_desc_fill_memset(desc, src_data, dst, bytes, ctrl);
        dma_desc_set_request(desc, NULL);

        length -= bytes;
        dst += bytes;
    } while (length > 0);

    req->common.setup = *setup;

    if (id) {
        *id = req->common.id;
    }
    /* set the request pointer in the last descriptor */
    dma_desc_set_request(desc, &req->common);

    assert(req->desc_tail);
    assert(dma_desc_get_request(req->desc_tail));

    return ioat_dma_channel_submit_request(ioat_chan, req);
}
319
320/**
321 * \brief issues a memset request to a channel of the given device
322 *
323 * \param dev   IOAT DMA device
324 * \param setup request setup information
325 * \param id    returns the generated request id
326 *
327 * \returns SYS_ERR_OK on success
328 *          errval on failure
329 */
330errval_t ioat_dma_request_memset(struct dma_device *dev,
331                                 struct dma_req_setup *setup,
332                                 dma_req_id_t *id)
333{
334    struct dma_channel *chan = dma_device_get_channel(dev);
335    return ioat_dma_request_memset_chan(chan, setup, id);
336}
337
/**
 * \brief issues a NOP / NULL descriptor request on the given channel
 *
 * \param chan  IOAT DMA channel
 *
 * Fills the next ring descriptor with a NOP directly; no request
 * structure is allocated and nothing is returned.
 */
void ioat_dma_request_nop_chan(struct ioat_dma_channel *chan)
{

    struct dma_ring *ring = ioat_dma_channel_get_ring(chan);
    assert(ring);

    struct dma_descriptor *desc = dma_ring_get_next_desc(ring);
    assert(desc);
    IOATREQ_DEBUG("New DMA NOP request: descriptor=%p\n", desc);

    ioat_dma_desc_fill_nop(desc);
}
359
/**
 * \brief issues a NOP / NULL descriptor request on the given device
 *
 * \param dev   IOAT DMA device
 *
 * Picks a channel of the device and places a NOP descriptor on it.
 */
inline void ioat_dma_request_nop(struct ioat_dma_device *dev)
{
    struct dma_device *generic = (struct dma_device *) dev;
    struct ioat_dma_channel *chan =
        (struct ioat_dma_channel *) dma_device_get_channel(generic);

    ioat_dma_request_nop_chan(chan);
}
376