1/*
2 * Copyright (c) 2014, ETH Zurich. All rights reserved.
3 *
4 * This file is distributed under the terms in the attached LICENSE file.
5 * If you do not find this file, copies can be found by writing to:
6 * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
7 */
#include <barrelfish/barrelfish.h>

#include <string.h>

#include <xeon_phi/xeon_phi_dma_internal.h>
#include <xeon_phi/xeon_phi_dma_device_internal.h>
#include <xeon_phi/xeon_phi_dma_channel_internal.h>
#include <xeon_phi/xeon_phi_dma_request_internal.h>
#include <dma_ring_internal.h>
#include <xeon_phi/xeon_phi_dma_descriptors_internal.h>

#include <debug.h>
19
20/**
21 * represens the Xeon Phi specific DMA requests
22 */
23struct xeon_phi_dma_request
24{
25    struct dma_request common;
26    struct dma_descriptor *desc_head;
27    struct dma_descriptor *desc_tail;
28};
29
30/*
31 * ---------------------------------------------------------------------------
32 * Request Management
33 * ---------------------------------------------------------------------------
34 */
35
/// caches allocated request structures which are no longer in use,
/// singly linked through common.next (LIFO; see request_alloc/request_free)
static struct dma_request *req_free_list = NULL;
38
39/**
40 * \brief allocates a Xeon Phi DMA request structure
41 *
42 * \returns Xeon Phi DMA request
43 *          NULL on failure
44 */
45static struct xeon_phi_dma_request *request_alloc(void)
46{
47    struct xeon_phi_dma_request *ret;
48
49    if (req_free_list) {
50        ret = (struct xeon_phi_dma_request *) req_free_list;
51        req_free_list = ret->common.next;
52
53        DMAREQ_DEBUG("meta: reusing request %p. freelist:%p\n", ret,
54                     req_free_list);
55
56        return ret;
57    }
58    return calloc(1, sizeof(*ret));
59}
60
61/**
62 * \brief frees up the used DMA request structure
63 *
64 * \param req   DMA request to be freed
65 */
66static void request_free(struct xeon_phi_dma_request *req)
67{
68    DMAREQ_DEBUG("meta: freeing request %p.\n", req);
69    req->common.next = req_free_list;
70    req_free_list = &req->common;
71}
72
73/*
74 * ---------------------------------------------------------------------------
75 * Helper Functions
76 * ---------------------------------------------------------------------------
77 */
78
/**
 * \brief computes how many hardware descriptors a transfer of the given
 *        size occupies on this channel (ceiling division by the channel's
 *        maximum per-descriptor transfer size)
 *
 * \param chan  Xeon Phi DMA channel the transfer will run on
 * \param bytes total transfer size in bytes
 *
 * \returns number of descriptors required
 */
inline static uint32_t req_num_desc_needed(struct xeon_phi_dma_channel *chan,
                                           size_t bytes)
{
    uint32_t xfer_max =
        dma_channel_get_max_xfer_size((struct dma_channel *) chan);

    return (uint32_t) ((bytes + (xfer_max - 1)) / xfer_max);
}
87
88/*
89 * ===========================================================================
90 * Library Internal Interface
91 * ===========================================================================
92 */
93
94/**
95 * \brief handles the processing of completed DMA requests
96 *
97 * \param req   the DMA request to process
98 *
99 * \returns SYS_ERR_OK on sucess
100 *          errval on failure
101 */
102errval_t xeon_phi_dma_request_process(struct xeon_phi_dma_request *req)
103{
104    errval_t err;
105
106    req->common.state = DMA_REQ_ST_DONE;
107
108    err = dma_request_process(&req->common);
109    if (err_is_fail(err)) {
110        return err;
111    }
112
113    request_free(req);
114
115    return SYS_ERR_OK;
116}
117
118/*
119 * ===========================================================================
120 * Public Interface
121 * ===========================================================================
122 */
123
124/**
125 * \brief issues a memcpy request to the given channel
126 *
127 * \param chan  Xeon Phi DMA channel
128 * \param setup request setup information
129 * \param id    returns the generated request id
130 *
131 * \returns SYS_ERR_OK on success
132 *          errval on failure
133 */
134errval_t xeon_phi_dma_request_memcpy_chan(struct dma_channel *chan,
135                                          struct dma_req_setup *setup,
136                                          dma_req_id_t *id)
137{
138    assert(chan->device->type == DMA_DEV_TYPE_XEON_PHI);
139
140    struct xeon_phi_dma_channel *xchan = (struct xeon_phi_dma_channel *) chan;
141
142    uint32_t num_desc = req_num_desc_needed(xchan, setup->args.memcpy.bytes);
143
144    XPHIREQ_DEBUG("DMA Memcpy request: [0x%016lx]->[0x%016lx] of %lu bytes (%u desc)\n",
145                  setup->args.memcpy.src, setup->args.memcpy.dst,
146                  setup->args.memcpy.bytes, num_desc);
147
148    struct dma_ring *ring = xeon_phi_dma_channel_get_ring(xchan);
149
150    if (num_desc > dma_ring_get_space(ring)) {
151        XPHIREQ_DEBUG("Too less space in ring: %u / %u\n", num_desc,
152                      dma_ring_get_space(ring));
153        return DMA_ERR_NO_DESCRIPTORS;
154    }
155
156    struct xeon_phi_dma_request *req = request_alloc();
157    if (req == NULL) {
158        XPHIREQ_DEBUG("No request descriptors for holding request data\n");
159        return DMA_ERR_NO_REQUESTS;
160    }
161
162    dma_request_common_init(&req->common, chan, setup->type);
163
164    struct dma_descriptor *desc;
165    size_t length = setup->args.memcpy.bytes;
166    lpaddr_t src = setup->args.memcpy.src;
167    lpaddr_t dst = setup->args.memcpy.dst;
168    uint32_t flags = 0;
169    size_t bytes, max_xfer_size = dma_channel_get_max_xfer_size(chan);
170    do {
171        desc = dma_ring_get_next_desc(ring);
172
173        if (!req->desc_head) {
174            req->desc_head = desc;
175        }
176        if (length <= max_xfer_size) {
177            /* the last one */
178            bytes = length;
179            req->desc_tail = desc;
180        } else {
181            bytes = max_xfer_size;
182        }
183        xeon_phi_dma_desc_fill_memcpy(desc, src, dst, bytes, flags);
184        dma_desc_set_request(desc, NULL);
185
186        length -= bytes;
187        src += bytes;
188        dst += bytes;
189    } while (length > 0);
190
191    req->common.setup = *setup;
192
193    if (id) {
194        *id = req->common.id;
195    }
196    /* set the request pointer in the last descriptor */
197    dma_desc_set_request(desc, &req->common);
198
199    assert(req->desc_tail);
200    assert(dma_desc_get_request(req->desc_tail));
201
202    //desc = dma_ring_get_next_desc(ring);
203    //xeon_phi_dma_desc_fill_status(desc, (512UL * 1024 * 1024 * 1024) + xchan->dstat.paddr, 0xFAFAFAFA, 0);
204
205    return xeon_phi_dma_channel_submit_request(xchan, req);
206}
207
208/**
209 * \brief issues a memcpy request to a channel of the given device
210 *
211 * \param dev   Xeon Phi DMA device
212 * \param setup request setup information
213 * \param id    returns the generated request id
214 *
215 * \returns SYS_ERR_OK on success
216 *          errval on failure
217 */
218errval_t xeon_phi_dma_request_memcpy(struct dma_device *dev,
219                                     struct dma_req_setup *setup,
220                                     dma_req_id_t *id)
221{
222    struct dma_channel *chan = dma_device_get_channel(dev);
223    return xeon_phi_dma_request_memcpy_chan(chan, setup, id);
224}
225
226/**
227 * \brief issues a NOP / NULL descriptor request on the given channel
228 *
229 * \param chan  Xeon Phi DMA channel
230 * \param setup request setup information
231 *
232 * \returns SYS_ERR_OK on success
233 *          errval on failure
234 */
235void xeon_phi_dma_request_nop_chan(struct dma_channel *chan)
236{
237    struct xeon_phi_dma_channel *xchan = (struct xeon_phi_dma_channel *)chan;
238    struct dma_ring *ring = xeon_phi_dma_channel_get_ring(xchan);
239    assert(ring);
240
241    struct dma_descriptor *desc = dma_ring_get_next_desc(ring);
242    assert(desc);
243    XPHIREQ_DEBUG("New DMA NOP request: descriptor=%p\n", desc);
244
245    xeon_phi_dma_desc_fill_nop(desc);
246}
247
248/**
249 * \brief issues a NOP / NULL descriptor request on the given device
250 *
251 * \param dev   Xeon Phi DMA device
252 * \param setup request setup information
253 *
254 * \returns SYS_ERR_OK on success
255 *          errval on failure
256 */
257inline void xeon_phi_dma_request_nop(struct dma_device *dev)
258{
259    struct dma_channel *chan = dma_device_get_channel(dev);
260    xeon_phi_dma_request_nop_chan(chan);
261}
262