/*
 * Copyright (c) 2007-2011, 2017, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */

#ifndef E10K_QUEUE_H_
#define E10K_QUEUE_H_

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <net_interfaces/flags.h>

#include <net_interfaces/net_interfaces.h>
#include <devif/queue_interface.h>
#include <devif/queue_interface_backend.h>
#include <devif/backends/net/e10k_devif.h>
#include <dev/e10k_dev.h>
#include <dev/e10k_q_dev.h>
#include <machine/atomic.h>

//#define BENCH_QUEUE 1

#ifdef BENCH_QUEUE
#define BENCH_SIZE 100000
#include <bench/bench.h>
#endif

struct e10k_queue_ops {
    errval_t (*update_txtail)(struct e10k_queue*, size_t);
    errval_t (*update_rxtail)(struct e10k_queue*, size_t);
};

/**
 * Context structure for RX descriptors. This is needed to implement RSC, since
 * we need to be able to chain buffers together. */
struct e10k_queue_rxctx {
    struct devq_buf         buf;
    struct e10k_queue_rxctx *previous;
    bool                    used;
};


struct region_entry {
    uint32_t rid;
    struct dmem mem;
    struct region_entry* next;
};

struct e10k_queue {
    struct devq q;

#ifdef BENCH_QUEUE
    struct bench_ctl en_tx;
    struct bench_ctl en_rx;
    struct bench_ctl deq_rx;
    struct bench_ctl deq_tx;
#endif

    // registers
    void* d;
    struct capref                   regs;
    struct capref                   filter_ep; // EP to add filter
    struct dmem                     reg_mem;

    // queue info
    bool enabled;
    uint16_t id;
    uint32_t rsbufsz;
    bool use_vf; // use VF for this queue
    bool use_rsc; // Receive Side Coalescing
    bool use_vtd; // Virtual addressing (required for VF)
    bool use_rxctx; // track RX buffers in per-descriptor context structs
    bool use_txhwb; // use TX head writeback
    bool use_msix;
    size_t rxbufsz;
    uint8_t pci_function;
    uint64_t mac;

    // registered regions
    struct region_entry* regions;

    // interrupt
    bool use_irq;
    // callback
    e10k_event_cb_t cb;


    // memory caps
    struct capref                   rx_frame;
    struct capref                   tx_frame;
    struct capref                   txhwb_frame;
    struct dmem                     tx;
    struct dmem                     txhwb;
    struct dmem                     rx;
    size_t rx_ring_size;
    size_t tx_ring_size;

    // vf state
    struct vf_state* vf;

    // Communication to PF
    struct e10k_vf_binding *binding;
    bool bound;

    // FIXME: Look for appropriate type for the _head/tail/size fields
    e10k_q_tdesc_adv_wb_array_t*    tx_ring;
    struct devq_buf*                tx_bufs;
    bool*                           tx_isctx;
    size_t                          tx_head;
    size_t                          tx_tail, tx_lasttail;
    size_t                          tx_size;
    void*                           tx_hwb;

    e10k_q_rdesc_adv_wb_array_t*    rx_ring;
    struct devq_buf*                rx_bufs;
    struct e10k_queue_rxctx*        rx_context;
    size_t                          rx_head;
    size_t                          rx_tail;
    size_t                          rx_size;

    struct e10k_queue_ops           ops;
    void*                           opaque;

};

typedef struct e10k_queue e10k_queue_t;

// Does not initialize the queue struct itself
static inline void e10k_queue_init(struct e10k_queue* q, void* tx, size_t tx_size,
                                   uint32_t* tx_hwb, void* rx, size_t rx_size,
                                   struct e10k_queue_ops* ops)
{
    q->tx_ring = tx;
    q->tx_bufs = calloc(tx_size, sizeof(struct devq_buf));
    q->tx_isctx = calloc(tx_size, sizeof(bool));
    q->tx_head = 0;
    q->tx_tail = q->tx_lasttail = 0;
    q->tx_size = tx_size;
    q->tx_hwb = tx_hwb;

    q->rx_ring = rx;
    q->rx_bufs = calloc(rx_size, sizeof(struct devq_buf));
    q->rx_context = calloc(rx_size, sizeof(*q->rx_context));
    q->rx_head = 0;
    q->rx_tail = 0;
    q->rx_size = rx_size;

    q->ops = *ops;

    // Initialize ring memory with zero
    memset(tx, 0, tx_size * e10k_q_tdesc_adv_wb_size);
    memset(rx, 0, rx_size * e10k_q_rdesc_adv_wb_size);
    memset(q->tx_isctx, 0, tx_size*sizeof(bool));
    memset(q->rx_context, 0, rx_size*sizeof(*q->rx_context));

#ifdef BENCH_QUEUE
    q->en_tx.mode = BENCH_MODE_FIXEDRUNS;
    q->en_tx.result_dimensions = 1;
    q->en_tx.min_runs = BENCH_SIZE;
    q->en_tx.data = calloc(q->en_tx.min_runs * q->en_tx.result_dimensions,
                       sizeof(*q->en_tx.data));
    assert(q->en_tx.data != NULL);

    q->en_rx.mode = BENCH_MODE_FIXEDRUNS;
    q->en_rx.result_dimensions = 1;
    q->en_rx.min_runs = BENCH_SIZE;
    q->en_rx.data = calloc(q->en_rx.min_runs * q->en_rx.result_dimensions,
                       sizeof(*q->en_rx.data));
    assert(q->en_rx.data != NULL);

    q->deq_rx.mode = BENCH_MODE_FIXEDRUNS;
    q->deq_rx.result_dimensions = 1;
    q->deq_rx.min_runs = BENCH_SIZE;
    q->deq_rx.data = calloc(q->deq_rx.min_runs * q->deq_rx.result_dimensions,
                       sizeof(*q->deq_rx.data));
    assert(q->deq_rx.data != NULL);

    q->deq_tx.mode = BENCH_MODE_FIXEDRUNS;
    q->deq_tx.result_dimensions = 1;
    q->deq_tx.min_runs = BENCH_SIZE;
    q->deq_tx.data = calloc(q->deq_tx.min_runs * q->deq_tx.result_dimensions,
                       sizeof(*q->deq_tx.data));
    assert(q->deq_tx.data != NULL);
#endif
}
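
/*
 * Usage sketch (hypothetical caller): the ring memory must be DMA-able and
 * sized for the advanced descriptor format. `alloc_dma_mem` and the two tail
 * writers are stand-ins for driver-provided functions, not part of this file:
 *
 *   static struct e10k_queue_ops ops = {
 *       .update_txtail = my_update_txtail,   // writes the TX tail register
 *       .update_rxtail = my_update_rxtail,   // writes the RX tail register
 *   };
 *   void *tx = alloc_dma_mem(512 * e10k_q_tdesc_adv_wb_size);
 *   void *rx = alloc_dma_mem(512 * e10k_q_rdesc_adv_wb_size);
 *   e10k_queue_init(&q, tx, 512, NULL, rx, 512, &ops); // NULL: no TX HWB
 */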

static inline int e10k_queue_add_txcontext(e10k_queue_t* q, uint8_t idx,
                                           uint8_t maclen, uint16_t iplen,
                                           uint8_t l4len, e10k_q_l4_type_t l4t)
{
    e10k_q_tdesc_adv_ctx_t d;
    size_t tail = q->tx_tail;

    memset(q->tx_ring[tail], 0, e10k_q_tdesc_adv_wb_size);

    // TODO: Check if there is room in the queue
    q->tx_isctx[tail] = true;
    d = q->tx_ring[tail];

    e10k_q_tdesc_adv_rd_dtyp_insert(d, e10k_q_adv_ctx);
    e10k_q_tdesc_adv_rd_dext_insert(d, 1);

    /* e10k_q_tdesc_adv_ctx_bcntlen_insert(d, 0x3f); */
    e10k_q_tdesc_adv_ctx_idx_insert(d, idx);
    e10k_q_tdesc_adv_ctx_maclen_insert(d, maclen);
    e10k_q_tdesc_adv_ctx_iplen_insert(d, iplen);
    e10k_q_tdesc_adv_ctx_ipv4_insert(d, 1);
    e10k_q_tdesc_adv_ctx_l4len_insert(d, l4len);
    e10k_q_tdesc_adv_ctx_l4t_insert(d, l4t);

    q->tx_lasttail = q->tx_tail;
    q->tx_tail = (tail + 1) % q->tx_size;
    return 0;
}
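
/*
 * Offload sketch (hypothetical values): a context descriptor carries the
 * header lengths the card needs for checksum insertion, and a later data
 * descriptor references it by index (the `ctx` argument below). For a
 * TCP/IPv4 frame with a 14-byte Ethernet, 20-byte IP and 20-byte TCP header:
 *
 *   e10k_queue_add_txcontext(q, 0, 14, 20, 20, l4t_tcp);
 *   e10k_queue_add_txbuf_ctx(q, phys, rid, off, len, 0, len, flags,
 *                            len, 0, true, true);
 *
 * where `l4t_tcp` stands for the TCP value of e10k_q_l4_type_t as defined in
 * the generated e10k_q_dev.h.
 */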

// len is the length of this descriptor's buffer only; length is the total
// packet length
static inline int e10k_queue_add_txbuf_ctx(e10k_queue_t* q, lpaddr_t phys,
                                           regionid_t rid,
                                           genoffset_t offset,
                                           genoffset_t length,
                                           genoffset_t valid_data,
                                           genoffset_t valid_length,
                                           uint64_t flags,
                                           size_t len, uint8_t ctx,
                                           bool ixsm, bool txsm)
{
    e10k_q_tdesc_adv_rd_t d;
    size_t tail = q->tx_tail;

    memset(q->tx_ring[tail], 0, e10k_q_tdesc_adv_wb_size);

    // TODO: Check if there is room in the queue
    q->tx_isctx[tail] = false;
    struct devq_buf* buf = &q->tx_bufs[tail];
    buf->rid = rid;
    buf->offset = offset;
    buf->length = length;
    buf->valid_data = valid_data;
    buf->valid_length = valid_length;
    buf->flags = flags;
    d = q->tx_ring[tail];

    e10k_q_tdesc_adv_rd_buffer_insert(d, phys);
    e10k_q_tdesc_adv_rd_dtalen_insert(d, valid_length);
    e10k_q_tdesc_adv_rd_paylen_insert(d, valid_length);

    // TODO: use flags of devq interface to set eop and rs
    e10k_q_tdesc_adv_rd_dtyp_insert(d, e10k_q_adv_data);
    e10k_q_tdesc_adv_rd_dext_insert(d, 1);
    e10k_q_tdesc_adv_rd_rs_insert(d, 1);
    e10k_q_tdesc_adv_rd_ifcs_insert(d, 1);
    e10k_q_tdesc_adv_rd_eop_insert(d, 1);

    if (ctx != (uint8_t)-1) {
        e10k_q_tdesc_adv_rd_idx_insert(d, ctx);
        e10k_q_tdesc_adv_rd_cc_insert(d, 1);
        e10k_q_tdesc_adv_rd_ixsm_insert(d, ixsm);
        e10k_q_tdesc_adv_rd_txsm_insert(d, txsm);
    }

    q->tx_lasttail = q->tx_tail;
    q->tx_tail = (tail + 1) % q->tx_size;
    return 0;
}


static inline int e10k_queue_add_txbuf_legacy(e10k_queue_t* q, lpaddr_t phys,
                                       regionid_t rid,
                                       genoffset_t offset,
                                       genoffset_t length,
                                       genoffset_t valid_data,
                                       genoffset_t valid_length,
                                       uint64_t flags,
                                       size_t len)
{
    size_t tail = q->tx_tail;

    struct devq_buf* buf = &q->tx_bufs[tail];
    buf->rid = rid;
    buf->offset = offset;
    buf->length = length;
    buf->valid_data = valid_data;
    buf->valid_length = valid_length;
    buf->flags = flags;

#ifdef BENCH_QUEUE
    uint64_t start, end;
    start = rdtscp();
#endif

    e10k_q_tdesc_legacy_t d;
    d = q->tx_ring[tail];

    e10k_q_tdesc_legacy_buffer_insert(d, phys);
    e10k_q_tdesc_legacy_length_insert(d, len);
    // Only request status writeback (RS) and mark end of packet (EOP) on the
    // last descriptor of a packet
    bool last = flags & NETIF_TXFLAG_LAST;
    e10k_q_tdesc_legacy_rs_insert(d, last);
    e10k_q_tdesc_legacy_ifcs_insert(d, 1);
    e10k_q_tdesc_legacy_eop_insert(d, last);

#ifdef BENCH_QUEUE
    end = rdtscp();
    uint64_t res = end - start;
    bench_ctl_add_run(&q->en_tx, &res);
#endif
    __sync_synchronize();

    q->tx_tail = (tail + 1) % q->tx_size;
    return 0;
}

static inline int e10k_queue_add_txbuf(e10k_queue_t* q, lpaddr_t phys,
                                       regionid_t rid,
                                       genoffset_t offset,
                                       genoffset_t length,
                                       genoffset_t valid_data,
                                       genoffset_t valid_length,
                                       uint64_t flags,
                                       size_t len)
{
    if (!q->use_vf) {
    /*
        return e10k_queue_add_txbuf_legacy(q, phys, rid, offset, length,
                                    valid_data, valid_length,
                                    flags, len);
    */
        return e10k_queue_add_txbuf_ctx(q, phys, rid, offset, length,
                                    valid_data, valid_length,
                                    flags, len, -1, false, false);
    } else {
        // TODO: try to generate checksums
        return e10k_queue_add_txbuf_ctx(q, phys, rid, offset, length,
                                    valid_data, valid_length,
                                    flags, len, -1, false, false);
    }
}
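
/*
 * TX path sketch (hypothetical caller): enqueue a packet's buffers, marking
 * the final one, then publish the new tail so the card starts fetching
 * descriptors:
 *
 *   e10k_queue_add_txbuf(q, phys, rid, off, len, 0, len,
 *                        NETIF_TXFLAG_LAST, len);
 *   e10k_queue_bump_txtail(q);
 */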

/*
 * Reclaim one packet from the TX queue once it has been handled by the
 * card. Call multiple times to reclaim more packets.
 *
 * \param q       Queue to check
 * \param rid, offset, length, valid_data, valid_length, flags
 *                Filled in with the reclaimed buffer's metadata
 *
 * \return true if a packet could be reclaimed, otherwise false
 */
static inline bool e10k_queue_get_txbuf_avd(e10k_queue_t* q, regionid_t* rid,
                                        genoffset_t* offset,
                                        genoffset_t* length,
                                        genoffset_t* valid_data,
                                        genoffset_t* valid_length,
                                        uint64_t* flags)
{
    /* e10k_q_tdesc_adv_wb_t d; */
    size_t head = q->tx_head;
    bool result = false;

    // If HWB is enabled, we can skip reading the descriptor if nothing happened
    if (q->tx_hwb && *((uint32_t*)q->tx_hwb) == head) {
        return false;
    }

    if (!q->tx_hwb) {
        size_t idx = head;

        // Skip over context and non-EOP descriptors
        while (idx != q->tx_tail && q->tx_isctx[idx] &&
               !e10k_q_tdesc_adv_wb_dd_extract(q->tx_ring[idx])) {
            idx = (idx + 1) % q->tx_size;
        }

        if (idx == q->tx_tail) {
            return false;
        }
    }

    // The last packet got written out, now go reclaim from the head pointer.
    if (!q->tx_isctx[head]) {
        *rid = q->tx_bufs[head].rid;
        *offset = q->tx_bufs[head].offset;
        *length = q->tx_bufs[head].length;
        *valid_data = q->tx_bufs[head].valid_data;
        *valid_length = q->tx_bufs[head].valid_length;
        *flags = q->tx_bufs[head].flags;

        result = true;
    }

    /* memset(q->tx_ring[head], 0, e10k_q_tdesc_adv_wb_size); */
    q->tx_head = (head + 1) % q->tx_size;
    return result;
}

static inline bool e10k_queue_get_txbuf_legacy(e10k_queue_t* q, regionid_t* rid,
                                        genoffset_t* offset,
                                        genoffset_t* length,
                                        genoffset_t* valid_data,
                                        genoffset_t* valid_length,
                                        uint64_t* flags)
{
#ifdef BENCH_QUEUE
    uint64_t start, end;
    start = rdtscp();
#endif

    e10k_q_tdesc_legacy_t d;
    size_t head = q->tx_head;

    d = q->tx_ring[head];
    if (e10k_q_tdesc_legacy_dd_extract(d)) {
        *rid = q->tx_bufs[head].rid;
        *offset = q->tx_bufs[head].offset;
        *length = q->tx_bufs[head].length;
        *valid_data = q->tx_bufs[head].valid_data;
        *valid_length = q->tx_bufs[head].valid_length;
        *flags = q->tx_bufs[head].flags;
        memset(d, 0, e10k_q_tdesc_legacy_size);

        q->tx_head = (head + 1) % q->tx_size;

#ifdef BENCH_QUEUE
        end = rdtscp();
        uint64_t res = end - start;
        bench_ctl_add_run(&q->deq_tx, &res);
#endif
        return true;
    }

    if (q->tx_hwb) {
        head = *((uint32_t*) q->tx_hwb);
        if (q->tx_head == head) {
            return false;
        } else {
            *rid = q->tx_bufs[q->tx_head].rid;
            *offset = q->tx_bufs[q->tx_head].offset;
            *length = q->tx_bufs[q->tx_head].length;
            *valid_data = q->tx_bufs[q->tx_head].valid_data;
            *valid_length = q->tx_bufs[q->tx_head].valid_length;
            *flags = q->tx_bufs[q->tx_head].flags;
            memset(d, 0, e10k_q_tdesc_legacy_size);

            q->tx_head = (q->tx_head + 1) % q->tx_size;

#ifdef BENCH_QUEUE
            end = rdtscp();
            uint64_t res = end - start;
            bench_ctl_add_run(&q->deq_tx, &res);
#endif
            return true;
        }
    }

    return false;
}

static inline bool e10k_queue_get_txbuf(e10k_queue_t* q, regionid_t* rid,
                                        genoffset_t* offset,
                                        genoffset_t* length,
                                        genoffset_t* valid_data,
                                        genoffset_t* valid_length,
                                        uint64_t* flags)
{
    if (!q->use_vf) {
        /*
        return e10k_queue_get_txbuf_legacy(q, rid, offset, length, valid_data,
                                           valid_length, flags);
        */
        return e10k_queue_get_txbuf_avd(q, rid, offset, length, valid_data,
                                        valid_length, flags);
    } else {
        return e10k_queue_get_txbuf_avd(q, rid, offset, length, valid_data,
                                        valid_length, flags);
    }
}

static inline errval_t e10k_queue_bump_txtail(e10k_queue_t* q)
{
    return q->ops.update_txtail(q, q->tx_tail);
}

static inline size_t e10k_queue_free_txslots(e10k_queue_t* q)
{
    size_t head = q->tx_head;
    size_t tail = q->tx_tail;
    size_t size = q->tx_size;

    // One slot is kept unused so that a full ring (tail just behind head) can
    // be distinguished from an empty one (head == tail), hence the -1.
    if (tail >= head) {
        return size - (tail - head) - 1;
    } else {
        return size - (tail + size - head) - 1;
    }
}
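
/*
 * Worked example for the slot arithmetic above: with size = 8, head = 6 and
 * tail = 2, the occupied slots are 6, 7, 0, 1, i.e. tail + size - head = 4
 * used slots, leaving 8 - 4 - 1 = 3 free after reserving the one empty slot.
 */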

static inline int e10k_queue_add_rxbuf_adv(e10k_queue_t* q,
                                       lpaddr_t phys,
                                       regionid_t rid,
                                       genoffset_t offset,
                                       genoffset_t length,
                                       genoffset_t valid_data,
                                       genoffset_t valid_length,
                                       uint64_t flags)
{
    size_t tail = q->rx_tail;
    struct e10k_queue_rxctx *ctx;

    ctx = q->rx_context + tail;
    if (ctx->used) {
        printf("e10k: RX context of slot %zu already in use!\n", tail);
        return 1;
    }

    // TODO: Check if there is room in the queue
    ctx->buf.rid = rid;
    ctx->buf.offset = offset;
    ctx->buf.length = length;
    ctx->buf.valid_data = valid_data;
    ctx->buf.valid_length = valid_length;
    ctx->buf.flags = flags;
    ctx->used = true;

#ifdef BENCH_QUEUE
    uint64_t start, end;
    start = rdtscp();
#endif
    e10k_q_rdesc_adv_rd_t d;
    d = (e10k_q_rdesc_adv_rd_t) q->rx_ring[tail];

    e10k_q_rdesc_adv_rd_buffer_insert(d, phys);
    // TODO: Does this make sense for RSC?
    e10k_q_rdesc_adv_rd_hdr_buffer_insert(d, 0);

    __sync_synchronize();

    q->rx_tail = (tail + 1) % q->rx_size;

#ifdef BENCH_QUEUE
    end = rdtscp();
    uint64_t res = end - start;
    bench_ctl_add_run(&q->en_rx, &res);
#endif

    return 0;
}

static inline int e10k_queue_add_rxbuf_legacy(e10k_queue_t* q,
                                       lpaddr_t phys,
                                       regionid_t rid,
                                       genoffset_t offset,
                                       genoffset_t length,
                                       genoffset_t valid_data,
                                       genoffset_t valid_length,
                                       uint64_t flags)
{
    size_t tail = q->rx_tail;

    struct devq_buf* buf = &q->rx_bufs[tail];
    buf->rid = rid;
    buf->offset = offset;
    buf->length = length;
    buf->valid_data = valid_data;
    buf->valid_length = valid_length;
    buf->flags = flags;

#ifdef BENCH_QUEUE
    uint64_t start, end;
    start = rdtscp();
#endif

    e10k_q_rdesc_legacy_t d;

    d = q->rx_ring[tail];
    e10k_q_rdesc_legacy_buffer_insert(d, phys);

    __sync_synchronize();

    q->rx_tail = (tail + 1) % q->rx_size;

#ifdef BENCH_QUEUE
    end = rdtscp();
    uint64_t res = end - start;
    bench_ctl_add_run(&q->en_rx, &res);
#endif
    return 0;
}

static inline int e10k_queue_add_rxbuf(e10k_queue_t* q,
                                       lpaddr_t phys,
                                       regionid_t rid,
                                       genoffset_t offset,
                                       genoffset_t length,
                                       genoffset_t valid_data,
                                       genoffset_t valid_length,
                                       uint64_t flags)
{
    if (!q->use_vf) {
    /*
        return e10k_queue_add_rxbuf_legacy(q, phys, rid, offset, length, valid_data,
                                           valid_length, flags);
    */
        return e10k_queue_add_rxbuf_adv(q, phys, rid, offset, length, valid_data,
                                        valid_length, flags);
    } else {
        return e10k_queue_add_rxbuf_adv(q, phys, rid, offset, length, valid_data,
                                        valid_length, flags);
    }
}

static inline uint64_t e10k_queue_convert_rxflags(e10k_q_rdesc_adv_wb_t d)
{
    uint64_t flags = 0;

    // IP checksum
    if (e10k_q_rdesc_adv_wb_ipcs_extract(d)) {
        flags |= NETIF_RXFLAG_IPCHECKSUM;
        if (!e10k_q_rdesc_adv_wb_ipe_extract(d)) {
            flags |= NETIF_RXFLAG_IPCHECKSUM_GOOD;
        }
    }

    // L4 checksum
    if (e10k_q_rdesc_adv_wb_l4i_extract(d)) {
        flags |= NETIF_RXFLAG_L4CHECKSUM;
        if (!e10k_q_rdesc_adv_wb_l4e_extract(d)) {
            flags |= NETIF_RXFLAG_L4CHECKSUM_GOOD;
        }
    }

    // Packet type
    if (e10k_q_rdesc_adv_wb_pt_ipv4_extract(d)) {
        flags |= NETIF_RXFLAG_TYPE_IPV4;
    }
    if (e10k_q_rdesc_adv_wb_pt_tcp_extract(d)) {
        flags |= NETIF_RXFLAG_TYPE_TCP;
    }
    if (e10k_q_rdesc_adv_wb_pt_udp_extract(d)) {
        flags |= NETIF_RXFLAG_TYPE_UDP;
    }

    return flags;
}
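
/*
 * Consumer-side sketch (hypothetical): a caller receiving these flags can
 * distinguish "checksum not checked" from "checked and failed":
 *
 *   if ((flags & NETIF_RXFLAG_L4CHECKSUM) &&
 *       !(flags & NETIF_RXFLAG_L4CHECKSUM_GOOD)) {
 *       // hardware validated the L4 checksum and it was wrong:
 *       // drop the packet or re-verify in software
 *   }
 */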

static inline bool e10k_queue_get_rxbuf_avd(e10k_queue_t* q, regionid_t* rid,
                                        genoffset_t* offset,
                                        genoffset_t* length,
                                        genoffset_t* valid_data,
                                        genoffset_t* valid_length,
                                        uint64_t* flags,
                                        int* last)
{
    e10k_q_rdesc_adv_wb_t d;
    size_t head = q->rx_head;
    struct e10k_queue_rxctx *ctx;

    d = q->rx_ring[head];
    ctx = q->rx_context + head;

    if (!e10k_q_rdesc_adv_wb_dd_extract(d)) {
        return false;
    }

    // Read barrier, as in the Linux driver, to make sure no other descriptor
    // field is read before the DD bit. TODO: verify that this is sufficient
    rmb();

    // TODO: add code for RSC

    *flags = ctx->buf.flags;
    // Set flags if this is a descriptor with EOP
    // TODO: with multi-part packets, we want these flags on the first packet
    if (e10k_q_rdesc_adv_wb_eop_extract(d)) {
        *flags = *flags | e10k_queue_convert_rxflags(d);
    }

    // TODO: Extract status (okay/error)
    *last = e10k_q_rdesc_adv_wb_eop_extract(d);
    *valid_length = e10k_q_rdesc_adv_wb_pkt_len_extract(d);
    *rid = ctx->buf.rid;
    *offset = ctx->buf.offset;
    *length = ctx->buf.length;
    *valid_data = ctx->buf.valid_data;

    ctx->used = false;
    memset(d, 0, e10k_q_rdesc_adv_wb_size);

    q->rx_head = (head + 1) % q->rx_size;
    return true;
}


static inline bool e10k_queue_get_rxbuf_legacy(e10k_queue_t* q, regionid_t* rid,
                                        genoffset_t* offset,
                                        genoffset_t* length,
                                        genoffset_t* valid_data,
                                        genoffset_t* valid_length,
                                        uint64_t* flags,
                                        int* last)
{
#ifdef BENCH_QUEUE
    uint64_t start, end;
    start = rdtscp();
#endif

    e10k_q_rdesc_legacy_t d;
    size_t head = q->rx_head;
    struct devq_buf* buf = &q->rx_bufs[head];

    d = q->rx_ring[head];
    if (e10k_q_rdesc_legacy_dd_extract(d)) {
        *last = e10k_q_rdesc_legacy_eop_extract(d);
        *valid_length = e10k_q_rdesc_legacy_length_extract(d);

        *rid = buf->rid;
        *offset = buf->offset;
        *length = buf->length;
        *valid_data = buf->valid_data;
        *flags = buf->flags;

        memset(d, 0, e10k_q_rdesc_legacy_size);

        q->rx_head = (head + 1) % q->rx_size;
#ifdef BENCH_QUEUE
        end = rdtscp();
        uint64_t res = end - start;
        bench_ctl_add_run(&q->deq_rx, &res);
#endif
        return true;
    } else {
        return false;
    }
}


static inline bool e10k_queue_get_rxbuf(e10k_queue_t* q, regionid_t* rid,
                                        genoffset_t* offset,
                                        genoffset_t* length,
                                        genoffset_t* valid_data,
                                        genoffset_t* valid_length,
                                        uint64_t* flags,
                                        int* last)
{
    if (!q->use_vf) {
    /*
       return e10k_queue_get_rxbuf_legacy(q, rid, offset, length, valid_data, valid_length,
                                    flags, last);
    */
       return e10k_queue_get_rxbuf_avd(q, rid, offset, length, valid_data, valid_length,
                                       flags, last);
    } else {
       return e10k_queue_get_rxbuf_avd(q, rid, offset, length, valid_data, valid_length,
                                       flags, last);
    }
}

static inline errval_t e10k_queue_bump_rxtail(e10k_queue_t* q)
{
    return q->ops.update_rxtail(q, q->rx_tail);
}

static inline size_t e10k_queue_free_rxslots(e10k_queue_t* q)
{
    size_t head = q->rx_head;
    size_t tail = q->rx_tail;
    size_t size = q->rx_size;

    // As for TX: one slot stays unused to tell a full ring from an empty one.
    if (tail >= head) {
        return size - (tail - head) - 1;
    } else {
        return size - (tail + size - head) - 1;
    }
}
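
/*
 * Polling-loop sketch (hypothetical caller): drain completed RX buffers, then
 * refill the ring and publish the new tail in one batch. `hand_up_stack` and
 * `next_spare_buffer` are stand-ins for the caller's own logic:
 *
 *   lpaddr_t phys; regionid_t rid; int last;
 *   genoffset_t off, len, vd, vl; uint64_t fl;
 *   while (e10k_queue_get_rxbuf(q, &rid, &off, &len, &vd, &vl, &fl, &last)) {
 *       hand_up_stack(rid, off, vl, fl, last); // packet complete if last != 0
 *   }
 *   while (e10k_queue_free_rxslots(q) > 0 &&
 *          next_spare_buffer(&phys, &rid, &off, &len)) {
 *       e10k_queue_add_rxbuf(q, phys, rid, off, len, 0, 0, 0);
 *   }
 *   e10k_queue_bump_rxtail(q);
 */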

static inline struct bench_ctl* e10k_queue_get_benchmark_data(e10k_queue_t* q, uint8_t type)
{
#ifdef BENCH_QUEUE
    switch (type) {
        case 0:
            return &q->en_rx;
        case 1:
            return &q->en_tx;
        case 2:
            return &q->deq_rx;
        case 3:
            return &q->deq_tx;
        default:
            return NULL;
    }
#endif
    return NULL;
}

#endif // ndef E10K_QUEUE_H_