--- if_re_netmap.h	(259412)
+++ if_re_netmap.h	(260368)
 /*
- * Copyright (C) 2011 Luigi Rizzo. All rights reserved.
+ * Copyright (C) 2011-2014 Luigi Rizzo. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the

--- 8 unchanged lines hidden (view full) ---

  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 /*
- * $FreeBSD: head/sys/dev/netmap/if_re_netmap.h 259412 2013-12-15 08:37:24Z luigi $
+ * $FreeBSD: head/sys/dev/netmap/if_re_netmap.h 260368 2014-01-06 12:53:15Z luigi $
  *
  * netmap support for: re
  *
  * For more details on netmap support please see ixgbe_netmap.h
  */
 
 
 #include <net/netmap.h>

--- 31 unchanged lines hidden (view full) ---

 static int
 re_netmap_txsync(struct netmap_adapter *na, u_int ring_nr, int flags)
 {
 	struct ifnet *ifp = na->ifp;
 	struct netmap_kring *kring = &na->tx_rings[ring_nr];
 	struct netmap_ring *ring = kring->ring;
 	u_int nm_i;	/* index into the netmap ring */
 	u_int nic_i;	/* index into the NIC ring */
-	u_int n, new_slots;
+	u_int n;
 	u_int const lim = kring->nkr_num_slots - 1;
-	u_int const cur = nm_txsync_prologue(kring, &new_slots);
+	u_int const head = kring->rhead;
 
 	/* device-specific */
 	struct rl_softc *sc = ifp->if_softc;
 	struct rl_txdesc *txd = sc->rl_ldata.rl_tx_desc;
 
-	if (cur > lim)	/* error checking in nm_txsync_prologue() */
-		return netmap_ring_reinit(kring);
-
 	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
 	    sc->rl_ldata.rl_tx_list_map,
 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); // XXX extra postwrite ?
 
 	/*
 	 * First part: process new packets to send.
 	 */
 	nm_i = kring->nr_hwcur;
-	if (nm_i != cur) {	/* we have new packets to send */
+	if (nm_i != head) {	/* we have new packets to send */
 		nic_i = sc->rl_ldata.rl_tx_prodidx;
 		// XXX or netmap_idx_k2n(kring, nm_i);
 
-		for (n = 0; nm_i != cur; n++) {
+		for (n = 0; nm_i != head; n++) {
 			struct netmap_slot *slot = &ring->slot[nm_i];
 			u_int len = slot->len;
 			uint64_t paddr;
 			void *addr = PNMB(slot, &paddr);
 
 			/* device-specific */
 			struct rl_desc *desc = &sc->rl_ldata.rl_tx_list[nic_i];
 			int cmd = slot->len | RL_TDESC_CMD_EOF |

--- 20 unchanged lines hidden (view full) ---

 			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
 			    txd[nic_i].tx_dmamap,
 			    BUS_DMASYNC_PREWRITE);
 
 			nm_i = nm_next(nm_i, lim);
 			nic_i = nm_next(nic_i, lim);
 		}
 		sc->rl_ldata.rl_tx_prodidx = nic_i;
-		/* decrease avail by # of packets sent minus previous ones */
-		kring->nr_hwcur = cur; /* the saved ring->cur */
-		kring->nr_hwavail -= new_slots;
+		kring->nr_hwcur = head;
 
 		/* synchronize the NIC ring */
 		bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
 		    sc->rl_ldata.rl_tx_list_map,
 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 
 		/* start ? */
 		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
 	}
 
 	/*
 	 * Second part: reclaim buffers for completed transmissions.
 	 */
-	if (flags & NAF_FORCE_RECLAIM || kring->nr_hwavail < 1) {
+	if (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
 		nic_i = sc->rl_ldata.rl_tx_considx;
 		for (n = 0; nic_i != sc->rl_ldata.rl_tx_prodidx;
 		    n++, nic_i = RL_TX_DESC_NXT(sc, nic_i)) {
 			uint32_t cmdstat =
 				le32toh(sc->rl_ldata.rl_tx_list[nic_i].rl_cmdstat);
 			if (cmdstat & RL_TDESC_STAT_OWN)
 				break;
 		}
 		if (n > 0) {
 			sc->rl_ldata.rl_tx_considx = nic_i;
 			sc->rl_ldata.rl_tx_free += n;
-			kring->nr_hwavail += n;
+			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
 		}
 	}
 
-	nm_txsync_finalize(kring, cur);
+	nm_txsync_finalize(kring);
 
 	return 0;
 }
 
 
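[Both revisions of the txsync above walk the rings with netmap's nm_next()/nm_prev() helpers; the new revision drops the hwavail accounting and instead takes the user's head pointer from kring->rhead and publishes completions through nr_hwtail. A minimal standalone sketch of that circular-index arithmetic follows; ring_next()/ring_prev() are local stand-ins for nm_next()/nm_prev(), and the ring size and positions are made up for the demo.]

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static inline uint32_t
ring_next(uint32_t i, uint32_t lim)	/* like nm_next(): wrap after lim */
{
	return (i == lim) ? 0 : i + 1;
}

static inline uint32_t
ring_prev(uint32_t i, uint32_t lim)	/* like nm_prev(): wrap before 0 */
{
	return (i == 0) ? lim : i - 1;
}

int
main(void)
{
	uint32_t lim = 255;	/* nkr_num_slots - 1, e.g. a 256-slot ring */

	assert(ring_next(254, lim) == 255);
	assert(ring_next(255, lim) == 0);	/* wraps to slot 0 */
	assert(ring_prev(0, lim) == 255);	/* wraps to the last slot */

	/* walking [nr_hwcur..head) the way the txsync loop does */
	uint32_t hwcur = 250, head = 3, n = 0;
	for (uint32_t i = hwcur; i != head; i = ring_next(i, lim))
		n++;
	printf("slots to transmit: %u\n", n);	/* prints 9 */
	return 0;
}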
 /*
  * Reconcile kernel and user view of the receive ring.
  */
 static int
 re_netmap_rxsync(struct netmap_adapter *na, u_int ring_nr, int flags)
 {
 	struct ifnet *ifp = na->ifp;
 	struct netmap_kring *kring = &na->rx_rings[ring_nr];
 	struct netmap_ring *ring = kring->ring;
 	u_int nm_i;	/* index into the netmap ring */
 	u_int nic_i;	/* index into the NIC ring */
-	u_int n, resvd;
+	u_int n;
 	u_int const lim = kring->nkr_num_slots - 1;
-	u_int const cur = nm_rxsync_prologue(kring, &resvd); /* cur + res */
+	u_int const head = nm_rxsync_prologue(kring);
 	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
 
 	/* device-specific */
 	struct rl_softc *sc = ifp->if_softc;
 	struct rl_rxdesc *rxd = sc->rl_ldata.rl_rx_desc;
 
-	if (cur > lim)
+	if (head > lim)
 		return netmap_ring_reinit(kring);
 
 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
 	    sc->rl_ldata.rl_rx_list_map,
 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 
 	/*
 	 * First part: import newly received packets.
 	 *
 	 * This device uses all the buffers in the ring, so we need
 	 * another termination condition in addition to RL_RDESC_STAT_OWN
-	 * cleared (all buffers could have it cleared. The easiest one
-	 * is to limit the amount of data reported up to 'lim'
+	 * cleared (all buffers could have it cleared). The easiest one
+	 * is to stop right before nm_hwcur.
 	 */
 	if (netmap_no_pendintr || force_update) {
 		uint16_t slot_flags = kring->nkr_slot_flags;
+		uint32_t stop_i = nm_prev(kring->nr_hwcur, lim);
 
 		nic_i = sc->rl_ldata.rl_rx_prodidx; /* next pkt to check */
 		nm_i = netmap_idx_n2k(kring, nic_i);
 
-		for (n = kring->nr_hwavail; n < lim ; n++) {
+		while (nm_i != stop_i) {
 			struct rl_desc *cur_rx = &sc->rl_ldata.rl_rx_list[nic_i];
 			uint32_t rxstat = le32toh(cur_rx->rl_cmdstat);
 			uint32_t total_len;
 
 			if ((rxstat & RL_RDESC_STAT_OWN) != 0)
 				break;
 			total_len = rxstat & sc->rl_rxlenmask;
 			/* XXX subtract crc */
 			total_len = (total_len < 4) ? 0 : total_len - 4;
 			ring->slot[nm_i].len = total_len;
 			ring->slot[nm_i].flags = slot_flags;
 			/* sync was in re_newbuf() */
 			bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
 			    rxd[nic_i].rx_dmamap, BUS_DMASYNC_POSTREAD);
+			// sc->rl_ifp->if_ipackets++;
 			nm_i = nm_next(nm_i, lim);
 			nic_i = nm_next(nic_i, lim);
 		}
-		if (n != kring->nr_hwavail) {
-			sc->rl_ldata.rl_rx_prodidx = nic_i;
-			sc->rl_ifp->if_ipackets += n - kring->nr_hwavail;
-			kring->nr_hwavail = n;
-		}
+		sc->rl_ldata.rl_rx_prodidx = nic_i;
+		kring->nr_hwtail = nm_i;
 		kring->nr_kflags &= ~NKR_PENDINTR;
 	}
 
 	/*
 	 * Second part: skip past packets that userspace has released.
 	 */
 	nm_i = kring->nr_hwcur;
-	if (nm_i != cur) {
+	if (nm_i != head) {
 		nic_i = netmap_idx_k2n(kring, nm_i);
-		for (n = 0; nm_i != cur; n++) {
+		for (n = 0; nm_i != head; n++) {
 			struct netmap_slot *slot = &ring->slot[nm_i];
 			uint64_t paddr;
 			void *addr = PNMB(slot, &paddr);
 
 			struct rl_desc *desc = &sc->rl_ldata.rl_rx_list[nic_i];
 			int cmd = NETMAP_BUF_SIZE | RL_RDESC_CMD_OWN;
 
 			if (addr == netmap_buffer_base) /* bad buf */

--- 12 unchanged lines hidden (view full) ---

 			}
 			desc->rl_cmdstat = htole32(cmd);
 			bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
 			    rxd[nic_i].rx_dmamap,
 			    BUS_DMASYNC_PREREAD);
 			nm_i = nm_next(nm_i, lim);
 			nic_i = nm_next(nic_i, lim);
 		}
-		kring->nr_hwavail -= n;
-		kring->nr_hwcur = cur;
+		kring->nr_hwcur = head;
 
 		bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
 		    sc->rl_ldata.rl_rx_list_map,
 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 	}
 
 	/* tell userspace that there might be new packets */
-	ring->avail = kring->nr_hwavail - resvd;
+	nm_rxsync_finalize(kring);
 
 	return 0;
 
 ring_reset:
 	return netmap_ring_reinit(kring);
 }
 
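[The new rx loop bound above stops one slot short of nr_hwcur (stop_i = nm_prev(kring->nr_hwcur, lim)), so even a ring on which the NIC has cleared RL_RDESC_STAT_OWN on every descriptor terminates without touching the slot at nr_hwcur. A standalone sketch of that stop condition, with made-up ring positions:]

#include <stdint.h>
#include <stdio.h>

static uint32_t ring_next(uint32_t i, uint32_t lim) { return i == lim ? 0 : i + 1; }
static uint32_t ring_prev(uint32_t i, uint32_t lim) { return i == 0 ? lim : i - 1; }

int
main(void)
{
	uint32_t lim = 7;	/* 8-slot ring for the demo */
	uint32_t hwcur = 2;	/* first slot still owned by userspace */
	uint32_t stop_i = ring_prev(hwcur, lim);	/* stop right before hwcur */
	uint32_t nm_i = 5;	/* first slot to check, like rl_rx_prodidx */
	uint32_t accepted = 0;

	/* worst case: OWN is clear everywhere, so only stop_i ends the loop */
	while (nm_i != stop_i) {
		accepted++;	/* the driver would fill slot len/flags here */
		nm_i = ring_next(nm_i, lim);
	}
	/* visits 5,6,7,0 and stops at 1 == stop_i: 4 slots, hwcur untouched */
	printf("accepted %u slots, stopped at %u\n", accepted, nm_i);
	return 0;
}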
 

--- 38 unchanged lines hidden (view full) ---

 
 static void
 re_netmap_rx_init(struct rl_softc *sc)
 {
 	struct netmap_adapter *na = NA(sc->rl_ifp);
 	struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
 	struct rl_desc *desc = sc->rl_ldata.rl_rx_list;
 	uint32_t cmdstat;
-	int i, n, max_avail;
+	uint32_t nic_i, max_avail;
+	uint32_t const n = sc->rl_ldata.rl_rx_desc_cnt;
 
 	if (!slot)
 		return;
-	n = sc->rl_ldata.rl_rx_desc_cnt;
 	/*
-	 * Userspace owned hwavail packets before the reset,
-	 * so the NIC that last hwavail descriptors of the ring
-	 * are still owned by the driver (and keep one empty).
+	 * Do not release the slots owned by userspace,
+	 * and also keep one empty.
 	 */
-	max_avail = n - 1 - na->rx_rings[0].nr_hwavail;
-	for (i = 0; i < n; i++) {
+	max_avail = n - 1 - nm_kr_rxspace(&na->rx_rings[0]);
+	for (nic_i = 0; nic_i < n; nic_i++) {
 		void *addr;
 		uint64_t paddr;
-		int l = netmap_idx_n2k(&na->rx_rings[0], i);
+		uint32_t nm_i = netmap_idx_n2k(&na->rx_rings[0], nic_i);
 
-		addr = PNMB(slot + l, &paddr);
+		addr = PNMB(slot + nm_i, &paddr);
 
 		netmap_reload_map(sc->rl_ldata.rl_rx_mtag,
-		    sc->rl_ldata.rl_rx_desc[i].rx_dmamap, addr);
+		    sc->rl_ldata.rl_rx_desc[nic_i].rx_dmamap, addr);
 		bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
-		    sc->rl_ldata.rl_rx_desc[i].rx_dmamap, BUS_DMASYNC_PREREAD);
-		desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
-		desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
+		    sc->rl_ldata.rl_rx_desc[nic_i].rx_dmamap, BUS_DMASYNC_PREREAD);
+		desc[nic_i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
+		desc[nic_i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
 		cmdstat = NETMAP_BUF_SIZE;
-		if (i == n - 1) /* mark the end of ring */
+		if (nic_i == n - 1) /* mark the end of ring */
 			cmdstat |= RL_RDESC_CMD_EOR;
-		if (i < max_avail)
+		if (nic_i < max_avail)
 			cmdstat |= RL_RDESC_CMD_OWN;
-		desc[i].rl_cmdstat = htole32(cmdstat);
+		desc[nic_i].rl_cmdstat = htole32(cmdstat);
 	}
 }
 
 
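[The init loop above hands the first max_avail descriptors to the NIC (RL_RDESC_CMD_OWN) and withholds the rest: the slots userspace still holds, read through nm_kr_rxspace() in the new API, plus one always-empty slot. A toy check of that ownership arithmetic with assumed numbers:]

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t n = 256;	/* rl_rx_desc_cnt (assumed for the demo) */
	uint32_t held = 10;	/* slots userspace holds, i.e. nm_kr_rxspace() */
	uint32_t max_avail = n - 1 - held;
	uint32_t nic_owned = 0;

	/* same test the init loop uses to set RL_RDESC_CMD_OWN */
	for (uint32_t nic_i = 0; nic_i < n; nic_i++)
		if (nic_i < max_avail)
			nic_owned++;

	/* 245 to the NIC, 10 kept for userspace, 1 left empty */
	printf("%u NIC-owned + %u held + 1 empty = %u\n",
	    nic_owned, held, nic_owned + held + 1);
	return 0;
}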
 static void
 re_netmap_attach(struct rl_softc *sc)
 {
 	struct netmap_adapter na;

--- 15 unchanged lines hidden ---