/*-
 * Copyright (C) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/powerpc/ps3/if_glc.c 223324 2011-06-20 02:17:34Z nwhitehorn $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <machine/pio.h>
#include <machine/bus.h>
#include <machine/platform.h>
#include <machine/pmap.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include "ps3bus.h"
#include "ps3-hvcall.h"
#include "if_glcreg.h"

static int	glc_probe(device_t);
static int	glc_attach(device_t);
static void	glc_init(void *xsc);
static void	glc_start(struct ifnet *ifp);
static int	glc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static void	glc_set_multicast(struct glc_softc *sc);
static int	glc_add_rxbuf(struct glc_softc *sc, int idx);
static int	glc_add_rxbuf_dma(struct glc_softc *sc, int idx);
static int	glc_encap(struct glc_softc *sc, struct mbuf **m_head,
		    bus_addr_t *pktdesc);
static int	glc_intr_filter(void *xsc);
static void	glc_intr(void *xsc);
static void	glc_tick(void *xsc);
static void	glc_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static int	glc_media_change(struct ifnet *ifp);

static MALLOC_DEFINE(M_GLC, "gelic", "PS3 GELIC ethernet");

static device_method_t glc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		glc_probe),
	DEVMETHOD(device_attach,	glc_attach),

	{ 0, 0 }
};

static driver_t glc_driver = {
	"glc",
	glc_methods,
	sizeof(struct glc_softc)
};

static devclass_t glc_devclass;

DRIVER_MODULE(glc, ps3bus, glc_driver, glc_devclass, 0, 0);

static int
glc_probe(device_t dev)
{

	if (ps3bus_get_bustype(dev) != PS3_BUSTYPE_SYSBUS ||
	    ps3bus_get_devtype(dev) != PS3_DEVTYPE_GELIC)
		return (ENXIO);

	device_set_desc(dev, "Playstation 3 GELIC Network Controller");
	return (BUS_PROBE_SPECIFIC);
}

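/*
 * Callback for bus_dmamap_load(): record the physical address of the
 * first (and only) DMA segment so callers can hand it to the hypervisor.
 */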
static void
glc_getphys(void *xaddr, bus_dma_segment_t *segs, int nsegs, int error)
{
	if (error != 0)
		return;

	*(bus_addr_t *)xaddr = segs[0].ds_addr;
}

static int
glc_attach(device_t dev)
{
	struct glc_softc *sc;
	struct glc_txsoft *txs;
	uint64_t mac64, val, junk;
	int i, err;

	sc = device_get_softc(dev);

	sc->sc_bus = ps3bus_get_bus(dev);
	sc->sc_dev = ps3bus_get_device(dev);
	sc->sc_self = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
	sc->next_txdma_slot = 0;
	sc->bsy_txdma_slots = 0;
	sc->sc_next_rxdma_slot = 0;
	sc->first_used_txdma_slot = -1;

	/*
	 * Shut down existing tasks.
	 */

	lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
	lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0);

	sc->sc_ifp = if_alloc(IFT_ETHER);
	sc->sc_ifp->if_softc = sc;

	/*
	 * Get MAC address and VLAN id
	 */

	lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_MAC_ADDRESS,
	    0, 0, 0, &mac64, &junk);
	memcpy(sc->sc_enaddr, &((uint8_t *)&mac64)[2], sizeof(sc->sc_enaddr));
	sc->sc_tx_vlan = sc->sc_rx_vlan = -1;
	err = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_VLAN_ID,
	    GELIC_VLAN_TX_ETHERNET, 0, 0, &val, &junk);
	if (err == 0)
		sc->sc_tx_vlan = val;
	err = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_VLAN_ID,
	    GELIC_VLAN_RX_ETHERNET, 0, 0, &val, &junk);
	if (err == 0)
		sc->sc_rx_vlan = val;

	/*
	 * Set up interrupt handler
	 */
	sc->sc_irqid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irqid,
	    RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "Could not allocate IRQ!\n");
		mtx_destroy(&sc->sc_mtx);
		return (ENXIO);
	}

	bus_setup_intr(dev, sc->sc_irq,
	    INTR_TYPE_MISC | INTR_MPSAFE | INTR_ENTROPY,
	    glc_intr_filter, glc_intr, sc, &sc->sc_irqctx);
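	/*
	 * The GELIC has no directly readable interrupt status register;
	 * the hypervisor instead writes the status into this 8-byte,
	 * 8-byte-aligned doubleword, which glc_intr_filter() latches
	 * before the threaded handler runs.
	 */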
	sc->sc_hwirq_status = (uint64_t *)contigmalloc(8, M_GLC, M_ZERO, 0,
	    BUS_SPACE_MAXADDR_32BIT, 8, PAGE_SIZE);
	lv1_net_set_interrupt_status_indicator(sc->sc_bus, sc->sc_dev,
	    vtophys(sc->sc_hwirq_status), 0);
	lv1_net_set_interrupt_mask(sc->sc_bus, sc->sc_dev,
	    GELIC_INT_RXDONE | GELIC_INT_RXFRAME | GELIC_INT_PHY |
	    GELIC_INT_TX_CHAIN_END, 0);

	/*
	 * Set up DMA.
	 */

	err = bus_dma_tag_create(bus_get_dma_tag(dev), 32, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    129*sizeof(struct glc_dmadesc), 1, 128*sizeof(struct glc_dmadesc),
	    0, NULL, NULL, &sc->sc_dmadesc_tag);

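	/*
	 * The TX and RX rings are each an array of 128 struct glc_dmadesc
	 * entries that the hardware walks via physical next pointers, so
	 * each ring must be a single physically contiguous allocation.
	 */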
	err = bus_dmamem_alloc(sc->sc_dmadesc_tag, (void **)&sc->sc_txdmadesc,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->sc_txdmadesc_map);
	err = bus_dmamap_load(sc->sc_dmadesc_tag, sc->sc_txdmadesc_map,
	    sc->sc_txdmadesc, 128*sizeof(struct glc_dmadesc), glc_getphys,
	    &sc->sc_txdmadesc_phys, 0);
	err = bus_dmamem_alloc(sc->sc_dmadesc_tag, (void **)&sc->sc_rxdmadesc,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->sc_rxdmadesc_map);
	err = bus_dmamap_load(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
	    sc->sc_rxdmadesc, 128*sizeof(struct glc_dmadesc), glc_getphys,
	    &sc->sc_rxdmadesc_phys, 0);

	err = bus_dma_tag_create(bus_get_dma_tag(dev), 128, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->sc_rxdma_tag);
	err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 16, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->sc_txdma_tag);

	/* init transmit descriptors */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/* create TX DMA maps */
	err = ENOMEM;
	for (i = 0; i < GLC_MAX_TX_PACKETS; i++) {
		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		err = bus_dmamap_create(sc->sc_txdma_tag, 0, &txs->txs_dmamap);
		if (err) {
			device_printf(dev,
			    "unable to create TX DMA map %d, error = %d\n",
			    i, err);
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/* Create the receive buffer DMA maps. */
	for (i = 0; i < GLC_MAX_RX_PACKETS; i++) {
		err = bus_dmamap_create(sc->sc_rxdma_tag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap);
		if (err) {
			device_printf(dev,
			    "unable to create RX DMA map %d, error = %d\n",
			    i, err);
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Attach to network stack
	 */

	if_initname(sc->sc_ifp, device_get_name(dev), device_get_unit(dev));
	sc->sc_ifp->if_mtu = ETHERMTU;
	sc->sc_ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	sc->sc_ifp->if_hwassist = CSUM_TCP | CSUM_UDP;
	sc->sc_ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_RXCSUM;
	sc->sc_ifp->if_capenable = IFCAP_HWCSUM | IFCAP_RXCSUM;
	sc->sc_ifp->if_start = glc_start;
	sc->sc_ifp->if_ioctl = glc_ioctl;
	sc->sc_ifp->if_init = glc_init;

	ifmedia_init(&sc->sc_media, IFM_IMASK, glc_media_change,
	    glc_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	IFQ_SET_MAXLEN(&sc->sc_ifp->if_snd, GLC_MAX_TX_PACKETS);
	sc->sc_ifp->if_snd.ifq_drv_maxlen = GLC_MAX_TX_PACKETS;
	IFQ_SET_READY(&sc->sc_ifp->if_snd);

	ether_ifattach(sc->sc_ifp, sc->sc_enaddr);
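	/* XXX: the TX checksum support advertised above is disabled here. */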
	sc->sc_ifp->if_hwassist = 0;

	return (0);

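	/* XXX: unreachable; leftover error-unwind path. */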
	mtx_destroy(&sc->sc_mtx);
	if_free(sc->sc_ifp);
	return (ENXIO);
}

static void
glc_init_locked(struct glc_softc *sc)
{
	int i, error;
	struct glc_rxsoft *rxs;
	struct glc_txsoft *txs;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
	lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0);

	glc_set_multicast(sc);

	for (i = 0; i < GLC_MAX_RX_PACKETS; i++) {
		rxs = &sc->sc_rxsoft[i];
		rxs->rxs_desc_slot = i;

		if (rxs->rxs_mbuf == NULL) {
			glc_add_rxbuf(sc, i);

			if (rxs->rxs_mbuf == NULL) {
				rxs->rxs_desc_slot = -1;
				break;
			}
		}

		glc_add_rxbuf_dma(sc, i);
		bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
		    BUS_DMASYNC_PREREAD);
	}

	/* Clear TX dirty queue */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		bus_dmamap_unload(sc->sc_txdma_tag, txs->txs_dmamap);

		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}
	sc->first_used_txdma_slot = -1;
	sc->bsy_txdma_slots = 0;

	error = lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev,
	    sc->sc_rxsoft[0].rxs_desc, 0);
	if (error != 0)
		device_printf(sc->sc_self,
		    "lv1_net_start_rx_dma error: %d\n", error);

	sc->sc_ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->sc_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_ifpflags = sc->sc_ifp->if_flags;

	sc->sc_wdog_timer = 0;
	callout_reset(&sc->sc_tick_ch, hz, glc_tick, sc);
}

static void
glc_stop(void *xsc)
{
	struct glc_softc *sc = xsc;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
	lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0);
}

static void
glc_init(void *xsc)
{
	struct glc_softc *sc = xsc;

	mtx_lock(&sc->sc_mtx);
	glc_init_locked(sc);
	mtx_unlock(&sc->sc_mtx);
}

static void
glc_tick(void *xsc)
{
	struct glc_softc *sc = xsc;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	/*
	 * XXX: Sometimes the RX queue gets stuck. Poke it periodically until
	 * we figure out why. This will fail harmlessly if the RX queue is
	 * already running.
	 */
	lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev,
	    sc->sc_rxsoft[sc->sc_next_rxdma_slot].rxs_desc, 0);

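	/*
	 * sc_wdog_timer is armed (set to 5) whenever TX DMA is started and
	 * cleared once the dirty queue drains; if it counts down to zero
	 * here, the transmitter has stalled and the interface is reset.
	 */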
	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) {
		callout_reset(&sc->sc_tick_ch, hz, glc_tick, sc);
		return;
	}

	/* Problems */
	device_printf(sc->sc_self, "device timeout\n");

	glc_init_locked(sc);
}

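/*
 * Dequeue packets and append them to the TX descriptor chain.  The DMA
 * engine is kicked here only if the dirty queue was empty on entry;
 * otherwise glc_txintr() restarts it as descriptors complete.
 */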
static void
glc_start_locked(struct ifnet *ifp)
{
	struct glc_softc *sc = ifp->if_softc;
	bus_addr_t first, pktdesc;
	int kickstart = 0;
	int error;
	struct mbuf *mb_head;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	first = 0;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	if (STAILQ_EMPTY(&sc->sc_txdirtyq))
		kickstart = 1;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, mb_head);

		if (mb_head == NULL)
			break;

		/* Check if the ring buffer is full */
		if (sc->bsy_txdma_slots > 125) {
			/* Put the packet back and stop */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, mb_head);
			break;
		}

		BPF_MTAP(ifp, mb_head);

		if (sc->sc_tx_vlan >= 0)
			mb_head = ether_vlanencap(mb_head, sc->sc_tx_vlan);

		if (glc_encap(sc, &mb_head, &pktdesc)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		if (first == 0)
			first = pktdesc;
	}

	if (kickstart && first != 0) {
		error = lv1_net_start_tx_dma(sc->sc_bus, sc->sc_dev, first, 0);
		if (error != 0)
			device_printf(sc->sc_self,
			    "lv1_net_start_tx_dma error: %d\n", error);
		sc->sc_wdog_timer = 5;
	}
}

static void
glc_start(struct ifnet *ifp)
{
	struct glc_softc *sc = ifp->if_softc;

	mtx_lock(&sc->sc_mtx);
	glc_start_locked(ifp);
	mtx_unlock(&sc->sc_mtx);
}

static int
glc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct glc_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int err = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		mtx_lock(&sc->sc_mtx);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			   ((ifp->if_flags ^ sc->sc_ifpflags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				glc_set_multicast(sc);
			else
				glc_init_locked(sc);
		}
		else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			glc_stop(sc);
		sc->sc_ifpflags = ifp->if_flags;
		mtx_unlock(&sc->sc_mtx);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		mtx_lock(&sc->sc_mtx);
		glc_set_multicast(sc);
		mtx_unlock(&sc->sc_mtx);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		err = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	default:
		err = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (err);
}

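/*
 * Program the hardware multicast filter.  Broadcast is always installed,
 * and the filter falls back to all-multicast mode once more than 32
 * addresses (its capacity) are requested.
 */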
static void
glc_set_multicast(struct glc_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ifmultiaddr *inm;
	uint64_t addr;
	int naddrs;

	/* Clear multicast filter */
	lv1_net_remove_multicast_address(sc->sc_bus, sc->sc_dev, 0, 1);

	/* Add broadcast */
	lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev,
	    0xffffffffffffL, 0);

	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev, 0, 1);
	} else {
		if_maddr_rlock(ifp);
		naddrs = 1; /* Include broadcast */
		TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
			if (inm->ifma_addr->sa_family != AF_LINK)
				continue;
			addr = 0;
			memcpy(&((uint8_t *)(&addr))[2],
			    LLADDR((struct sockaddr_dl *)inm->ifma_addr),
			    ETHER_ADDR_LEN);

			lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev,
			    addr, 0);

			/*
			 * Filter can only hold 32 addresses, so fall back to
			 * the IFF_ALLMULTI case if we have too many.
			 */
			if (++naddrs >= 32) {
				lv1_net_add_multicast_address(sc->sc_bus,
				    sc->sc_dev, 0, 1);
				break;
			}
		}
		if_maddr_runlock(ifp);
	}
}

static int
glc_add_rxbuf(struct glc_softc *sc, int idx)
{
	struct glc_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	int error, nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	if (rxs->rxs_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rxdma_tag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rxdma_tag, rxs->rxs_dmamap);
	}

	error = bus_dmamap_load_mbuf_sg(sc->sc_rxdma_tag, rxs->rxs_dmamap, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_self,
		    "cannot load RX DMA map %d, error = %d\n", idx, error);
		m_freem(m);
		return (error);
	}
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	rxs->rxs_mbuf = m;
	rxs->segment = segs[0];

	bus_dmamap_sync(sc->sc_rxdma_tag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);

	return (0);
}

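/*
 * (Re)write the RX descriptor for slot idx: point it at the slot's mbuf,
 * chain it to the following slot so the 128 descriptors form a ring, and
 * hand it to the hardware by setting GELIC_DESCR_OWNED.
 */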
static int
glc_add_rxbuf_dma(struct glc_softc *sc, int idx)
{
	struct glc_rxsoft *rxs = &sc->sc_rxsoft[idx];

	bzero(&sc->sc_rxdmadesc[idx], sizeof(sc->sc_rxdmadesc[idx]));
	sc->sc_rxdmadesc[idx].paddr = rxs->segment.ds_addr;
	sc->sc_rxdmadesc[idx].len = rxs->segment.ds_len;
	sc->sc_rxdmadesc[idx].next = sc->sc_rxdmadesc_phys +
	    ((idx + 1) % GLC_MAX_RX_PACKETS)*sizeof(sc->sc_rxdmadesc[idx]);
	sc->sc_rxdmadesc[idx].cmd_stat = GELIC_DESCR_OWNED;

	rxs->rxs_desc_slot = idx;
	rxs->rxs_desc = sc->sc_rxdmadesc_phys + idx*sizeof(struct glc_dmadesc);

	return (0);
}

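/*
 * Map an outgoing mbuf chain into consecutive TX ring slots.  The chain
 * is collapsed first if it has more fragments than free slots (capped at
 * 16, the size of the local segment array).  The new packet is linked
 * into the live chain (via the preceding slot's next pointer) only after
 * all of its descriptors are written, so the hardware never follows a
 * pointer into a half-built chain.
 */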
static int
glc_encap(struct glc_softc *sc, struct mbuf **m_head, bus_addr_t *pktdesc)
{
	bus_dma_segment_t segs[16];
	struct glc_txsoft *txs;
	struct mbuf *m;
	bus_addr_t firstslotphys;
	int i, idx, nsegs, nsegs_max;
	int err = 0;

	/* Max number of segments is the number of free DMA slots */
	nsegs_max = 128 - sc->bsy_txdma_slots;

	if (nsegs_max > 16 || sc->first_used_txdma_slot < 0)
		nsegs_max = 16;

	/* Get a work queue entry. */
	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
		/* Ran out of descriptors. */
		return (ENOBUFS);
	}

	nsegs = 0;
	for (m = *m_head; m != NULL; m = m->m_next)
		nsegs++;

	if (nsegs > nsegs_max) {
		m = m_collapse(*m_head, M_DONTWAIT, nsegs_max);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
	}

	err = bus_dmamap_load_mbuf_sg(sc->sc_txdma_tag, txs->txs_dmamap,
	    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
	if (err != 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (err);
	}

	KASSERT(nsegs <= 128 - sc->bsy_txdma_slots,
	    ("GLC: Mapped too many (%d) DMA segments with %d available",
	     nsegs, 128 - sc->bsy_txdma_slots));

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	txs->txs_ndescs = nsegs;
	txs->txs_firstdesc = sc->next_txdma_slot;

	idx = txs->txs_firstdesc;
	firstslotphys = sc->sc_txdmadesc_phys +
	    txs->txs_firstdesc*sizeof(struct glc_dmadesc);

	for (i = 0; i < nsegs; i++) {
		bzero(&sc->sc_txdmadesc[idx], sizeof(sc->sc_txdmadesc[idx]));
		sc->sc_txdmadesc[idx].paddr = segs[i].ds_addr;
		sc->sc_txdmadesc[idx].len = segs[i].ds_len;
		sc->sc_txdmadesc[idx].next = sc->sc_txdmadesc_phys +
		    ((idx + 1) % GLC_MAX_TX_PACKETS)*sizeof(struct glc_dmadesc);
		sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_NOIPSEC;

		if (i+1 == nsegs) {
			txs->txs_lastdesc = idx;
			sc->sc_txdmadesc[idx].next = 0;
			sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_LAST;
		}

		if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
			sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_CSUM_TCP;
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
			sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_CSUM_UDP;
		sc->sc_txdmadesc[idx].cmd_stat |= GELIC_DESCR_OWNED;

		idx = (idx + 1) % GLC_MAX_TX_PACKETS;
	}
	sc->next_txdma_slot = idx;
	sc->bsy_txdma_slots += nsegs;
	if (txs->txs_firstdesc != 0)
		idx = txs->txs_firstdesc - 1;
	else
		idx = GLC_MAX_TX_PACKETS - 1;

	if (sc->first_used_txdma_slot < 0)
		sc->first_used_txdma_slot = txs->txs_firstdesc;

	bus_dmamap_sync(sc->sc_txdma_tag, txs->txs_dmamap,
	    BUS_DMASYNC_PREWRITE);
	sc->sc_txdmadesc[idx].next = firstslotphys;

	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
	txs->txs_mbuf = *m_head;
	*pktdesc = firstslotphys;

	return (0);
}

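/*
 * Drain completed RX descriptors; the hardware clears GELIC_DESCR_OWNED
 * when it is done with a slot.  Each good frame gets a fresh mbuf before
 * being passed up the stack, and the DMA engine is restarted if it ran
 * off the end of the descriptor chain.
 */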
static void
glc_rxintr(struct glc_softc *sc)
{
	int i, restart_rxdma, error;
	struct mbuf *m;
	struct ifnet *ifp = sc->sc_ifp;

	bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
	    BUS_DMASYNC_POSTREAD);

	restart_rxdma = 0;
	while ((sc->sc_rxdmadesc[sc->sc_next_rxdma_slot].cmd_stat &
	    GELIC_DESCR_OWNED) == 0) {
		i = sc->sc_next_rxdma_slot;
		sc->sc_next_rxdma_slot++;
		if (sc->sc_next_rxdma_slot >= GLC_MAX_RX_PACKETS)
			sc->sc_next_rxdma_slot = 0;

		if (sc->sc_rxdmadesc[i].cmd_stat & GELIC_CMDSTAT_CHAIN_END)
			restart_rxdma = 1;

		if (sc->sc_rxdmadesc[i].rxerror & GELIC_RXERRORS) {
			ifp->if_ierrors++;
			goto requeue;
		}

		m = sc->sc_rxsoft[i].rxs_mbuf;
		if (sc->sc_rxdmadesc[i].data_stat & GELIC_RX_IPCSUM) {
			m->m_pkthdr.csum_flags |=
			    CSUM_IP_CHECKED | CSUM_IP_VALID;
		}
		if (sc->sc_rxdmadesc[i].data_stat & GELIC_RX_TCPUDPCSUM) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}

		if (glc_add_rxbuf(sc, i)) {
			ifp->if_ierrors++;
			goto requeue;
		}

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_len = sc->sc_rxdmadesc[i].valid_size;
		m->m_pkthdr.len = m->m_len;

		if (sc->sc_rx_vlan >= 0)
			m_adj(m, 2);

		mtx_unlock(&sc->sc_mtx);
		(*ifp->if_input)(ifp, m);
		mtx_lock(&sc->sc_mtx);

	requeue:
		glc_add_rxbuf_dma(sc, i);
	}

	bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
	    BUS_DMASYNC_PREWRITE);

	if (restart_rxdma) {
		error = lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev,
		    sc->sc_rxsoft[sc->sc_next_rxdma_slot].rxs_desc, 0);
		if (error != 0)
			device_printf(sc->sc_self,
			    "lv1_net_start_rx_dma error: %d\n", error);
	}
}

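/*
 * Reclaim completed TX descriptors.  Any error bits in the high nibble
 * of cmd_stat stop the transmitter, which then has to be restarted, as
 * does the hardware reaching the end of the descriptor chain.
 */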
static void
glc_txintr(struct glc_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct glc_txsoft *txs;
	int progress = 0, kickstart = 0, error;

	bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_txdmadesc_map,
	    BUS_DMASYNC_POSTREAD);

	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		if (sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat
		    & GELIC_DESCR_OWNED)
			break;

		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		bus_dmamap_unload(sc->sc_txdma_tag, txs->txs_dmamap);
		sc->bsy_txdma_slots -= txs->txs_ndescs;

		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		if ((sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat & 0xf0000000)
		    != 0) {
			lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
			kickstart = 1;
			ifp->if_oerrors++;
		}

		if (sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat &
		    GELIC_CMDSTAT_CHAIN_END)
			kickstart = 1;

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
		ifp->if_opackets++;
		progress = 1;
	}

	if (txs != NULL)
		sc->first_used_txdma_slot = txs->txs_firstdesc;
	else
		sc->first_used_txdma_slot = -1;

	if (kickstart || txs != NULL) {
		/* Speculatively (or necessarily) start the TX queue again */
		error = lv1_net_start_tx_dma(sc->sc_bus, sc->sc_dev,
		    sc->sc_txdmadesc_phys +
		    ((txs == NULL) ? 0 : txs->txs_firstdesc)*
		     sizeof(struct glc_dmadesc), 0);
		if (error != 0)
			device_printf(sc->sc_self,
			    "lv1_net_start_tx_dma error: %d\n", error);
	}

	if (progress) {
		/*
		 * We freed some descriptors, so reset IFF_DRV_OACTIVE
		 * and restart.
		 */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;

		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
		    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			glc_start_locked(ifp);
	}
}

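/*
 * Primary (filter) interrupt handler: runs in hard-interrupt context,
 * so just latch the hypervisor-written status word and defer the real
 * work to the threaded handler, glc_intr().
 */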
static int
glc_intr_filter(void *xsc)
{
	struct glc_softc *sc = xsc;

	powerpc_sync();
	atomic_set_64(&sc->sc_interrupt_status, *sc->sc_hwirq_status);
	return (FILTER_SCHEDULE_THREAD);
}

static void
glc_intr(void *xsc)
{
	struct glc_softc *sc = xsc;
	uint64_t status, linkstat, junk;

	mtx_lock(&sc->sc_mtx);

	status = atomic_readandclear_64(&sc->sc_interrupt_status);

	if (status == 0) {
		mtx_unlock(&sc->sc_mtx);
		return;
	}

	if (status & (GELIC_INT_RXDONE | GELIC_INT_RXFRAME))
		glc_rxintr(sc);

	if (status & (GELIC_INT_TXDONE | GELIC_INT_TX_CHAIN_END))
		glc_txintr(sc);

	if (status & GELIC_INT_PHY) {
		lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_LINK_STATUS,
		    GELIC_VLAN_TX_ETHERNET, 0, 0, &linkstat, &junk);

		linkstat = (linkstat & GELIC_LINK_UP) ?
		    LINK_STATE_UP : LINK_STATE_DOWN;
		if (linkstat != sc->sc_ifp->if_link_state)
			if_link_state_change(sc->sc_ifp, linkstat);
	}

	mtx_unlock(&sc->sc_mtx);
}

static void
glc_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct glc_softc *sc = ifp->if_softc;
	uint64_t status, junk;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_LINK_STATUS,
	    GELIC_VLAN_TX_ETHERNET, 0, 0, &status, &junk);

	if (status & GELIC_LINK_UP)
		ifmr->ifm_status |= IFM_ACTIVE;

	if (status & GELIC_SPEED_10)
		ifmr->ifm_active |= IFM_10_T;
	else if (status & GELIC_SPEED_100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (status & GELIC_SPEED_1000)
		ifmr->ifm_active |= IFM_1000_T;

	if (status & GELIC_FULL_DUPLEX)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;
}

static int
glc_media_change(struct ifnet *ifp)
{
	struct glc_softc *sc = ifp->if_softc;
	uint64_t mode, junk;
	int result;

	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
	case IFM_AUTO:
		mode = GELIC_AUTO_NEG;
		break;
	case IFM_10_T:
		mode = GELIC_SPEED_10;
		break;
	case IFM_100_TX:
		mode = GELIC_SPEED_100;
		break;
	case IFM_1000_T:
		mode = GELIC_SPEED_1000 | GELIC_FULL_DUPLEX;
		break;
	default:
		return (EINVAL);
	}

	if (IFM_OPTIONS(sc->sc_media.ifm_media) & IFM_FDX)
		mode |= GELIC_FULL_DUPLEX;

	result = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_SET_LINK_MODE,
	    GELIC_VLAN_TX_ETHERNET, mode, 0, &junk, &junk);

	return (result ? EIO : 0);
}
957