if_glc.c (r217044, deleted) vs. if_glc.c (r223324, added)
head/sys/powerpc/ps3/if_glc.c
1/*-
2 * Copyright (C) 2010 Nathan Whitehorn
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
18 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
20 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
21 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
22 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
23 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 *
25 * $FreeBSD: head/sys/powerpc/ps3/if_glc.c 217044 2011-01-06 04:12:29Z nwhitehorn $
25 * $FreeBSD: head/sys/powerpc/ps3/if_glc.c 223324 2011-06-20 02:17:34Z nwhitehorn $
26 */
27
28#include <sys/param.h>
29#include <sys/systm.h>
30#include <sys/sockio.h>
31#include <sys/endian.h>
32#include <sys/mbuf.h>
33#include <sys/module.h>
34#include <sys/malloc.h>
35#include <sys/kernel.h>
36#include <sys/socket.h>
37
38#include <vm/vm.h>
39#include <vm/pmap.h>
40
41#include <net/bpf.h>
42#include <net/if.h>
43#include <net/if_arp.h>
44#include <net/ethernet.h>
45#include <net/if_dl.h>
46#include <net/if_media.h>
47#include <net/if_types.h>
48#include <net/if_vlan_var.h>
49
50#include <machine/pio.h>
51#include <machine/bus.h>
52#include <machine/platform.h>
53#include <machine/pmap.h>
54#include <machine/resource.h>
55#include <sys/bus.h>
56#include <sys/rman.h>
57
58#include "ps3bus.h"
59#include "ps3-hvcall.h"
60#include "if_glcreg.h"
61
62static int glc_probe(device_t);
63static int glc_attach(device_t);
64static void glc_init(void *xsc);
65static void glc_start(struct ifnet *ifp);
66static int glc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
67static void glc_set_multicast(struct glc_softc *sc);
68static int glc_add_rxbuf(struct glc_softc *sc, int idx);
69static int glc_add_rxbuf_dma(struct glc_softc *sc, int idx);
70static int glc_encap(struct glc_softc *sc, struct mbuf **m_head,
71 bus_addr_t *pktdesc);
72static int glc_intr_filter(void *xsc);
73static void glc_intr(void *xsc);
74static void glc_tick(void *xsc);
75static void glc_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
76static int glc_media_change(struct ifnet *ifp);
77
78static MALLOC_DEFINE(M_GLC, "gelic", "PS3 GELIC ethernet");
79
80static device_method_t glc_methods[] = {
81 /* Device interface */
82 DEVMETHOD(device_probe, glc_probe),
83 DEVMETHOD(device_attach, glc_attach),
84
85 { 0, 0 }
86};
87
88static driver_t glc_driver = {
89 "glc",
90 glc_methods,
91 sizeof(struct glc_softc)
92};
93
94static devclass_t glc_devclass;
95
96DRIVER_MODULE(glc, ps3bus, glc_driver, glc_devclass, 0, 0);
97
98static int
99glc_probe(device_t dev)
100{
101
102 if (ps3bus_get_bustype(dev) != PS3_BUSTYPE_SYSBUS ||
103 ps3bus_get_devtype(dev) != PS3_DEVTYPE_GELIC)
104 return (ENXIO);
105
106 device_set_desc(dev, "Playstation 3 GELIC Network Controller");
107 return (BUS_PROBE_SPECIFIC);
108}
109
110static void
111glc_getphys(void *xaddr, bus_dma_segment_t *segs, int nsegs, int error)
112{
113 if (error != 0)
114 return;
115
116 *(bus_addr_t *)xaddr = segs[0].ds_addr;
117}
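
glc_getphys() above is a minimal bus_dmamap_load() callback: busdma resolves the
buffer into segments and hands them to the callback, which records the first
segment's bus address through the opaque pointer. A sketch of the call shape
('tag', 'map', 'vaddr' and 'size' are placeholders; the real calls are in
glc_attach() below):

	bus_addr_t phys;
	int error;

	/*
	 * bus_dmamap_load() normally invokes the callback before it
	 * returns (it returns EINPROGRESS when it must defer); this
	 * driver relies on the synchronous case, so 'phys' is usable
	 * immediately when error == 0.
	 */
	error = bus_dmamap_load(tag, map, vaddr, size, glc_getphys, &phys, 0);
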
118
119static int
120glc_attach(device_t dev)
121{
122 struct glc_softc *sc;
123 struct glc_txsoft *txs;
124 uint64_t mac64, val, junk;
125 int i, err;
126
127 sc = device_get_softc(dev);
128
129 sc->sc_bus = ps3bus_get_bus(dev);
130 sc->sc_dev = ps3bus_get_device(dev);
131 sc->sc_self = dev;
132
133 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
134 MTX_DEF);
135 callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
136 sc->next_txdma_slot = 0;
137 sc->bsy_txdma_slots = 0;
138 sc->sc_next_rxdma_slot = 0;
138 sc->first_used_txdma_slot = -1;
139
140 /*
141 * Shut down existing tasks.
142 */
143
144 lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
145 lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0);
146
147 sc->sc_ifp = if_alloc(IFT_ETHER);
148 sc->sc_ifp->if_softc = sc;
149
150 /*
151 * Get MAC address and VLAN id
152 */
153
154 lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_MAC_ADDRESS,
155 0, 0, 0, &mac64, &junk);
156 memcpy(sc->sc_enaddr, &((uint8_t *)&mac64)[2], sizeof(sc->sc_enaddr));
157 sc->sc_tx_vlan = sc->sc_rx_vlan = -1;
158 err = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_VLAN_ID,
159 GELIC_VLAN_TX_ETHERNET, 0, 0, &val, &junk);
160 if (err == 0)
161 sc->sc_tx_vlan = val;
162 err = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_VLAN_ID,
163 GELIC_VLAN_RX_ETHERNET, 0, 0, &val, &junk);
164 if (err == 0)
165 sc->sc_rx_vlan = val;
166
167 /*
168 * Set up interrupt handler
169 */
170 sc->sc_irqid = 0;
171 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irqid,
172 RF_ACTIVE);
173 if (sc->sc_irq == NULL) {
174 device_printf(dev, "Could not allocate IRQ!\n");
175 mtx_destroy(&sc->sc_mtx);
176 return (ENXIO);
177 }
178
179 bus_setup_intr(dev, sc->sc_irq,
180 INTR_TYPE_MISC | INTR_MPSAFE | INTR_ENTROPY,
181 glc_intr_filter, glc_intr, sc, &sc->sc_irqctx);
182 sc->sc_hwirq_status = (uint64_t *)contigmalloc(8, M_GLC, M_ZERO, 0,
183 BUS_SPACE_MAXADDR_32BIT, 8, PAGE_SIZE);
184 lv1_net_set_interrupt_status_indicator(sc->sc_bus, sc->sc_dev,
185 vtophys(sc->sc_hwirq_status), 0);
186 lv1_net_set_interrupt_mask(sc->sc_bus, sc->sc_dev,
187 GELIC_INT_RXDONE | GELIC_INT_RXFRAME | GELIC_INT_PHY |
188 GELIC_INT_TX_CHAIN_END, 0);
189
190 /*
191 * Set up DMA.
192 */
193
194 err = bus_dma_tag_create(bus_get_dma_tag(dev), 32, 0,
195 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
196 129*sizeof(struct glc_dmadesc), 1, 128*sizeof(struct glc_dmadesc),
197 0, NULL,NULL, &sc->sc_dmadesc_tag);
198
199 err = bus_dmamem_alloc(sc->sc_dmadesc_tag, (void **)&sc->sc_txdmadesc,
200 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
201 &sc->sc_txdmadesc_map);
202 err = bus_dmamap_load(sc->sc_dmadesc_tag, sc->sc_txdmadesc_map,
203 sc->sc_txdmadesc, 128*sizeof(struct glc_dmadesc), glc_getphys,
204 &sc->sc_txdmadesc_phys, 0);
205 err = bus_dmamem_alloc(sc->sc_dmadesc_tag, (void **)&sc->sc_rxdmadesc,
206 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
207 &sc->sc_rxdmadesc_map);
208 err = bus_dmamap_load(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
209 sc->sc_rxdmadesc, 128*sizeof(struct glc_dmadesc), glc_getphys,
210 &sc->sc_rxdmadesc_phys, 0);
211
212 err = bus_dma_tag_create(bus_get_dma_tag(dev), 128, 0,
213 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
214 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,NULL,
215 &sc->sc_rxdma_tag);
216 err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
217 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
218 BUS_SPACE_MAXSIZE_32BIT, 16, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,NULL,
219 &sc->sc_txdma_tag);
220
221 /* init transmit descriptors */
222 STAILQ_INIT(&sc->sc_txfreeq);
223 STAILQ_INIT(&sc->sc_txdirtyq);
224
225 /* create TX DMA maps */
226 err = ENOMEM;
227 for (i = 0; i < GLC_MAX_TX_PACKETS; i++) {
228 txs = &sc->sc_txsoft[i];
229 txs->txs_mbuf = NULL;
230 err = bus_dmamap_create(sc->sc_txdma_tag, 0, &txs->txs_dmamap);
231 if (err) {
232 device_printf(dev,
233 "unable to create TX DMA map %d, error = %d\n",
234 i, err);
235 }
236 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
237 }
238
239 /* Create the receive buffer DMA maps. */
240 for (i = 0; i < GLC_MAX_RX_PACKETS; i++) {
241 err = bus_dmamap_create(sc->sc_rxdma_tag, 0,
242 &sc->sc_rxsoft[i].rxs_dmamap);
243 if (err) {
244 device_printf(dev,
245 "unable to create RX DMA map %d, error = %d\n",
246 i, err);
247 }
248 sc->sc_rxsoft[i].rxs_mbuf = NULL;
249 }
250
251 /*
252 * Attach to network stack
253 */
254
255 if_initname(sc->sc_ifp, device_get_name(dev), device_get_unit(dev));
256 sc->sc_ifp->if_mtu = ETHERMTU;
257 sc->sc_ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
258 sc->sc_ifp->if_hwassist = CSUM_TCP | CSUM_UDP;
259 sc->sc_ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_RXCSUM;
260 sc->sc_ifp->if_capenable = IFCAP_HWCSUM | IFCAP_RXCSUM;
261 sc->sc_ifp->if_start = glc_start;
262 sc->sc_ifp->if_ioctl = glc_ioctl;
263 sc->sc_ifp->if_init = glc_init;
264
265 ifmedia_init(&sc->sc_media, IFM_IMASK, glc_media_change,
266 glc_media_status);
267 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
268 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
269 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
270 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
271 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
272 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
273 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
274
275 IFQ_SET_MAXLEN(&sc->sc_ifp->if_snd, GLC_MAX_TX_PACKETS);
276 sc->sc_ifp->if_snd.ifq_drv_maxlen = GLC_MAX_TX_PACKETS;
277 IFQ_SET_READY(&sc->sc_ifp->if_snd);
278
279 ether_ifattach(sc->sc_ifp, sc->sc_enaddr);
280 sc->sc_ifp->if_hwassist = 0;
281
282 return (0);
283
284 mtx_destroy(&sc->sc_mtx);
285 if_free(sc->sc_ifp);
286 return (ENXIO);
287}
288
289static void
290glc_init_locked(struct glc_softc *sc)
291{
292 int i, error;
293 struct glc_rxsoft *rxs;
294 struct glc_txsoft *txs;
295
296 mtx_assert(&sc->sc_mtx, MA_OWNED);
297
298 lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
299 lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0);
300
301 glc_set_multicast(sc);
302
303 for (i = 0; i < GLC_MAX_RX_PACKETS; i++) {
304 rxs = &sc->sc_rxsoft[i];
305 rxs->rxs_desc_slot = i;
306
307 if (rxs->rxs_mbuf == NULL) {
308 glc_add_rxbuf(sc, i);
309
310 if (rxs->rxs_mbuf == NULL) {
311 rxs->rxs_desc_slot = -1;
312 break;
313 }
314 }
315
316 glc_add_rxbuf_dma(sc, i);
317 bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
318 BUS_DMASYNC_PREREAD);
319 }
320
321 /* Clear TX dirty queue */
322 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
323 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
324 bus_dmamap_unload(sc->sc_txdma_tag, txs->txs_dmamap);
325
326 if (txs->txs_mbuf != NULL) {
327 m_freem(txs->txs_mbuf);
328 txs->txs_mbuf = NULL;
329 }
330
331 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
332 }
333 sc->first_used_txdma_slot = -1;
334 sc->bsy_txdma_slots = 0;
335
336 error = lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev,
337 sc->sc_rxsoft[0].rxs_desc, 0);
338 if (error != 0)
339 device_printf(sc->sc_self,
340 "lv1_net_start_rx_dma error: %d\n", error);
341
342 sc->sc_ifp->if_drv_flags |= IFF_DRV_RUNNING;
343 sc->sc_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
344 sc->sc_ifpflags = sc->sc_ifp->if_flags;
345
346 sc->sc_wdog_timer = 0;
347 callout_reset(&sc->sc_tick_ch, hz, glc_tick, sc);
348}
349
350static void
351glc_stop(void *xsc)
352{
353 struct glc_softc *sc = xsc;
354
355 mtx_assert(&sc->sc_mtx, MA_OWNED);
356
357 lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
358 lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0);
359}
360
361static void
362glc_init(void *xsc)
363{
364 struct glc_softc *sc = xsc;
365
366 mtx_lock(&sc->sc_mtx);
367 glc_init_locked(sc);
368 mtx_unlock(&sc->sc_mtx);
369}
370
371static void
372glc_tick(void *xsc)
373{
374 struct glc_softc *sc = xsc;
375
376 mtx_assert(&sc->sc_mtx, MA_OWNED);
377
379 /*
380 * XXX: Sometimes the RX queue gets stuck. Poke it periodically until
381 * we figure out why. This will fail harmlessly if the RX queue is
382 * already running.
383 */
384 lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev,
385 sc->sc_rxsoft[sc->sc_next_rxdma_slot].rxs_desc, 0);
386
387 if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) {
388 callout_reset(&sc->sc_tick_ch, hz, glc_tick, sc);
389 return;
390 }
391
392 /* Problems */
393 device_printf(sc->sc_self, "device timeout\n");
394
395 glc_init_locked(sc);
396}
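
Two things happen on every tick above: the RX DMA engine is re-kicked (the XXX
workaround added in this revision), and the watchdog counter runs down.
glc_start_locked() arms sc_wdog_timer to 5 and glc_txintr() zeroes it once the
dirty queue drains, so the timeout path fires only after five consecutive ticks
with transmit work still outstanding. A standalone model of that countdown
(all names hypothetical):

	#include <stdio.h>

	static int wdog_timer;	/* 5 while TX is pending, 0 when idle */

	/* Returns 1 when the watchdog fires, mirroring glc_tick()'s test. */
	static int
	tick(void)
	{
		if (wdog_timer == 0 || --wdog_timer != 0)
			return (0);	/* idle, or still counting down */
		return (1);		/* reached zero: device timeout */
	}

	int
	main(void)
	{
		wdog_timer = 5;		/* as glc_start_locked() arms it */
		for (int s = 1; s <= 6; s++)
			printf("tick %d: %s\n", s, tick() ? "timeout" : "ok");
		return (0);
	}
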
397
398static void
399glc_start_locked(struct ifnet *ifp)
400{
401 struct glc_softc *sc = ifp->if_softc;
402 bus_addr_t first, pktdesc;
403 int kickstart = 0;
404 int error;
405 struct mbuf *mb_head;
406
407 mtx_assert(&sc->sc_mtx, MA_OWNED);
408 first = 0;
409
410 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
411 IFF_DRV_RUNNING)
412 return;
413
414 if (STAILQ_EMPTY(&sc->sc_txdirtyq))
415 kickstart = 1;
416
417 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
418 IFQ_DRV_DEQUEUE(&ifp->if_snd, mb_head);
419
420 if (mb_head == NULL)
421 break;
422
423 /* Check if the ring buffer is full */
424 if (sc->bsy_txdma_slots > 125) {
425 /* Put the packet back and stop */
426 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
427 IFQ_DRV_PREPEND(&ifp->if_snd, mb_head);
428 break;
429 }
430
431 BPF_MTAP(ifp, mb_head);
432
433 if (sc->sc_tx_vlan >= 0)
434 mb_head = ether_vlanencap(mb_head, sc->sc_tx_vlan);
435
436 if (glc_encap(sc, &mb_head, &pktdesc)) {
437 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
438 break;
439 }
440
441 if (first == 0)
442 first = pktdesc;
443 }
444
445 if (kickstart && first != 0) {
446 error = lv1_net_start_tx_dma(sc->sc_bus, sc->sc_dev, first, 0);
447 if (error != 0)
448 device_printf(sc->sc_self,
449 "lv1_net_start_tx_dma error: %d\n", error);
450 sc->sc_wdog_timer = 5;
451 }
452}
453
454static void
455glc_start(struct ifnet *ifp)
456{
457 struct glc_softc *sc = ifp->if_softc;
458
459 mtx_lock(&sc->sc_mtx);
460 glc_start_locked(ifp);
461 mtx_unlock(&sc->sc_mtx);
462}
463
464static int
465glc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
466{
467 struct glc_softc *sc = ifp->if_softc;
468 struct ifreq *ifr = (struct ifreq *)data;
469 int err = 0;
470
471 switch (cmd) {
472 case SIOCSIFFLAGS:
473 mtx_lock(&sc->sc_mtx);
474 if ((ifp->if_flags & IFF_UP) != 0) {
475 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
476 ((ifp->if_flags ^ sc->sc_ifpflags) &
477 (IFF_ALLMULTI | IFF_PROMISC)) != 0)
478 glc_set_multicast(sc);
479 else
480 glc_init_locked(sc);
481 }
482 else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
483 glc_stop(sc);
484 sc->sc_ifpflags = ifp->if_flags;
485 mtx_unlock(&sc->sc_mtx);
486 break;
487 case SIOCADDMULTI:
488 case SIOCDELMULTI:
489 mtx_lock(&sc->sc_mtx);
490 glc_set_multicast(sc);
491 mtx_unlock(&sc->sc_mtx);
492 break;
493 case SIOCGIFMEDIA:
494 case SIOCSIFMEDIA:
495 err = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
496 break;
497 default:
498 err = ether_ioctl(ifp, cmd, data);
499 break;
500 }
501
502 return (err);
503}
504
505static void
506glc_set_multicast(struct glc_softc *sc)
507{
508 struct ifnet *ifp = sc->sc_ifp;
509 struct ifmultiaddr *inm;
510 uint64_t addr;
511 int naddrs;
512
513 /* Clear multicast filter */
514 lv1_net_remove_multicast_address(sc->sc_bus, sc->sc_dev, 0, 1);
515
516 /* Add broadcast */
517 lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev,
518 0xffffffffffffL, 0);
519
520 if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
521 lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev, 0, 1);
522 } else {
523 if_maddr_rlock(ifp);
524 naddrs = 1; /* Include broadcast */
525 TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
526 if (inm->ifma_addr->sa_family != AF_LINK)
527 continue;
528 addr = 0;
529 memcpy(&((uint8_t *)(&addr))[2],
530 LLADDR((struct sockaddr_dl *)inm->ifma_addr),
531 ETHER_ADDR_LEN);
532
533 lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev,
534 addr, 0);
535
536 /*
537 * Filter can only hold 32 addresses, so fall back to
538 * the IFF_ALLMULTI case if we have too many.
539 */
540 if (++naddrs >= 32) {
541 lv1_net_add_multicast_address(sc->sc_bus,
542 sc->sc_dev, 0, 1);
543 break;
544 }
545 }
546 if_maddr_runlock(ifp);
547 }
548}
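
The hypervisor calls above take each filter entry as a uint64_t with the six
address bytes right-justified; the memcpy into bytes [2..7] builds exactly that
layout on big-endian PowerPC. An endian-independent equivalent, as a
self-contained sketch:

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Pack a 6-byte Ethernet address into the low 48 bits of a
	 * uint64_t, the form lv1_net_add_multicast_address() expects.
	 */
	static uint64_t
	mac_to_u64(const uint8_t mac[6])
	{
		uint64_t addr = 0;

		for (int i = 0; i < 6; i++)
			addr = (addr << 8) | mac[i];
		return (addr);
	}

	int
	main(void)
	{
		const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

		/* Prints 0xffffffffffff, the broadcast entry added above. */
		printf("0x%llx\n", (unsigned long long)mac_to_u64(bcast));
		return (0);
	}
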
549
550static int
551glc_add_rxbuf(struct glc_softc *sc, int idx)
552{
553 struct glc_rxsoft *rxs = &sc->sc_rxsoft[idx];
554 struct mbuf *m;
555 bus_dma_segment_t segs[1];
556 int error, nsegs;
557
558 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
559 if (m == NULL)
560 return (ENOBUFS);
561 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
562
563 if (rxs->rxs_mbuf != NULL) {
564 bus_dmamap_sync(sc->sc_rxdma_tag, rxs->rxs_dmamap,
565 BUS_DMASYNC_POSTREAD);
566 bus_dmamap_unload(sc->sc_rxdma_tag, rxs->rxs_dmamap);
567 }
568
569 error = bus_dmamap_load_mbuf_sg(sc->sc_rxdma_tag, rxs->rxs_dmamap, m,
570 segs, &nsegs, BUS_DMA_NOWAIT);
571 if (error != 0) {
572 device_printf(sc->sc_self,
573 "cannot load RS DMA map %d, error = %d\n", idx, error);
574 m_freem(m);
575 return (error);
576 }
577 /* If nsegs is wrong then the stack is corrupt. */
578 KASSERT(nsegs == 1,
579 ("%s: too many DMA segments (%d)", __func__, nsegs));
580 rxs->rxs_mbuf = m;
581 rxs->segment = segs[0];
582
583 bus_dmamap_sync(sc->sc_rxdma_tag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);
584
585 return (0);
586}
587
588static int
589glc_add_rxbuf_dma(struct glc_softc *sc, int idx)
590{
591 struct glc_rxsoft *rxs = &sc->sc_rxsoft[idx];
592
593 bzero(&sc->sc_rxdmadesc[idx], sizeof(sc->sc_rxdmadesc[idx]));
594 sc->sc_rxdmadesc[idx].paddr = rxs->segment.ds_addr;
595 sc->sc_rxdmadesc[idx].len = rxs->segment.ds_len;
596 sc->sc_rxdmadesc[idx].next = sc->sc_rxdmadesc_phys +
597 ((idx + 1) % GLC_MAX_RX_PACKETS)*sizeof(sc->sc_rxdmadesc[idx]);
598 sc->sc_rxdmadesc[idx].cmd_stat = GELIC_DESCR_OWNED;
599
600 rxs->rxs_desc_slot = idx;
601 rxs->rxs_desc = sc->sc_rxdmadesc_phys + idx*sizeof(struct glc_dmadesc);
602
603 return (0);
604}
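
The 'next' arithmetic above is what chains the descriptors into a ring: slot i
points at the bus address of slot (i + 1) mod GLC_MAX_RX_PACKETS, so the last
slot wraps back to the first. A small sketch of the addresses this produces
(the descriptor count and descriptor size are assumed stand-ins, not values
taken from if_glcreg.h):

	#include <stdint.h>
	#include <stdio.h>

	#define NDESC		64	/* assumed stand-in for GLC_MAX_RX_PACKETS */
	#define DESC_SIZE	32	/* assumed sizeof(struct glc_dmadesc) */

	int
	main(void)
	{
		uint64_t base = 0x10000;	/* example bus address of the block */

		/*
		 * Chain links for the last two slots: the final one wraps
		 * back to 'base', closing the ring the hardware walks.
		 */
		for (int i = NDESC - 2; i < NDESC; i++)
			printf("desc[%d].next = 0x%llx\n", i, (unsigned long long)
			    (base + ((i + 1) % NDESC) * DESC_SIZE));
		return (0);
	}
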
605
606static int
607glc_encap(struct glc_softc *sc, struct mbuf **m_head, bus_addr_t *pktdesc)
608{
609 bus_dma_segment_t segs[16];
610 struct glc_txsoft *txs;
611 struct mbuf *m;
612 bus_addr_t firstslotphys;
613 int i, idx, nsegs, nsegs_max;
614 int err = 0;
615
616 /* Max number of segments is the number of free DMA slots */
617 nsegs_max = 128 - sc->bsy_txdma_slots;
618
619 if (nsegs_max > 16 || sc->first_used_txdma_slot < 0)
620 nsegs_max = 16;
621
622 /* Get a work queue entry. */
623 if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
624 /* Ran out of descriptors. */
625 return (ENOBUFS);
626 }
627
628 nsegs = 0;
629 for (m = *m_head; m != NULL; m = m->m_next)
630 nsegs++;
631
632 if (nsegs > nsegs_max) {
633 m = m_collapse(*m_head, M_DONTWAIT, nsegs_max);
634 if (m == NULL) {
635 m_freem(*m_head);
636 *m_head = NULL;
637 return (ENOBUFS);
638 }
639 *m_head = m;
640 }
641
642 err = bus_dmamap_load_mbuf_sg(sc->sc_txdma_tag, txs->txs_dmamap,
643 *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
644 if (err != 0) {
645 m_freem(*m_head);
646 *m_head = NULL;
647 return (err);
648 }
649
650 KASSERT(nsegs <= 128 - sc->bsy_txdma_slots,
651 ("GLC: Mapped too many (%d) DMA segments with %d available",
652 nsegs, 128 - sc->bsy_txdma_slots));
653
654 if (nsegs == 0) {
655 m_freem(*m_head);
656 *m_head = NULL;
657 return (EIO);
658 }
659
660 txs->txs_ndescs = nsegs;
661 txs->txs_firstdesc = sc->next_txdma_slot;
662
663 idx = txs->txs_firstdesc;
664 firstslotphys = sc->sc_txdmadesc_phys +
665 txs->txs_firstdesc*sizeof(struct glc_dmadesc);
666
667 for (i = 0; i < nsegs; i++) {
668 bzero(&sc->sc_txdmadesc[idx], sizeof(sc->sc_txdmadesc[idx]));
669 sc->sc_txdmadesc[idx].paddr = segs[i].ds_addr;
670 sc->sc_txdmadesc[idx].len = segs[i].ds_len;
671 sc->sc_txdmadesc[idx].next = sc->sc_txdmadesc_phys +
672 ((idx + 1) % GLC_MAX_TX_PACKETS)*sizeof(struct glc_dmadesc);
673 sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_NOIPSEC;
674
675 if (i+1 == nsegs) {
676 txs->txs_lastdesc = idx;
677 sc->sc_txdmadesc[idx].next = 0;
678 sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_LAST;
679 }
680
681 if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
682 sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_CSUM_TCP;
683 if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
684 sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_CSUM_UDP;
685 sc->sc_txdmadesc[idx].cmd_stat |= GELIC_DESCR_OWNED;
686
687 idx = (idx + 1) % GLC_MAX_TX_PACKETS;
688 }
689 sc->next_txdma_slot = idx;
690 sc->bsy_txdma_slots += nsegs;
691 if (txs->txs_firstdesc != 0)
692 idx = txs->txs_firstdesc - 1;
693 else
694 idx = GLC_MAX_TX_PACKETS - 1;
695
696 if (sc->first_used_txdma_slot < 0)
697 sc->first_used_txdma_slot = txs->txs_firstdesc;
698
699 bus_dmamap_sync(sc->sc_txdma_tag, txs->txs_dmamap,
700 BUS_DMASYNC_PREWRITE);
701 sc->sc_txdmadesc[idx].next = firstslotphys;
702
703 STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
704 STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
705 txs->txs_mbuf = *m_head;
706 *pktdesc = firstslotphys;
707
708 return (0);
709}
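
Worth noting in glc_encap() is the publication order: every descriptor of the
new packet is filled in and marked GELIC_DESCR_OWNED, the map is synced
PREWRITE, and only then is the ring slot just before txs_firstdesc pointed at
the new chain, so a DMA engine already walking the ring never sees a half-built
packet. The if/else computing that back-link is a previous-slot computation
with wraparound; an equivalent one-liner (fragment, same variables as above):

	/* Equivalent to the if/else above: step back one ring slot. */
	idx = (txs->txs_firstdesc + GLC_MAX_TX_PACKETS - 1) % GLC_MAX_TX_PACKETS;
	sc->sc_txdmadesc[idx].next = firstslotphys;	/* publish last */
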
710
711static void
712glc_rxintr(struct glc_softc *sc)
713{
714 int i, restart_rxdma, error;
715 struct mbuf *m;
716 struct ifnet *ifp = sc->sc_ifp;
717
718 bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
710 BUS_DMASYNC_PREWRITE);
719 BUS_DMASYNC_POSTREAD);
720
721 restart_rxdma = 0;
722 while ((sc->sc_rxdmadesc[sc->sc_next_rxdma_slot].cmd_stat &
723 GELIC_DESCR_OWNED) == 0) {
724 i = sc->sc_next_rxdma_slot;
725 sc->sc_next_rxdma_slot++;
726 if (sc->sc_next_rxdma_slot >= GLC_MAX_RX_PACKETS)
727 sc->sc_next_rxdma_slot = 0;
728
729 if (sc->sc_rxdmadesc[i].cmd_stat & GELIC_CMDSTAT_CHAIN_END)
730 restart_rxdma = 1;
731
732 if (sc->sc_rxdmadesc[i].rxerror & GELIC_RXERRORS) {
733 ifp->if_ierrors++;
734 goto requeue;
735 }
736
737 m = sc->sc_rxsoft[i].rxs_mbuf;
738 if (sc->sc_rxdmadesc[i].data_stat & GELIC_RX_IPCSUM) {
739 m->m_pkthdr.csum_flags |=
740 CSUM_IP_CHECKED | CSUM_IP_VALID;
741 }
742 if (sc->sc_rxdmadesc[i].data_stat & GELIC_RX_TCPUDPCSUM) {
743 m->m_pkthdr.csum_flags |=
744 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
745 m->m_pkthdr.csum_data = 0xffff;
746 }
747
748 if (glc_add_rxbuf(sc, i)) {
749 ifp->if_ierrors++;
750 goto requeue;
751 }
752
753 ifp->if_ipackets++;
754 m->m_pkthdr.rcvif = ifp;
755 m->m_len = sc->sc_rxdmadesc[i].valid_size;
756 m->m_pkthdr.len = m->m_len;
741 sc->sc_next_rxdma_slot++;
742 if (sc->sc_next_rxdma_slot >= GLC_MAX_RX_PACKETS)
743 sc->sc_next_rxdma_slot = 0;
757
758 if (sc->sc_rx_vlan >= 0)
759 m_adj(m, 2);
760
761 mtx_unlock(&sc->sc_mtx);
762 (*ifp->if_input)(ifp, m);
763 mtx_lock(&sc->sc_mtx);
764
765 requeue:
753 if (sc->sc_rxdmadesc[i].cmd_stat & GELIC_CMDSTAT_CHAIN_END)
754 restart_rxdma = 1;
766 glc_add_rxbuf_dma(sc, i);
756 if (restart_rxdma) {
757 error = lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev,
758 sc->sc_rxsoft[i].rxs_desc, 0);
759 if (error != 0)
760 device_printf(sc->sc_self,
761 "lv1_net_start_rx_dma error: %d\n", error);
762 }
767 }
768
769 bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
770 BUS_DMASYNC_PREWRITE);
771
772 if (restart_rxdma) {
773 error = lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev,
774 sc->sc_rxsoft[sc->sc_next_rxdma_slot].rxs_desc, 0);
775 if (error != 0)
776 device_printf(sc->sc_self,
777 "lv1_net_start_rx_dma error: %d\n", error);
778 }
779}
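
The sync changes in this hunk follow the canonical busdma pattern for a
descriptor ring the device writes: POSTREAD before the CPU inspects status the
hardware has written, PREWRITE after the CPU has rewritten descriptors and
before ownership returns to the device. In outline, using the driver's own tag
and map:

	bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
	    BUS_DMASYNC_POSTREAD);
	/* ... read cmd_stat/rxerror, pass mbufs up, requeue buffers ... */
	bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map,
	    BUS_DMASYNC_PREWRITE);
	/* ... only now restart the DMA engine ... */
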
780
781static void
782glc_txintr(struct glc_softc *sc)
783{
784 struct ifnet *ifp = sc->sc_ifp;
785 struct glc_txsoft *txs;
786 int progress = 0, kickstart = 0, error;
787
788 bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_txdmadesc_map,
789 BUS_DMASYNC_POSTREAD);
790
791 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
792 if (sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat
793 & GELIC_DESCR_OWNED)
794 break;
795
796 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
797 bus_dmamap_unload(sc->sc_txdma_tag, txs->txs_dmamap);
798 sc->bsy_txdma_slots -= txs->txs_ndescs;
799
800 if (txs->txs_mbuf != NULL) {
801 m_freem(txs->txs_mbuf);
802 txs->txs_mbuf = NULL;
803 }
804
805 if ((sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat & 0xf0000000)
806 != 0) {
807 lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0);
808 kickstart = 1;
809 ifp->if_oerrors++;
810 }
811
812 if (sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat &
813 GELIC_CMDSTAT_CHAIN_END)
814 kickstart = 1;
815
816 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
817 ifp->if_opackets++;
818 progress = 1;
819 }
820
821 if (txs != NULL)
822 sc->first_used_txdma_slot = txs->txs_firstdesc;
823 else
824 sc->first_used_txdma_slot = -1;
825
808 if (kickstart && txs != NULL) {
826 if (kickstart || txs != NULL) {
827 /* Speculatively (or necessarily) start the TX queue again */
828 error = lv1_net_start_tx_dma(sc->sc_bus, sc->sc_dev,
829 sc->sc_txdmadesc_phys +
830 txs->txs_firstdesc*sizeof(struct glc_dmadesc), 0);
831 if (error != 0)
832 device_printf(sc->sc_self,
833 "lv1_net_start_tx_dma error: %d\n", error);
834 }
835
836 if (progress) {
837 /*
838 * We freed some descriptors, so reset IFF_DRV_OACTIVE
839 * and restart.
840 */
841 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
842 sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;
843
844 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
845 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
846 glc_start_locked(ifp);
847 }
848}
849
850static int
851glc_intr_filter(void *xsc)
852{
853 struct glc_softc *sc = xsc;
854
855 powerpc_sync();
856 atomic_set_64(&sc->sc_interrupt_status, *sc->sc_hwirq_status);
857 return (FILTER_SCHEDULE_THREAD);
858}
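
atomic_set_64() is an atomic bitwise OR, so if several interrupts arrive before
the interrupt thread gets to run, their status bits simply accumulate in
sc_interrupt_status; glc_intr() below then drains them all at once. The
handoff, in outline:

	/* filter (primary interrupt context): latch the hardware status */
	atomic_set_64(&sc->sc_interrupt_status, *sc->sc_hwirq_status);

	/* ithread (glc_intr, under the driver mutex): consume it */
	status = atomic_readandclear_64(&sc->sc_interrupt_status);
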
859
860static void
861glc_intr(void *xsc)
862{
863 struct glc_softc *sc = xsc;
864 uint64_t status, linkstat, junk;
865
866 mtx_lock(&sc->sc_mtx);
867
868 status = atomic_readandclear_64(&sc->sc_interrupt_status);
869
870 if (status == 0) {
871 mtx_unlock(&sc->sc_mtx);
872 return;
873 }
874
875 if (status & (GELIC_INT_RXDONE | GELIC_INT_RXFRAME))
876 glc_rxintr(sc);
877
878 if (status & (GELIC_INT_TXDONE | GELIC_INT_TX_CHAIN_END))
879 glc_txintr(sc);
880
881 if (status & GELIC_INT_PHY) {
882 lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_LINK_STATUS,
883 GELIC_VLAN_TX_ETHERNET, 0, 0, &linkstat, &junk);
884
885 linkstat = (linkstat & GELIC_LINK_UP) ?
886 LINK_STATE_UP : LINK_STATE_DOWN;
887 if (linkstat != sc->sc_ifp->if_link_state)
888 if_link_state_change(sc->sc_ifp, linkstat);
889 }
890
891 mtx_unlock(&sc->sc_mtx);
892}
893
894static void
895glc_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
896{
897 struct glc_softc *sc = ifp->if_softc;
898 uint64_t status, junk;
899
900 ifmr->ifm_status = IFM_AVALID;
901 ifmr->ifm_active = IFM_ETHER;
902
903 lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_LINK_STATUS,
904 GELIC_VLAN_TX_ETHERNET, 0, 0, &status, &junk);
905
906 if (status & GELIC_LINK_UP)
907 ifmr->ifm_status |= IFM_ACTIVE;
908
909 if (status & GELIC_SPEED_10)
910 ifmr->ifm_active |= IFM_10_T;
911 else if (status & GELIC_SPEED_100)
912 ifmr->ifm_active |= IFM_100_TX;
913 else if (status & GELIC_SPEED_1000)
914 ifmr->ifm_active |= IFM_1000_T;
915
916 if (status & GELIC_FULL_DUPLEX)
917 ifmr->ifm_active |= IFM_FDX;
918 else
919 ifmr->ifm_active |= IFM_HDX;
920}
921
922static int
923glc_media_change(struct ifnet *ifp)
924{
925 struct glc_softc *sc = ifp->if_softc;
926 uint64_t mode, junk;
927 int result;
928
929 if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
930 return (EINVAL);
931
932 switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
933 case IFM_AUTO:
934 mode = GELIC_AUTO_NEG;
935 break;
936 case IFM_10_T:
937 mode = GELIC_SPEED_10;
938 break;
939 case IFM_100_TX:
940 mode = GELIC_SPEED_100;
941 break;
942 case IFM_1000_T:
943 mode = GELIC_SPEED_1000 | GELIC_FULL_DUPLEX;
944 break;
945 default:
946 return (EINVAL);
947 }
948
949 if (IFM_OPTIONS(sc->sc_media.ifm_media) & IFM_FDX)
950 mode |= GELIC_FULL_DUPLEX;
951
952 result = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_SET_LINK_MODE,
953 GELIC_VLAN_TX_ETHERNET, mode, 0, &junk, &junk);
954
955 return (result ? EIO : 0);
956}
957