/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>

#include "xgbe.h"
#include "xgbe-common.h"

static int xgbe_one_poll(struct xgbe_channel *channel, int budget);
static int xgbe_all_poll(struct xgbe_prv_data *pdata, int budget);

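/*
 * Allocate one xgbe_channel per DMA queue pair along with the Tx and
 * Rx ring tracking structures.  A channel is only given a Tx (or Rx)
 * ring while its index is below tx_ring_count (or rx_ring_count), so
 * with, say, 4 Tx and 2 Rx queues, channels 2 and 3 carry Tx rings
 * only.
 */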
static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel_mem, *channel;
	struct xgbe_ring *tx_ring, *rx_ring;
	unsigned int count, i;
	int ret = -ENOMEM;

	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);

	channel_mem = malloc(count * sizeof(struct xgbe_channel), M_AXGBE,
	    M_WAITOK | M_ZERO);
	tx_ring = malloc(pdata->tx_ring_count * sizeof(struct xgbe_ring),
	    M_AXGBE, M_WAITOK | M_ZERO);
	rx_ring = malloc(pdata->rx_ring_count * sizeof(struct xgbe_ring),
	    M_AXGBE, M_WAITOK | M_ZERO);

	for (i = 0, channel = channel_mem; i < count; i++, channel++) {
		snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
		channel->pdata = pdata;
		channel->queue_index = i;
		channel->dma_tag = rman_get_bustag(pdata->xgmac_res);
		bus_space_subregion(channel->dma_tag,
		    rman_get_bushandle(pdata->xgmac_res),
		    DMA_CH_BASE + (DMA_CH_INC * i), DMA_CH_INC,
		    &channel->dma_handle);

		if (pdata->per_channel_irq) {
			if (pdata->chan_irq_res[i] == NULL)
				goto err_irq;

			channel->dma_irq_res = pdata->chan_irq_res[i];
		}

		/* Index into the ring arrays rather than advancing the
		 * base pointers, so the error path below always frees
		 * the addresses returned by malloc().
		 */
		if (i < pdata->tx_ring_count) {
			spin_lock_init(&tx_ring[i].lock);
			channel->tx_ring = &tx_ring[i];
		}

		if (i < pdata->rx_ring_count) {
			spin_lock_init(&rx_ring[i].lock);
			channel->rx_ring = &rx_ring[i];
		}
	}

	pdata->channel = channel_mem;
	pdata->channel_count = count;

	return 0;

err_irq:
	free(rx_ring, M_AXGBE);
	free(tx_ring, M_AXGBE);
	free(channel_mem, M_AXGBE);

	return ret;
}

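/*
 * The Tx and Rx rings were carved out of two single allocations in
 * xgbe_alloc_channels(), so the first channel's ring pointers are the
 * array bases and freeing them releases every ring at once.
 */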
static void xgbe_free_channels(struct xgbe_prv_data *pdata)
{
	if (!pdata->channel)
		return;

	free(pdata->channel->rx_ring, M_AXGBE);
	free(pdata->channel->tx_ring, M_AXGBE);
	free(pdata->channel, M_AXGBE);

	pdata->channel = NULL;
	pdata->channel_count = 0;
}

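/*
 * Ring occupancy helpers.  'cur' and 'dirty' are free-running unsigned
 * counters, so 'cur - dirty' yields the number of in-flight descriptors
 * even after either counter wraps; the counters are reduced modulo the
 * ring size only when a descriptor slot is actually dereferenced.
 */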
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
	return (ring->rdesc_count - (ring->cur - ring->dirty));
}

static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
{
	return (ring->cur - ring->dirty);
}

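/*
 * Verify that 'count' descriptors are available before accepting a
 * frame.  Returns 0 when there is room and EFBIG when the caller must
 * drop the frame; any descriptors batched under xmit_more are pushed
 * to the hardware first so the ring keeps draining.
 */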
static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
				    struct xgbe_ring *ring, unsigned int count)
{
	struct xgbe_prv_data *pdata = channel->pdata;

	if (count > xgbe_tx_avail_desc(ring)) {
		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_if.tx_start_xmit(channel, ring);

		return EFBIG;
	}

	return 0;
}

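/*
 * Size the Rx buffers for a given MTU: add the Ethernet header, FCS
 * and VLAN tag overhead, clamp the result, and round up to the buffer
 * alignment.  As a worked example (assuming the usual ETH_HLEN of 14,
 * ETH_FCS_LEN of 4, VLAN_HLEN of 4 and a 64-byte XGBE_RX_BUF_ALIGN),
 * an MTU of 1500 yields 1522 bytes, which rounds up to 1536.
 */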
static int xgbe_calc_rx_buf_size(struct ifnet *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	if (mtu > XGMAC_JUMBO_PACKET_MTU) {
		return -EINVAL;
	}

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	/* Clamp to the supported range before aligning */
	rx_buf_size = MAX(rx_buf_size, XGBE_RX_MIN_BUF_SIZE);
	rx_buf_size = MIN(rx_buf_size, PAGE_SIZE);

	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
		      ~(XGBE_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}

static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	enum xgbe_int int_id;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_if->enable_int(channel, int_id);
	}
}

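/*
 * Device-level interrupt handler.  DMA_ISR carries one status bit per
 * channel plus a MAC summary bit, so the handler walks the set channel
 * bits, polls Tx/Rx work inline when per-channel interrupts are not in
 * use, and finally checks the MAC for MMC statistics interrupts.
 */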
static void xgbe_isr(void *data)
{
	struct xgbe_prv_data *pdata = data;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int dma_isr, dma_ch_isr;
	unsigned int mac_isr;
	unsigned int i;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts, so checking this one register for a non-zero
	 * value is sufficient.
	 */
	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
	if (!dma_isr)
		return;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel + i;

		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);

		/* The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Be sure per channel interrupts
		 * are not in use before polling all channels from here.
		 */
		if (!pdata->per_channel_irq &&
		    (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
		     XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
			xgbe_all_poll(pdata, 16);
		}

		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
			pdata->ext_stats.rx_buffer_unavailable++;

		/* Restart the device on a Fatal Bus Error */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
			taskqueue_enqueue(taskqueue_thread,
			    &pdata->restart_work);

		/* Clear all interrupt signals */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
	}

	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
			hw_if->tx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
			hw_if->rx_mmc_int(pdata);
	}
}

static void xgbe_dma_isr(void *data)
{
	struct xgbe_channel *channel = data;

	xgbe_one_poll(channel, 16);
}

static void xgbe_service(void *ctx, int pending)
{
	struct xgbe_prv_data *pdata = ctx;

	pdata->phy_if.phy_status(pdata);
}

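/*
 * Periodic service callout: queue the PHY status task and re-arm for
 * one second (hz ticks) later, so link state keeps being re-checked
 * for as long as the timer stays scheduled.
 */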
static void xgbe_service_timer(void *data)
{
	struct xgbe_prv_data *pdata = data;

	DBGPR("--> xgbe_service_timer\n");
	taskqueue_enqueue(pdata->dev_workqueue, &pdata->service_work);

	callout_reset(&pdata->service_timer, hz, xgbe_service_timer, pdata);
	DBGPR("<-- xgbe_service_timer\n");
}

static void xgbe_init_timers(struct xgbe_prv_data *pdata)
{

	callout_init(&pdata->service_timer, 1);
}

static void xgbe_start_timers(struct xgbe_prv_data *pdata)
{
	callout_reset(&pdata->service_timer, hz, xgbe_service_timer, pdata);
}

static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
{

	callout_drain(&pdata->service_timer);
}

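/*
 * Snapshot the MAC_HWF0R/1R/2R hardware feature registers into
 * hw_feat.  Most fields are simple presence flags; the hash table
 * size, DMA address width and the queue/channel/TC counts are
 * hardware encodings that are translated into real values at the end
 * of the function.
 */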
void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

	DBGPR("-->xgbe_get_all_hw_features\n");

	mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts          = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac    = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					      ADDMACADRSEL);
	hw_feat->ts_src      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt        = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num  = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	DBGPR("<--xgbe_get_all_hw_features\n");
}

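/*
 * Install the device-level interrupt handler and, when per-channel DMA
 * interrupts are in use, one handler per channel.  On failure the
 * already-installed channel handlers are torn down in reverse order
 * before the device handler itself is removed.
 */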
static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;
	int ret;

	ret = bus_setup_intr(pdata->dev, pdata->dev_irq_res,
	    INTR_MPSAFE | INTR_TYPE_NET, NULL, xgbe_isr, pdata,
	    &pdata->dev_irq_tag);
	if (ret) {
		return ret;
	}

	if (!pdata->per_channel_irq)
		return 0;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ret = bus_setup_intr(pdata->dev, channel->dma_irq_res,
		    INTR_MPSAFE | INTR_TYPE_NET, NULL, xgbe_dma_isr, channel,
		    &channel->dma_irq_tag);
		if (ret != 0) {
			goto err_irq;
		}
	}

	return 0;

err_irq:
	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
	for (i--, channel--; i < pdata->channel_count; i--, channel--)
		bus_teardown_intr(pdata->dev, channel->dma_irq_res,
		    channel->dma_irq_tag);

	bus_teardown_intr(pdata->dev, pdata->dev_irq_res, pdata->dev_irq_tag);

	return -ret;
}

static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	bus_teardown_intr(pdata->dev, pdata->dev_irq_res, pdata->dev_irq_tag);

	if (!pdata->per_channel_irq)
		return;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		bus_teardown_intr(pdata->dev, channel->dma_irq_res,
		    channel->dma_irq_tag);
}

void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_tx_coalesce\n");

	pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
	pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

	hw_if->config_tx_coalesce(pdata);

	DBGPR("<--xgbe_init_tx_coalesce\n");
}

void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_rx_coalesce\n");

	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
	pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

	hw_if->config_rx_coalesce(pdata);

	DBGPR("<--xgbe_init_rx_coalesce\n");
}

static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_tx_data\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_tx_data\n");
}

static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_rx_data\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_rx_data\n");
}

static int xgbe_phy_init(struct xgbe_prv_data *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;

	return pdata->phy_if.phy_reset(pdata);
}

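/*
 * Bring the device up: program the MAC/DMA, start the PHY, hook up the
 * interrupt handlers, enable the Tx/Rx paths and start the service
 * timer.  The error labels unwind these steps in reverse order.
 */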
static int xgbe_start(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	int ret;

	DBGPR("-->xgbe_start\n");

	hw_if->init(pdata);

	ret = phy_if->phy_start(pdata);
	if (ret)
		goto err_phy;

	ret = xgbe_request_irqs(pdata);
	if (ret)
		goto err_irqs;

	hw_if->enable_tx(pdata);
	hw_if->enable_rx(pdata);

	xgbe_enable_rx_tx_ints(pdata);

	xgbe_start_timers(pdata);
	taskqueue_enqueue(pdata->dev_workqueue, &pdata->service_work);

	DBGPR("<--xgbe_start\n");

	return 0;

err_irqs:
	phy_if->phy_stop(pdata);

err_phy:
	hw_if->exit(pdata);

	return ret;
}

static void xgbe_stop(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;

	DBGPR("-->xgbe_stop\n");

	xgbe_stop_timers(pdata);
	taskqueue_drain_all(pdata->dev_workqueue);

	hw_if->disable_tx(pdata);
	hw_if->disable_rx(pdata);

	xgbe_free_irqs(pdata);

	phy_if->phy_stop(pdata);

	hw_if->exit(pdata);

	DBGPR("<--xgbe_stop\n");
}

static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
{
	DBGPR("-->xgbe_restart_dev\n");

	/* If not running, "restart" will happen on open */
	if ((pdata->netdev->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	xgbe_stop(pdata);

	xgbe_free_tx_data(pdata);
	xgbe_free_rx_data(pdata);

	xgbe_start(pdata);

	DBGPR("<--xgbe_restart_dev\n");
}

static void xgbe_restart(void *ctx, int pending)
{
	struct xgbe_prv_data *pdata = ctx;

	xgbe_restart_dev(pdata);
}

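/*
 * Pre-compute the Tx descriptor count for an outgoing mbuf chain: each
 * mbuf consumes one descriptor per XGBE_TX_MAX_BUF_SIZE chunk of its
 * data.  For example (assuming a 16KB-class XGBE_TX_MAX_BUF_SIZE), a
 * chain of three 2KB mbufs needs three descriptors, while a single
 * mbuf longer than XGBE_TX_MAX_BUF_SIZE is split across two or more.
 */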
static void xgbe_packet_info(struct xgbe_prv_data *pdata,
			     struct xgbe_ring *ring, struct mbuf *m0,
			     struct xgbe_packet_data *packet)
{
	struct mbuf *m;
	unsigned int len;

	packet->m = m0;

	packet->rdesc_count = 0;

	packet->tx_packets = 1;
	packet->tx_bytes = m_length(m0, NULL);

	for (m = m0; m != NULL; m = m->m_next) {
		for (len = m->m_len; len != 0;) {
			packet->rdesc_count++;
			len -= MIN(len, XGBE_TX_MAX_BUF_SIZE);
		}
	}
}

int xgbe_open(struct ifnet *netdev)
{
	struct xgbe_prv_data *pdata = netdev->if_softc;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	DBGPR("-->xgbe_open\n");

	/* Initialize the phy */
	ret = xgbe_phy_init(pdata);
	if (ret)
		return ret;

	/* Calculate the Rx buffer size before allocating rings */
	ret = xgbe_calc_rx_buf_size(netdev, if_getmtu(netdev));
	if (ret < 0) {
		goto err_ptpclk;
	}
	pdata->rx_buf_size = ret;

	/* Allocate the channel and ring structures */
	ret = xgbe_alloc_channels(pdata);
	if (ret) {
		printf("xgbe_alloc_channels failed\n");
		goto err_ptpclk;
	}

	/* Allocate the ring descriptors and buffers */
	ret = desc_if->alloc_ring_resources(pdata);
	if (ret) {
		printf("desc_if->alloc_ring_resources failed\n");
		goto err_channels;
	}

	TASK_INIT(&pdata->service_work, 0, xgbe_service, pdata);
	TASK_INIT(&pdata->restart_work, 0, xgbe_restart, pdata);
	xgbe_init_timers(pdata);

	ret = xgbe_start(pdata);
	if (ret)
		goto err_rings;

	clear_bit(XGBE_DOWN, &pdata->dev_state);

	DBGPR("<--xgbe_open\n");

	return 0;

err_rings:
	desc_if->free_ring_resources(pdata);

err_channels:
	xgbe_free_channels(pdata);

err_ptpclk:

	return ret;
}

int xgbe_close(struct ifnet *netdev)
{
	struct xgbe_prv_data *pdata = netdev->if_softc;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;

	DBGPR("-->xgbe_close\n");

	/* Stop the device */
	xgbe_stop(pdata);

	/* Free the ring descriptors and buffers */
	desc_if->free_ring_resources(pdata);

	/* Free the channel and ring structures */
	xgbe_free_channels(pdata);

	set_bit(XGBE_DOWN, &pdata->dev_state);

	DBGPR("<--xgbe_close\n");

	return 0;
}

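/*
 * if_transmit entry point.  The mbuf is either handed to the hardware
 * or freed here, so the frame is always consumed; ENETDOWN is returned
 * only when the interface is down or the link has not come up.
 */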
int xgbe_xmit(struct ifnet *ifp, struct mbuf *m)
{
	struct xgbe_prv_data *pdata = ifp->if_softc;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_packet_data *packet;
	int ret;

	M_ASSERTPKTHDR(m);
	MPASS(m->m_nextpkt == NULL);

	if (__predict_false(test_bit(XGBE_DOWN, &pdata->dev_state) ||
	    !pdata->phy.link)) {
		m_freem(m);
		return (ENETDOWN);
	}

	channel = pdata->channel;
	ring = channel->tx_ring;
	packet = &ring->packet_data;

	/* Calculate preliminary packet info */
	memset(packet, 0, sizeof(*packet));
	xgbe_packet_info(pdata, ring, m, packet);

	/* Check that there are enough descriptors available */
	ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
	if (ret)
		goto tx_netdev_return;

	if (!desc_if->map_tx_skb(channel, m)) {
		goto tx_netdev_return;
	}

	/* Configure required descriptor fields for transmission */
	hw_if->dev_xmit(channel);

	return 0;

tx_netdev_return:
	/* Free the entire chain, not just the first mbuf */
	m_freem(m);

	return 0;
}

int xgbe_change_mtu(struct ifnet *netdev, int mtu)
{
	struct xgbe_prv_data *pdata = netdev->if_softc;
	int ret;

	DBGPR("-->xgbe_change_mtu\n");

	ret = xgbe_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return -ret;

	pdata->rx_buf_size = ret;
	netdev->if_mtu = mtu;

	xgbe_restart_dev(pdata);

	DBGPR("<--xgbe_change_mtu\n");

	return 0;
}

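/*
 * Re-arm consumed Rx descriptors: unmap each dirty entry, attach a
 * fresh buffer, reset the descriptor and finally write the ring tail
 * pointer so the DMA engine sees the replenished slots.  The barrier
 * orders the descriptor writes ahead of that doorbell write.
 */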
static void xgbe_rx_refresh(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;

	while (ring->dirty != ring->cur) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);

		/* Reset rdata values */
		desc_if->unmap_rdata(pdata, rdata);

		if (desc_if->map_rx_buffer(pdata, ring, rdata))
			break;

		hw_if->rx_desc_reset(pdata, rdata, ring->dirty);

		ring->dirty++;
	}

	/* Make sure everything is written before the register write */
	dsb(sy);

	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry */
	rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdata_paddr));
}

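/*
 * Reclaim completed Tx descriptors, at most XGBE_TX_DESC_MAX_PROC per
 * call.  A descriptor is only recycled once the hardware has released
 * it (the OWN bit is clear), and the barrier keeps the descriptor
 * field reads from being reordered ahead of that ownership check.
 */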
static int xgbe_tx_poll(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	int processed = 0;
	unsigned int cur;

	DBGPR("-->xgbe_tx_poll\n");

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	cur = ring->cur;

	/* Be sure we get ring->cur before accessing descriptor data */
	dsb(sy);

	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
	       (ring->dirty != cur)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
		rdesc = rdata->rdesc;

		if (!hw_if->tx_complete(rdesc))
			break;

		/* Make sure descriptor fields are read after reading the OWN
		 * bit */
		dsb(sy);

		/* Free the mbuf and reset the descriptor for re-use */
		desc_if->unmap_rdata(pdata, rdata);
		hw_if->tx_desc_reset(rdata);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		return 0;

	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);

	return processed;
}

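/*
 * Receive up to 'budget' packets on one channel.  A packet may span
 * several descriptors (split header and/or the INCOMPLETE attribute),
 * so descriptors are read until the packet is whole before the mbuf
 * chain is handed to if_input(); errored packets are dropped and their
 * buffers recycled.
 */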
static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct ifnet *ifp = pdata->netdev;
	struct mbuf *m;
	unsigned int incomplete, context_next;
	unsigned int received = 0;
	int packet_count = 0;

	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	incomplete = 0;
	context_next = 0;

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	packet = &ring->packet_data;
	while (packet_count < budget) {
		DBGPR("  cur = %u\n", ring->cur);

read_again:
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);

		if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
			xgbe_rx_refresh(channel);

		if (hw_if->dev_read(channel))
			break;

		m = rdata->mb;

		received++;
		ring->cur++;

		incomplete = XGMAC_GET_BITS(packet->attributes,
					    RX_PACKET_ATTRIBUTES,
					    INCOMPLETE);
		context_next = XGMAC_GET_BITS(packet->attributes,
					      RX_PACKET_ATTRIBUTES,
					      CONTEXT_NEXT);

		/* The packet is not yet complete, keep reading descriptors */
		if (incomplete || context_next) {
			goto read_again;
		}

		if (packet->errors) {
			rdata->mbuf_free = 1;
			goto next_packet;
		}
		rdata->mb = NULL;

		m->m_pkthdr.len = rdata->rx.hdr_len + rdata->rx.len;
		if (rdata->rx.hdr_len != 0) {
			m->m_len = rdata->rx.hdr_len;
			m->m_next->m_len = rdata->rx.len;
		} else {
			m->m_len = rdata->rx.len;
			m_freem(m->m_next);
			m->m_next = NULL;
		}
		if_setrcvif(m, ifp);
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		ifp->if_input(ifp, m);

next_packet:
		packet_count++;
	}

	DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);

	return packet_count;
}

static int xgbe_one_poll(struct xgbe_channel *channel, int budget)
{
	int processed = 0;

	DBGPR("-->xgbe_one_poll: budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xgbe_tx_poll(channel);

	/* Process Rx ring next */
	processed = xgbe_rx_poll(channel, budget);

	DBGPR("<--xgbe_one_poll: received = %d\n", processed);

	return processed;
}

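/*
 * Poll every channel, splitting the overall budget evenly across the
 * Rx rings and looping until either the budget is exhausted or a full
 * pass makes no progress.  With, say, a budget of 16 and 4 Rx rings,
 * each ring is polled for at most 4 packets per pass.
 */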
static int xgbe_all_poll(struct xgbe_prv_data *pdata, int budget)
{
	struct xgbe_channel *channel;
	int ring_budget;
	int processed, last_processed;
	unsigned int i;

	DBGPR("-->xgbe_all_poll: budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
	do {
		last_processed = processed;

		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			/* Cleanup Tx ring first */
			xgbe_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xgbe_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	DBGPR("<--xgbe_all_poll: received = %d\n", processed);

	return processed;
}
