/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/vnic/thunder_bgx.c 300294 2016-05-20 11:00:06Z wma $
 *
 */
#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/vnic/thunder_bgx.c 300294 2016-05-20 11:00:06Z wma $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>

#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "thunder_bgx.h"
#include "thunder_bgx_var.h"
#include "nic_reg.h"
#include "nic.h"

#include "lmac_if.h"

#define	THUNDER_BGX_DEVSTR	"ThunderX BGX Ethernet I/O Interface"

MALLOC_DEFINE(M_BGX, "thunder_bgx", "ThunderX BGX dynamic memory");

#define	BGX_NODE_ID_MASK	0x1
#define	BGX_NODE_ID_SHIFT	24

#define	DRV_NAME	"thunder-BGX"
#define	DRV_VERSION	"1.0"

static int bgx_init_phy(struct bgx *);

static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count __unused; /* Total no of LMACs in system */

static int bgx_xaui_check_link(struct lmac *lmac);
static void bgx_get_qlm_mode(struct bgx *);
static void bgx_init_hw(struct bgx *);
static int bgx_lmac_enable(struct bgx *, uint8_t);
static void bgx_lmac_disable(struct bgx *, uint8_t);

static int thunder_bgx_probe(device_t);
static int thunder_bgx_attach(device_t);
static int thunder_bgx_detach(device_t);

static device_method_t thunder_bgx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		thunder_bgx_probe),
	DEVMETHOD(device_attach,	thunder_bgx_attach),
	DEVMETHOD(device_detach,	thunder_bgx_detach),

	DEVMETHOD_END,
};

static driver_t thunder_bgx_driver = {
	"bgx",
	thunder_bgx_methods,
	sizeof(struct lmac),
};

static devclass_t thunder_bgx_devclass;

DRIVER_MODULE(thunder_bgx, pci, thunder_bgx_driver, thunder_bgx_devclass, 0, 0);
MODULE_VERSION(thunder_bgx, 1);
MODULE_DEPEND(thunder_bgx, pci, 1, 1, 1);
MODULE_DEPEND(thunder_bgx, ether, 1, 1, 1);
MODULE_DEPEND(thunder_bgx, thunder_mdio, 1, 1, 1);

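/*
 * Note: each MODULE_DEPEND() triple is (minversion, preferred, maxversion),
 * so (1, 1, 1) pins every dependency to interface version 1 of the named
 * module; see module(9) for the matching rules.
 */
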
static int
thunder_bgx_probe(device_t dev)
{
	uint16_t vendor_id;
	uint16_t device_id;

	vendor_id = pci_get_vendor(dev);
	device_id = pci_get_device(dev);

	if (vendor_id == PCI_VENDOR_ID_CAVIUM &&
	    device_id == PCI_DEVICE_ID_THUNDER_BGX) {
		device_set_desc(dev, THUNDER_BGX_DEVSTR);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static int
thunder_bgx_attach(device_t dev)
{
	struct bgx *bgx;
	uint8_t lmac;
	int err;
	int rid;

	bgx = malloc(sizeof(*bgx), M_BGX, (M_WAITOK | M_ZERO));
	bgx->dev = dev;
	/* Enable bus mastering */
	pci_enable_busmaster(dev);
	/* Allocate resources - configuration registers */
	rid = PCIR_BAR(PCI_CFG_REG_BAR_NUM);
	bgx->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (bgx->reg_base == NULL) {
		device_printf(dev, "Could not allocate CSR memory space\n");
		err = ENXIO;
		goto err_disable_device;
	}

	bgx->bgx_id = (rman_get_start(bgx->reg_base) >> BGX_NODE_ID_SHIFT) &
	    BGX_NODE_ID_MASK;
	bgx->bgx_id += nic_get_node_id(bgx->reg_base) * MAX_BGX_PER_CN88XX;

	bgx_vnic[bgx->bgx_id] = bgx;
	bgx_get_qlm_mode(bgx);

	err = bgx_init_phy(bgx);
	if (err != 0)
		goto err_free_res;

	bgx_init_hw(bgx);

	/* Enable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
		err = bgx_lmac_enable(bgx, lmac);
		if (err) {
			device_printf(dev, "BGX%d failed to enable lmac%d\n",
			    bgx->bgx_id, lmac);
			goto err_free_res;
		}
	}

	return (0);

err_free_res:
	bgx_vnic[bgx->bgx_id] = NULL;
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(bgx->reg_base), bgx->reg_base);
err_disable_device:
	free(bgx, M_BGX);
	pci_disable_busmaster(dev);

	return (err);
}

static int
thunder_bgx_detach(device_t dev)
{
	struct lmac *lmac;
	struct bgx *bgx;
	uint8_t lmacid;

	lmac = device_get_softc(dev);
	bgx = lmac->bgx;
	/* Disable all LMACs */
	for (lmacid = 0; lmacid < bgx->lmac_count; lmacid++)
		bgx_lmac_disable(bgx, lmacid);

	return (0);
}

/* Register read/write APIs */
static uint64_t
bgx_reg_read(struct bgx *bgx, uint8_t lmac, uint64_t offset)
{
	bus_space_handle_t addr;

	addr = ((uint32_t)lmac << 20) + offset;

	return (bus_read_8(bgx->reg_base, addr));
}

static void
bgx_reg_write(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val)
{
	bus_space_handle_t addr;

	addr = ((uint32_t)lmac << 20) + offset;

	bus_write_8(bgx->reg_base, addr, val);
}

static void
bgx_reg_modify(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val)
{
	bus_space_handle_t addr;

	addr = ((uint32_t)lmac << 20) + offset;

	bus_write_8(bgx->reg_base, addr, val | bus_read_8(bgx->reg_base, addr));
}

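/*
 * CSR addressing note: each LMAC's registers sit at a fixed 1 MB stride
 * inside the BGX BAR, so a per-LMAC CSR lives at (lmac << 20) + offset,
 * which is exactly what the three helpers above compute. Illustrative
 * sketch only (not driver code):
 *
 *	uint64_t csr = ((uint32_t)2 << 20) + BGX_CMRX_CFG;
 *	// -> CMRX_CFG of LMAC2, i.e. 0x200000 + the register offset
 */
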
static int
bgx_poll_reg(struct bgx *bgx, uint8_t lmac, uint64_t reg, uint64_t mask,
    boolean_t zero)
{
	int timeout = 10;
	uint64_t reg_val;

	while (timeout) {
		reg_val = bgx_reg_read(bgx, lmac, reg);
		if (zero && !(reg_val & mask))
			return (0);
		if (!zero && (reg_val & mask))
			return (0);

		DELAY(100);
		timeout--;
	}
	return (ETIMEDOUT);
}

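/*
 * bgx_poll_reg() retries up to 10 times with a 100us DELAY() between
 * reads, so a bit gets roughly 1ms to reach the requested state. Callers
 * pass zero == TRUE to wait for the mask to clear and FALSE to wait for
 * it to become set, e.g. (illustrative only):
 *
 *	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1,
 *	    SPU_CTL_RESET, TRUE) != 0)
 *		// reset bit did not self-clear within ~1ms
 */
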
/* Return a bitmap of the BGX instances present in HW */
u_int
bgx_get_map(int node)
{
	int i;
	u_int map = 0;

	for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
		if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
			map |= (1 << i);
	}

	return (map);
}

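/*
 * The return value is a bitmap, not a count: bit i is set when BGX
 * instance i on the given node has attached. A caller could walk it
 * like this (illustrative sketch only):
 *
 *	u_int map = bgx_get_map(0);
 *	for (int i = 0; i < MAX_BGX_PER_CN88XX; i++)
 *		if (map & (1 << i))
 *			// BGX i is present on node 0
 */
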
/* Return number of LMACs configured for this BGX */
int
bgx_get_lmac_count(int node, int bgx_idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx != NULL)
		return (bgx->lmac_count);

	return (0);
}

/* Returns the current link status of LMAC */
void
bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
{
	struct bgx_link_status *link = (struct bgx_link_status *)status;
	struct bgx *bgx;
	struct lmac *lmac;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return;

	lmac = &bgx->lmac[lmacid];
	link->link_up = lmac->link_up;
	link->duplex = lmac->last_duplex;
	link->speed = lmac->last_speed;
}

const uint8_t *
bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (bgx != NULL)
		return (bgx->lmac[lmacid].mac);

	return (NULL);
}

void
bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const uint8_t *mac)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (bgx == NULL)
		return;

	memcpy(bgx->lmac[lmacid].mac, mac, ETHER_ADDR_LEN);
}

static void
bgx_sgmii_change_link_state(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	uint64_t cmr_cfg;
	uint64_t port_cfg = 0;
	uint64_t misc_ctl = 0;

	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
	cmr_cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);

	if (lmac->link_up) {
		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
		port_cfg |= (lmac->last_duplex << 2);
	} else {
		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
	}

	switch (lmac->last_speed) {
	case 10:
		port_cfg &= ~GMI_PORT_CFG_SPEED;	/* speed 0 */
		port_cfg |= GMI_PORT_CFG_SPEED_MSB;	/* speed_msb 1 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME;	/* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 50;				/* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		port_cfg &= ~GMI_PORT_CFG_SPEED;	/* speed 0 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB;	/* speed_msb 0 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME;	/* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 5;				/* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		port_cfg |= GMI_PORT_CFG_SPEED;		/* speed 1 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB;	/* speed_msb 0 */
		port_cfg |= GMI_PORT_CFG_SLOT_TIME;	/* slottime 1 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 1;				/* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmac->lmacid,
			    BGX_GMP_GMI_TXX_BURST, 0);
		else
			bgx_reg_write(bgx, lmac->lmacid,
			    BGX_GMP_GMI_TXX_BURST, 8192);
		break;
	default:
		break;
	}
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);

	/* Re-enable lmac */
	cmr_cfg |= CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
}

static void
bgx_lmac_handler(void *arg)
{
	struct lmac *lmac;
	int link, duplex, speed;
	int link_changed = 0;
	int err;

	lmac = (struct lmac *)arg;

	err = LMAC_MEDIA_STATUS(lmac->phy_if_dev, lmac->lmacid,
	    &link, &duplex, &speed);
	if (err != 0)
		goto out;

	if (!link && lmac->last_link)
		link_changed = -1;

	if (link &&
	    (lmac->last_duplex != duplex ||
	     lmac->last_link != link ||
	     lmac->last_speed != speed)) {
		link_changed = 1;
	}

	lmac->last_link = link;
	lmac->last_speed = speed;
	lmac->last_duplex = duplex;

	if (!link_changed)
		goto out;

	if (link_changed > 0)
		lmac->link_up = true;
	else
		lmac->link_up = false;

	if (lmac->is_sgmii)
		bgx_sgmii_change_link_state(lmac);
	else
		bgx_xaui_check_link(lmac);

out:
	callout_reset(&lmac->check_link, hz * 2, bgx_lmac_handler, lmac);
}

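/*
 * Both link monitors (bgx_lmac_handler() above for PHY-backed ports and
 * bgx_poll_for_link() below for the XAUI-family modes) re-arm themselves
 * with callout_reset(..., hz * 2, ...), so link state is re-sampled every
 * two seconds. Each callout is initialized and first kicked from
 * bgx_lmac_enable() while holding check_link_mtx.
 */
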
uint64_t
bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return (0);

	if (idx > 8)
		lmac = 0;
	return (bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8)));
}

uint64_t
bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return (0);

	return (bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8)));
}

static void
bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
{
	uint64_t offset;

	while (bgx->lmac[lmac].dmac > 0) {
		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(uint64_t)) +
		    (lmac * MAX_DMAC_PER_LMAC * sizeof(uint64_t));
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
		bgx->lmac[lmac].dmac--;
	}
}

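/*
 * The DMAC CAM is one flat register array shared by all LMACs, carved
 * into MAX_DMAC_PER_LMAC slots per LMAC, so the 64-bit entry for slot i
 * of a given lmac sits at (illustrative only):
 *
 *	offset = BGX_CMR_RX_DMACX_CAM +
 *	    (lmac * MAX_DMAC_PER_LMAC + i) * sizeof(uint64_t);
 *
 * which is the arithmetic used by bgx_flush_dmac_addrs() above and
 * bgx_add_dmac_addr() below.
 */
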
void
bgx_add_dmac_addr(uint64_t dmac, int node, int bgx_idx, int lmac)
{
	uint64_t offset;
	struct bgx *bgx;

#ifdef BGX_IN_PROMISCUOUS_MODE
	return;
#endif

	bgx_idx += node * MAX_BGX_PER_CN88XX;
	bgx = bgx_vnic[bgx_idx];

	if (bgx == NULL) {
		/* Cannot use device_printf() here since bgx is NULL */
		printf("BGX%d not yet initialized, ignoring DMAC addition\n",
		    bgx_idx);
		return;
	}

	dmac = dmac | (1UL << 48) | ((uint64_t)lmac << 49); /* Enable DMAC */
	if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC) {
		device_printf(bgx->dev,
		    "Max DMAC filters for LMAC%d reached, ignoring\n",
		    lmac);
		return;
	}

	if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE)
		bgx->lmac[lmac].dmac = 1;

	offset = (bgx->lmac[lmac].dmac * sizeof(uint64_t)) +
	    (lmac * MAX_DMAC_PER_LMAC * sizeof(uint64_t));
	bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, dmac);
	bgx->lmac[lmac].dmac++;

	bgx_reg_write(bgx, lmac, BGX_CMRX_RX_DMAC_CTL,
	    (CAM_ACCEPT << 3) | (MCAST_MODE_CAM_FILTER << 1) |
	    (BCAST_ACCEPT << 0));
}

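/*
 * CAM entry layout, as composed above: bits 47:0 hold the MAC address,
 * bit 48 marks the entry valid and the bits from 49 up select the LMAC.
 * For example (illustrative sketch only), the broadcast address bound to
 * LMAC1 becomes:
 *
 *	uint64_t entry = 0xffffffffffffUL | (1UL << 48) | (1UL << 49);
 */
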
/* Configure BGX LMAC in internal loopback mode */
void
bgx_lmac_internal_loopback(int node, int bgx_idx,
    int lmac_idx, boolean_t enable)
{
	struct bgx *bgx;
	struct lmac *lmac;
	uint64_t cfg;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return;

	lmac = &bgx->lmac[lmac_idx];
	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= PCS_MRX_CTL_LOOPBACK1;
		else
			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
		if (enable)
			cfg |= SPU_CTL_LOOPBACK;
		else
			cfg &= ~SPU_CTL_LOOPBACK;
		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
	}
}

static int
bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
{
	uint64_t cfg;

	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* PCS reset */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
	    PCS_MRX_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX PCS reset not completed\n");
		return (ENXIO);
	}

	/* power down, reset autoneg, autoneg enable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;
	cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
	    PCS_MRX_STATUS_AN_CPT, FALSE) != 0) {
		device_printf(bgx->dev, "BGX AN_CPT not completed\n");
		return (ENXIO);
	}

	return (0);
}

static int
bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
{
	uint64_t cfg;

	/* Reset SPU */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1,
	    SPU_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX SPU reset not completed\n");
		return (ENXIO);
	}

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	/* Set interleaved running disparity for RXAUI */
	if (bgx->lmac_type != BGX_MODE_RXAUI) {
		bgx_reg_modify(bgx, lmacid,
		    BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	} else {
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
		    SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);
	}

	/* clear all interrupts */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

	if (bgx->use_training) {
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* training enable */
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL,
		    SPU_PMD_CRTL_TRAIN_EN);
	}

	/* Append FCS to each packet */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Disable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	if (bgx->lmac_type == BGX_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (bgx->lmac_type == BGX_MODE_40G_KR)
		cfg |= (1 << 24);
	else
		cfg &= ~((1 << 23) | (1 << 24));
	cfg = cfg & (~((1UL << 25) | (1UL << 22) | (1UL << 12)));
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* take lmac_count into account */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

	return (0);
}

static int
bgx_xaui_check_link(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	int lmacid = lmac->lmacid;
	int lmac_type = bgx->lmac_type;
	uint64_t cfg;

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	if (bgx->use_training) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
		if ((cfg & (1UL << 13)) == 0) {
			cfg = (1UL << 13) | (1UL << 14);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
			cfg |= (1UL << 0);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
			return (ENXIO);
		}
	}

	/* wait for PCS to come out of reset */
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1,
	    SPU_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX SPU reset not completed\n");
		return (ENXIO);
	}

	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
		    SPU_BR_STATUS_BLK_LOCK, FALSE)) {
			device_printf(bgx->dev,
			    "SPU_BR_STATUS_BLK_LOCK not completed\n");
			return (ENXIO);
		}
	} else {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
		    SPU_BX_STATUS_RX_ALIGN, FALSE) != 0) {
			device_printf(bgx->dev,
			    "SPU_BX_STATUS_RX_ALIGN not completed\n");
			return (ENXIO);
		}
	}

	/* Clear rcvflt bit (latching high) and read it back */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		device_printf(bgx->dev, "Receive fault, retry training\n");
		if (bgx->use_training) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
			if ((cfg & (1UL << 13)) == 0) {
				cfg = (1UL << 13) | (1UL << 14);
				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
				cfg = bgx_reg_read(bgx, lmacid,
				    BGX_SPUX_BR_PMD_CRTL);
				cfg |= (1UL << 0);
				bgx_reg_write(bgx, lmacid,
				    BGX_SPUX_BR_PMD_CRTL, cfg);
				return (ENXIO);
			}
		}
		return (ENXIO);
	}

	/* Wait for MAC RX to be ready */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
	    SMU_RX_CTL_STATUS, TRUE) != 0) {
		device_printf(bgx->dev, "SMU RX link not okay\n");
		return (ENXIO);
	}

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL,
	    SMU_CTL_RX_IDLE, FALSE) != 0) {
		device_printf(bgx->dev, "SMU RX not idle\n");
		return (ENXIO);
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL,
	    SMU_CTL_TX_IDLE, FALSE) != 0) {
		device_printf(bgx->dev, "SMU TX not idle\n");
		return (ENXIO);
	}

	if ((bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) &
	    SPU_STATUS2_RCVFLT) != 0) {
		device_printf(bgx->dev, "Receive fault\n");
		return (ENXIO);
	}

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
	    SPU_STATUS1_RCV_LNK, FALSE) != 0) {
		device_printf(bgx->dev, "SPU receive link down\n");
		return (ENXIO);
	}

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
	return (0);
}

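/*
 * Several SPU status bits polled above are "latching": RCVFLT latches
 * high and is cleared by writing one back to it, while RCV_LNK latches
 * low, so the code first writes the bit to re-arm it and only then
 * re-reads to get the live state. That is why bgx_xaui_check_link() and
 * bgx_poll_for_link() both do a modify-then-poll sequence on
 * BGX_SPUX_STATUS1 instead of a plain read.
 */
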
static void
bgx_poll_for_link(void *arg)
{
	struct lmac *lmac;
	uint64_t link;

	lmac = (struct lmac *)arg;

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(lmac->bgx, lmac->lmacid,
	    BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
	    SPU_STATUS1_RCV_LNK, FALSE);

	link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
	if (link & SPU_STATUS1_RCV_LNK) {
		lmac->link_up = 1;
		if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
			lmac->last_speed = 40000;
		else
			lmac->last_speed = 10000;
		lmac->last_duplex = 1;
	} else {
		lmac->link_up = 0;
	}

	if (lmac->last_link != lmac->link_up) {
		lmac->last_link = lmac->link_up;
		if (lmac->link_up)
			bgx_xaui_check_link(lmac);
	}

	callout_reset(&lmac->check_link, hz * 2, bgx_poll_for_link, lmac);
}

static int
bgx_lmac_enable(struct bgx *bgx, uint8_t lmacid)
{
	uint64_t __unused dmac_bcast = (1UL << 48) - 1;
	struct lmac *lmac;
	uint64_t cfg;

	lmac = &bgx->lmac[lmacid];
	lmac->bgx = bgx;

	if (bgx->lmac_type == BGX_MODE_SGMII) {
		lmac->is_sgmii = 1;
		if (bgx_lmac_sgmii_init(bgx, lmacid) != 0)
			return (ENXIO);
	} else {
		lmac->is_sgmii = 0;
		if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type) != 0)
			return (ENXIO);
	}

	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1UL << 2) | (1UL << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	} else {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
		cfg |= ((1UL << 2) | (1UL << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
	}

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
	    CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);

	/* Restore default cfg, in case low level firmware changed it */
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);

	/* Add broadcast MAC into all LMAC's DMAC filters */
	bgx_add_dmac_addr(dmac_bcast, 0, bgx->bgx_id, lmacid);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XAUI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR)) {
		if (lmac->phy_if_dev == NULL) {
			device_printf(bgx->dev,
			    "LMAC%d missing interface to PHY\n", lmacid);
			return (ENXIO);
		}

		if (LMAC_PHY_CONNECT(lmac->phy_if_dev, lmac->phyaddr,
		    lmacid) != 0) {
			device_printf(bgx->dev,
			    "LMAC%d could not connect to PHY\n", lmacid);
			return (ENXIO);
		}
		mtx_init(&lmac->check_link_mtx, "BGX link poll", NULL, MTX_DEF);
		callout_init_mtx(&lmac->check_link, &lmac->check_link_mtx, 0);
		mtx_lock(&lmac->check_link_mtx);
		bgx_lmac_handler(lmac);
		mtx_unlock(&lmac->check_link_mtx);
	} else {
		mtx_init(&lmac->check_link_mtx, "BGX link poll", NULL, MTX_DEF);
		callout_init_mtx(&lmac->check_link, &lmac->check_link_mtx, 0);
		mtx_lock(&lmac->check_link_mtx);
		bgx_poll_for_link(lmac);
		mtx_unlock(&lmac->check_link_mtx);
	}

	return (0);
}

static void
bgx_lmac_disable(struct bgx *bgx, uint8_t lmacid)
{
	struct lmac *lmac;
	uint64_t cmrx_cfg;

	lmac = &bgx->lmac[lmacid];

	/* Stop callout */
	callout_drain(&lmac->check_link);
	mtx_destroy(&lmac->check_link_mtx);

	cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cmrx_cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
	bgx_flush_dmac_addrs(bgx, lmacid);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR)) {
		if (lmac->phy_if_dev == NULL) {
			device_printf(bgx->dev,
			    "LMAC%d missing interface to PHY\n", lmacid);
			return;
		}
		if (LMAC_PHY_DISCONNECT(lmac->phy_if_dev, lmac->phyaddr,
		    lmacid) != 0) {
			device_printf(bgx->dev,
			    "LMAC%d could not disconnect PHY\n", lmacid);
			return;
		}
		lmac->phy_if_dev = NULL;
	}
}

static void
bgx_set_num_ports(struct bgx *bgx)
{
	uint64_t lmac_count;

	switch (bgx->qlm_mode) {
	case QLM_MODE_SGMII:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_SGMII;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_RXAUI_2X2:
		bgx->lmac_count = 2;
		bgx->lmac_type = BGX_MODE_RXAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_XFI_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_XFI;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XLAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XLAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_10G_KR_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_10G_KR;
		bgx->lane_to_sds = 0;
		bgx->use_training = 1;
		break;
	case QLM_MODE_40G_KR4_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_40G_KR;
		bgx->lane_to_sds = 0xE4;
		bgx->use_training = 1;
		break;
	default:
		bgx->lmac_count = 0;
		break;
	}

	/*
	 * Check if low level firmware has programmed the LMAC count
	 * based on the board type; if so, use that value, otherwise
	 * keep the default static values set above.
	 */
	lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
	if (lmac_count != 4)
		bgx->lmac_count = lmac_count;
}

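/*
 * lane_to_sds packs four 2-bit lane-to-SerDes selections into one byte:
 * 0xE4 == 0b11100100 is the identity map (lane0->SDS0 ... lane3->SDS3)
 * used by the 4-lane modes, while the single-lane modes start from 0 and
 * bgx_init_hw() adds the LMAC index per port. Decoding sketch
 * (illustrative only):
 *
 *	sds_for_lane = (lane_to_sds >> (lane * 2)) & 0x3;
 */
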
static void
bgx_init_hw(struct bgx *bgx)
{
	int i;

	bgx_set_num_ports(bgx);

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		device_printf(bgx->dev, "BGX%d BIST failed\n", bgx->bgx_id);

	/* Set lmac type and lane2serdes mapping */
	for (i = 0; i < bgx->lmac_count; i++) {
		if (bgx->lmac_type == BGX_MODE_RXAUI) {
			if (i)
				bgx->lane_to_sds = 0x0e;
			else
				bgx->lane_to_sds = 0x04;
			bgx_reg_write(bgx, i, BGX_CMRX_CFG,
			    (bgx->lmac_type << 8) | bgx->lane_to_sds);
			continue;
		}
		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
		    (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
		bgx->lmac[i].lmacid_bd = lmac_count;
		lmac_count++;
	}

	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

	/* Set the backpressure AND mask */
	for (i = 0; i < bgx->lmac_count; i++) {
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
		    ((1UL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
		    (i * MAX_BGX_CHANS_PER_LMAC));
	}

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}

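/*
 * The backpressure AND mask gives each LMAC a contiguous slice of
 * channel bits: LMAC i owns bits [i * MAX_BGX_CHANS_PER_LMAC ..
 * (i + 1) * MAX_BGX_CHANS_PER_LMAC - 1]. Assuming MAX_BGX_CHANS_PER_LMAC
 * is 16 (its value in the Linux counterpart of this driver), LMAC1 would
 * be assigned the channel mask 0xffff0000.
 */
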
static void
bgx_get_qlm_mode(struct bgx *bgx)
{
	device_t dev = bgx->dev;
	int lmac_type;
	int train_en;

	/*
	 * Read LMAC0 type to figure out QLM mode.
	 * This is configured by low level firmware.
	 */
	lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
	lmac_type = (lmac_type >> 8) & 0x07;

	train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
	    SPU_PMD_CRTL_TRAIN_EN;

	switch (lmac_type) {
	case BGX_MODE_SGMII:
		bgx->qlm_mode = QLM_MODE_SGMII;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: SGMII\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_XAUI:
		bgx->qlm_mode = QLM_MODE_XAUI_1X4;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: XAUI\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_RXAUI:
		bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: RXAUI\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_XFI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XFI_4X1;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: XFI\n",
				    bgx->bgx_id);
			}
		} else {
			bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: 10G_KR\n",
				    bgx->bgx_id);
			}
		}
		break;
	case BGX_MODE_XLAUI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: XLAUI\n",
				    bgx->bgx_id);
			}
		} else {
			bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: 40G_KR4\n",
				    bgx->bgx_id);
			}
		}
		break;
	default:
		bgx->qlm_mode = QLM_MODE_SGMII;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM default mode: SGMII\n",
			    bgx->bgx_id);
		}
	}
}

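/*
 * The LMAC type consumed above is firmware-provided: bits <10:8> of
 * LMAC0's BGX_CMRX_CFG encode the interface type, hence the
 * "(lmac_type >> 8) & 0x07" extraction. Training-enabled XFI and XLAUI
 * links are reported as their backplane (10G_KR / 40G_KR4) variants.
 */
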
static int
bgx_init_phy(struct bgx *bgx)
{
	int err;

	/* By default we fail */
	err = ENXIO;
#ifdef FDT
	err = bgx_fdt_init_phy(bgx);
#endif
#ifdef ACPI
	if (err != 0) {
		/* ARM64TODO: Add ACPI function here */
	}
#endif
	return (err);
}
116
117static int
118thunder_bgx_probe(device_t dev)
119{
120 uint16_t vendor_id;
121 uint16_t device_id;
122
123 vendor_id = pci_get_vendor(dev);
124 device_id = pci_get_device(dev);
125
126 if (vendor_id == PCI_VENDOR_ID_CAVIUM &&
127 device_id == PCI_DEVICE_ID_THUNDER_BGX) {
128 device_set_desc(dev, THUNDER_BGX_DEVSTR);
129 return (BUS_PROBE_DEFAULT);
130 }
131
132 return (ENXIO);
133}
134
135static int
136thunder_bgx_attach(device_t dev)
137{
138 struct bgx *bgx;
139 uint8_t lmac;
140 int err;
141 int rid;
142
143 bgx = malloc(sizeof(*bgx), M_BGX, (M_WAITOK | M_ZERO));
144 bgx->dev = dev;
145 /* Enable bus mastering */
146 pci_enable_busmaster(dev);
147 /* Allocate resources - configuration registers */
148 rid = PCIR_BAR(PCI_CFG_REG_BAR_NUM);
149 bgx->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
150 RF_ACTIVE);
151 if (bgx->reg_base == NULL) {
152 device_printf(dev, "Could not allocate CSR memory space\n");
153 err = ENXIO;
154 goto err_disable_device;
155 }
156
157 bgx->bgx_id = (rman_get_start(bgx->reg_base) >> BGX_NODE_ID_SHIFT) &
158 BGX_NODE_ID_MASK;
159 bgx->bgx_id += nic_get_node_id(bgx->reg_base) * MAX_BGX_PER_CN88XX;
160
161 bgx_vnic[bgx->bgx_id] = bgx;
162 bgx_get_qlm_mode(bgx);
163
164 err = bgx_init_phy(bgx);
165 if (err != 0)
166 goto err_free_res;
167
168 bgx_init_hw(bgx);
169
170 /* Enable all LMACs */
171 for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
172 err = bgx_lmac_enable(bgx, lmac);
173 if (err) {
174 device_printf(dev, "BGX%d failed to enable lmac%d\n",
175 bgx->bgx_id, lmac);
176 goto err_free_res;
177 }
178 }
179
180 return (0);
181
182err_free_res:
183 bgx_vnic[bgx->bgx_id] = NULL;
184 bus_release_resource(dev, SYS_RES_MEMORY,
185 rman_get_rid(bgx->reg_base), bgx->reg_base);
186err_disable_device:
187 free(bgx, M_BGX);
188 pci_disable_busmaster(dev);
189
190 return (err);
191}
192
193static int
194thunder_bgx_detach(device_t dev)
195{
196 struct lmac *lmac;
197 struct bgx *bgx;
198 uint8_t lmacid;
199
200 lmac = device_get_softc(dev);
201 bgx = lmac->bgx;
202 /* Disable all LMACs */
203 for (lmacid = 0; lmacid < bgx->lmac_count; lmacid++)
204 bgx_lmac_disable(bgx, lmacid);
205
206 return (0);
207}
208
209/* Register read/write APIs */
210static uint64_t
211bgx_reg_read(struct bgx *bgx, uint8_t lmac, uint64_t offset)
212{
213 bus_space_handle_t addr;
214
215 addr = ((uint32_t)lmac << 20) + offset;
216
217 return (bus_read_8(bgx->reg_base, addr));
218}
219
220static void
221bgx_reg_write(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val)
222{
223 bus_space_handle_t addr;
224
225 addr = ((uint32_t)lmac << 20) + offset;
226
227 bus_write_8(bgx->reg_base, addr, val);
228}
229
230static void
231bgx_reg_modify(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val)
232{
233 bus_space_handle_t addr;
234
235 addr = ((uint32_t)lmac << 20) + offset;
236
237 bus_write_8(bgx->reg_base, addr, val | bus_read_8(bgx->reg_base, addr));
238}
239
240static int
241bgx_poll_reg(struct bgx *bgx, uint8_t lmac, uint64_t reg, uint64_t mask,
242 boolean_t zero)
243{
244 int timeout = 10;
245 uint64_t reg_val;
246
247 while (timeout) {
248 reg_val = bgx_reg_read(bgx, lmac, reg);
249 if (zero && !(reg_val & mask))
250 return (0);
251 if (!zero && (reg_val & mask))
252 return (0);
253
254 DELAY(100);
255 timeout--;
256 }
257 return (ETIMEDOUT);
258}
259
260/* Return number of BGX present in HW */
261u_int
262bgx_get_map(int node)
263{
264 int i;
265 u_int map = 0;
266
267 for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
268 if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
269 map |= (1 << i);
270 }
271
272 return (map);
273}
274
275/* Return number of LMAC configured for this BGX */
276int
277bgx_get_lmac_count(int node, int bgx_idx)
278{
279 struct bgx *bgx;
280
281 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
282 if (bgx != NULL)
283 return (bgx->lmac_count);
284
285 return (0);
286}
287
288/* Returns the current link status of LMAC */
289void
290bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
291{
292 struct bgx_link_status *link = (struct bgx_link_status *)status;
293 struct bgx *bgx;
294 struct lmac *lmac;
295
296 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
297 if (bgx == NULL)
298 return;
299
300 lmac = &bgx->lmac[lmacid];
301 link->link_up = lmac->link_up;
302 link->duplex = lmac->last_duplex;
303 link->speed = lmac->last_speed;
304}
305
306const uint8_t
307*bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
308{
309 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
310
311 if (bgx != NULL)
312 return (bgx->lmac[lmacid].mac);
313
314 return (NULL);
315}
316
317void
318bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const uint8_t *mac)
319{
320 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
321
322 if (bgx == NULL)
323 return;
324
325 memcpy(bgx->lmac[lmacid].mac, mac, ETHER_ADDR_LEN);
326}
327
328static void
329bgx_sgmii_change_link_state(struct lmac *lmac)
330{
331 struct bgx *bgx = lmac->bgx;
332 uint64_t cmr_cfg;
333 uint64_t port_cfg = 0;
334 uint64_t misc_ctl = 0;
335
336 cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
337 cmr_cfg &= ~CMR_EN;
338 bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
339
340 port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
341 misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);
342
343 if (lmac->link_up) {
344 misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
345 port_cfg &= ~GMI_PORT_CFG_DUPLEX;
346 port_cfg |= (lmac->last_duplex << 2);
347 } else {
348 misc_ctl |= PCS_MISC_CTL_GMX_ENO;
349 }
350
351 switch (lmac->last_speed) {
352 case 10:
353 port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
354 port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */
355 port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
356 misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
357 misc_ctl |= 50; /* samp_pt */
358 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
359 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
360 break;
361 case 100:
362 port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
363 port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
364 port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
365 misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
366 misc_ctl |= 5; /* samp_pt */
367 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
368 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
369 break;
370 case 1000:
371 port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
372 port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
373 port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
374 misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
375 misc_ctl |= 1; /* samp_pt */
376 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
377 if (lmac->last_duplex)
378 bgx_reg_write(bgx, lmac->lmacid,
379 BGX_GMP_GMI_TXX_BURST, 0);
380 else
381 bgx_reg_write(bgx, lmac->lmacid,
382 BGX_GMP_GMI_TXX_BURST, 8192);
383 break;
384 default:
385 break;
386 }
387 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
388 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);
389
390 port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
391
392 /* renable lmac */
393 cmr_cfg |= CMR_EN;
394 bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
395}
396
397static void
398bgx_lmac_handler(void *arg)
399{
400 struct lmac *lmac;
401 int link, duplex, speed;
402 int link_changed = 0;
403 int err;
404
405 lmac = (struct lmac *)arg;
406
407 err = LMAC_MEDIA_STATUS(lmac->phy_if_dev, lmac->lmacid,
408 &link, &duplex, &speed);
409 if (err != 0)
410 goto out;
411
412 if (!link && lmac->last_link)
413 link_changed = -1;
414
415 if (link &&
416 (lmac->last_duplex != duplex ||
417 lmac->last_link != link ||
418 lmac->last_speed != speed)) {
419 link_changed = 1;
420 }
421
422 lmac->last_link = link;
423 lmac->last_speed = speed;
424 lmac->last_duplex = duplex;
425
426 if (!link_changed)
427 goto out;
428
429 if (link_changed > 0)
430 lmac->link_up = true;
431 else
432 lmac->link_up = false;
433
434 if (lmac->is_sgmii)
435 bgx_sgmii_change_link_state(lmac);
436 else
437 bgx_xaui_check_link(lmac);
438
439out:
440 callout_reset(&lmac->check_link, hz * 2, bgx_lmac_handler, lmac);
441}
442
443uint64_t
444bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
445{
446 struct bgx *bgx;
447
448 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
449 if (bgx == NULL)
450 return (0);
451
452 if (idx > 8)
453 lmac = (0);
454 return (bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8)));
455}
456
457uint64_t
458bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
459{
460 struct bgx *bgx;
461
462 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
463 if (bgx == NULL)
464 return (0);
465
466 return (bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8)));
467}
468
469static void
470bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
471{
472 uint64_t offset;
473
474 while (bgx->lmac[lmac].dmac > 0) {
475 offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(uint64_t)) +
476 (lmac * MAX_DMAC_PER_LMAC * sizeof(uint64_t));
477 bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
478 bgx->lmac[lmac].dmac--;
479 }
480}
481
482void
483bgx_add_dmac_addr(uint64_t dmac, int node, int bgx_idx, int lmac)
484{
485 uint64_t offset;
486 struct bgx *bgx;
487
488#ifdef BGX_IN_PROMISCUOUS_MODE
489 return;
490#endif
491
492 bgx_idx += node * MAX_BGX_PER_CN88XX;
493 bgx = bgx_vnic[bgx_idx];
494
495 if (!bgx) {
496 device_printf(bgx->dev,
497 "BGX%d not yet initialized, ignoring DMAC addition\n",
498 bgx_idx);
499 return;
500 }
501
502 dmac = dmac | (1UL << 48) | ((uint64_t)lmac << 49); /* Enable DMAC */
503 if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC) {
504 device_printf(bgx->dev,
505 "Max DMAC filters for LMAC%d reached, ignoring\n",
506 lmac);
507 return;
508 }
509
510 if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE)
511 bgx->lmac[lmac].dmac = 1;
512
513 offset = (bgx->lmac[lmac].dmac * sizeof(uint64_t)) +
514 (lmac * MAX_DMAC_PER_LMAC * sizeof(uint64_t));
515 bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, dmac);
516 bgx->lmac[lmac].dmac++;
517
518 bgx_reg_write(bgx, lmac, BGX_CMRX_RX_DMAC_CTL,
519 (CAM_ACCEPT << 3) | (MCAST_MODE_CAM_FILTER << 1) |
520 (BCAST_ACCEPT << 0));
521}
522
523/* Configure BGX LMAC in internal loopback mode */
524void
525bgx_lmac_internal_loopback(int node, int bgx_idx,
526 int lmac_idx, boolean_t enable)
527{
528 struct bgx *bgx;
529 struct lmac *lmac;
530 uint64_t cfg;
531
532 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
533 if (bgx == NULL)
534 return;
535
536 lmac = &bgx->lmac[lmac_idx];
537 if (lmac->is_sgmii) {
538 cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
539 if (enable)
540 cfg |= PCS_MRX_CTL_LOOPBACK1;
541 else
542 cfg &= ~PCS_MRX_CTL_LOOPBACK1;
543 bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
544 } else {
545 cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
546 if (enable)
547 cfg |= SPU_CTL_LOOPBACK;
548 else
549 cfg &= ~SPU_CTL_LOOPBACK;
550 bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
551 }
552}
553
554static int
555bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
556{
557 uint64_t cfg;
558
559 bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
560 /* max packet size */
561 bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);
562
563 /* Disable frame alignment if using preamble */
564 cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
565 if (cfg & 1)
566 bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);
567
568 /* Enable lmac */
569 bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
570
571 /* PCS reset */
572 bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
573 if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
574 PCS_MRX_CTL_RESET, TRUE) != 0) {
575 device_printf(bgx->dev, "BGX PCS reset not completed\n");
576 return (ENXIO);
577 }
578
579 /* power down, reset autoneg, autoneg enable */
580 cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
581 cfg &= ~PCS_MRX_CTL_PWR_DN;
582 cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
583 bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
584
585 if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
586 PCS_MRX_STATUS_AN_CPT, FALSE) != 0) {
587 device_printf(bgx->dev, "BGX AN_CPT not completed\n");
588 return (ENXIO);
589 }
590
591 return (0);
592}
593
594static int
595bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
596{
597 uint64_t cfg;
598
599 /* Reset SPU */
600 bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
601 if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1,
602 SPU_CTL_RESET, TRUE) != 0) {
603 device_printf(bgx->dev, "BGX SPU reset not completed\n");
604 return (ENXIO);
605 }
606
607 /* Disable LMAC */
608 cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
609 cfg &= ~CMR_EN;
610 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
611
612 bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
613 /* Set interleaved running disparity for RXAUI */
614 if (bgx->lmac_type != BGX_MODE_RXAUI) {
615 bgx_reg_modify(bgx, lmacid,
616 BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
617 } else {
618 bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
619 SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);
620 }
621
622 /* clear all interrupts */
623 cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
624 bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
625 cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
626 bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
627 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
628 bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
629
630 if (bgx->use_training) {
631 bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
632 bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
633 bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
634 /* training enable */
635 bgx_reg_modify(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL,
636 SPU_PMD_CRTL_TRAIN_EN);
637 }
638
639 /* Append FCS to each packet */
640 bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);
641
642 /* Disable forward error correction */
643 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
644 cfg &= ~SPU_FEC_CTL_FEC_EN;
645 bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);
646
647 /* Disable autoneg */
648 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
649 cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
650 bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);
651
652 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
653 if (bgx->lmac_type == BGX_MODE_10G_KR)
654 cfg |= (1 << 23);
655 else if (bgx->lmac_type == BGX_MODE_40G_KR)
656 cfg |= (1 << 24);
657 else
658 cfg &= ~((1 << 23) | (1 << 24));
659 cfg = cfg & (~((1UL << 25) | (1UL << 22) | (1UL << 12)));
660 bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);
661
662 cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
663 cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
664 bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);
665
666 /* Enable lmac */
667 bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
668
669 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
670 cfg &= ~SPU_CTL_LOW_POWER;
671 bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);
672
673 cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
674 cfg &= ~SMU_TX_CTL_UNI_EN;
675 cfg |= SMU_TX_CTL_DIC_EN;
676 bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);
677
678 /* take lmac_count into account */
679 bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
680 /* max packet size */
681 bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);
682
683 return (0);
684}
685
686static int
687bgx_xaui_check_link(struct lmac *lmac)
688{
689 struct bgx *bgx = lmac->bgx;
690 int lmacid = lmac->lmacid;
691 int lmac_type = bgx->lmac_type;
692 uint64_t cfg;
693
694 bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
695 if (bgx->use_training) {
696 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
697 if ((cfg & (1UL << 13)) == 0) {
698 cfg = (1UL << 13) | (1UL << 14);
699 bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
700 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
701 cfg |= (1UL << 0);
702 bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
703 return (ENXIO);
704 }
705 }
706
707 /* wait for PCS to come out of reset */
708 if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1,
709 SPU_CTL_RESET, TRUE) != 0) {
710 device_printf(bgx->dev, "BGX SPU reset not completed\n");
711 return (ENXIO);
712 }
713
714 if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
715 (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
716 if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
717 SPU_BR_STATUS_BLK_LOCK, FALSE)) {
718 device_printf(bgx->dev,
719 "SPU_BR_STATUS_BLK_LOCK not completed\n");
720 return (ENXIO);
721 }
722 } else {
723 if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
724 SPU_BX_STATUS_RX_ALIGN, FALSE) != 0) {
725 device_printf(bgx->dev,
726 "SPU_BX_STATUS_RX_ALIGN not completed\n");
727 return (ENXIO);
728 }
729 }
730
731 /* Clear rcvflt bit (latching high) and read it back */
732 bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
733 if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
734 device_printf(bgx->dev, "Receive fault, retry training\n");
735 if (bgx->use_training) {
736 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
737 if ((cfg & (1UL << 13)) == 0) {
738 cfg = (1UL << 13) | (1UL << 14);
739 bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
740 cfg = bgx_reg_read(bgx, lmacid,
741 BGX_SPUX_BR_PMD_CRTL);
742 cfg |= (1UL << 0);
743 bgx_reg_write(bgx, lmacid,
744 BGX_SPUX_BR_PMD_CRTL, cfg);
745 return (ENXIO);
746 }
747 }
748 return (ENXIO);
749 }
750
751 /* Wait for MAC RX to be ready */
752 if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
753 SMU_RX_CTL_STATUS, TRUE) != 0) {
754 device_printf(bgx->dev, "SMU RX link not okay\n");
755 return (ENXIO);
756 }
757
758 /* Wait for BGX RX to be idle */
759 if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL,
760 SMU_CTL_RX_IDLE, FALSE) != 0) {
761 device_printf(bgx->dev, "SMU RX not idle\n");
762 return (ENXIO);
763 }
764
765 /* Wait for BGX TX to be idle */
766 if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL,
767 SMU_CTL_TX_IDLE, FALSE) != 0) {
768 device_printf(bgx->dev, "SMU TX not idle\n");
769 return (ENXIO);
770 }
771
772 if ((bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) &
773 SPU_STATUS2_RCVFLT) != 0) {
774 device_printf(bgx->dev, "Receive fault\n");
775 return (ENXIO);
776 }
777
778 /* Receive link is latching low. Force it high and verify it */
779 bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
780 if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
781 SPU_STATUS1_RCV_LNK, FALSE) != 0) {
782 device_printf(bgx->dev, "SPU receive link down\n");
783 return (ENXIO);
784 }
785
786 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
787 cfg &= ~SPU_MISC_CTL_RX_DIS;
788 bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
789 return (0);
790}
791
792static void
793bgx_poll_for_link(void *arg)
794{
795 struct lmac *lmac;
796 uint64_t link;
797
798 lmac = (struct lmac *)arg;
799
800 /* Receive link is latching low. Force it high and verify it */
801 bgx_reg_modify(lmac->bgx, lmac->lmacid,
802 BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
803 bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
804 SPU_STATUS1_RCV_LNK, false);
805
806 link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
807 if (link & SPU_STATUS1_RCV_LNK) {
808 lmac->link_up = 1;
809 if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
810 lmac->last_speed = 40000;
811 else
812 lmac->last_speed = 10000;
813 lmac->last_duplex = 1;
814 } else {
815 lmac->link_up = 0;
816 }
817
818 if (lmac->last_link != lmac->link_up) {
819 lmac->last_link = lmac->link_up;
820 if (lmac->link_up)
821 bgx_xaui_check_link(lmac);
822 }
823
824 callout_reset(&lmac->check_link, hz * 2, bgx_poll_for_link, lmac);
825}
826
827static int
828bgx_lmac_enable(struct bgx *bgx, uint8_t lmacid)
829{
830 uint64_t __unused dmac_bcast = (1UL << 48) - 1;
831 struct lmac *lmac;
832 uint64_t cfg;
833
834 lmac = &bgx->lmac[lmacid];
835 lmac->bgx = bgx;
836
837 if (bgx->lmac_type == BGX_MODE_SGMII) {
838 lmac->is_sgmii = 1;
839 if (bgx_lmac_sgmii_init(bgx, lmacid) != 0)
840 return -1;
841 } else {
842 lmac->is_sgmii = 0;
843 if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type))
844 return -1;
845 }
846
847 if (lmac->is_sgmii) {
848 cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
849 cfg |= ((1UL << 2) | (1UL << 1)); /* FCS and PAD */
850 bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
851 bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
852 } else {
853 cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
854 cfg |= ((1UL << 2) | (1UL << 1)); /* FCS and PAD */
855 bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		/* Minimum frame size is 60 bytes plus the 4-byte FCS */
 856		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
857 }
858
859 /* Enable lmac */
860 bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
861 CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);
862
 863	/* Restore default cfg, in case low level firmware changed it */
864 bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
865
866 /* Add broadcast MAC into all LMAC's DMAC filters */
867 bgx_add_dmac_addr(dmac_bcast, 0, bgx->bgx_id, lmacid);
868
869 if ((bgx->lmac_type != BGX_MODE_XFI) &&
870 (bgx->lmac_type != BGX_MODE_XAUI) &&
871 (bgx->lmac_type != BGX_MODE_XLAUI) &&
872 (bgx->lmac_type != BGX_MODE_40G_KR) &&
873 (bgx->lmac_type != BGX_MODE_10G_KR)) {
874 if (lmac->phy_if_dev == NULL) {
875 device_printf(bgx->dev,
876 "LMAC%d missing interface to PHY\n", lmacid);
877 return (ENXIO);
878 }
879
880 if (LMAC_PHY_CONNECT(lmac->phy_if_dev, lmac->phyaddr,
881 lmacid) != 0) {
882 device_printf(bgx->dev,
883 "LMAC%d could not connect to PHY\n", lmacid);
884 return (ENXIO);
885 }
886 mtx_init(&lmac->check_link_mtx, "BGX link poll", NULL, MTX_DEF);
887 callout_init_mtx(&lmac->check_link, &lmac->check_link_mtx, 0);
888 mtx_lock(&lmac->check_link_mtx);
889 bgx_lmac_handler(lmac);
890 mtx_unlock(&lmac->check_link_mtx);
891 } else {
892 mtx_init(&lmac->check_link_mtx, "BGX link poll", NULL, MTX_DEF);
893 callout_init_mtx(&lmac->check_link, &lmac->check_link_mtx, 0);
894 mtx_lock(&lmac->check_link_mtx);
895 bgx_poll_for_link(lmac);
896 mtx_unlock(&lmac->check_link_mtx);
897 }
898
899 return (0);
900}
901
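/*
 * Tear down a single LMAC: stop the link callout, clear the enable
 * bit, flush its DMAC filters and disconnect the PHY driver if one
 * was attached.
 */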
902static void
903bgx_lmac_disable(struct bgx *bgx, uint8_t lmacid)
904{
905 struct lmac *lmac;
906 uint64_t cmrx_cfg;
907
908 lmac = &bgx->lmac[lmacid];
909
910 /* Stop callout */
911 callout_drain(&lmac->check_link);
912 mtx_destroy(&lmac->check_link_mtx);
913
914 cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
 915	cmrx_cfg &= ~CMR_EN;
916 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
917 bgx_flush_dmac_addrs(bgx, lmacid);
918
 919	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XAUI) &&	/* keep in sync with bgx_lmac_enable() */
 920	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
 921	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
 922	    (bgx->lmac_type != BGX_MODE_10G_KR)) {
923 if (lmac->phy_if_dev == NULL) {
924 device_printf(bgx->dev,
925 "LMAC%d missing interface to PHY\n", lmacid);
926 return;
927 }
928 if (LMAC_PHY_DISCONNECT(lmac->phy_if_dev, lmac->phyaddr,
929 lmacid) != 0) {
930 device_printf(bgx->dev,
931 "LMAC%d could not disconnect PHY\n", lmacid);
932 return;
933 }
934 lmac->phy_if_dev = NULL;
935 }
936}
937
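/*
 * Translate the QLM mode into LMAC count, LMAC type and the default
 * lane-to-SerDes mapping, honoring an LMAC count already programmed
 * into BGX_CMR_RX_LMACS by the firmware.
 */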
938static void
939bgx_set_num_ports(struct bgx *bgx)
940{
941 uint64_t lmac_count;
942
943 switch (bgx->qlm_mode) {
944 case QLM_MODE_SGMII:
945 bgx->lmac_count = 4;
946 bgx->lmac_type = BGX_MODE_SGMII;
947 bgx->lane_to_sds = 0;
948 break;
949 case QLM_MODE_XAUI_1X4:
950 bgx->lmac_count = 1;
951 bgx->lmac_type = BGX_MODE_XAUI;
952 bgx->lane_to_sds = 0xE4;
953 break;
954 case QLM_MODE_RXAUI_2X2:
955 bgx->lmac_count = 2;
956 bgx->lmac_type = BGX_MODE_RXAUI;
957 bgx->lane_to_sds = 0xE4;
958 break;
959 case QLM_MODE_XFI_4X1:
960 bgx->lmac_count = 4;
961 bgx->lmac_type = BGX_MODE_XFI;
962 bgx->lane_to_sds = 0;
963 break;
964 case QLM_MODE_XLAUI_1X4:
965 bgx->lmac_count = 1;
966 bgx->lmac_type = BGX_MODE_XLAUI;
967 bgx->lane_to_sds = 0xE4;
968 break;
969 case QLM_MODE_10G_KR_4X1:
970 bgx->lmac_count = 4;
971 bgx->lmac_type = BGX_MODE_10G_KR;
972 bgx->lane_to_sds = 0;
973 bgx->use_training = 1;
974 break;
975 case QLM_MODE_40G_KR4_1X4:
976 bgx->lmac_count = 1;
977 bgx->lmac_type = BGX_MODE_40G_KR;
978 bgx->lane_to_sds = 0xE4;
979 bgx->use_training = 1;
980 break;
981 default:
982 bgx->lmac_count = 0;
983 break;
984 }
985
 986	/*
 987	 * Check if the low level firmware has programmed the LMAC count
 988	 * based on the board type; if so, use that value, otherwise keep
 989	 * the default static values set above.
 990	 */
991 lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
992 if (lmac_count != 4)
993 bgx->lmac_count = lmac_count;
994}
995
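/*
 * One-time BGX block initialization: program per-LMAC type and lane
 * mapping, the TX/RX LMAC counts and backpressure masks, and clear
 * the DMAC filter and NCSI steering tables.
 */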
996static void
997bgx_init_hw(struct bgx *bgx)
998{
999 int i;
1000
1001 bgx_set_num_ports(bgx);
1002
1003 bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
1004 if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
1005 device_printf(bgx->dev, "BGX%d BIST failed\n", bgx->bgx_id);
1006
1007 /* Set lmac type and lane2serdes mapping */
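	/*
	 * lane_to_sds holds four 2-bit lane-to-SerDes fields; 0xE4
	 * (11 10 01 00) is the identity mapping. RXAUI uses two lanes
	 * per LMAC: 0x04 maps LMAC0 to SerDes 0-1 and 0x0e maps LMAC1
	 * to SerDes 2-3. (Field encoding assumed from the values used
	 * here and in bgx_set_num_ports().)
	 */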
1008 for (i = 0; i < bgx->lmac_count; i++) {
1009 if (bgx->lmac_type == BGX_MODE_RXAUI) {
1010 if (i)
1011 bgx->lane_to_sds = 0x0e;
1012 else
1013 bgx->lane_to_sds = 0x04;
1014 bgx_reg_write(bgx, i, BGX_CMRX_CFG,
1015 (bgx->lmac_type << 8) | bgx->lane_to_sds);
1016 continue;
1017 }
1018 bgx_reg_write(bgx, i, BGX_CMRX_CFG,
1019 (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
1020 bgx->lmac[i].lmacid_bd = lmac_count;
1021 lmac_count++;
1022 }
1023
1024 bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
1025 bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);
1026
1027 /* Set the backpressure AND mask */
1028 for (i = 0; i < bgx->lmac_count; i++) {
1029 bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
1030 ((1UL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
1031 (i * MAX_BGX_CHANS_PER_LMAC));
1032 }
1033
1034 /* Disable all MAC filtering */
1035 for (i = 0; i < RX_DMAC_COUNT; i++)
1036 bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);
1037
1038 /* Disable MAC steering (NCSI traffic) */
1039 for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
1040 bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
1041}
1042
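/*
 * Derive the QLM mode from the LMAC0 type and the link training
 * enable bit, both of which are set by the low level firmware.
 */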
1043static void
1044bgx_get_qlm_mode(struct bgx *bgx)
1045{
 1046	device_t dev = bgx->dev;
1047 int lmac_type;
1048 int train_en;
1049
 1050	/*
 1051	 * Read LMAC0 type to figure out the QLM mode set by low level firmware.
 1052	 */
1053 lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
1054 lmac_type = (lmac_type >> 8) & 0x07;
1055
1056 train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
1057 SPU_PMD_CRTL_TRAIN_EN;
1058
1059 switch (lmac_type) {
1060 case BGX_MODE_SGMII:
1061 bgx->qlm_mode = QLM_MODE_SGMII;
1062 if (bootverbose) {
1063 device_printf(dev, "BGX%d QLM mode: SGMII\n",
1064 bgx->bgx_id);
1065 }
1066 break;
1067 case BGX_MODE_XAUI:
1068 bgx->qlm_mode = QLM_MODE_XAUI_1X4;
1069 if (bootverbose) {
1070 device_printf(dev, "BGX%d QLM mode: XAUI\n",
1071 bgx->bgx_id);
1072 }
1073 break;
1074 case BGX_MODE_RXAUI:
1075 bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
1076 if (bootverbose) {
1077 device_printf(dev, "BGX%d QLM mode: RXAUI\n",
1078 bgx->bgx_id);
1079 }
1080 break;
1081 case BGX_MODE_XFI:
1082 if (!train_en) {
1083 bgx->qlm_mode = QLM_MODE_XFI_4X1;
1084 if (bootverbose) {
1085 device_printf(dev, "BGX%d QLM mode: XFI\n",
1086 bgx->bgx_id);
1087 }
1088 } else {
1089 bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
1090 if (bootverbose) {
1091 device_printf(dev, "BGX%d QLM mode: 10G_KR\n",
1092 bgx->bgx_id);
1093 }
1094 }
1095 break;
1096 case BGX_MODE_XLAUI:
1097 if (!train_en) {
1098 bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
1099 if (bootverbose) {
1100 device_printf(dev, "BGX%d QLM mode: XLAUI\n",
1101 bgx->bgx_id);
1102 }
1103 } else {
1104 bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
1105 if (bootverbose) {
1106 device_printf(dev, "BGX%d QLM mode: 40G_KR4\n",
1107 bgx->bgx_id);
1108 }
1109 }
1110 break;
1111 default:
1112 bgx->qlm_mode = QLM_MODE_SGMII;
1113 if (bootverbose) {
1114 device_printf(dev, "BGX%d QLM default mode: SGMII\n",
1115 bgx->bgx_id);
1116 }
1117 }
1118}
1119
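/*
 * Set up the PHY interface for this BGX; currently only FDT-based
 * attachment is implemented, ACPI remains a TODO.
 */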
1120static int
1121bgx_init_phy(struct bgx *bgx)
1122{
1123 int err;
1124
1125 /* By default we fail */
1126 err = ENXIO;
1127#ifdef FDT
1128 err = bgx_fdt_init_phy(bgx);
1129#endif
1130#ifdef ACPI
1131 if (err != 0) {
1132 /* ARM64TODO: Add ACPI function here */
1133 }
1134#endif
1135 return (err);
1136}