Deleted Added
sdiff udiff text old ( 297707 ) new ( 300294 )
full compact
1/*
2 * Copyright (C) 2015 Cavium Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: head/sys/dev/vnic/thunder_bgx.c 300294 2016-05-20 11:00:06Z wma $
27 *
28 */
29#include "opt_platform.h"
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: head/sys/dev/vnic/thunder_bgx.c 300294 2016-05-20 11:00:06Z wma $");
33
34#include <sys/param.h>
35#include <sys/systm.h>
36#include <sys/bitset.h>
37#include <sys/bitstring.h>
38#include <sys/bus.h>
39#include <sys/endian.h>
40#include <sys/kernel.h>
41#include <sys/malloc.h>
42#include <sys/module.h>
43#include <sys/rman.h>
44#include <sys/pciio.h>
45#include <sys/pcpu.h>
46#include <sys/proc.h>
47#include <sys/socket.h>
48#include <sys/sockio.h>
49#include <sys/cpuset.h>
50#include <sys/lock.h>
51#include <sys/mutex.h>
52
53#include <net/ethernet.h>
54#include <net/if.h>
55#include <net/if_media.h>
56
57#include <machine/bus.h>
58
59#include <dev/pci/pcireg.h>
60#include <dev/pci/pcivar.h>
61
62#include "thunder_bgx.h"
63#include "thunder_bgx_var.h"
64#include "nic_reg.h"
65#include "nic.h"
66
67#include "lmac_if.h"
68
69#define THUNDER_BGX_DEVSTR "ThunderX BGX Ethernet I/O Interface"
70
71MALLOC_DEFINE(M_BGX, "thunder_bgx", "ThunderX BGX dynamic memory");
72
73#define BGX_NODE_ID_MASK 0x1
74#define BGX_NODE_ID_SHIFT 24
75
76#define DRV_NAME "thunder-BGX"
77#define DRV_VERSION "1.0"
78
/* Attach the PHY backend (FDT/ACPI) for all LMACs of this BGX. */
static int bgx_init_phy(struct bgx *);

/* Per-node/per-index registry of attached BGX instances. */
static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count __unused; /* Total no of LMACs in system */

static int bgx_xaui_check_link(struct lmac *lmac);
static void bgx_get_qlm_mode(struct bgx *);
static void bgx_init_hw(struct bgx *);
static int bgx_lmac_enable(struct bgx *, uint8_t);
static void bgx_lmac_disable(struct bgx *, uint8_t);

static int thunder_bgx_probe(device_t);
static int thunder_bgx_attach(device_t);
static int thunder_bgx_detach(device_t);

static device_method_t thunder_bgx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		thunder_bgx_probe),
	DEVMETHOD(device_attach,	thunder_bgx_attach),
	DEVMETHOD(device_detach,	thunder_bgx_detach),

	DEVMETHOD_END,
};

/* Softc is a single struct lmac; the bgx itself is allocated in attach. */
static driver_t thunder_bgx_driver = {
	"bgx",
	thunder_bgx_methods,
	sizeof(struct lmac),
};

static devclass_t thunder_bgx_devclass;

DRIVER_MODULE(thunder_bgx, pci, thunder_bgx_driver, thunder_bgx_devclass, 0, 0);
MODULE_VERSION(thunder_bgx, 1);
MODULE_DEPEND(thunder_bgx, pci, 1, 1, 1);
MODULE_DEPEND(thunder_bgx, ether, 1, 1, 1);
MODULE_DEPEND(thunder_bgx, thunder_mdio, 1, 1, 1);
116
117static int
118thunder_bgx_probe(device_t dev)
119{
120 uint16_t vendor_id;
121 uint16_t device_id;
122
123 vendor_id = pci_get_vendor(dev);
124 device_id = pci_get_device(dev);
125
126 if (vendor_id == PCI_VENDOR_ID_CAVIUM &&
127 device_id == PCI_DEVICE_ID_THUNDER_BGX) {
128 device_set_desc(dev, THUNDER_BGX_DEVSTR);
129 return (BUS_PROBE_DEFAULT);
130 }
131
132 return (ENXIO);
133}
134
/*
 * Attach: map the BGX CSR BAR, derive the global BGX ID from the BAR
 * address and node ID, detect the QLM mode programmed by firmware,
 * attach the PHY backend and enable every configured LMAC.
 * Returns 0 on success or an errno; on failure all acquired resources
 * are released via the goto-cleanup chain below.
 */
static int
thunder_bgx_attach(device_t dev)
{
	struct bgx *bgx;
	uint8_t lmac;
	int err;
	int rid;

	bgx = malloc(sizeof(*bgx), M_BGX, (M_WAITOK | M_ZERO));
	bgx->dev = dev;
	/* Enable bus mastering */
	pci_enable_busmaster(dev);
	/* Allocate resources - configuration registers */
	rid = PCIR_BAR(PCI_CFG_REG_BAR_NUM);
	bgx->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (bgx->reg_base == NULL) {
		device_printf(dev, "Could not allocate CSR memory space\n");
		err = ENXIO;
		goto err_disable_device;
	}

	/*
	 * Global BGX index: low bits come from the BAR physical address,
	 * offset by MAX_BGX_PER_CN88XX per NUMA node.
	 */
	bgx->bgx_id = (rman_get_start(bgx->reg_base) >> BGX_NODE_ID_SHIFT) &
	    BGX_NODE_ID_MASK;
	bgx->bgx_id += nic_get_node_id(bgx->reg_base) * MAX_BGX_PER_CN88XX;

	/* Register in the global table before touching hardware. */
	bgx_vnic[bgx->bgx_id] = bgx;
	bgx_get_qlm_mode(bgx);

	err = bgx_init_phy(bgx);
	if (err != 0)
		goto err_free_res;

	bgx_init_hw(bgx);

	/* Enable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
		err = bgx_lmac_enable(bgx, lmac);
		if (err) {
			device_printf(dev, "BGX%d failed to enable lmac%d\n",
			    bgx->bgx_id, lmac);
			goto err_free_res;
		}
	}

	return (0);

err_free_res:
	/* Deregister and release the CSR window. */
	bgx_vnic[bgx->bgx_id] = NULL;
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(bgx->reg_base), bgx->reg_base);
err_disable_device:
	free(bgx, M_BGX);
	pci_disable_busmaster(dev);

	return (err);
}
192
193static int
194thunder_bgx_detach(device_t dev)
195{
196 struct lmac *lmac;
197 struct bgx *bgx;
198 uint8_t lmacid;
199
200 lmac = device_get_softc(dev);
201 bgx = lmac->bgx;
202 /* Disable all LMACs */
203 for (lmacid = 0; lmacid < bgx->lmac_count; lmacid++)
204 bgx_lmac_disable(bgx, lmacid);
205
206 return (0);
207}
208
209/* Register read/write APIs */
210static uint64_t
211bgx_reg_read(struct bgx *bgx, uint8_t lmac, uint64_t offset)
212{
213 bus_space_handle_t addr;
214
215 addr = ((uint32_t)lmac << 20) + offset;
216
217 return (bus_read_8(bgx->reg_base, addr));
218}
219
220static void
221bgx_reg_write(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val)
222{
223 bus_space_handle_t addr;
224
225 addr = ((uint32_t)lmac << 20) + offset;
226
227 bus_write_8(bgx->reg_base, addr, val);
228}
229
230static void
231bgx_reg_modify(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val)
232{
233 bus_space_handle_t addr;
234
235 addr = ((uint32_t)lmac << 20) + offset;
236
237 bus_write_8(bgx->reg_base, addr, val | bus_read_8(bgx->reg_base, addr));
238}
239
240static int
241bgx_poll_reg(struct bgx *bgx, uint8_t lmac, uint64_t reg, uint64_t mask,
242 boolean_t zero)
243{
244 int timeout = 10;
245 uint64_t reg_val;
246
247 while (timeout) {
248 reg_val = bgx_reg_read(bgx, lmac, reg);
249 if (zero && !(reg_val & mask))
250 return (0);
251 if (!zero && (reg_val & mask))
252 return (0);
253
254 DELAY(100);
255 timeout--;
256 }
257 return (ETIMEDOUT);
258}
259
260/* Return number of BGX present in HW */
261u_int
262bgx_get_map(int node)
263{
264 int i;
265 u_int map = 0;
266
267 for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
268 if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
269 map |= (1 << i);
270 }
271
272 return (map);
273}
274
275/* Return number of LMAC configured for this BGX */
276int
277bgx_get_lmac_count(int node, int bgx_idx)
278{
279 struct bgx *bgx;
280
281 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
282 if (bgx != NULL)
283 return (bgx->lmac_count);
284
285 return (0);
286}
287
288/* Returns the current link status of LMAC */
289void
290bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
291{
292 struct bgx_link_status *link = (struct bgx_link_status *)status;
293 struct bgx *bgx;
294 struct lmac *lmac;
295
296 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
297 if (bgx == NULL)
298 return;
299
300 lmac = &bgx->lmac[lmacid];
301 link->link_up = lmac->link_up;
302 link->duplex = lmac->last_duplex;
303 link->speed = lmac->last_speed;
304}
305
306const uint8_t
307*bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
308{
309 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
310
311 if (bgx != NULL)
312 return (bgx->lmac[lmacid].mac);
313
314 return (NULL);
315}
316
317void
318bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const uint8_t *mac)
319{
320 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
321
322 if (bgx == NULL)
323 return;
324
325 memcpy(bgx->lmac[lmacid].mac, mac, ETHER_ADDR_LEN);
326}
327
/*
 * Program the SGMII GMI port/PCS registers to match the link state
 * cached in the lmac (link_up, last_duplex, last_speed). The LMAC is
 * disabled (CMR_EN cleared) for the duration of the update and
 * re-enabled at the end; the register write order is significant.
 */
static void
bgx_sgmii_change_link_state(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	uint64_t cmr_cfg;
	uint64_t port_cfg = 0;
	uint64_t misc_ctl = 0;

	/* Quiesce the LMAC before reprogramming speed/duplex. */
	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
	cmr_cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);

	if (lmac->link_up) {
		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
		/* Duplex bit sits at position 2 of PRTX_CFG. */
		port_cfg |= (lmac->last_duplex << 2);
	} else {
		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
	}

	/* Per-speed settings: speed bits, slot time and sampling point. */
	switch (lmac->last_speed) {
	case 10:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 50; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 5; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 1; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
		/* Burst limit only applies at half duplex. */
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmac->lmacid,
			    BGX_GMP_GMI_TXX_BURST, 0);
		else
			bgx_reg_write(bgx, lmac->lmacid,
			    BGX_GMP_GMI_TXX_BURST, 8192);
		break;
	default:
		break;
	}
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);

	/* Read back PRTX_CFG to flush the writes before re-enabling. */
	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);

	/* renable lmac */
	cmr_cfg |= CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
}
396
/*
 * Periodic (2s) callout for PHY-backed LMACs: query media status from
 * the PHY, detect link transitions, cache the new state, and reprogram
 * the MAC (SGMII path) or re-check the SPU link (XAUI path) on change.
 * Reschedules itself unconditionally.
 */
static void
bgx_lmac_handler(void *arg)
{
	struct lmac *lmac;
	int link, duplex, speed;
	int link_changed = 0;
	int err;

	lmac = (struct lmac *)arg;

	err = LMAC_MEDIA_STATUS(lmac->phy_if_dev, lmac->lmacid,
	    &link, &duplex, &speed);
	if (err != 0)
		goto out;

	/* -1: link went down since the last poll. */
	if (!link && lmac->last_link)
		link_changed = -1;

	/* +1: link is up and any of link/duplex/speed changed. */
	if (link &&
	    (lmac->last_duplex != duplex ||
	     lmac->last_link != link ||
	     lmac->last_speed != speed)) {
		link_changed = 1;
	}

	lmac->last_link = link;
	lmac->last_speed = speed;
	lmac->last_duplex = duplex;

	if (!link_changed)
		goto out;

	if (link_changed > 0)
		lmac->link_up = true;
	else
		lmac->link_up = false;

	/* Push the new state to hardware depending on the MAC mode. */
	if (lmac->is_sgmii)
		bgx_sgmii_change_link_state(lmac);
	else
		bgx_xaui_check_link(lmac);

out:
	/* Re-arm the 2 second poll. */
	callout_reset(&lmac->check_link, hz * 2, bgx_lmac_handler, lmac);
}
442
443uint64_t
444bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
445{
446 struct bgx *bgx;
447
448 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
449 if (bgx == NULL)
450 return (0);
451
452 if (idx > 8)
453 lmac = (0);
454 return (bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8)));
455}
456
457uint64_t
458bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
459{
460 struct bgx *bgx;
461
462 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
463 if (bgx == NULL)
464 return (0);
465
466 return (bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8)));
467}
468
469static void
470bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
471{
472 uint64_t offset;
473
474 while (bgx->lmac[lmac].dmac > 0) {
475 offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(uint64_t)) +
476 (lmac * MAX_DMAC_PER_LMAC * sizeof(uint64_t));
477 bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
478 bgx->lmac[lmac].dmac--;
479 }
480}
481
482void
483bgx_add_dmac_addr(uint64_t dmac, int node, int bgx_idx, int lmac)
484{
485 uint64_t offset;
486 struct bgx *bgx;
487
488#ifdef BGX_IN_PROMISCUOUS_MODE
489 return;
490#endif
491
492 bgx_idx += node * MAX_BGX_PER_CN88XX;
493 bgx = bgx_vnic[bgx_idx];
494
495 if (!bgx) {
496 device_printf(bgx->dev,
497 "BGX%d not yet initialized, ignoring DMAC addition\n",
498 bgx_idx);
499 return;
500 }
501
502 dmac = dmac | (1UL << 48) | ((uint64_t)lmac << 49); /* Enable DMAC */
503 if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC) {
504 device_printf(bgx->dev,
505 "Max DMAC filters for LMAC%d reached, ignoring\n",
506 lmac);
507 return;
508 }
509
510 if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE)
511 bgx->lmac[lmac].dmac = 1;
512
513 offset = (bgx->lmac[lmac].dmac * sizeof(uint64_t)) +
514 (lmac * MAX_DMAC_PER_LMAC * sizeof(uint64_t));
515 bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, dmac);
516 bgx->lmac[lmac].dmac++;
517
518 bgx_reg_write(bgx, lmac, BGX_CMRX_RX_DMAC_CTL,
519 (CAM_ACCEPT << 3) | (MCAST_MODE_CAM_FILTER << 1) |
520 (BCAST_ACCEPT << 0));
521}
522
523/* Configure BGX LMAC in internal loopback mode */
524void
525bgx_lmac_internal_loopback(int node, int bgx_idx,
526 int lmac_idx, boolean_t enable)
527{
528 struct bgx *bgx;
529 struct lmac *lmac;
530 uint64_t cfg;
531
532 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
533 if (bgx == NULL)
534 return;
535
536 lmac = &bgx->lmac[lmac_idx];
537 if (lmac->is_sgmii) {
538 cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
539 if (enable)
540 cfg |= PCS_MRX_CTL_LOOPBACK1;
541 else
542 cfg &= ~PCS_MRX_CTL_LOOPBACK1;
543 bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
544 } else {
545 cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
546 if (enable)
547 cfg |= SPU_CTL_LOOPBACK;
548 else
549 cfg &= ~SPU_CTL_LOOPBACK;
550 bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
551 }
552}
553
/*
 * Bring up an LMAC in SGMII mode: program thresholds and jabber limit,
 * enable the LMAC, reset the PCS and wait for autonegotiation to
 * complete. Returns 0 on success or ENXIO on a reset/AN timeout.
 */
static int
bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
{
	uint64_t cfg;

	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* PCS reset */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	/* Wait for the self-clearing reset bit to drop. */
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
	    PCS_MRX_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX PCS reset not completed\n");
		return (ENXIO);
	}

	/* power down, reset autoneg, autoneg enable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;
	cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	/* Wait for autonegotiation-complete to assert. */
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
	    PCS_MRX_STATUS_AN_CPT, FALSE) != 0) {
		device_printf(bgx->dev, "BGX AN_CPT not completed\n");
		return (ENXIO);
	}

	return (0);
}
593
/*
 * Bring up an LMAC in an XAUI-family mode (XAUI/RXAUI/XFI/XLAUI/KR):
 * reset the SPU, clear interrupts, optionally start link training
 * (KR modes), disable FEC and autoneg, and program SMU TX parameters.
 * Returns 0 on success or ENXIO on SPU reset timeout.
 */
static int
bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
{
	uint64_t cfg;

	/* Reset SPU */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1,
	    SPU_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX SPU reset not completed\n");
		return (ENXIO);
	}

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	/* Set interleaved running disparity for RXAUI */
	if (bgx->lmac_type != BGX_MODE_RXAUI) {
		bgx_reg_modify(bgx, lmacid,
		    BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	} else {
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
		    SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);
	}

	/* clear all interrupts (write-1-to-clear: write back what we read) */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

	/* KR modes require link training before the link can come up. */
	if (bgx->use_training) {
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* training enable */
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL,
		    SPU_PMD_CRTL_TRAIN_EN);
	}

	/* Append FCS to each packet */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Disable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

	/* Advertise only the ability matching the configured KR mode. */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	if (bgx->lmac_type == BGX_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (bgx->lmac_type == BGX_MODE_40G_KR)
		cfg |= (1 << 24);
	else
		cfg &= ~((1 << 23) | (1 << 24));
	cfg = cfg & (~((1UL << 25) | (1UL << 22) | (1UL << 12)));
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* Leave SPU low-power state. */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* take lmac_count into account */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

	return (0);
}
685
/*
 * Validate and (re)establish the XAUI-family link for one LMAC:
 * restart training if needed (KR modes), wait for PCS reset, block/RX
 * alignment, clear latched receive faults, verify SMU RX/TX state, and
 * finally force-verify the latching-low receive-link bit. On success
 * RX is un-gated (SPU_MISC_CTL_RX_DIS cleared). Returns 0 or ENXIO.
 */
static int
bgx_xaui_check_link(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	int lmacid = lmac->lmacid;
	int lmac_type = bgx->lmac_type;
	uint64_t cfg;

	/* Keep RX gated off until the link checks below all pass. */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	if (bgx->use_training) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
		/* Bit 13: training done. If not set, restart training. */
		if ((cfg & (1UL << 13)) == 0) {
			cfg = (1UL << 13) | (1UL << 14);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
			cfg |= (1UL << 0);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
			return (ENXIO);
		}
	}

	/* wait for PCS to come out of reset */
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1,
	    SPU_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX SPU reset not completed\n");
		return (ENXIO);
	}

	/* Serial modes need block lock; parallel modes need RX alignment. */
	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
		    SPU_BR_STATUS_BLK_LOCK, FALSE)) {
			device_printf(bgx->dev,
			    "SPU_BR_STATUS_BLK_LOCK not completed\n");
			return (ENXIO);
		}
	} else {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
		    SPU_BX_STATUS_RX_ALIGN, FALSE) != 0) {
			device_printf(bgx->dev,
			    "SPU_BX_STATUS_RX_ALIGN not completed\n");
			return (ENXIO);
		}
	}

	/* Clear rcvflt bit (latching high) and read it back */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		device_printf(bgx->dev, "Receive fault, retry training\n");
		if (bgx->use_training) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
			if ((cfg & (1UL << 13)) == 0) {
				cfg = (1UL << 13) | (1UL << 14);
				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
				cfg = bgx_reg_read(bgx, lmacid,
				    BGX_SPUX_BR_PMD_CRTL);
				cfg |= (1UL << 0);
				bgx_reg_write(bgx, lmacid,
				    BGX_SPUX_BR_PMD_CRTL, cfg);
				return (ENXIO);
			}
		}
		return (ENXIO);
	}

	/* Wait for MAC RX to be ready */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
	    SMU_RX_CTL_STATUS, TRUE) != 0) {
		device_printf(bgx->dev, "SMU RX link not okay\n");
		return (ENXIO);
	}

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL,
	    SMU_CTL_RX_IDLE, FALSE) != 0) {
		device_printf(bgx->dev, "SMU RX not idle\n");
		return (ENXIO);
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL,
	    SMU_CTL_TX_IDLE, FALSE) != 0) {
		device_printf(bgx->dev, "SMU TX not idle\n");
		return (ENXIO);
	}

	/* Re-check for a fault latched during the waits above. */
	if ((bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) &
	    SPU_STATUS2_RCVFLT) != 0) {
		device_printf(bgx->dev, "Receive fault\n");
		return (ENXIO);
	}

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
	    SPU_STATUS1_RCV_LNK, FALSE) != 0) {
		device_printf(bgx->dev, "SPU receive link down\n");
		return (ENXIO);
	}

	/* All checks passed: un-gate the receive path. */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
	return (0);
}
791
/*
 * Periodic (2s) callout for PHY-less (XAUI-family) LMACs: probe the
 * SPU receive-link bit, update the cached link/speed/duplex state and
 * re-run the full link check on an up transition. Reschedules itself.
 */
static void
bgx_poll_for_link(void *arg)
{
	struct lmac *lmac;
	uint64_t link;

	lmac = (struct lmac *)arg;

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(lmac->bgx, lmac->lmacid,
	    BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
	    SPU_STATUS1_RCV_LNK, false);

	link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
	if (link & SPU_STATUS1_RCV_LNK) {
		lmac->link_up = 1;
		/* XLAUI is 40G; the remaining modes here are 10G. */
		if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
			lmac->last_speed = 40000;
		else
			lmac->last_speed = 10000;
		lmac->last_duplex = 1;
	} else {
		lmac->link_up = 0;
	}

	/* On a state change, run the full link bring-up check. */
	if (lmac->last_link != lmac->link_up) {
		lmac->last_link = lmac->link_up;
		if (lmac->link_up)
			bgx_xaui_check_link(lmac);
	}

	/* Re-arm the 2 second poll. */
	callout_reset(&lmac->check_link, hz * 2, bgx_poll_for_link, lmac);
}
826
827static int
828bgx_lmac_enable(struct bgx *bgx, uint8_t lmacid)
829{
830 uint64_t __unused dmac_bcast = (1UL << 48) - 1;
831 struct lmac *lmac;
832 uint64_t cfg;
833
834 lmac = &bgx->lmac[lmacid];
835 lmac->bgx = bgx;
836
837 if (bgx->lmac_type == BGX_MODE_SGMII) {
838 lmac->is_sgmii = 1;
839 if (bgx_lmac_sgmii_init(bgx, lmacid) != 0)
840 return -1;
841 } else {
842 lmac->is_sgmii = 0;
843 if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type))
844 return -1;
845 }
846
847 if (lmac->is_sgmii) {
848 cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
849 cfg |= ((1UL << 2) | (1UL << 1)); /* FCS and PAD */
850 bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
851 bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
852 } else {
853 cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
854 cfg |= ((1UL << 2) | (1UL << 1)); /* FCS and PAD */
855 bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
856 bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
857 }
858
859 /* Enable lmac */
860 bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
861 CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);
862
863 /* Restore default cfg, incase low level firmware changed it */
864 bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
865
866 /* Add broadcast MAC into all LMAC's DMAC filters */
867 bgx_add_dmac_addr(dmac_bcast, 0, bgx->bgx_id, lmacid);
868
869 if ((bgx->lmac_type != BGX_MODE_XFI) &&
870 (bgx->lmac_type != BGX_MODE_XAUI) &&
871 (bgx->lmac_type != BGX_MODE_XLAUI) &&
872 (bgx->lmac_type != BGX_MODE_40G_KR) &&
873 (bgx->lmac_type != BGX_MODE_10G_KR)) {
874 if (lmac->phy_if_dev == NULL) {
875 device_printf(bgx->dev,
876 "LMAC%d missing interface to PHY\n", lmacid);
877 return (ENXIO);
878 }
879
880 if (LMAC_PHY_CONNECT(lmac->phy_if_dev, lmac->phyaddr,
881 lmacid) != 0) {
882 device_printf(bgx->dev,
883 "LMAC%d could not connect to PHY\n", lmacid);
884 return (ENXIO);
885 }
886 mtx_init(&lmac->check_link_mtx, "BGX link poll", NULL, MTX_DEF);
887 callout_init_mtx(&lmac->check_link, &lmac->check_link_mtx, 0);
888 mtx_lock(&lmac->check_link_mtx);
889 bgx_lmac_handler(lmac);
890 mtx_unlock(&lmac->check_link_mtx);
891 } else {
892 mtx_init(&lmac->check_link_mtx, "BGX link poll", NULL, MTX_DEF);
893 callout_init_mtx(&lmac->check_link, &lmac->check_link_mtx, 0);
894 mtx_lock(&lmac->check_link_mtx);
895 bgx_poll_for_link(lmac);
896 mtx_unlock(&lmac->check_link_mtx);
897 }
898
899 return (0);
900}
901
902static void
903bgx_lmac_disable(struct bgx *bgx, uint8_t lmacid)
904{
905 struct lmac *lmac;
906 uint64_t cmrx_cfg;
907
908 lmac = &bgx->lmac[lmacid];
909
910 /* Stop callout */
911 callout_drain(&lmac->check_link);
912 mtx_destroy(&lmac->check_link_mtx);
913
914 cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
915 cmrx_cfg &= ~(1 << 15);
916 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
917 bgx_flush_dmac_addrs(bgx, lmacid);
918
919 if ((bgx->lmac_type != BGX_MODE_XFI) &&
920 (bgx->lmac_type != BGX_MODE_XLAUI) &&
921 (bgx->lmac_type != BGX_MODE_40G_KR) &&
922 (bgx->lmac_type != BGX_MODE_10G_KR)) {
923 if (lmac->phy_if_dev == NULL) {
924 device_printf(bgx->dev,
925 "LMAC%d missing interface to PHY\n", lmacid);
926 return;
927 }
928 if (LMAC_PHY_DISCONNECT(lmac->phy_if_dev, lmac->phyaddr,
929 lmacid) != 0) {
930 device_printf(bgx->dev,
931 "LMAC%d could not disconnect PHY\n", lmacid);
932 return;
933 }
934 lmac->phy_if_dev = NULL;
935 }
936}
937
938static void
939bgx_set_num_ports(struct bgx *bgx)
940{
941 uint64_t lmac_count;
942
943 switch (bgx->qlm_mode) {
944 case QLM_MODE_SGMII:
945 bgx->lmac_count = 4;
946 bgx->lmac_type = BGX_MODE_SGMII;
947 bgx->lane_to_sds = 0;
948 break;
949 case QLM_MODE_XAUI_1X4:
950 bgx->lmac_count = 1;
951 bgx->lmac_type = BGX_MODE_XAUI;
952 bgx->lane_to_sds = 0xE4;
953 break;
954 case QLM_MODE_RXAUI_2X2:
955 bgx->lmac_count = 2;
956 bgx->lmac_type = BGX_MODE_RXAUI;
957 bgx->lane_to_sds = 0xE4;
958 break;
959 case QLM_MODE_XFI_4X1:
960 bgx->lmac_count = 4;
961 bgx->lmac_type = BGX_MODE_XFI;
962 bgx->lane_to_sds = 0;
963 break;
964 case QLM_MODE_XLAUI_1X4:
965 bgx->lmac_count = 1;
966 bgx->lmac_type = BGX_MODE_XLAUI;
967 bgx->lane_to_sds = 0xE4;
968 break;
969 case QLM_MODE_10G_KR_4X1:
970 bgx->lmac_count = 4;
971 bgx->lmac_type = BGX_MODE_10G_KR;
972 bgx->lane_to_sds = 0;
973 bgx->use_training = 1;
974 break;
975 case QLM_MODE_40G_KR4_1X4:
976 bgx->lmac_count = 1;
977 bgx->lmac_type = BGX_MODE_40G_KR;
978 bgx->lane_to_sds = 0xE4;
979 bgx->use_training = 1;
980 break;
981 default:
982 bgx->lmac_count = 0;
983 break;
984 }
985
986 /*
987 * Check if low level firmware has programmed LMAC count
988 * based on board type, if yes consider that otherwise
989 * the default static values
990 */
991 lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
992 if (lmac_count != 4)
993 bgx->lmac_count = lmac_count;
994}
995
/*
 * One-time BGX hardware init: derive port configuration from the QLM
 * mode, check BIST, program per-LMAC type/lane mapping, set LMAC
 * counts, backpressure masks, and clear all DMAC filtering and NCSI
 * steering rules. Also assigns board-global LMAC IDs via the
 * file-scope lmac_count counter.
 */
static void
bgx_init_hw(struct bgx *bgx)
{
	int i;

	bgx_set_num_ports(bgx);

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	/* Non-zero BIST status means a built-in self-test failure. */
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		device_printf(bgx->dev, "BGX%d BIST failed\n", bgx->bgx_id);

	/* Set lmac type and lane2serdes mapping */
	for (i = 0; i < bgx->lmac_count; i++) {
		if (bgx->lmac_type == BGX_MODE_RXAUI) {
			/* RXAUI pairs lanes: 0x04 for LMAC0, 0x0e for LMAC1. */
			if (i)
				bgx->lane_to_sds = 0x0e;
			else
				bgx->lane_to_sds = 0x04;
			bgx_reg_write(bgx, i, BGX_CMRX_CFG,
			    (bgx->lmac_type << 8) | bgx->lane_to_sds);
			continue;
		}
		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
		    (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
		/* Assign the next board-global LMAC ID. */
		bgx->lmac[i].lmacid_bd = lmac_count;
		lmac_count++;
	}

	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

	/* Set the backpressure AND mask */
	for (i = 0; i < bgx->lmac_count; i++) {
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
		    ((1UL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
		    (i * MAX_BGX_CHANS_PER_LMAC));
	}

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}
1042
1043static void
1044bgx_get_qlm_mode(struct bgx *bgx)
1045{
1046 device_t dev = bgx->dev;;
1047 int lmac_type;
1048 int train_en;
1049
1050 /* Read LMAC0 type to figure out QLM mode
1051 * This is configured by low level firmware
1052 */
1053 lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
1054 lmac_type = (lmac_type >> 8) & 0x07;
1055
1056 train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
1057 SPU_PMD_CRTL_TRAIN_EN;
1058
1059 switch (lmac_type) {
1060 case BGX_MODE_SGMII:
1061 bgx->qlm_mode = QLM_MODE_SGMII;
1062 if (bootverbose) {
1063 device_printf(dev, "BGX%d QLM mode: SGMII\n",
1064 bgx->bgx_id);
1065 }
1066 break;
1067 case BGX_MODE_XAUI:
1068 bgx->qlm_mode = QLM_MODE_XAUI_1X4;
1069 if (bootverbose) {
1070 device_printf(dev, "BGX%d QLM mode: XAUI\n",
1071 bgx->bgx_id);
1072 }
1073 break;
1074 case BGX_MODE_RXAUI:
1075 bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
1076 if (bootverbose) {
1077 device_printf(dev, "BGX%d QLM mode: RXAUI\n",
1078 bgx->bgx_id);
1079 }
1080 break;
1081 case BGX_MODE_XFI:
1082 if (!train_en) {
1083 bgx->qlm_mode = QLM_MODE_XFI_4X1;
1084 if (bootverbose) {
1085 device_printf(dev, "BGX%d QLM mode: XFI\n",
1086 bgx->bgx_id);
1087 }
1088 } else {
1089 bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
1090 if (bootverbose) {
1091 device_printf(dev, "BGX%d QLM mode: 10G_KR\n",
1092 bgx->bgx_id);
1093 }
1094 }
1095 break;
1096 case BGX_MODE_XLAUI:
1097 if (!train_en) {
1098 bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
1099 if (bootverbose) {
1100 device_printf(dev, "BGX%d QLM mode: XLAUI\n",
1101 bgx->bgx_id);
1102 }
1103 } else {
1104 bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
1105 if (bootverbose) {
1106 device_printf(dev, "BGX%d QLM mode: 40G_KR4\n",
1107 bgx->bgx_id);
1108 }
1109 }
1110 break;
1111 default:
1112 bgx->qlm_mode = QLM_MODE_SGMII;
1113 if (bootverbose) {
1114 device_printf(dev, "BGX%d QLM default mode: SGMII\n",
1115 bgx->bgx_id);
1116 }
1117 }
1118}
1119
/*
 * Initialize the PHY backend for this BGX. Currently only the FDT
 * path is implemented; the ACPI path is a placeholder. Returns 0 on
 * success or ENXIO when no backend could attach.
 */
static int
bgx_init_phy(struct bgx *bgx)
{
	int err;

	/* By default we fail */
	err = ENXIO;
#ifdef FDT
	err = bgx_fdt_init_phy(bgx);
#endif
#ifdef ACPI
	/* Only fall back to ACPI when the FDT path did not succeed. */
	if (err != 0) {
		/* ARM64TODO: Add ACPI function here */
	}
#endif
	return (err);
}