1289550Szbb/* 2289550Szbb * Copyright (C) 2015 Cavium Inc. 3289550Szbb * All rights reserved. 4289550Szbb * 5289550Szbb * Redistribution and use in source and binary forms, with or without 6289550Szbb * modification, are permitted provided that the following conditions 7289550Szbb * are met: 8289550Szbb * 1. Redistributions of source code must retain the above copyright 9289550Szbb * notice, this list of conditions and the following disclaimer. 10289550Szbb * 2. Redistributions in binary form must reproduce the above copyright 11289550Szbb * notice, this list of conditions and the following disclaimer in the 12289550Szbb * documentation and/or other materials provided with the distribution. 13289550Szbb * 14289550Szbb * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15289550Szbb * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16289550Szbb * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17289550Szbb * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18289550Szbb * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19289550Szbb * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20289550Szbb * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21289550Szbb * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22289550Szbb * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23289550Szbb * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24289550Szbb * SUCH DAMAGE. 
 *
 * $FreeBSD$
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>

#include <machine/bus.h>
#include <machine/_inttypes.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <sys/dnv.h>
#include <sys/nv.h>
#ifdef PCI_IOV
#include <sys/iov_schema.h>
#include <dev/pci/pci_iov.h>
#endif

#include "thunder_bgx.h"
#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"

/* Human-readable device description reported on probe. */
#define VNIC_PF_DEVSTR		"Cavium Thunder NIC Physical Function Driver"

/* Resource ID of the BAR holding the PF register block. */
#define VNIC_PF_REG_RID		PCIR_BAR(PCI_CFG_REG_BAR_NUM)

/*
 * The VF::LMAC mapping is packed into one byte:
 * upper nibble = BGX index, lower nibble = LMAC index within that BGX.
 */
#define NIC_SET_VF_LMAC_MAP(bgx, lmac)	((((bgx) & 0xF) << 4) | ((lmac) & 0xF))
#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map)	(((map) >> 4) & 0xF)
#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map)	((map) & 0xF)

/* Structure to be used by the SR-IOV for VF configuration schemas */
struct
nicvf_info {
	boolean_t		vf_enabled;	/* VF slot is active */
	int			vf_flags;	/* per-VF flags (schema use) */
};

/* Software state of the NIC physical function (one per PF device). */
struct nicpf {
	device_t		dev;		/* newbus device handle */
	uint8_t			node;		/* CPU node this NIC sits on */
	u_int			flags;		/* NIC_TNS_ENABLED etc. */
	uint8_t			num_vf_en;      /* No of VF enabled */
	struct nicvf_info	vf_info[MAX_NUM_VFS_SUPPORTED];
	struct resource *	reg_base;       /* Register start address */
	struct pkind_cfg	pkind;		/* shadow of PKIND register */
	/* Packed BGX/LMAC assignment per VF, see NIC_SET_VF_LMAC_MAP(). */
	uint8_t			vf_lmac_map[MAX_LMAC];
	boolean_t		mbx_lock[MAX_NUM_VFS_SUPPORTED];

	/* Periodic link-state poll (only when TNS bypass mode is used). */
	struct callout		check_link;
	struct mtx		check_link_mtx;

	/* Last link state reported to each VF. */
	uint8_t			link[MAX_LMAC];
	uint8_t			duplex[MAX_LMAC];
	uint32_t		speed[MAX_LMAC];
	uint16_t		cpi_base[MAX_NUM_VFS_SUPPORTED];
	uint16_t		rssi_base[MAX_NUM_VFS_SUPPORTED];
	uint16_t		rss_ind_tbl_size;

	/* MSI-X */
	boolean_t		msix_enabled;
	uint8_t			num_vec;
	struct msix_entry	msix_entries[NIC_PF_MSIX_VECTORS];
	struct resource *	msix_table_res;
};

static int nicpf_probe(device_t);
static int nicpf_attach(device_t);
static int nicpf_detach(device_t);

#ifdef PCI_IOV
static int nicpf_iov_init(device_t, uint16_t, const nvlist_t *);
static void nicpf_iov_uninit(device_t);
static int nicpf_iov_add_vf(device_t, uint16_t, const nvlist_t *);
#endif

static device_method_t nicpf_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nicpf_probe),
	DEVMETHOD(device_attach,	nicpf_attach),
	DEVMETHOD(device_detach,	nicpf_detach),
	/* PCI SR-IOV interface */
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init,		nicpf_iov_init),
	DEVMETHOD(pci_iov_uninit,	nicpf_iov_uninit),
	DEVMETHOD(pci_iov_add_vf,	nicpf_iov_add_vf),
#endif
	DEVMETHOD_END,
};

static driver_t vnicpf_driver = {
	"vnicpf",
	nicpf_methods,
	sizeof(struct nicpf),
};

static devclass_t vnicpf_devclass;

DRIVER_MODULE(vnicpf, pci, vnicpf_driver, vnicpf_devclass, 0, 0);
MODULE_VERSION(vnicpf, 1);
MODULE_DEPEND(vnicpf, pci, 1, 1, 1);
MODULE_DEPEND(vnicpf, ether, 1, 1, 1);
MODULE_DEPEND(vnicpf, thunder_bgx, 1, 1, 1);

static int nicpf_alloc_res(struct nicpf *);
static void nicpf_free_res(struct nicpf *);
static void nic_set_lmac_vf_mapping(struct nicpf *);
static void nic_init_hw(struct nicpf *);
static int nic_sriov_init(device_t, struct nicpf *);
static void nic_poll_for_link(void *);
static int nic_register_interrupts(struct nicpf *);
static void nic_unregister_interrupts(struct nicpf *);

/*
 * Device interface
 */

/*
 * Match the ThunderX NIC PF by PCI vendor/device ID and set the
 * device description.  Returns BUS_PROBE_DEFAULT on a match,
 * ENXIO otherwise.
 */
static int
nicpf_probe(device_t dev)
{
	uint16_t vendor_id;
	uint16_t device_id;

	vendor_id = pci_get_vendor(dev);
	device_id = pci_get_device(dev);

	if (vendor_id == PCI_VENDOR_ID_CAVIUM &&
	    device_id == PCI_DEVICE_ID_THUNDER_NIC_PF) {
		device_set_desc(dev, VNIC_PF_DEVSTR);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

/*
 * Attach the PF: map registers, initialize the hardware, hook up
 * interrupts and SR-IOV, and start the link poll callout.
 */
static int
nicpf_attach(device_t dev)
{
	struct nicpf *nic;
	int err;

	nic = device_get_softc(dev);
	nic->dev = dev;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate PCI resources (register BAR) */
	err = nicpf_alloc_res(nic);
	if (err != 0) {
		device_printf(dev, "Could not allocate PCI resources\n");
		return (err);
	}

	nic->node = nic_get_node_id(nic->reg_base);

	/* Enable Traffic Network Switch (TNS) bypass mode by default */
	nic->flags &= ~NIC_TNS_ENABLED;
	nic_set_lmac_vf_mapping(nic);

	/* Initialize hardware */
	nic_init_hw(nic);

	/* Set RSS TBL size for each VF */
	nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;

	/* Setup interrupts */
	err = nic_register_interrupts(nic);
	if (err != 0)
		goto err_free_res;

	/* Configure SRIOV */
	err = nic_sriov_init(dev, nic);
	if (err != 0)
		goto err_free_intr;

	/* In TNS mode the link poll is not used. */
	if (nic->flags & NIC_TNS_ENABLED)
		return (0);

	mtx_init(&nic->check_link_mtx, "VNIC PF link poll", NULL, MTX_DEF);
	/* Register physical link status poll callout */
	callout_init_mtx(&nic->check_link, &nic->check_link_mtx, 0);
	mtx_lock(&nic->check_link_mtx);
	nic_poll_for_link(nic);
	mtx_unlock(&nic->check_link_mtx);

	return (0);

	/* Unwind in reverse order of acquisition. */
err_free_intr:
	nic_unregister_interrupts(nic);
err_free_res:
	nicpf_free_res(nic);
	pci_disable_busmaster(dev);

	return (err);
}

static int
nicpf_detach(device_t dev)
{
	struct nicpf *nic;
	int err;

	err = 0;
	nic = device_get_softc(dev);

	/* Stop the link poll before tearing anything down. */
	callout_drain(&nic->check_link);
	mtx_destroy(&nic->check_link_mtx);

	nic_unregister_interrupts(nic);
	nicpf_free_res(nic);
	pci_disable_busmaster(dev);

#ifdef PCI_IOV
	/* Fails (and we report it) if VFs are still attached. */
	err = pci_iov_detach(dev);
	if (err != 0)
		device_printf(dev, "SR-IOV in use. Detach first.\n");
#endif
	return (err);
}

/*
 * SR-IOV interface
 */
#ifdef PCI_IOV
/*
 * Called by the IOV framework when SR-IOV is being enabled.
 * Just records that VFs may now be created; rejects a zero VF count.
 */
static int
nicpf_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
{
	struct nicpf *nic;

	nic = device_get_softc(dev);

	if (num_vfs == 0)
		return (ENXIO);

	nic->flags |= NIC_SRIOV_ENABLED;

	return (0);
}

static void
nicpf_iov_uninit(device_t dev)
{

	/* ARM64TODO: Implement this function */
}

/*
 * Per-VF creation hook: validates the VF number and, if the config
 * schema carries a "mac-addr" blob, programs it into the matching
 * BGX LMAC.
 */
static int
nicpf_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
	const void *mac;
	struct nicpf *nic;
	size_t size;
	int bgx, lmac;

	nic = device_get_softc(dev);

	if ((nic->flags & NIC_SRIOV_ENABLED) == 0)
		return (ENXIO);

	/* VF numbers are 0-based; only num_vf_en of them are mapped. */
	if (vfnum > (nic->num_vf_en - 1))
		return (EINVAL);

	if (nvlist_exists_binary(params, "mac-addr") != 0) {
		mac =
nvlist_get_binary(params, "mac-addr", &size);
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vfnum]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vfnum]);
		bgx_set_lmac_mac(nic->node, bgx, lmac, mac);
	}

	return (0);
}
#endif

/*
 * Helper routines
 */

/* Map the PF register BAR.  Returns 0 on success, ENXIO on failure. */
static int
nicpf_alloc_res(struct nicpf *nic)
{
	device_t dev;
	int rid;

	dev = nic->dev;

	rid = VNIC_PF_REG_RID;
	nic->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (nic->reg_base == NULL) {
		/* For verbose output print some more details */
		if (bootverbose) {
			device_printf(dev,
			    "Could not allocate registers memory\n");
		}
		return (ENXIO);
	}

	return (0);
}

/* Release the register BAR if it was mapped. */
static void
nicpf_free_res(struct nicpf *nic)
{
	device_t dev;

	dev = nic->dev;

	if (nic->reg_base != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(nic->reg_base), nic->reg_base);
	}
}

/* Register read/write APIs */
static __inline void
nic_reg_write(struct nicpf *nic, bus_space_handle_t offset,
    uint64_t val)
{

	bus_write_8(nic->reg_base, offset, val);
}

static __inline uint64_t
nic_reg_read(struct nicpf *nic, uint64_t offset)
{
	uint64_t val;

	val = bus_read_8(nic->reg_base,
offset);
	return (val);
}

/* PF -> VF mailbox communication APIs */

static void
nic_enable_mbx_intr(struct nicpf *nic)
{

	/* Enable mailbox interrupt for all 128 VFs (two 64-bit W1S regs) */
	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0UL);
	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(uint64_t), ~0UL);
}

/* Acknowledge VF 'vf' mailbox interrupt in interrupt register 'mbx_reg'. */
static void
nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
{

	nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), (1UL << vf));
}

/* Register offset of the mailbox window for VF 'vf'. */
static uint64_t
nic_get_mbx_addr(int vf)
{

	return (NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT));
}

/*
 * Send a mailbox message to VF
 * @vf: vf to which this message to be sent
 * @mbx: Message to be sent
 */
static void
nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
{
	bus_space_handle_t mbx_addr = nic_get_mbx_addr(vf);
	uint64_t *msg = (uint64_t *)mbx;

	/*
	 * In first revision HW, mbox interrupt is triggered
	 * when PF writes to MBOX(1), in next revisions when
	 * PF writes to MBOX(0).  Hence the write order below
	 * differs per silicon pass and must not be reordered.
	 */
	if (pass1_silicon(nic->dev)) {
		nic_reg_write(nic, mbx_addr + 0, msg[0]);
		nic_reg_write(nic, mbx_addr + 8, msg[1]);
	} else {
		nic_reg_write(nic, mbx_addr + 8, msg[1]);
		nic_reg_write(nic, mbx_addr + 0, msg[0]);
	}
}

/*
 * Responds to VF's READY message with VF's
 * ID, node, MAC address etc.
 * @vf: VF which sent READY message
 */
static void
nic_mbx_send_ready(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	int bgx_idx, lmac;
	const char *mac;

	mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
	mbx.nic_cfg.vf_id = vf;

	if (nic->flags & NIC_TNS_ENABLED)
		mbx.nic_cfg.tns_mode = NIC_TNS_MODE;
	else
		mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;

	/* Only VFs backed by a physical LMAC have a MAC to report. */
	if (vf < MAX_LMAC) {
		bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

		mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
		if (mac) {
			memcpy((uint8_t *)&mbx.nic_cfg.mac_addr, mac,
			    ETHER_ADDR_LEN);
		}
	}
	mbx.nic_cfg.node_id = nic->node;

	mbx.nic_cfg.loopback_supported = vf < MAX_LMAC;

	nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * ACKs VF's mailbox message
 * @vf: VF to which ACK to be sent
 */
static void
nic_mbx_send_ack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_ACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * NACKs VF's mailbox message that PF is not able to
 * complete the action
 * @vf: VF to which ACK to be sent
 */
static void
nic_mbx_send_nack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_NACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * Flush all in flight receive packets to memory and
 * bring down an active RQ
 */
static int
nic_rcv_queue_sw_sync(struct nicpf *nic)
{
	/* Bounded busy-wait: 0xFFFF polls of the DONE bit. */
	uint16_t timeout = ~0x00;

	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
	/* Wait till sync cycle is finished */
	while (timeout) {
		if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
			break;
		timeout--;
	}
	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
	if (!timeout) {
		device_printf(nic->dev, "Receive queue software sync failed\n");
		return (ETIMEDOUT);
	}
	return (0);
}

/* Get BGX Rx/Tx stats and respond to VF's request */
static void
nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
{
	int bgx_idx, lmac;
	union nic_mbx mbx = {};

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = bgx->vf_id;
	mbx.bgx_stats.rx = bgx->rx;
	mbx.bgx_stats.idx = bgx->idx;
	/* 'rx' selects whether the VF asked for an Rx or a Tx counter. */
	if (bgx->rx != 0) {
		mbx.bgx_stats.stats =
		    bgx_get_rx_stats(nic->node, bgx_idx, lmac, bgx->idx);
	} else {
		mbx.bgx_stats.stats =
		    bgx_get_tx_stats(nic->node, bgx_idx, lmac, bgx->idx);
	}
	nic_send_msg_to_vf(nic, bgx->vf_id, &mbx);
}

/* Update hardware min/max frame size */
static int
nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
{

	if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
		device_printf(nic->dev,
		    "Invalid MTU setting from VF%d rejected, "
		    "should be between %d and %d\n",
		    vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
		return (EINVAL);
	}
	/* The PKIND max length includes the Ethernet header. */
	new_frs += ETHER_HDR_LEN;
	/* maxlen only ever grows; a smaller request is already covered. */
	if (new_frs <= nic->pkind.maxlen)
		return (0);

	nic->pkind.maxlen = new_frs;
	nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(uint64_t *)&nic->pkind);
	return (0);
}

/* Set minimum transmit packet size */
static void
nic_set_tx_pkt_pad(struct nicpf *nic, int size)
{
	int lmac;
	uint64_t lmac_cfg;

	/* Max value that can be set is 60 */
	if (size > 60)
		size = 60;

	/* The MIN_PKT_SIZE field is bits <5:2>, in units of 4 bytes. */
	for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
		lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
		lmac_cfg &= ~(0xF << 2);
		lmac_cfg |= ((size / 4) << 2);
		nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
	}
}

/*
 * Function to check number of LMACs present and set VF::LMAC mapping.
 * Mapping will be used while initializing channels.
 */
static void
nic_set_lmac_vf_mapping(struct nicpf *nic)
{
	unsigned bgx_map = bgx_get_map(nic->node);
	int bgx, next_bgx_lmac = 0;
	int lmac, lmac_cnt = 0;
	uint64_t lmac_credit;

	nic->num_vf_en = 0;
	if (nic->flags & NIC_TNS_ENABLED) {
		nic->num_vf_en = DEFAULT_NUM_VF_ENABLED;
		return;
	}

	for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
		/* Skip BGX blocks that are not present/enabled. */
		if ((bgx_map & (1 << bgx)) == 0)
			continue;
		lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
		/* One VF per LMAC, assigned in discovery order. */
		for (lmac = 0; lmac < lmac_cnt; lmac++)
			nic->vf_lmac_map[next_bgx_lmac++] =
			    NIC_SET_VF_LMAC_MAP(bgx, lmac);
		nic->num_vf_en += lmac_cnt;

		/* Program LMAC credits */
		lmac_credit = (1UL << 1); /* channel credit enable */
		lmac_credit |= (0x1ff << 2); /* Max outstanding pkt count */
		/* 48KB BGX Tx buffer size, each unit is of size 16bytes */
		lmac_credit |= (((((48 * 1024) / lmac_cnt) -
		    NIC_HW_MAX_FRS) / 16) << 12);
		lmac = bgx * MAX_LMAC_PER_BGX;
		for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++) {
			nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
			    lmac_credit);
		}
	}
}

/* NIC_PF_INTF_0_1_SEND_CFG "block number" encodings. */
#define TNS_PORT0_BLOCK 6
#define TNS_PORT1_BLOCK 7
#define BGX0_BLOCK 8
#define BGX1_BLOCK 9

/* One-time NIC hardware block initialization, called from attach. */
static void
nic_init_hw(struct nicpf *nic)
{
	int i;

	/* Enable NIC HW block */
	nic_reg_write(nic, NIC_PF_CFG, 0x3);

	/* Enable backpressure */
	nic_reg_write(nic, NIC_PF_BP_CFG,
(1UL << 6) | 0x03);

	/* Steer traffic either through TNS or straight to the BGX blocks. */
	if (nic->flags & NIC_TNS_ENABLED) {
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
		    (NIC_TNS_MODE << 7) | TNS_PORT0_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
		    (NIC_TNS_MODE << 7) | TNS_PORT1_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
		    (1UL << 63) | TNS_PORT0_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
		    (1UL << 63) | TNS_PORT1_BLOCK);

	} else {
		/* Disable TNS mode on both interfaces */
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
		    (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
		    (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
		    (1UL << 63) | BGX0_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
		    (1UL << 63) | BGX1_BLOCK);
	}

	/* PKIND configuration */
	nic->pkind.minlen = 0;
	nic->pkind.maxlen = NIC_HW_MAX_FRS + ETHER_HDR_LEN;
	nic->pkind.lenerr_en = 1;
	nic->pkind.rx_hdr = 0;
	nic->pkind.hdr_sl = 0;

	/* Program the same PKIND config into every PKIND slot. */
	for (i = 0; i < NIC_MAX_PKIND; i++) {
		nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3),
		    *(uint64_t *)&nic->pkind);
	}

	nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);

	/* Timer config */
	nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);

	/* Enable VLAN ethertype matching and stripping */
	nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
	    (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETHERTYPE_VLAN);
}
/* Channel parse index configuration */
static void
nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
{
	uint32_t vnic, bgx, lmac, chan;
	uint32_t padd, cpi_count = 0;
	uint64_t cpi_base, cpi, rssi_base, rssi;
	uint8_t qset, rq_idx = 0;

	vnic = cfg->vf_id;
	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);

	/* Per-LMAC base indices for channels, CPIs and RSS indices. */
	chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
	cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX);
	rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX);

	/* Rx channel configuration */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
	    (1UL << 63) | (vnic << 0));
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
	    ((uint64_t)cfg->cpi_alg << 62) | (cpi_base << 48));

	/* Number of CPIs depends on the parse algorithm requested. */
	if (cfg->cpi_alg == CPI_ALG_NONE)
		cpi_count = 1;
	else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
		cpi_count = 8;
	else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
		cpi_count = 16;
	else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6bits DSCP */
		cpi_count = NIC_MAX_CPI_PER_LMAC;

	/* RSS Qset, Qidx mapping */
	qset = cfg->vf_id;
	rssi = rssi_base;
	for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
		    (qset << 3) | rq_idx);
		rq_idx++;
	}

	rssi = 0;
	cpi = cpi_base;
	for (; cpi < (cpi_base + cpi_count);
cpi++) {
		/* Determine port to channel adder */
		if (cfg->cpi_alg != CPI_ALG_DIFF)
			padd = cpi % cpi_count;
		else
			padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */

		/* Leave RSS_SIZE as '0' to disable RSS */
		if (pass1_silicon(nic->dev)) {
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
			    (vnic << 24) | (padd << 16) | (rssi_base + rssi));
		} else {
			/* Set MPI_ALG to '0' to disable MCAM parsing */
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
			    (padd << 16));
			/* MPI index is same as CPI if MPI_ALG is not enabled */
			nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
			    (vnic << 24) | (rssi_base + rssi));
		}

		/* Don't advance past the last receive queue. */
		if ((rssi + 1) >= cfg->rq_cnt)
			continue;

		if (cfg->cpi_alg == CPI_ALG_VLAN)
			rssi++;
		else if (cfg->cpi_alg == CPI_ALG_VLAN16)
			rssi = ((cpi - cpi_base) & 0xe) >> 1;
		else if (cfg->cpi_alg == CPI_ALG_DIFF)
			rssi = ((cpi - cpi_base) & 0x38) >> 3;
	}
	nic->cpi_base[cfg->vf_id] = cpi_base;
	nic->rssi_base[cfg->vf_id] = rssi_base;
}

/* Responds to VF with its RSS indirection table size */
static void
nic_send_rss_size(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	uint64_t *msg;

	msg = (uint64_t *)&mbx;

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/*
 * Receive side scaling configuration
 * configure:
 * - RSS index
 * - indir table i.e hash::RQ mapping
 * - no of hash bits to consider
 */
static void
nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
{
	uint8_t qset, idx;
	uint64_t cpi_cfg, cpi_base, rssi_base, rssi;
	uint64_t idx_addr;

	idx = 0;
	rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset;

	rssi = rssi_base;
	qset = cfg->vf_id;

	/* Program the VF-supplied indirection table entries (RQ in <2:0>). */
	for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
		    (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
		idx++;
	}

	/* Hash-bit count lives in CPI cfg on pass1, in MPI cfg afterwards. */
	cpi_base = nic->cpi_base[cfg->vf_id];
	if (pass1_silicon(nic->dev))
		idx_addr = NIC_PF_CPI_0_2047_CFG;
	else
		idx_addr = NIC_PF_MPI_0_2047_CFG;
	cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3));
	cpi_cfg &= ~(0xFUL << 20);
	cpi_cfg |= (cfg->hash_bits << 20);
	nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg);
}

/*
 * 4 level transmit side scheduler configuration
 * for TNS bypass mode
 *
 * Sample configuration for SQ0
 * VNIC0-SQ0 -> TL4(0) -> TL3[0] -> TL2[0] -> TL1[0] -> BGX0
 * VNIC1-SQ0 -> TL4(8) -> TL3[2] -> TL2[0] -> TL1[0] -> BGX0
 * VNIC2-SQ0 -> TL4(16) -> TL3[4] -> TL2[1] -> TL1[0] -> BGX0
 * VNIC3-SQ0 -> TL4(24) -> TL3[6] -> TL2[1] -> TL1[0] -> BGX0
 * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] ->
BGX1
 * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
 */
static void
nic_tx_channel_cfg(struct nicpf *nic, uint8_t vnic, struct sq_cfg_msg *sq)
{
	uint32_t bgx, lmac, chan;
	uint32_t tl2, tl3, tl4;
	uint32_t rr_quantum;
	uint8_t sq_idx = sq->sq_num;
	uint8_t pqs_vnic;

	pqs_vnic = vnic;

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);

	/* 24 bytes for FCS, IPG and preamble */
	rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);

	/* Pick the TL4 node for this VF's SQ, then derive TL3/TL2 from it. */
	tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
	tl4 += sq_idx;

	tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
	nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
	    ((uint64_t)vnic << NIC_QS_ID_SHIFT) |
	    ((uint32_t)sq_idx << NIC_Q_NUM_SHIFT), tl4);
	nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
	    ((uint64_t)vnic << 27) | ((uint32_t)sq_idx << 24) | rr_quantum);

	nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
	chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
	nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
	/* Enable backpressure on the channel */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);

	tl2 = tl3 >> 2;
	nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
	nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
	/* No priorities as of now */
	nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
}
864289551Szbbstatic int 865289551Szbbnic_config_loopback(struct nicpf *nic, struct set_loopback *lbk) 866289550Szbb{ 867289550Szbb int bgx_idx, lmac_idx; 868289550Szbb 869289550Szbb if (lbk->vf_id > MAX_LMAC) 870289551Szbb return (ENXIO); 871289550Szbb 872289550Szbb bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]); 873289550Szbb lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]); 874289550Szbb 875289550Szbb bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable); 876289550Szbb 877289551Szbb return (0); 878289550Szbb} 879289550Szbb 880289550Szbb/* Interrupt handler to handle mailbox messages from VFs */ 881289551Szbbstatic void 882289551Szbbnic_handle_mbx_intr(struct nicpf *nic, int vf) 883289550Szbb{ 884289550Szbb union nic_mbx mbx = {}; 885289551Szbb uint64_t *mbx_data; 886289551Szbb uint64_t mbx_addr; 887289551Szbb uint64_t reg_addr; 888289551Szbb uint64_t cfg; 889289550Szbb int bgx, lmac; 890289550Szbb int i; 891289550Szbb int ret = 0; 892289550Szbb 893289551Szbb nic->mbx_lock[vf] = TRUE; 894289550Szbb 895289550Szbb mbx_addr = nic_get_mbx_addr(vf); 896289551Szbb mbx_data = (uint64_t *)&mbx; 897289550Szbb 898289550Szbb for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) { 899289550Szbb *mbx_data = nic_reg_read(nic, mbx_addr); 900289550Szbb mbx_data++; 901289551Szbb mbx_addr += sizeof(uint64_t); 902289550Szbb } 903289550Szbb 904289550Szbb switch (mbx.msg.msg) { 905289550Szbb case NIC_MBOX_MSG_READY: 906289550Szbb nic_mbx_send_ready(nic, vf); 907289550Szbb if (vf < MAX_LMAC) { 908289550Szbb nic->link[vf] = 0; 909289550Szbb nic->duplex[vf] = 0; 910289550Szbb nic->speed[vf] = 0; 911289550Szbb } 912289550Szbb ret = 1; 913289550Szbb break; 914289550Szbb case NIC_MBOX_MSG_QS_CFG: 915289550Szbb reg_addr = NIC_PF_QSET_0_127_CFG | 916289551Szbb (mbx.qs.num << NIC_QS_ID_SHIFT); 917289550Szbb cfg = mbx.qs.cfg; 918289550Szbb nic_reg_write(nic, reg_addr, cfg); 919289550Szbb break; 920289550Szbb case NIC_MBOX_MSG_RQ_CFG: 
921289550Szbb reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG | 922289551Szbb (mbx.rq.qs_num << NIC_QS_ID_SHIFT) | 923289551Szbb (mbx.rq.rq_num << NIC_Q_NUM_SHIFT); 924289550Szbb nic_reg_write(nic, reg_addr, mbx.rq.cfg); 925289550Szbb break; 926289550Szbb case NIC_MBOX_MSG_RQ_BP_CFG: 927289550Szbb reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG | 928289551Szbb (mbx.rq.qs_num << NIC_QS_ID_SHIFT) | 929289551Szbb (mbx.rq.rq_num << NIC_Q_NUM_SHIFT); 930289550Szbb nic_reg_write(nic, reg_addr, mbx.rq.cfg); 931289550Szbb break; 932289550Szbb case NIC_MBOX_MSG_RQ_SW_SYNC: 933289550Szbb ret = nic_rcv_queue_sw_sync(nic); 934289550Szbb break; 935289550Szbb case NIC_MBOX_MSG_RQ_DROP_CFG: 936289550Szbb reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG | 937289551Szbb (mbx.rq.qs_num << NIC_QS_ID_SHIFT) | 938289551Szbb (mbx.rq.rq_num << NIC_Q_NUM_SHIFT); 939289550Szbb nic_reg_write(nic, reg_addr, mbx.rq.cfg); 940289550Szbb break; 941289550Szbb case NIC_MBOX_MSG_SQ_CFG: 942289550Szbb reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG | 943289551Szbb (mbx.sq.qs_num << NIC_QS_ID_SHIFT) | 944289551Szbb (mbx.sq.sq_num << NIC_Q_NUM_SHIFT); 945289550Szbb nic_reg_write(nic, reg_addr, mbx.sq.cfg); 946289550Szbb nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq); 947289550Szbb break; 948289550Szbb case NIC_MBOX_MSG_SET_MAC: 949289550Szbb lmac = mbx.mac.vf_id; 950289550Szbb bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]); 951289550Szbb lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]); 952289550Szbb bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr); 953289550Szbb break; 954289550Szbb case NIC_MBOX_MSG_SET_MAX_FRS: 955289551Szbb ret = nic_update_hw_frs(nic, mbx.frs.max_frs, mbx.frs.vf_id); 956289550Szbb break; 957289550Szbb case NIC_MBOX_MSG_CPI_CFG: 958289550Szbb nic_config_cpi(nic, &mbx.cpi_cfg); 959289550Szbb break; 960299444Szbb case NIC_MBOX_MSG_RSS_SIZE: 961299444Szbb nic_send_rss_size(nic, vf); 962299444Szbb goto unlock; 963299444Szbb case NIC_MBOX_MSG_RSS_CFG: 964299444Szbb case 
NIC_MBOX_MSG_RSS_CFG_CONT: /* fall through */ 965299444Szbb nic_config_rss(nic, &mbx.rss_cfg); 966299444Szbb break; 967289550Szbb case NIC_MBOX_MSG_CFG_DONE: 968289550Szbb /* Last message of VF config msg sequence */ 969289551Szbb nic->vf_info[vf].vf_enabled = TRUE; 970289550Szbb goto unlock; 971289550Szbb case NIC_MBOX_MSG_SHUTDOWN: 972289550Szbb /* First msg in VF teardown sequence */ 973289551Szbb nic->vf_info[vf].vf_enabled = FALSE; 974289550Szbb break; 975289550Szbb case NIC_MBOX_MSG_BGX_STATS: 976289550Szbb nic_get_bgx_stats(nic, &mbx.bgx_stats); 977289550Szbb goto unlock; 978289550Szbb case NIC_MBOX_MSG_LOOPBACK: 979289550Szbb ret = nic_config_loopback(nic, &mbx.lbk); 980289550Szbb break; 981289550Szbb default: 982289551Szbb device_printf(nic->dev, 983289551Szbb "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg); 984289550Szbb break; 985289550Szbb } 986289550Szbb 987289551Szbb if (ret == 0) 988289550Szbb nic_mbx_send_ack(nic, vf); 989289550Szbb else if (mbx.msg.msg != NIC_MBOX_MSG_READY) 990289550Szbb nic_mbx_send_nack(nic, vf); 991289550Szbbunlock: 992289551Szbb nic->mbx_lock[vf] = FALSE; 993289550Szbb} 994289550Szbb 995289551Szbbstatic void 996289551Szbbnic_mbx_intr_handler(struct nicpf *nic, int mbx) 997289550Szbb{ 998289551Szbb uint64_t intr; 999289551Szbb uint8_t vf, vf_per_mbx_reg = 64; 1000289550Szbb 1001289550Szbb intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3)); 1002289550Szbb for (vf = 0; vf < vf_per_mbx_reg; vf++) { 1003289551Szbb if (intr & (1UL << vf)) { 1004289550Szbb nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg)); 1005289550Szbb nic_clear_mbx_intr(nic, vf, mbx); 1006289550Szbb } 1007289550Szbb } 1008289550Szbb} 1009289550Szbb 1010289551Szbbstatic void 1011289551Szbbnic_mbx0_intr_handler (void *arg) 1012289550Szbb{ 1013289551Szbb struct nicpf *nic = (struct nicpf *)arg; 1014289550Szbb 1015289550Szbb nic_mbx_intr_handler(nic, 0); 1016289550Szbb} 1017289550Szbb 1018289551Szbbstatic void 1019289551Szbbnic_mbx1_intr_handler (void 
*arg) 1020289550Szbb{ 1021289551Szbb struct nicpf *nic = (struct nicpf *)arg; 1022289550Szbb 1023289550Szbb nic_mbx_intr_handler(nic, 1); 1024289550Szbb} 1025289550Szbb 1026289551Szbbstatic int 1027289551Szbbnic_enable_msix(struct nicpf *nic) 1028289550Szbb{ 1029289551Szbb struct pci_devinfo *dinfo; 1030289551Szbb int rid, count; 1031289551Szbb int ret; 1032289550Szbb 1033289551Szbb dinfo = device_get_ivars(nic->dev); 1034289551Szbb rid = dinfo->cfg.msix.msix_table_bar; 1035289551Szbb nic->msix_table_res = 1036289551Szbb bus_alloc_resource_any(nic->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); 1037289551Szbb if (nic->msix_table_res == NULL) { 1038289551Szbb device_printf(nic->dev, 1039289551Szbb "Could not allocate memory for MSI-X table\n"); 1040289551Szbb return (ENXIO); 1041289551Szbb } 1042289550Szbb 1043289551Szbb count = nic->num_vec = NIC_PF_MSIX_VECTORS; 1044289550Szbb 1045289551Szbb ret = pci_alloc_msix(nic->dev, &count); 1046289551Szbb if ((ret != 0) || (count != nic->num_vec)) { 1047289551Szbb device_printf(nic->dev, 1048289551Szbb "Request for #%d msix vectors failed, error: %d\n", 1049289551Szbb nic->num_vec, ret); 1050289551Szbb return (ret); 1051289550Szbb } 1052289550Szbb 1053289550Szbb nic->msix_enabled = 1; 1054289551Szbb return (0); 1055289550Szbb} 1056289550Szbb 1057289551Szbbstatic void 1058289551Szbbnic_disable_msix(struct nicpf *nic) 1059289550Szbb{ 1060289550Szbb if (nic->msix_enabled) { 1061289551Szbb pci_release_msi(nic->dev); 1062289550Szbb nic->msix_enabled = 0; 1063289550Szbb nic->num_vec = 0; 1064289550Szbb } 1065300295Swma 1066300295Swma bus_release_resource(nic->dev, SYS_RES_MEMORY, 1067300295Swma rman_get_rid(nic->msix_table_res), nic->msix_table_res); 1068289550Szbb} 1069289550Szbb 1070289551Szbbstatic void 1071289551Szbbnic_free_all_interrupts(struct nicpf *nic) 1072289550Szbb{ 1073289550Szbb int irq; 1074289550Szbb 1075289550Szbb for (irq = 0; irq < nic->num_vec; irq++) { 1076289551Szbb if (nic->msix_entries[irq].irq_res == NULL) 
1077289551Szbb continue; 1078289551Szbb if (nic->msix_entries[irq].handle != NULL) { 1079289551Szbb bus_teardown_intr(nic->dev, 1080289551Szbb nic->msix_entries[irq].irq_res, 1081289551Szbb nic->msix_entries[irq].handle); 1082289551Szbb } 1083289551Szbb 1084300295Swma bus_release_resource(nic->dev, SYS_RES_IRQ, irq + 1, 1085289551Szbb nic->msix_entries[irq].irq_res); 1086289550Szbb } 1087289550Szbb} 1088289550Szbb 1089289551Szbbstatic int 1090289551Szbbnic_register_interrupts(struct nicpf *nic) 1091289550Szbb{ 1092289551Szbb int irq, rid; 1093289550Szbb int ret; 1094289550Szbb 1095289550Szbb /* Enable MSI-X */ 1096289550Szbb ret = nic_enable_msix(nic); 1097289551Szbb if (ret != 0) 1098289551Szbb return (ret); 1099289550Szbb 1100289550Szbb /* Register mailbox interrupt handlers */ 1101289551Szbb irq = NIC_PF_INTR_ID_MBOX0; 1102289551Szbb rid = irq + 1; 1103289551Szbb nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev, 1104289551Szbb SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE)); 1105289551Szbb if (nic->msix_entries[irq].irq_res == NULL) { 1106289551Szbb ret = ENXIO; 1107289550Szbb goto fail; 1108289551Szbb } 1109289551Szbb ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res, 1110289551Szbb (INTR_MPSAFE | INTR_TYPE_MISC), NULL, nic_mbx0_intr_handler, nic, 1111289551Szbb &nic->msix_entries[irq].handle); 1112289551Szbb if (ret != 0) 1113289551Szbb goto fail; 1114289550Szbb 1115289551Szbb irq = NIC_PF_INTR_ID_MBOX1; 1116289551Szbb rid = irq + 1; 1117289551Szbb nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev, 1118289551Szbb SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE)); 1119289551Szbb if (nic->msix_entries[irq].irq_res == NULL) { 1120289551Szbb ret = ENXIO; 1121289550Szbb goto fail; 1122289551Szbb } 1123289551Szbb ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res, 1124289551Szbb (INTR_MPSAFE | INTR_TYPE_MISC), NULL, nic_mbx1_intr_handler, nic, 1125289551Szbb &nic->msix_entries[irq].handle); 1126289551Szbb if (ret != 
0) 1127289551Szbb goto fail; 1128289550Szbb 1129289550Szbb /* Enable mailbox interrupt */ 1130289550Szbb nic_enable_mbx_intr(nic); 1131289551Szbb return (0); 1132289550Szbb 1133289550Szbbfail: 1134289550Szbb nic_free_all_interrupts(nic); 1135289551Szbb return (ret); 1136289550Szbb} 1137289550Szbb 1138289551Szbbstatic void 1139289551Szbbnic_unregister_interrupts(struct nicpf *nic) 1140289550Szbb{ 1141289551Szbb 1142289550Szbb nic_free_all_interrupts(nic); 1143289550Szbb nic_disable_msix(nic); 1144289550Szbb} 1145289550Szbb 1146289551Szbbstatic int nic_sriov_init(device_t dev, struct nicpf *nic) 1147289550Szbb{ 1148289551Szbb#ifdef PCI_IOV 1149289551Szbb nvlist_t *pf_schema, *vf_schema; 1150289551Szbb int iov_pos; 1151289550Szbb int err; 1152289551Szbb uint16_t total_vf_cnt; 1153289550Szbb 1154289551Szbb err = pci_find_extcap(dev, PCIZ_SRIOV, &iov_pos); 1155289551Szbb if (err != 0) { 1156289551Szbb device_printf(dev, 1157289551Szbb "SR-IOV capability is not found in PCIe config space\n"); 1158289551Szbb return (err); 1159289550Szbb } 1160289551Szbb /* Fix-up the number of enabled VFs */ 1161289551Szbb total_vf_cnt = pci_read_config(dev, iov_pos + PCIR_SRIOV_TOTAL_VFS, 2); 1162289551Szbb if (total_vf_cnt == 0) 1163297457Szbb return (ENXIO); 1164289550Szbb 1165289551Szbb /* Attach SR-IOV */ 1166289551Szbb pf_schema = pci_iov_schema_alloc_node(); 1167289551Szbb vf_schema = pci_iov_schema_alloc_node(); 1168289551Szbb pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL); 1169289551Szbb /* 1170289551Szbb * All VFs can change their MACs. 1171289551Szbb * This flag will be ignored but we set it just for the record. 
1172289551Szbb */ 1173289551Szbb pci_iov_schema_add_bool(vf_schema, "allow-set-mac", 1174289551Szbb IOV_SCHEMA_HASDEFAULT, TRUE); 1175289550Szbb 1176289551Szbb err = pci_iov_attach(dev, pf_schema, vf_schema); 1177289551Szbb if (err != 0) { 1178289551Szbb device_printf(dev, 1179289551Szbb "Failed to initialize SR-IOV (error=%d)\n", 1180289551Szbb err); 1181289551Szbb return (err); 1182289550Szbb } 1183289551Szbb#endif 1184289551Szbb return (0); 1185289550Szbb} 1186289550Szbb 1187289551Szbb/* 1188289551Szbb * Poll for BGX LMAC link status and update corresponding VF 1189289550Szbb * if there is a change, valid only if internal L2 switch 1190289550Szbb * is not present otherwise VF link is always treated as up 1191289550Szbb */ 1192289551Szbbstatic void 1193289551Szbbnic_poll_for_link(void *arg) 1194289550Szbb{ 1195289550Szbb union nic_mbx mbx = {}; 1196289550Szbb struct nicpf *nic; 1197289550Szbb struct bgx_link_status link; 1198289551Szbb uint8_t vf, bgx, lmac; 1199289550Szbb 1200289551Szbb nic = (struct nicpf *)arg; 1201289550Szbb 1202289550Szbb mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; 1203289550Szbb 1204289550Szbb for (vf = 0; vf < nic->num_vf_en; vf++) { 1205289550Szbb /* Poll only if VF is UP */ 1206289551Szbb if (!nic->vf_info[vf].vf_enabled) 1207289550Szbb continue; 1208289550Szbb 1209289550Szbb /* Get BGX, LMAC indices for the VF */ 1210289550Szbb bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 1211289550Szbb lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); 1212289550Szbb /* Get interface link status */ 1213289550Szbb bgx_get_lmac_link_state(nic->node, bgx, lmac, &link); 1214289550Szbb 1215289550Szbb /* Inform VF only if link status changed */ 1216289550Szbb if (nic->link[vf] == link.link_up) 1217289550Szbb continue; 1218289550Szbb 1219289550Szbb if (!nic->mbx_lock[vf]) { 1220289550Szbb nic->link[vf] = link.link_up; 1221289550Szbb nic->duplex[vf] = link.duplex; 1222289550Szbb nic->speed[vf] = link.speed; 1223289550Szbb 
1224289550Szbb /* Send a mbox message to VF with current link status */ 1225289550Szbb mbx.link_status.link_up = link.link_up; 1226289550Szbb mbx.link_status.duplex = link.duplex; 1227289550Szbb mbx.link_status.speed = link.speed; 1228289550Szbb nic_send_msg_to_vf(nic, vf, &mbx); 1229289550Szbb } 1230289550Szbb } 1231289551Szbb callout_reset(&nic->check_link, hz * 2, nic_poll_for_link, nic); 1232289550Szbb} 1233