/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/vnic/nicvf_main.c 300294 2016-05-20 11:00:06Z wma $
 *
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/vnic/nicvf_main.c 300294 2016-05-20 11:00:06Z wma $");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/stdatomic.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp_lro.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <sys/dnv.h>
#include <sys/nv.h>
#include <sys/iov_schema.h>

#include <machine/bus.h>

#include "thunder_bgx.h"
#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"

#define	VNIC_VF_DEVSTR		"Cavium Thunder NIC Virtual Function Driver"

#define	VNIC_VF_REG_RID		PCIR_BAR(PCI_CFG_REG_BAR_NUM)

/* Lock for core interface settings */
#define	NICVF_CORE_LOCK_INIT(nic)				\
    sx_init(&(nic)->core_sx, device_get_nameunit((nic)->dev))

#define	NICVF_CORE_LOCK_DESTROY(nic)				\
    sx_destroy(&(nic)->core_sx)

#define	NICVF_CORE_LOCK(nic)	sx_xlock(&(nic)->core_sx)
#define	NICVF_CORE_UNLOCK(nic)	sx_xunlock(&(nic)->core_sx)

#define	NICVF_CORE_LOCK_ASSERT(nic)	sx_assert(&(nic)->core_sx, SA_XLOCKED)

#define	SPEED_10	10
#define	SPEED_100	100
#define	SPEED_1000	1000
#define	SPEED_10000	10000
#define	SPEED_40000	40000

MALLOC_DEFINE(M_NICVF, "nicvf", "ThunderX VNIC VF dynamic memory");

static int nicvf_probe(device_t);
static int nicvf_attach(device_t);
static int nicvf_detach(device_t);

static device_method_t nicvf_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nicvf_probe),
	DEVMETHOD(device_attach,	nicvf_attach),
	DEVMETHOD(device_detach,	nicvf_detach),

	DEVMETHOD_END,
};

static driver_t nicvf_driver = {
	"vnic",
	nicvf_methods,
	sizeof(struct nicvf),
};

static devclass_t nicvf_devclass;

DRIVER_MODULE(vnicvf, pci, nicvf_driver, nicvf_devclass, 0, 0);
MODULE_VERSION(vnicvf, 1);
MODULE_DEPEND(vnicvf, pci, 1, 1, 1);
MODULE_DEPEND(vnicvf, ether, 1, 1, 1);
MODULE_DEPEND(vnicvf, vnicpf, 1, 1, 1);

static int nicvf_allocate_misc_interrupt(struct nicvf *);
static int nicvf_enable_misc_interrupt(struct nicvf *);
static int nicvf_allocate_net_interrupts(struct nicvf *);
static void nicvf_release_all_interrupts(struct nicvf *);
static int nicvf_update_hw_max_frs(struct nicvf *, int);
static int nicvf_hw_set_mac_addr(struct nicvf *, uint8_t *);
static void nicvf_config_cpi(struct nicvf *);
static int nicvf_rss_init(struct nicvf *);
static int nicvf_init_resources(struct nicvf *);

static int nicvf_setup_ifnet(struct nicvf *);
static int nicvf_setup_ifmedia(struct nicvf *);
static void nicvf_hw_addr_random(uint8_t *);

static int nicvf_if_ioctl(struct ifnet *, u_long, caddr_t);
static void nicvf_if_init(void *);
static void nicvf_if_init_locked(struct nicvf *);
static int nicvf_if_transmit(struct ifnet *, struct mbuf *);
static void nicvf_if_qflush(struct ifnet *);
static uint64_t nicvf_if_getcounter(struct ifnet *, ift_counter);

static int nicvf_stop_locked(struct nicvf *);

static void nicvf_media_status(struct ifnet *, struct ifmediareq *);
static int nicvf_media_change(struct ifnet *);

static void nicvf_tick_stats(void *);

static int
nicvf_probe(device_t dev)
{
	uint16_t vendor_id;
	uint16_t device_id;

	vendor_id = pci_get_vendor(dev);
	device_id = pci_get_device(dev);

	if (vendor_id != PCI_VENDOR_ID_CAVIUM)
		return (ENXIO);

	if (device_id == PCI_DEVICE_ID_THUNDER_NIC_VF ||
	    device_id == PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF) {
		device_set_desc(dev, VNIC_VF_DEVSTR);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static int
nicvf_attach(device_t dev)
{
	struct nicvf *nic;
	int rid, qcount;
	int err = 0;
	uint8_t hwaddr[ETHER_ADDR_LEN];
	uint8_t zeromac[] = {[0 ... (ETHER_ADDR_LEN - 1)] = 0};

	nic = device_get_softc(dev);
	nic->dev = dev;
	nic->pnicvf = nic;

	NICVF_CORE_LOCK_INIT(nic);
	/* Enable HW TSO on Pass2 */
	if (!pass1_silicon(dev))
		nic->hw_tso = TRUE;

	rid = VNIC_VF_REG_RID;
	nic->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (nic->reg_base == NULL) {
		device_printf(dev, "Could not allocate registers memory\n");
		return (ENXIO);
	}

	qcount = MAX_CMP_QUEUES_PER_QS;
	nic->max_queues = qcount;

	err = nicvf_set_qset_resources(nic);
	if (err != 0)
		goto err_free_res;

	/* Check if PF is alive and get MAC address for this VF */
	err = nicvf_allocate_misc_interrupt(nic);
	if (err != 0)
		goto err_free_res;

	NICVF_CORE_LOCK(nic);
	err = nicvf_enable_misc_interrupt(nic);
	NICVF_CORE_UNLOCK(nic);
	if (err != 0)
		goto err_release_intr;

	err = nicvf_allocate_net_interrupts(nic);
	if (err != 0) {
		device_printf(dev,
		    "Could not allocate network interface interrupts\n");
		/* The ifnet is not allocated yet; just release interrupts. */
		goto err_release_intr;
	}

	/* If no MAC address was obtained we generate a random one */
	if (memcmp(nic->hwaddr, zeromac, ETHER_ADDR_LEN) == 0) {
		nicvf_hw_addr_random(hwaddr);
		memcpy(nic->hwaddr, hwaddr, ETHER_ADDR_LEN);
		NICVF_CORE_LOCK(nic);
		nicvf_hw_set_mac_addr(nic, hwaddr);
		NICVF_CORE_UNLOCK(nic);
	}

	/* Configure CPI algorithm */
	nic->cpi_alg = CPI_ALG_NONE;
	NICVF_CORE_LOCK(nic);
	nicvf_config_cpi(nic);
	/* Configure receive side scaling */
	if (nic->qs->rq_cnt > 1)
		nicvf_rss_init(nic);
	NICVF_CORE_UNLOCK(nic);

	err = nicvf_setup_ifnet(nic);
	if (err != 0) {
		device_printf(dev, "Could not set up ifnet\n");
		goto err_release_intr;
	}

	err = nicvf_setup_ifmedia(nic);
	if (err != 0) {
		device_printf(dev, "Could not set up ifmedia\n");
		goto err_free_ifnet;
	}

	mtx_init(&nic->stats_mtx, "VNIC stats", NULL, MTX_DEF);
	callout_init_mtx(&nic->stats_callout, &nic->stats_mtx, 0);

	ether_ifattach(nic->ifp, nic->hwaddr);

	return (0);

err_free_ifnet:
	if_free(nic->ifp);
err_release_intr:
	nicvf_release_all_interrupts(nic);
err_free_res:
	bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(nic->reg_base),
	    nic->reg_base);

	return (err);
}

static int
nicvf_detach(device_t dev)
{
	struct nicvf *nic;

	nic = device_get_softc(dev);

	NICVF_CORE_LOCK(nic);
	/* Shut down the port and release ring resources */
	nicvf_stop_locked(nic);
	/* Release stats lock */
	mtx_destroy(&nic->stats_mtx);
	/* Release interrupts */
	nicvf_release_all_interrupts(nic);
	/* Release memory resource */
	if (nic->reg_base != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(nic->reg_base), nic->reg_base);
	}

	/* Remove all ifmedia configurations */
	ifmedia_removeall(&nic->if_media);
	/* Free this ifnet */
	if_free(nic->ifp);
	NICVF_CORE_UNLOCK(nic);
	/* Finally destroy the lock */
	NICVF_CORE_LOCK_DESTROY(nic);

	return (0);
}

static void
nicvf_hw_addr_random(uint8_t *hwaddr)
{
	uint32_t rnd;
	uint8_t addr[ETHER_ADDR_LEN];

	/*
	 * Create randomized MAC address.
	 * Set 'bsd' + random 24 low-order bits.
	 */
	rnd = arc4random() & 0x00ffffff;
	addr[0] = 'b';
	addr[1] = 's';
	addr[2] = 'd';
	addr[3] = rnd >> 16;
	addr[4] = rnd >> 8;
	addr[5] = rnd >> 0;

	memcpy(hwaddr, addr, ETHER_ADDR_LEN);
}

static int
nicvf_setup_ifnet(struct nicvf *nic)
{
	struct ifnet *ifp;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(nic->dev, "Could not allocate ifnet structure\n");
		return (ENOMEM);
	}

	nic->ifp = ifp;

	if_setsoftc(ifp, nic);
	if_initname(ifp, device_get_name(nic->dev), device_get_unit(nic->dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX);

	if_settransmitfn(ifp, nicvf_if_transmit);
	if_setqflushfn(ifp, nicvf_if_qflush);
	if_setioctlfn(ifp, nicvf_if_ioctl);
	if_setinitfn(ifp, nicvf_if_init);
	if_setgetcounterfn(ifp, nicvf_if_getcounter);

	if_setmtu(ifp, ETHERMTU);

	/* Reset caps */
	if_setcapabilities(ifp, 0);

	/* Set the default values */
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU, 0);
	if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
	if (nic->hw_tso) {
		/* TSO */
		if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0);
		/* TSO parameters */
		ifp->if_hw_tsomax = NICVF_TSO_MAXSIZE;
		ifp->if_hw_tsomaxsegcount = NICVF_TSO_NSEGS;
		ifp->if_hw_tsomaxsegsize = MCLBYTES;
	}
	/* IP/TCP/UDP HW checksums */
	if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
	if_setcapabilitiesbit(ifp, IFCAP_HWSTATS, 0);
	/*
	 * HW offload enable
	 */
	if_clearhwassist(ifp);
	if_sethwassistbits(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP), 0);
	if (nic->hw_tso)
		if_sethwassistbits(ifp, (CSUM_TSO), 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	return (0);
}

static int
nicvf_setup_ifmedia(struct nicvf *nic)
{

	ifmedia_init(&nic->if_media, IFM_IMASK, nicvf_media_change,
	    nicvf_media_status);

	/*
	 * Advertise availability of all possible connection types,
	 * even though not all are possible at the same time.
	 */

	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_10_T | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_100_TX | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_1000_T | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_10G_SR | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_40G_CR4 | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_AUTO | IFM_FDX),
	    0, NULL);

	ifmedia_set(&nic->if_media, (IFM_ETHER | IFM_AUTO | IFM_FDX));

	return (0);
}

static int
nicvf_if_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nicvf *nic;
	struct rcv_queue *rq;
	struct ifreq *ifr;
	uint32_t flags;
	int mask, err;
	int rq_idx;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa;
	boolean_t avoid_reset = FALSE;
#endif

	nic = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	ifa = (struct ifaddr *)data;
#endif
	err = 0;
	switch (cmd) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif

#if defined(INET) || defined(INET6)
		/* Avoid reinitialization unless it's necessary */
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
				nicvf_if_init(nic);
#ifdef INET
			if (!(if_getflags(ifp) & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif

			return (0);
		}
#endif
		err = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < NIC_HW_MIN_FRS ||
		    ifr->ifr_mtu > NIC_HW_MAX_FRS) {
			err = EINVAL;
		} else {
			NICVF_CORE_LOCK(nic);
			err = nicvf_update_hw_max_frs(nic, ifr->ifr_mtu);
			if (err == 0)
				if_setmtu(ifp, ifr->ifr_mtu);
			NICVF_CORE_UNLOCK(nic);
		}
		break;
	case SIOCSIFFLAGS:
		NICVF_CORE_LOCK(nic);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				flags = ifp->if_flags ^ nic->if_flags;
				if ((nic->if_flags & ifp->if_flags) &
				    IFF_PROMISC) {
					/* Change promiscuous mode */
#if 0
					/* ARM64TODO */
					nicvf_set_promiscous(nic);
#endif
				}

				if ((nic->if_flags ^ ifp->if_flags) &
				    IFF_ALLMULTI) {
					/* Change multicasting settings */
#if 0
					/* ARM64TODO */
					nicvf_set_multicast(nic);
#endif
				}
			} else {
				nicvf_if_init_locked(nic);
			}
		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			nicvf_stop_locked(nic);

		nic->if_flags = ifp->if_flags;
		NICVF_CORE_UNLOCK(nic);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
#if 0
			NICVF_CORE_LOCK(nic);
			/* ARM64TODO */
			nicvf_set_multicast(nic);
			NICVF_CORE_UNLOCK(nic);
#endif
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		err = ifmedia_ioctl(ifp, ifr, &nic->if_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_VLAN_MTU) {
			/* Nothing to do except acknowledge that the change took effect. */
			ifp->if_capenable ^= IFCAP_VLAN_MTU;
		}
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable ^= IFCAP_TXCSUM;
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if ((mask & IFCAP_TSO4) && nic->hw_tso)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_LRO) {
			/*
			 * Lock the driver for a moment to avoid a mismatch
			 * in per-queue settings.
			 */
			NICVF_CORE_LOCK(nic);
			ifp->if_capenable ^= IFCAP_LRO;
			if ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0) {
				/*
				 * Now toggle LRO for subsequent packets.
				 * Atomicity of this change is not necessary
				 * as we don't need a precise toggle of this
				 * feature for all threads processing the
				 * completion queue.
				 */
				for (rq_idx = 0;
				    rq_idx < nic->qs->rq_cnt; rq_idx++) {
					rq = &nic->qs->rq[rq_idx];
					rq->lro_enabled = !rq->lro_enabled;
				}
			}
			NICVF_CORE_UNLOCK(nic);
		}

		break;

	default:
		err = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (err);
}

static void
nicvf_if_init_locked(struct nicvf *nic)
{
	struct queue_set *qs = nic->qs;
	struct ifnet *ifp;
	int qidx;
	int err;
	caddr_t if_addr;

	NICVF_CORE_LOCK_ASSERT(nic);
	ifp = nic->ifp;

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		nicvf_stop_locked(nic);

	err = nicvf_enable_misc_interrupt(nic);
	if (err != 0) {
		if_printf(ifp, "Could not reenable Mbox interrupt\n");
		return;
	}

	/* Get the latest MAC address */
	if_addr = if_getlladdr(ifp);
	/* Update MAC address if changed */
	if (memcmp(nic->hwaddr, if_addr, ETHER_ADDR_LEN) != 0) {
		memcpy(nic->hwaddr, if_addr, ETHER_ADDR_LEN);
		nicvf_hw_set_mac_addr(nic, if_addr);
	}

	/* Initialize the queues */
	err = nicvf_init_resources(nic);
	if (err != 0)
		goto error;

	/* Make sure queue initialization is written */
	wmb();

	nicvf_reg_write(nic, NIC_VF_INT, ~0UL);
	/* Enable Qset err interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Enable completion queue interrupt */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Enable RBDR threshold interrupt */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);

	nic->drv_stats.txq_stop = 0;
	nic->drv_stats.txq_wake = 0;

	/* Activate network interface */
	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	/* Schedule callout to update stats */
	callout_reset(&nic->stats_callout, hz, nicvf_tick_stats, nic);

	return;

error:
	/* Something went very wrong. Disable this ifnet for good */
	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
}

static void
nicvf_if_init(void *if_softc)
{
	struct nicvf *nic = if_softc;

	NICVF_CORE_LOCK(nic);
	nicvf_if_init_locked(nic);
	NICVF_CORE_UNLOCK(nic);
}

static int
nicvf_if_transmit(struct ifnet *ifp, struct mbuf *mbuf)
{
	struct nicvf *nic = if_getsoftc(ifp);
	struct queue_set *qs = nic->qs;
	struct snd_queue *sq;
	struct mbuf *mtmp;
	int qidx;
	int err = 0;

	if (__predict_false(qs == NULL)) {
		panic("%s: missing queue set for %s", __func__,
		    device_get_nameunit(nic->dev));
	}

	/* Select queue */
	if (M_HASHTYPE_GET(mbuf) != M_HASHTYPE_NONE)
		qidx = mbuf->m_pkthdr.flowid % qs->sq_cnt;
	else
		qidx = curcpu % qs->sq_cnt;

	sq = &qs->sq[qidx];

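	/*
	 * When checksum offload is requested on a chained mbuf, the chain
	 * may have to be modified on transmit; duplicate chains that are
	 * not writable so that headers can be updated safely.
	 */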
	if (mbuf->m_next != NULL &&
	    (mbuf->m_pkthdr.csum_flags &
	    (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP)) != 0) {
		if (M_WRITABLE(mbuf) == 0) {
			mtmp = m_dup(mbuf, M_NOWAIT);
			m_freem(mbuf);
			if (mtmp == NULL)
				return (ENOBUFS);
			mbuf = mtmp;
		}
	}

	err = drbr_enqueue(ifp, sq->br, mbuf);
	if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING) || !nic->link_up || (err != 0)) {
		/*
		 * If the driver is not active, the link is down, or the
		 * enqueue operation failed, return with the appropriate
		 * error code.
		 */
		return (err);
	}

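	/*
	 * Transmit directly when the TX lock is available; if it is
	 * contended, defer the flush to the queue's send taskqueue.
	 */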
	if (NICVF_TX_TRYLOCK(sq) != 0) {
		err = nicvf_xmit_locked(sq);
		NICVF_TX_UNLOCK(sq);
		return (err);
	} else
		taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);

	return (0);
}

static void
nicvf_if_qflush(struct ifnet *ifp)
{
	struct nicvf *nic;
	struct queue_set *qs;
	struct snd_queue *sq;
	struct mbuf *mbuf;
	size_t idx;

	nic = if_getsoftc(ifp);
	qs = nic->qs;

	for (idx = 0; idx < qs->sq_cnt; idx++) {
		sq = &qs->sq[idx];
		NICVF_TX_LOCK(sq);
		while ((mbuf = buf_ring_dequeue_sc(sq->br)) != NULL)
			m_freem(mbuf);
		NICVF_TX_UNLOCK(sq);
	}
	if_qflush(ifp);
}

static uint64_t
nicvf_if_getcounter(struct ifnet *ifp, ift_counter cnt)
{
	struct nicvf *nic;
	struct nicvf_hw_stats *hw_stats;
	struct nicvf_drv_stats *drv_stats;

	nic = if_getsoftc(ifp);
	hw_stats = &nic->hw_stats;
	drv_stats = &nic->drv_stats;

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (drv_stats->rx_frames_ok);
	case IFCOUNTER_OPACKETS:
		return (drv_stats->tx_frames_ok);
	case IFCOUNTER_IBYTES:
		return (hw_stats->rx_bytes);
	case IFCOUNTER_OBYTES:
		return (hw_stats->tx_bytes_ok);
	case IFCOUNTER_IMCASTS:
		return (hw_stats->rx_mcast_frames);
	case IFCOUNTER_COLLISIONS:
		return (0);
	case IFCOUNTER_IQDROPS:
		return (drv_stats->rx_drops);
	case IFCOUNTER_OQDROPS:
		return (drv_stats->tx_drops);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

static void
nicvf_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nicvf *nic = if_getsoftc(ifp);

	NICVF_CORE_LOCK(nic);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (nic->link_up) {
		/* Device attached to working network */
		ifmr->ifm_status |= IFM_ACTIVE;
	}

	switch (nic->speed) {
	case SPEED_10:
		ifmr->ifm_active |= IFM_10_T;
		break;
	case SPEED_100:
		ifmr->ifm_active |= IFM_100_TX;
		break;
	case SPEED_1000:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	case SPEED_10000:
		ifmr->ifm_active |= IFM_10G_SR;
		break;
	case SPEED_40000:
		ifmr->ifm_active |= IFM_40G_CR4;
		break;
	default:
		ifmr->ifm_active |= IFM_AUTO;
		break;
	}

	if (nic->duplex)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;

	NICVF_CORE_UNLOCK(nic);
}

static int
nicvf_media_change(struct ifnet *ifp __unused)
{

	return (0);
}

/* Register read/write APIs */
void
nicvf_reg_write(struct nicvf *nic, bus_space_handle_t offset, uint64_t val)
{

	bus_write_8(nic->reg_base, offset, val);
}

uint64_t
nicvf_reg_read(struct nicvf *nic, uint64_t offset)
{

	return (bus_read_8(nic->reg_base, offset));
}

void
nicvf_queue_reg_write(struct nicvf *nic, bus_space_handle_t offset,
    uint64_t qidx, uint64_t val)
{

	bus_write_8(nic->reg_base, offset + (qidx << NIC_Q_NUM_SHIFT), val);
}

uint64_t
nicvf_queue_reg_read(struct nicvf *nic, bus_space_handle_t offset,
    uint64_t qidx)
{

	return (bus_read_8(nic->reg_base, offset + (qidx << NIC_Q_NUM_SHIFT)));
}

/* VF -> PF mailbox communication */
static void
nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
	uint64_t *msg = (uint64_t *)mbx;

	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}

int
nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
	int timeout = NIC_MBOX_MSG_TIMEOUT * 10;
	int sleep = 2;

	NICVF_CORE_LOCK_ASSERT(nic);

	nic->pf_acked = FALSE;
	nic->pf_nacked = FALSE;

	nicvf_write_to_mbx(nic, mbx);

	/* Wait for the message to be acked, with a 2 sec timeout */
	while (!nic->pf_acked) {
		if (nic->pf_nacked)
			return (EINVAL);

		DELAY(sleep * 1000);

		if (nic->pf_acked)
			break;
		timeout -= sleep;
		if (timeout <= 0) {
			device_printf(nic->dev,
			    "PF didn't ack to mbox msg %d from VF%d\n",
			    (mbx->msg.msg & 0xFF), nic->vf_id);

			return (EBUSY);
		}
	}
	return (0);
}

/*
 * Checks if the VF is able to communicate with the PF
 * and also gets the VNIC number this VF is associated with.
 */
static int
nicvf_check_pf_ready(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_READY;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		device_printf(nic->dev,
		    "PF didn't respond to READY msg\n");
		return (0);
	}

	return (1);
}

static void
nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{

	if (bgx->rx)
		nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
	else
		nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
}

static void
nicvf_handle_mbx_intr(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	uint64_t *mbx_data;
	uint64_t mbx_addr;
	int i;

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (uint64_t *)&mbx;

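	/* Copy the message out of the mailbox registers, 64 bits at a time. */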
	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(uint64_t);
	}

	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic->pf_acked = TRUE;
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		memcpy(nic->hwaddr, mbx.nic_cfg.mac_addr, ETHER_ADDR_LEN);
		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
		nic->link_up = FALSE;
		nic->duplex = 0;
		nic->speed = 0;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = TRUE;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = TRUE;
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
		nic->pf_acked = TRUE;
		break;
	case NIC_MBOX_MSG_BGX_STATS:
		nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
		nic->pf_acked = TRUE;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		nic->pf_acked = TRUE;
		nic->link_up = mbx.link_status.link_up;
		nic->duplex = mbx.link_status.duplex;
		nic->speed = mbx.link_status.speed;
		if (nic->link_up) {
			if_setbaudrate(nic->ifp, nic->speed * 1000000);
			if_link_state_change(nic->ifp, LINK_STATE_UP);
		} else {
			if_setbaudrate(nic->ifp, 0);
			if_link_state_change(nic->ifp, LINK_STATE_DOWN);
		}
		break;
	default:
		device_printf(nic->dev,
		    "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
		break;
	}
	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}

static int
nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
	union nic_mbx mbx = {};

	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
	mbx.frs.max_frs = mtu;
	mbx.frs.vf_id = nic->vf_id;

	return (nicvf_send_msg_to_pf(nic, &mbx));
}

static int
nicvf_hw_set_mac_addr(struct nicvf *nic, uint8_t *hwaddr)
{
	union nic_mbx mbx = {};

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	memcpy(mbx.mac.mac_addr, hwaddr, ETHER_ADDR_LEN);

	return (nicvf_send_msg_to_pf(nic, &mbx));
}

static void
nicvf_config_cpi(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
	mbx.cpi_cfg.vf_id = nic->vf_id;
	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;

	nicvf_send_msg_to_pf(nic, &mbx);
}

static void
nicvf_get_rss_size(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.vf_id = nic->vf_id;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void
nicvf_config_rss(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	struct nicvf_rss_info *rss;
	int ind_tbl_len;
	int i, nextq;

	rss = &nic->rss_info;
	ind_tbl_len = rss->rss_size;
	nextq = 0;

	mbx.rss_cfg.vf_id = nic->vf_id;
	mbx.rss_cfg.hash_bits = rss->hash_bits;
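	/*
	 * The indirection table may not fit into a single mailbox message,
	 * so send it in chunks of up to RSS_IND_TBL_LEN_PER_MBX_MSG
	 * entries; continuation chunks use NIC_MBOX_MSG_RSS_CFG_CONT.
	 */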
	while (ind_tbl_len != 0) {
		mbx.rss_cfg.tbl_offset = nextq;
		mbx.rss_cfg.tbl_len = MIN(ind_tbl_len,
		    RSS_IND_TBL_LEN_PER_MBX_MSG);
		mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
		    NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;

		for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];

		nicvf_send_msg_to_pf(nic, &mbx);

		ind_tbl_len -= mbx.rss_cfg.tbl_len;
	}
}

static void
nicvf_set_rss_key(struct nicvf *nic)
{
	struct nicvf_rss_info *rss;
	uint64_t key_addr;
	int idx;

	rss = &nic->rss_info;
	key_addr = NIC_VNIC_RSS_KEY_0_4;

	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		nicvf_reg_write(nic, key_addr, rss->key[idx]);
		key_addr += sizeof(uint64_t);
	}
}

static int
nicvf_rss_init(struct nicvf *nic)
{
	struct nicvf_rss_info *rss;
	int idx;

	nicvf_get_rss_size(nic);

	rss = &nic->rss_info;
	if (nic->cpi_alg != CPI_ALG_NONE) {
		rss->enable = FALSE;
		rss->hash_bits = 0;
		return (ENXIO);
	}

	rss->enable = TRUE;

	/* Using the HW reset value for now */
	rss->key[0] = 0xFEED0BADFEED0BADUL;
	rss->key[1] = 0xFEED0BADFEED0BADUL;
	rss->key[2] = 0xFEED0BADFEED0BADUL;
	rss->key[3] = 0xFEED0BADFEED0BADUL;
	rss->key[4] = 0xFEED0BADFEED0BADUL;

	nicvf_set_rss_key(nic);

	rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);

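	/*
	 * hash_bits is the log2 of the indirection table size (the size is
	 * expected to be a power of two); spread the receive queues over
	 * the table round-robin.
	 */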
	rss->hash_bits = fls(rss->rss_size) - 1;
	for (idx = 0; idx < rss->rss_size; idx++)
		rss->ind_tbl[idx] = idx % nic->rx_queues;

	nicvf_config_rss(nic);

	return (0);
}

static int
nicvf_init_resources(struct nicvf *nic)
{
	int err;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;

	/* Enable Qset */
	nicvf_qset_config(nic, TRUE);

	/* Initialize queues and HW for data transfer */
	err = nicvf_config_data_transfer(nic, TRUE);
	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to alloc/config VF's QSet resources\n");
		return (err);
	}

	/* Send VF config done msg to PF */
	nicvf_write_to_mbx(nic, &mbx);

	return (0);
}

static void
nicvf_misc_intr_handler(void *arg)
{
	struct nicvf *nic = (struct nicvf *)arg;
	uint64_t intr;

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	/* Check for spurious interrupt */
	if (!(intr & NICVF_INTR_MBOX_MASK))
		return;

	nicvf_handle_mbx_intr(nic);
}

static int
nicvf_intr_handler(void *arg)
{
	struct nicvf *nic;
	struct cmp_queue *cq;
	int qidx;

	cq = (struct cmp_queue *)arg;
	nic = cq->nic;
	qidx = cq->idx;

	/* Disable interrupts */
	nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);

	taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);

	/* Clear interrupt */
	nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);

	return (FILTER_HANDLED);
}

static void
nicvf_rbdr_intr_handler(void *arg)
{
	struct nicvf *nic;
	struct queue_set *qs;
	struct rbdr *rbdr;
	int qidx;

	nic = (struct nicvf *)arg;

	/* Disable RBDR interrupt and schedule softirq */
	for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
			continue;
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);

		qs = nic->qs;
		rbdr = &qs->rbdr[qidx];
		taskqueue_enqueue(rbdr->rbdr_taskq, &rbdr->rbdr_task_nowait);
		/* Clear interrupt */
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
}

static void
nicvf_qs_err_intr_handler(void *arg)
{
	struct nicvf *nic = (struct nicvf *)arg;
	struct queue_set *qs = nic->qs;

	/* Disable Qset err interrupt and schedule softirq */
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	taskqueue_enqueue(qs->qs_err_taskq, &qs->qs_err_task);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
}

static int
nicvf_enable_msix(struct nicvf *nic)
{
	struct pci_devinfo *dinfo;
	int rid, count;
	int ret;

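	/*
	 * pci_alloc_msix(9) requires the BAR holding the MSI-X table to be
	 * allocated and active before vectors can be requested, so map
	 * that BAR first.
	 */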
	dinfo = device_get_ivars(nic->dev);
	rid = dinfo->cfg.msix.msix_table_bar;
	nic->msix_table_res =
	    bus_alloc_resource_any(nic->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (nic->msix_table_res == NULL) {
		device_printf(nic->dev,
		    "Could not allocate memory for MSI-X table\n");
		return (ENXIO);
	}

	count = nic->num_vec = NIC_VF_MSIX_VECTORS;

	ret = pci_alloc_msix(nic->dev, &count);
	if ((ret != 0) || (count != nic->num_vec)) {
		device_printf(nic->dev,
		    "Request for #%d msix vectors failed, error: %d\n",
		    nic->num_vec, ret);
		if (ret == 0) {
			/*
			 * Fewer vectors than requested were allocated;
			 * release them and fail instead of reporting
			 * success with an incomplete set.
			 */
			pci_release_msi(nic->dev);
			ret = ENXIO;
		}
		return (ret);
	}

	nic->msix_enabled = 1;
	return (0);
}

static void
nicvf_disable_msix(struct nicvf *nic)
{

	if (nic->msix_enabled) {
		pci_release_msi(nic->dev);
		nic->msix_enabled = 0;
		nic->num_vec = 0;
	}
}

static void
nicvf_release_all_interrupts(struct nicvf *nic)
{
	struct resource *res;
	int irq;
	int err;

	/* Free registered interrupts */
	for (irq = 0; irq < nic->num_vec; irq++) {
		res = nic->msix_entries[irq].irq_res;
		if (res == NULL)
			continue;
		/* Teardown interrupt first */
		if (nic->msix_entries[irq].handle != NULL) {
			err = bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
			KASSERT(err == 0,
			    ("ERROR: Unable to teardown interrupt %d", irq));
			nic->msix_entries[irq].handle = NULL;
		}

		bus_release_resource(nic->dev, SYS_RES_IRQ,
		    rman_get_rid(res), nic->msix_entries[irq].irq_res);
		nic->msix_entries[irq].irq_res = NULL;
	}
	/* Disable MSI-X */
	nicvf_disable_msix(nic);
}

/*
 * Initialize MSI-X vectors and register MISC interrupt.
 * Send READY message to PF to check if it's alive.
 */
static int
nicvf_allocate_misc_interrupt(struct nicvf *nic)
{
	struct resource *res;
	int irq, rid;
	int ret = 0;

	/* Return if mailbox interrupt is already registered */
	if (nic->msix_enabled)
		return (0);

	/* Enable MSI-X */
	if (nicvf_enable_msix(nic) != 0)
		return (ENXIO);

	irq = NICVF_INTR_ID_MISC;
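	/*
	 * SYS_RES_IRQ rid 0 is the legacy INTx interrupt; MSI-X vectors
	 * are assigned rids starting from 1, hence the "+ 1" below.
	 */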
	rid = irq + 1;
	nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
	    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
	if (nic->msix_entries[irq].irq_res == NULL) {
		device_printf(nic->dev,
		    "Could not allocate Mbox interrupt for VF%d\n",
		    device_get_unit(nic->dev));
		return (ENXIO);
	}

	ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
	    (INTR_MPSAFE | INTR_TYPE_MISC), NULL, nicvf_misc_intr_handler, nic,
	    &nic->msix_entries[irq].handle);
	if (ret != 0) {
		res = nic->msix_entries[irq].irq_res;
		bus_release_resource(nic->dev, SYS_RES_IRQ,
		    rman_get_rid(res), res);
		nic->msix_entries[irq].irq_res = NULL;
		return (ret);
	}

	return (0);
}

static int
nicvf_enable_misc_interrupt(struct nicvf *nic)
{

	/* Enable mailbox interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);

	/* Check if VF is able to communicate with PF */
	if (!nicvf_check_pf_ready(nic)) {
		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
		return (ENXIO);
	}

	return (0);
}

static void
nicvf_release_net_interrupts(struct nicvf *nic)
{
	struct resource *res;
	int irq;
	int err;

	for_each_cq_irq(irq) {
		res = nic->msix_entries[irq].irq_res;
		if (res == NULL)
			continue;
		/* Teardown active interrupts first */
		if (nic->msix_entries[irq].handle != NULL) {
			err = bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
			KASSERT(err == 0,
			    ("ERROR: Unable to teardown CQ interrupt %d",
			    (irq - NICVF_INTR_ID_CQ)));
			if (err != 0)
				continue;
		}

		/* Release resource */
		bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
		    res);
		nic->msix_entries[irq].irq_res = NULL;
	}

	for_each_rbdr_irq(irq) {
		res = nic->msix_entries[irq].irq_res;
		if (res == NULL)
			continue;
		/* Teardown active interrupts first */
		if (nic->msix_entries[irq].handle != NULL) {
			err = bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
			KASSERT(err == 0,
1383 ("ERROR: Unable to teardown RDBR interrupt %d",
			    (irq - NICVF_INTR_ID_RBDR)));
			if (err != 0)
				continue;
		}

		/* Release resource */
		bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
		    res);
		nic->msix_entries[irq].irq_res = NULL;
	}

	irq = NICVF_INTR_ID_QS_ERR;
	res = nic->msix_entries[irq].irq_res;
	if (res != NULL) {
		/* Teardown active interrupts first */
		if (nic->msix_entries[irq].handle != NULL) {
			err = bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
			KASSERT(err == 0,
			    ("ERROR: Unable to teardown QS Error interrupt %d",
			    irq));
			if (err != 0)
				return;
		}

		/* Release resource */
		bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
		    res);
		nic->msix_entries[irq].irq_res = NULL;
	}
}

static int
nicvf_allocate_net_interrupts(struct nicvf *nic)
{
	u_int cpuid;
	int irq, rid;
	int qidx;
	int ret = 0;

	/* MSI-X must be configured by now */
	if (!nic->msix_enabled) {
		device_printf(nic->dev, "Cannot allocate queue interrupts. "
		    "MSI-X interrupts disabled.\n");
		return (ENXIO);
	}

	/* Register CQ interrupts */
	for_each_cq_irq(irq) {
		if (irq >= (NICVF_INTR_ID_CQ + nic->qs->cq_cnt))
			break;

		qidx = irq - NICVF_INTR_ID_CQ;
		rid = irq + 1;
		nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
		    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
		if (nic->msix_entries[irq].irq_res == NULL) {
			device_printf(nic->dev,
			    "Could not allocate CQ interrupt %d for VF%d\n",
			    (irq - NICVF_INTR_ID_CQ), device_get_unit(nic->dev));
			ret = ENXIO;
			goto error;
		}
		ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
		    (INTR_MPSAFE | INTR_TYPE_NET), nicvf_intr_handler,
		    NULL, &nic->qs->cq[qidx], &nic->msix_entries[irq].handle);
		if (ret != 0) {
			device_printf(nic->dev,
			    "Could not setup CQ interrupt %d for VF%d\n",
			    (irq - NICVF_INTR_ID_CQ), device_get_unit(nic->dev));
			goto error;
		}
		cpuid = (device_get_unit(nic->dev) * CMP_QUEUE_CNT) + qidx;
		cpuid %= mp_ncpus;
		/*
		 * Save CPU ID for later use when system-wide RSS is enabled.
		 * It will be used to pin the CQ task to the same CPU that got
		 * interrupted.
		 */
		nic->qs->cq[qidx].cmp_cpuid = cpuid;
		if (bootverbose) {
			device_printf(nic->dev, "bind CQ%d IRQ to CPU%d\n",
			    qidx, cpuid);
		}
		/* Bind interrupts to the given CPU */
		bus_bind_intr(nic->dev, nic->msix_entries[irq].irq_res, cpuid);
	}

	/* Register RBDR interrupt */
	for_each_rbdr_irq(irq) {
		if (irq >= (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt))
			break;

		rid = irq + 1;
		nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
		    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
		if (nic->msix_entries[irq].irq_res == NULL) {
			device_printf(nic->dev,
			    "Could not allocate RBDR interrupt %d for VF%d\n",
			    (irq - NICVF_INTR_ID_RBDR),
			    device_get_unit(nic->dev));
			ret = ENXIO;
			goto error;
		}
		ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
		    (INTR_MPSAFE | INTR_TYPE_NET), NULL,
		    nicvf_rbdr_intr_handler, nic,
		    &nic->msix_entries[irq].handle);
		if (ret != 0) {
			device_printf(nic->dev,
			    "Could not setup RBDR interrupt %d for VF%d\n",
			    (irq - NICVF_INTR_ID_RBDR),
			    device_get_unit(nic->dev));
			goto error;
		}
	}

	/* Register QS error interrupt */
	irq = NICVF_INTR_ID_QS_ERR;
	rid = irq + 1;
	nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
	    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
	if (nic->msix_entries[irq].irq_res == NULL) {
		device_printf(nic->dev,
		    "Could not allocate QS Error interrupt for VF%d\n",
		    device_get_unit(nic->dev));
		ret = ENXIO;
		goto error;
	}
	ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
	    (INTR_MPSAFE | INTR_TYPE_NET), NULL, nicvf_qs_err_intr_handler,
	    nic, &nic->msix_entries[irq].handle);
	if (ret != 0) {
		device_printf(nic->dev,
		    "Could not setup QS Error interrupt for VF%d\n",
		    device_get_unit(nic->dev));
		goto error;
	}

	return (0);
error:
	nicvf_release_net_interrupts(nic);
	return (ret);
}

static int
nicvf_stop_locked(struct nicvf *nic)
{
	struct ifnet *ifp;
	int qidx;
	struct queue_set *qs = nic->qs;
	union nic_mbx mbx = {};

	NICVF_CORE_LOCK_ASSERT(nic);
	/* Stop callout. Can block here since holding SX lock */
	callout_drain(&nic->stats_callout);

	ifp = nic->ifp;

	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Disable RBDR & QS error interrupts */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Deactivate network interface */
	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);

	/* Free resources */
	nicvf_config_data_transfer(nic, FALSE);

	/* Disable HW Qset */
	nicvf_qset_config(nic, FALSE);

	/* Disable mailbox interrupt */
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);

	return (0);
}

static void
nicvf_update_stats(struct nicvf *nic)
{
	int qidx;
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
	struct queue_set *qs = nic->qs;

#define	GET_RX_STATS(reg) \
    nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | ((reg) << 3))
#define	GET_TX_STATS(reg) \
    nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | ((reg) << 3))
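	/*
	 * Each statistics register is 64 bits wide, so the register index
	 * is shifted left by 3 to form the byte offset within the block.
	 */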

	stats->rx_bytes = GET_RX_STATS(RX_OCTS);
	stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
	stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
	stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
	stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
	stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
	stats->rx_drop_red = GET_RX_STATS(RX_RED);
	stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
	stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
	stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
	stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
	stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
	stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);

	stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
	stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
	stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
	stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
	stats->tx_drops = GET_TX_STATS(TX_DROP);

	drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
	    stats->tx_bcast_frames_ok + stats->tx_mcast_frames_ok;
	drv_stats->rx_drops = stats->rx_drop_red + stats->rx_drop_overrun;
	drv_stats->tx_drops = stats->tx_drops;

	/* Update RQ and SQ stats */
	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
		nicvf_update_rq_stats(nic, qidx);
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_update_sq_stats(nic, qidx);
}

static void
nicvf_tick_stats(void *arg)
{
	struct nicvf *nic;

	nic = (struct nicvf *)arg;

	/* Read the statistics */
	nicvf_update_stats(nic);

	callout_reset(&nic->stats_callout, hz, nicvf_tick_stats, nic);
}