Diff of stable/11/sys/dev/qlnx/qlnxe/qlnx_os.c between r318657 (old) and r318659 (new).
1/*
2 * Copyright (c) 2017-2018 Cavium, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *

--- 18 unchanged lines hidden (view full) ---

27
28
29/*
30 * File: qlnx_os.c
31 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: stable/11/sys/dev/qlnx/qlnxe/qlnx_os.c 318657 2017-05-22 19:22:06Z davidcs $");
36
37#include "qlnx_os.h"
38#include "bcm_osal.h"
39#include "reg_addr.h"
40#include "ecore_gtt_reg_addr.h"
41#include "ecore.h"
42#include "ecore_chain.h"
43#include "ecore_status.h"

--- 254 unchanged lines hidden (view full) ---

298
299 if (p_hwfn == NULL) {
300 printf("%s: spurious slowpath intr\n", __func__);
301 return;
302 }
303
304 ha = (qlnx_host_t *)p_hwfn->p_dev;
305
306 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
307
308 for (i = 0; i < ha->cdev.num_hwfns; i++) {
309 if (&ha->cdev.hwfns[i] == p_hwfn) {
310 taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
311 break;
312 }
313 }
314 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
315
316 return;
317}
318
319static void
320qlnx_sp_taskqueue(void *context, int pending)
321{
322 struct ecore_hwfn *p_hwfn;

--- 25 unchanged lines hidden (view full) ---

348 taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);
349
350 if (ha->sp_taskqueue[i] == NULL)
351 return (-1);
352
353 taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
354 tq_name);
355
356 QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__,
357 ha->sp_taskqueue[i]));
358 }
359
360 return (0);
361}
362
363static void
364qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
365{

--- 147 unchanged lines hidden (view full) ---

513 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
514 } else {
515 if (fp->tx_ring_full) {
516 qlnx_mdelay(__func__, 100);
517 }
518 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
519 }
520
521 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
522 return;
523}
524
525static int
526qlnx_create_fp_taskqueues(qlnx_host_t *ha)
527{
528 int i;
529 uint8_t tq_name[32];

--- 13 unchanged lines hidden (view full) ---

543 &fp->fp_taskqueue);
544
545 if (fp->fp_taskqueue == NULL)
546 return (-1);
547
548 taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
549 tq_name);
550
551 QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__,
552 fp->fp_taskqueue));
553 }
554
555 return (0);
556}
557
558static void
559qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
560{

--- 145 unchanged lines hidden (view full) ---

706 goto qlnx_pci_attach_err;
707 }
708
709 if (ha->msix_count > (ha->num_rss + ha->cdev.num_hwfns ))
710 ha->msix_count = ha->num_rss + ha->cdev.num_hwfns;
711 else
712 ha->num_rss = ha->msix_count - ha->cdev.num_hwfns;
713
714 QL_DPRINT1(ha, (dev, "%s:\n\t\t\tpci_reg [%p, 0x%08x 0x%08x]"
715 "\n\t\t\tdbells [%p, 0x%08x 0x%08x]"
716 "\n\t\t\tmsix [%p, 0x%08x 0x%08x 0x%x 0x%x]"
717 "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
718 __func__, ha->pci_reg, rsrc_len_reg,
719 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
720 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
721 ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc));
722
723 if (pci_alloc_msix(dev, &ha->msix_count)) {
724 device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
725 ha->msix_count);
726 ha->msix_count = 0;
727 goto qlnx_pci_attach_err;
728 }
729
730 /*

--- 19 unchanged lines hidden (view full) ---

750 if (bus_setup_intr(dev, ha->sp_irq[i],
751 (INTR_TYPE_NET | INTR_MPSAFE), NULL,
752 qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
753 device_printf(dev,
754 "could not setup slow path interrupt\n");
755 goto qlnx_pci_attach_err;
756 }
757
758 QL_DPRINT1(ha, (dev, "%s: p_hwfn [%p] sp_irq_rid %d"
759 " sp_irq %p sp_handle %p\n", __func__, p_hwfn,
760 ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]));
761
762 }
763
764 /*
765 * initialize fast path interrupt
766 */
767 if (qlnx_create_fp_taskqueues(ha) != 0)
768 goto qlnx_pci_attach_err;

--- 26 unchanged lines hidden (view full) ---

795 for (i = 0; i < ha->cdev.num_hwfns; i++) {
796
797 if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
798 goto qlnx_pci_attach_err;
799 if (ha->grcdump_size[i] == 0)
800 goto qlnx_pci_attach_err;
801
802 ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
803 QL_DPRINT1(ha, (dev, "grcdump_size[%d] = 0x%08x\n",
804 i, ha->grcdump_size[i]));
805
806 ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
807 if (ha->grcdump[i] == NULL) {
808 device_printf(dev, "grcdump alloc[%d] failed\n", i);
809 goto qlnx_pci_attach_err;
810 }
811
812 if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
813 goto qlnx_pci_attach_err;
814 if (ha->idle_chk_size[i] == 0)
815 goto qlnx_pci_attach_err;
816
817 ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
818 QL_DPRINT1(ha, (dev, "idle_chk_size[%d] = 0x%08x\n",
819 i, ha->idle_chk_size[i]));
820
821 ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);
822
823 if (ha->idle_chk[i] == NULL) {
824 device_printf(dev, "idle_chk alloc failed\n");
825 goto qlnx_pci_attach_err;
826 }
827 }

--- 22 unchanged lines hidden (view full) ---
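
Aside on the two "<< 2" shifts in the attach path above: qlnx_grc_dumpsize() and qlnx_idle_chk_size() apparently report sizes in dwords (the trigger-dump path later stores results in grcdump_dwords/idle_chk_dwords), so the shift converts dwords to bytes before the qlnx_zalloc() calls; e.g. a reported 0x1000 dwords becomes a 0x4000-byte buffer.
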

850 }
851 snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
852 ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
853 ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
854 snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
855 FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
856 FW_ENGINEERING_VERSION);
857
858 QL_DPRINT1(ha, (dev, "%s: STORM_FW version %s MFW version %s\n",
859 __func__, ha->stormfw_ver, ha->mfw_ver));
860
861 qlnx_init_ifnet(dev, ha);
862
863 /*
864 * add sysctls
865 */
866 qlnx_add_sysctls(ha);
867
868qlnx_pci_attach_err0:
869 /*
870 * create ioctl device interface
871 */
872 if (qlnx_make_cdev(ha)) {
873 device_printf(dev, "%s: qlnx_make_cdev failed\n", __func__);
874 goto qlnx_pci_attach_err;
875 }
876
877 QL_DPRINT2(ha, (dev, "%s: success\n", __func__));
878
879 return (0);
880
881qlnx_pci_attach_err:
882
883 qlnx_release(ha);
884
885 return (ENXIO);

--- 67 unchanged lines hidden (view full) ---

953static void
954qlnx_release(qlnx_host_t *ha)
955{
956 device_t dev;
957 int i;
958
959 dev = ha->pci_dev;
960
961 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
962
963 for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
964 if (ha->idle_chk[i] != NULL) {
965 free(ha->idle_chk[i], M_QLNXBUF);
966 ha->idle_chk[i] = NULL;
967 }
968
969 if (ha->grcdump[i] != NULL) {

--- 66 unchanged lines hidden (view full) ---

1036 if (ha->pci_dbells)
1037 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
1038 ha->pci_dbells);
1039
1040 if (ha->msix_bar)
1041 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
1042 ha->msix_bar);
1043
1044 QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));
1045 return;
1046}
1047
1048static void
1049qlnx_trigger_dump(qlnx_host_t *ha)
1050{
1051 int i;
1052
1053 if (ha->ifp != NULL)
1054 ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
1055
1056 QL_DPRINT2(ha, (ha->pci_dev, "%s: start\n", __func__));
1057
1058 for (i = 0; i < ha->cdev.num_hwfns; i++) {
1059 qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
1060 qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
1061 }
1062
1063 QL_DPRINT2(ha, (ha->pci_dev, "%s: end\n", __func__));
1064
1065 return;
1066}
1067
1068static int
1069qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
1070{
1071 int err, ret = 0;

--- 702 unchanged lines hidden (view full) ---

1774 OID_AUTO, "personality", CTLFLAG_RD,
1775 &ha->personality, ha->personality,
1776 "\tpersonality = 0 => Ethernet Only\n"
1777 "\tpersonality = 3 => Ethernet and RoCE\n"
1778 "\tpersonality = 4 => Ethernet and iWARP\n"
1779 "\tpersonality = 6 => Default in Shared Memory\n");
1780
1781 ha->dbg_level = 0;
1782
1783 SYSCTL_ADD_UINT(ctx, children,
1784 OID_AUTO, "debug", CTLFLAG_RW,
1785 &ha->dbg_level, ha->dbg_level, "Debug Level");
1786
1787 ha->dp_level = 0;
1788 SYSCTL_ADD_UINT(ctx, children,
1789 OID_AUTO, "dp_level", CTLFLAG_RW,
1790 &ha->dp_level, ha->dp_level, "DP Level");
1791
1792 ha->dbg_trace_lro_cnt = 0;
1793 SYSCTL_ADD_UINT(ctx, children,
1794 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW,
1795 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,

--- 198 unchanged lines hidden (view full) ---

1994 }
1995
1996 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
1997 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
1998
1999
2000 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
2001
2002 QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));
2003
2004 return;
2005}
2006
2007static void
2008qlnx_init_locked(qlnx_host_t *ha)
2009{
2010 struct ifnet *ifp = ha->ifp;
2011
2012 qlnx_stop(ha);
2013
2014 if (qlnx_load(ha) == 0) {
2015 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2016 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2017 }
2018
2019 return;
2020}
2021
2022static void
2023qlnx_init(void *arg)
2024{
2025 qlnx_host_t *ha;
2026
2027 ha = (qlnx_host_t *)arg;
2028
2029 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
2030
2031 QLNX_LOCK(ha);
2032 qlnx_init_locked(ha);
2033 QLNX_UNLOCK(ha);
2034
2035 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
2036
2037 return;
2038}
2039
2040static int
2041qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
2042{
2043 struct ecore_filter_mcast *mcast;

--- 172 unchanged lines hidden (view full) ---

2216 struct ifreq *ifr = (struct ifreq *)data;
2217 struct ifaddr *ifa = (struct ifaddr *)data;
2218 qlnx_host_t *ha;
2219
2220 ha = (qlnx_host_t *)ifp->if_softc;
2221
2222 switch (cmd) {
2223 case SIOCSIFADDR:
2224 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
2225 __func__, cmd));
2226
2227 if (ifa->ifa_addr->sa_family == AF_INET) {
2228 ifp->if_flags |= IFF_UP;
2229 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2230 QLNX_LOCK(ha);
2231 qlnx_init_locked(ha);
2232 QLNX_UNLOCK(ha);
2233 }
2234 QL_DPRINT4(ha, (ha->pci_dev,
2235 "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
2236 __func__, cmd,
2237 ntohl(IA_SIN(ifa)->sin_addr.s_addr)));
2238
2239 arp_ifinit(ifp, ifa);
2240 } else {
2241 ether_ioctl(ifp, cmd, data);
2242 }
2243 break;
2244
2245 case SIOCSIFMTU:
2246 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
2247 __func__, cmd));
2248
2249 if (ifr->ifr_mtu > QLNX_MAX_MTU) {
2250 ret = EINVAL;
2251 } else {
2252 QLNX_LOCK(ha);
2253 ifp->if_mtu = ifr->ifr_mtu;
2254 ha->max_frame_size =
2255 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2256 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2257 qlnx_init_locked(ha);
2258 }
2259
2260 QLNX_UNLOCK(ha);
2261 }
2262
2263 break;
2264
2265 case SIOCSIFFLAGS:
2266 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
2267 __func__, cmd));
2268
2269 QLNX_LOCK(ha);
2270
2271 if (ifp->if_flags & IFF_UP) {
2272 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2273 if ((ifp->if_flags ^ ha->if_flags) &
2274 IFF_PROMISC) {
2275 ret = qlnx_set_promisc(ha);

--- 11 unchanged lines hidden (view full) ---

2287 qlnx_stop(ha);
2288 ha->if_flags = ifp->if_flags;
2289 }
2290
2291 QLNX_UNLOCK(ha);
2292 break;
2293
2294 case SIOCADDMULTI:
2295 QL_DPRINT4(ha, (ha->pci_dev,
2296 "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));
2297
2298 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2299 if (qlnx_set_multi(ha, 1))
2300 ret = EINVAL;
2301 }
2302 break;
2303
2304 case SIOCDELMULTI:
2305 QL_DPRINT4(ha, (ha->pci_dev,
2306 "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));
2307
2308 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2309 if (qlnx_set_multi(ha, 0))
2310 ret = EINVAL;
2311 }
2312 break;
2313
2314 case SIOCSIFMEDIA:
2315 case SIOCGIFMEDIA:
2316 QL_DPRINT4(ha, (ha->pci_dev,
2317 "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
2318 __func__, cmd));
2319 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
2320 break;
2321
2322 case SIOCSIFCAP:
2323
2324 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2325
2326 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
2327 __func__, cmd));
2328
2329 if (mask & IFCAP_HWCSUM)
2330 ifp->if_capenable ^= IFCAP_HWCSUM;
2331 if (mask & IFCAP_TSO4)
2332 ifp->if_capenable ^= IFCAP_TSO4;
2333 if (mask & IFCAP_TSO6)
2334 ifp->if_capenable ^= IFCAP_TSO6;
2335 if (mask & IFCAP_VLAN_HWTAGGING)

--- 26 unchanged lines hidden (view full) ---

2362 (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) {
2363 ret = EINVAL;
2364 break;
2365 }
2366
2367 p_ptt = ecore_ptt_acquire(p_hwfn);
2368
2369 if (!p_ptt) {
2370 QL_DPRINT1(ha, (ha->pci_dev, "%s :"
2371 " ecore_ptt_acquire failed\n", __func__));
2372 ret = -1;
2373 break;
2374 }
2375
2376 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt,
2377 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
2378 i2c.len, &i2c.data[0]);
2379
2380 ecore_ptt_release(p_hwfn, p_ptt);
2381
2382 if (ret) {
2383 ret = -1;
2384 break;
2385 }
2386
2387 ret = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
2388
2389 QL_DPRINT8(ha, (ha->pci_dev, "SIOCGI2C copyout ret = %d"
2390 " len = %d addr = 0x%02x offset = 0x%04x"
2391 " data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x"
2392 " 0x%02x 0x%02x 0x%02x\n",
2393 ret, i2c.len, i2c.dev_addr, i2c.offset,
2394 i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3],
2395 i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]));
2396 break;
2397 }
2398#endif /* #if (__FreeBSD_version >= 1100101) */
2399
2400 default:
2401 QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n",
2402 __func__, cmd));
2403 ret = ether_ioctl(ifp, cmd, data);
2404 break;
2405 }
2406
2407 return (ret);
2408}
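
The SIOCSIFCAP case above uses the standard XOR idiom: mask = ifr_reqcap ^ if_capenable leaves bits set exactly where the request differs from the current state, and XOR-ing such a bit into if_capenable toggles that capability. A tiny self-contained demonstration with made-up capability bits (CAP_CSUM/CAP_TSO are illustrative, not the kernel's IFCAP_* values):

	#include <stdio.h>

	#define CAP_CSUM 0x1	/* illustrative stand-ins for IFCAP_* bits */
	#define CAP_TSO  0x2

	int
	main(void)
	{
		unsigned int enabled = CAP_CSUM;	  /* current if_capenable */
		unsigned int requested = CAP_TSO;	  /* ifr_reqcap: csum off, tso on */
		unsigned int mask = requested ^ enabled;  /* bits that must change */

		if (mask & CAP_CSUM)
			enabled ^= CAP_CSUM;	/* toggles checksum offload off */
		if (mask & CAP_TSO)
			enabled ^= CAP_TSO;	/* toggles TSO on */
		printf("0x%x\n", enabled);	/* prints 0x2 */
		return (0);
	}
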
2409
2410static int
2411qlnx_media_change(struct ifnet *ifp)
2412{
2413 qlnx_host_t *ha;
2414 struct ifmedia *ifm;
2415 int ret = 0;
2416
2417 ha = (qlnx_host_t *)ifp->if_softc;
2418
2419 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
2420
2421 ifm = &ha->media;
2422
2423 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2424 ret = EINVAL;
2425
2426 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
2427
2428 return (ret);
2429}
2430
2431static void
2432qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2433{
2434 qlnx_host_t *ha;
2435
2436 ha = (qlnx_host_t *)ifp->if_softc;
2437
2438 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
2439
2440 ifmr->ifm_status = IFM_AVALID;
2441 ifmr->ifm_active = IFM_ETHER;
2442
2443 if (ha->link_up) {
2444 ifmr->ifm_status |= IFM_ACTIVE;
2445 ifmr->ifm_active |=
2446 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link));
2447
2448 if (ha->if_link.link_partner_caps &
2449 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause))
2450 ifmr->ifm_active |=
2451 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
2452 }
2453
2454 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,
2455 (ha->link_up ? "link_up" : "link_down")));
2456
2457 return;
2458}
2459
2460
2461static void
2462qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2463 struct qlnx_tx_queue *txq)

--- 9 unchanged lines hidden (view full) ---

2473 idx = txq->sw_tx_cons;
2474 mp = txq->sw_tx_ring[idx].mp;
2475 map = txq->sw_tx_ring[idx].map;
2476
2477 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){
2478
2479 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
2480
2481 QL_DPRINT1(ha, (ha->pci_dev, "%s: (mp == NULL) "
2482 " tx_idx = 0x%x"
2483 " ecore_prod_idx = 0x%x"
2484 " ecore_cons_idx = 0x%x"
2485 " hw_bd_cons = 0x%x"
2486 " txq_db_last = 0x%x"
2487 " elem_left = 0x%x\n",
2488 __func__,
2489 fp->rss_id,
2490 ecore_chain_get_prod_idx(&txq->tx_pbl),
2491 ecore_chain_get_cons_idx(&txq->tx_pbl),
2492 le16toh(*txq->hw_cons_ptr),
2493 txq->tx_db.raw,
2494 ecore_chain_get_elem_left(&txq->tx_pbl)));
2495
2496 fp->err_tx_free_pkt_null++;
2497
2498 //DEBUG
2499 qlnx_trigger_dump(ha);
2500
2501 return;
2502 } else {

--- 44 unchanged lines hidden (view full) ---

2547 } else {
2548 diff = hw_bd_cons - ecore_cons_idx;
2549 }
2550 if ((diff > TX_RING_SIZE) ||
2551 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){
2552
2553 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);
2554
2555 QL_DPRINT1(ha, (ha->pci_dev, "%s: (diff = 0x%x) "
2556 " tx_idx = 0x%x"
2557 " ecore_prod_idx = 0x%x"
2558 " ecore_cons_idx = 0x%x"
2559 " hw_bd_cons = 0x%x"
2560 " txq_db_last = 0x%x"
2561 " elem_left = 0x%x\n",
2562 __func__, diff,
2563 fp->rss_id,
2564 ecore_chain_get_prod_idx(&txq->tx_pbl),
2565 ecore_chain_get_cons_idx(&txq->tx_pbl),
2566 le16toh(*txq->hw_cons_ptr),
2567 txq->tx_db.raw,
2568 ecore_chain_get_elem_left(&txq->tx_pbl)));
2569
2570 fp->err_tx_cons_idx_conflict++;
2571
2572 //DEBUG
2573 qlnx_trigger_dump(ha);
2574 }
2575
2576 qlnx_free_tx_pkt(ha, fp, txq);

--- 5 unchanged lines hidden (view full) ---

2582
2583static int
2584qlnx_transmit(struct ifnet *ifp, struct mbuf *mp)
2585{
2586 qlnx_host_t *ha = (qlnx_host_t *)ifp->if_softc;
2587 struct qlnx_fastpath *fp;
2588 int rss_id = 0, ret = 0;
2589
2590 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
2591
2592#if __FreeBSD_version >= 1100000
2593 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
2594#else
2595 if (mp->m_flags & M_FLOWID)
2596#endif
2597 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
2598 ha->num_rss;

--- 11 unchanged lines hidden (view full) ---

2610
2611 if (fp->fp_taskqueue != NULL)
2612 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
2613
2614 ret = 0;
2615
2616qlnx_transmit_exit:
2617
2618 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
2619 return ret;
2620}
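
The double modulus in qlnx_transmit() above first folds the 32-bit flow hash into the RSS indirection-table range and then into the number of active queues, so every packet of a given flow lands on the same tx queue. A standalone sketch of that mapping; the table size 128 is an assumption standing in for ECORE_RSS_IND_TABLE_SIZE:

	#include <stdint.h>
	#include <stdio.h>

	#define IND_TABLE_SIZE 128	/* assumed stand-in for ECORE_RSS_IND_TABLE_SIZE */

	static unsigned int
	pick_queue(uint32_t flowid, unsigned int num_queues)
	{
		return ((flowid % IND_TABLE_SIZE) % num_queues);
	}

	int
	main(void)
	{
		/* Two packets of the same flow map to the same queue. */
		printf("%u %u\n", pick_queue(0xdeadbeefU, 4), pick_queue(0xdeadbeefU, 4));
		return (0);
	}
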
2621
2622static void
2623qlnx_qflush(struct ifnet *ifp)
2624{
2625 int rss_id;
2626 struct qlnx_fastpath *fp;
2627 struct mbuf *mp;
2628 qlnx_host_t *ha;
2629
2630 ha = (qlnx_host_t *)ifp->if_softc;
2631
2632 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
2633
2634 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
2635
2636 fp = &ha->fp_array[rss_id];
2637
2638 if (fp == NULL)
2639 continue;
2640
2641 if (fp->tx_br) {
2642 mtx_lock(&fp->tx_mtx);
2643
2644 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
2645 fp->tx_pkts_freed++;
2646 m_freem(mp);
2647 }
2648 mtx_unlock(&fp->tx_mtx);
2649 }
2650 }
2651 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
2652
2653 return;
2654}
2655
2656static void
2657qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
2658{
2659 struct ecore_dev *cdev;

--- 127 unchanged lines hidden (view full) ---

2787 struct eth_tx_2nd_bd *second_bd;
2788 struct eth_tx_3rd_bd *third_bd;
2789 struct eth_tx_bd *tx_data_bd;
2790
2791 int seg_idx = 0;
2792 uint32_t nbds_in_hdr = 0;
2793 uint32_t offset = 0;
2794
2795 QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));
2796
2797 if (!ha->link_up)
2798 return (-1);
2799
2800 first_bd = NULL;
2801 second_bd = NULL;
2802 third_bd = NULL;
2803 tx_data_bd = NULL;

--- 35 unchanged lines hidden (view full) ---

2839 if ((ret == EFBIG) ||
2840 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
2841 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
2842 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
2843 qlnx_tso_check(fp, segs, nsegs, offset))))) {
2844
2845 struct mbuf *m;
2846
2847 QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
2848 m_head->m_pkthdr.len));
2849
2850 fp->tx_defrag++;
2851
2852 m = m_defrag(m_head, M_NOWAIT);
2853 if (m == NULL) {
2854 fp->err_tx_defrag++;
2855 fp->tx_pkts_freed++;
2856 m_freem(m_head);
2857 *m_headp = NULL;
2858 QL_DPRINT1(ha, (ha->pci_dev,
2859 "%s: m_defrag() = NULL [%d]\n",
2860 __func__, ret));
2861 return (ENOBUFS);
2862 }
2863
2864 m_head = m;
2865 *m_headp = m_head;
2866
2867 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
2868 segs, &nsegs, BUS_DMA_NOWAIT))) {
2869
2870 fp->err_tx_defrag_dmamap_load++;
2871
2872 QL_DPRINT1(ha, (ha->pci_dev,
2873 "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
2874 __func__, ret, m_head->m_pkthdr.len));
2875
2876 fp->tx_pkts_freed++;
2877 m_freem(m_head);
2878 *m_headp = NULL;
2879
2880 return (ret);
2881 }
2882
2883 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
2884 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
2885
2886 fp->err_tx_non_tso_max_seg++;
2887
2888 QL_DPRINT1(ha, (ha->pci_dev,
2889 "%s: (%d) nsegs too many for non-TSO[%d, %d]\n",
2890 __func__, ret, nsegs, m_head->m_pkthdr.len));
2891
2892 fp->tx_pkts_freed++;
2893 m_freem(m_head);
2894 *m_headp = NULL;
2895
2896 return (ret);
2897 }
2898 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
2899 offset = qlnx_tcp_offset(ha, m_head);
2900
2901 } else if (ret) {
2902
2903 fp->err_tx_dmamap_load++;
2904
2905 QL_DPRINT1(ha, (ha->pci_dev,
2906 "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
2907 __func__, ret, m_head->m_pkthdr.len));
2908
2909 fp->tx_pkts_freed++;
2910 m_freem(m_head);
2911 *m_headp = NULL;
2912 return (ret);
2913 }
2914
2915 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));
2916
2917 if (ha->dbg_trace_tso_pkt_len) {
2918 if (nsegs < QLNX_FP_MAX_SEGS)
2919 fp->tx_pkts[(nsegs - 1)]++;
2920 else
2921 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++;
2922 }
2923
2924 if ((nsegs + QLNX_TX_ELEM_RESERVE) >
2925 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
2926
2927 QL_DPRINT1(ha, (ha->pci_dev, "%s: (%d, 0x%x) insufficient BDs"
2928 " in chain[%d] trying to free packets\n",
2929 __func__, nsegs, elem_left, fp->rss_id));
2930
2931 fp->tx_nsegs_gt_elem_left++;
2932
2933 (void)qlnx_tx_int(ha, fp, txq);
2934
2935 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
2936 ecore_chain_get_elem_left(&txq->tx_pbl))) {
2937
2938 QL_DPRINT1(ha, (ha->pci_dev,
2939 "%s: (%d, 0x%x) insuffient BDs in chain[%d]\n",
2940 __func__, nsegs, elem_left, fp->rss_id));
2941
2942 fp->err_tx_nsegs_gt_elem_left++;
2943 fp->tx_ring_full = 1;
2944 ha->storm_stats_enable = 1;
2945 return (ENOBUFS);
2946 }
2947 }
2948

--- 211 unchanged lines hidden (view full) ---

3160 txq->sw_tx_ring[idx].nsegs = nsegs;
3161 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
3162
3163 txq->tx_db.data.bd_prod =
3164 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
3165
3166 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3167
3168 QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
3169 return (0);
3170}
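
The tail of qlnx_send() above shows the producer/doorbell pattern: the new producer index is stored little-endian into the doorbell record, and the whole record is pushed to the device as one raw 32-bit write via qlnx_txq_doorbell_wr32(). A hedged sketch of the union layout such a record implies; the field layout here is illustrative, not the device's actual format:

	#include <stdint.h>

	union tx_db {			/* illustrative doorbell record */
		struct {
			uint16_t bd_prod;	/* producer index, little-endian */
			uint16_t params;	/* other doorbell fields */
		} data;
		uint32_t raw;			/* what actually hits the register */
	};

	static void
	ring_doorbell(volatile uint32_t *db_reg, union tx_db *db, uint16_t prod)
	{
		db->data.bd_prod = prod;	/* driver uses htole16(prod) here */
		*db_reg = db->raw;		/* single 32-bit MMIO store */
	}
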
3171
3172static void
3173qlnx_stop(qlnx_host_t *ha)
3174{
3175 struct ifnet *ifp = ha->ifp;
3176 device_t dev;
3177 int i;
3178
3179 dev = ha->pci_dev;
3180
3181 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
3182
3183 /*
3184 * We simply lock and unlock each fp->tx_mtx to
3185 * propagate the if_drv_flags
3186 * state to each tx thread
3187 */
3188 if (ha->state == QLNX_STATE_OPEN) {
3189 for (i = 0; i < ha->num_rss; i++) {
3190 struct qlnx_fastpath *fp = &ha->fp_array[i];
3191
3192 mtx_lock(&fp->tx_mtx);
3193 mtx_unlock(&fp->tx_mtx);
3194
3195 if (fp->fp_taskqueue != NULL)

--- 76 unchanged lines hidden (view full) ---

3272 while (len) {
3273
3274 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3275
3276 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3277 mp = sw_rx_data->data;
3278
3279 if (mp == NULL) {
3280 QL_DPRINT1(ha, (ha->pci_dev, "%s: mp = NULL\n",
3281 __func__));
3282 fp->err_rx_mp_null++;
3283 rxq->sw_rx_cons =
3284 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3285
3286 if (mpf != NULL)
3287 m_freem(mpf);
3288
3289 return (-1);
3290 }
3291 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3292 BUS_DMASYNC_POSTREAD);
3293
3294 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3295
3296 QL_DPRINT1(ha, (ha->pci_dev,
3297 "%s: New buffer allocation failed, dropping"
3298 " incoming packet and reusing its buffer\n",
3299 __func__));
3300
3301 qlnx_reuse_rx_data(rxq);
3302 fp->err_rx_alloc_errors++;
3303
3304 if (mpf != NULL)
3305 m_freem(mpf);
3306
3307 return (-1);

--- 43 unchanged lines hidden (view full) ---

3351 device_t dev;
3352#if __FreeBSD_version >= 1100000
3353 uint8_t hash_type;
3354#endif /* #if __FreeBSD_version >= 1100000 */
3355
3356 dev = ha->pci_dev;
3357 agg_index = cqe->tpa_agg_index;
3358
3359 QL_DPRINT7(ha, (dev, "%s[%d]: enter\n "
3360 "\t type = 0x%x\n"
3361 "\t bitfields = 0x%x\n"
3362 "\t seg_len = 0x%x\n"
3363 "\t pars_flags = 0x%x\n"
3364 "\t vlan_tag = 0x%x\n"
3365 "\t rss_hash = 0x%x\n"
3366 "\t len_on_first_bd = 0x%x\n"
3367 "\t placement_offset = 0x%x\n"
3368 "\t tpa_agg_index = 0x%x\n"
3369 "\t header_len = 0x%x\n"
3370 "\t ext_bd_len_list[0] = 0x%x\n"
3371 "\t ext_bd_len_list[1] = 0x%x\n"
3372 "\t ext_bd_len_list[2] = 0x%x\n"
3373 "\t ext_bd_len_list[3] = 0x%x\n"
3374 "\t ext_bd_len_list[4] = 0x%x\n",
3375 __func__, fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
3376 cqe->pars_flags.flags, cqe->vlan_tag,
3377 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
3378 cqe->tpa_agg_index, cqe->header_len,
3379 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
3380 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
3381 cqe->ext_bd_len_list[4]));
3382
3383 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3384 fp->err_rx_tpa_invalid_agg_num++;
3385 return;
3386 }
3387
3388 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3389 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
3390 mp = sw_rx_data->data;
3391
3392 QL_DPRINT7(ha, (dev, "%s[%d]: mp = %p \n ", __func__, fp->rss_id, mp));
3393
3394 if (mp == NULL) {
3395 QL_DPRINT7(ha, (dev, "%s[%d]: mp = NULL\n", __func__,
3396 fp->rss_id));
3397 fp->err_rx_mp_null++;
3398 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3399
3400 return;
3401 }
3402
3403 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
3404
3405 QL_DPRINT7(ha, (dev, "%s[%d]: CQE in CONS = %u has error,"
3406 " flags = %x, dropping incoming packet\n", __func__,
3407 fp->rss_id, rxq->sw_rx_cons,
3408 le16toh(cqe->pars_flags.flags)));
3409
3410 fp->err_rx_hw_errors++;
3411
3412 qlnx_reuse_rx_data(rxq);
3413
3414 QLNX_INC_IERRORS(ifp);
3415
3416 return;
3417 }
3418
3419 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3420
3421 QL_DPRINT7(ha, (dev, "%s[%d]: New buffer allocation failed,"
3422 " dropping incoming packet and reusing its buffer\n",
3423 __func__, fp->rss_id));
3424
3425 fp->err_rx_alloc_errors++;
3426 QLNX_INC_IQDROPS(ifp);
3427
3428 /*
3429 * Load the tpa mbuf into the rx ring and save the
3430 * posted mbuf
3431 */

--- 35 unchanged lines hidden (view full) ---

3467 }
3468
3469 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
3470 return;
3471 }
3472
3473 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
3474
3475 QL_DPRINT7(ha, (dev, "%s[%d]: invalid aggregation state,"
3476 " dropping incoming packet and reusing its buffer\n",
3477 __func__, fp->rss_id));
3478
3479 QLNX_INC_IQDROPS(ifp);
3480
3481 /* if we already have mbuf head in aggregation free it */
3482 if (rxq->tpa_info[agg_index].mpf) {
3483 m_freem(rxq->tpa_info[agg_index].mpf);
3484 rxq->tpa_info[agg_index].mpl = NULL;
3485 }

--- 20 unchanged lines hidden (view full) ---

3506 * first process the ext_bd_len_list
3507 * if this fails then we simply drop the packet
3508 */
3509 ecore_chain_consume(&rxq->rx_bd_ring);
3510 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3511
3512 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
3513
3514 QL_DPRINT7(ha, (dev, "%s[%d]: 4\n ", __func__, fp->rss_id));
3515
3516 if (cqe->ext_bd_len_list[i] == 0)
3517 break;
3518
3519 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3520 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3521 BUS_DMASYNC_POSTREAD);
3522
3523 mpc = sw_rx_data->data;
3524
3525 if (mpc == NULL) {
3526 QL_DPRINT7(ha, (ha->pci_dev, "%s[%d]: mpc = NULL\n",
3527 __func__, fp->rss_id));
3528 fp->err_rx_mp_null++;
3529 if (mpf != NULL)
3530 m_freem(mpf);
3531 mpf = mpl = NULL;
3532 rxq->tpa_info[agg_index].agg_state =
3533 QLNX_AGG_STATE_ERROR;
3534 ecore_chain_consume(&rxq->rx_bd_ring);
3535 rxq->sw_rx_cons =
3536 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3537 continue;
3538 }
3539
3540 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3541 QL_DPRINT7(ha, (dev,
3542 "%s[%d]: New buffer allocation failed, dropping"
3543 " incoming packet and reusing its buffer\n",
3544 __func__, fp->rss_id));
3545
3546 qlnx_reuse_rx_data(rxq);
3547
3548 if (mpf != NULL)
3549 m_freem(mpf);
3550 mpf = mpl = NULL;
3551
3552 rxq->tpa_info[agg_index].agg_state =

--- 21 unchanged lines hidden (view full) ---

3574
3575 ecore_chain_consume(&rxq->rx_bd_ring);
3576 rxq->sw_rx_cons =
3577 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3578 }
3579
3580 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
3581
3582 QL_DPRINT7(ha, (dev, "%s[%d]: invalid aggregation state,"
3583 " dropping incoming packet and reusing its buffer\n",
3584 __func__, fp->rss_id));
3585
3586 QLNX_INC_IQDROPS(ifp);
3587
3588 rxq->tpa_info[agg_index].mpf = mp;
3589 rxq->tpa_info[agg_index].mpl = NULL;
3590
3591 return;
3592 }

--- 63 unchanged lines hidden (view full) ---

3656
3657 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
3658 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
3659 mp->m_flags |= M_VLANTAG;
3660 }
3661
3662 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
3663
3664 QL_DPRINT7(ha, (dev, "%s[%d]: 5\n" "\tagg_state = %d\n"
3665 "\t mpf = %p mpl = %p\n", __func__, fp->rss_id,
3666 rxq->tpa_info[agg_index].agg_state,
3667 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl));
3668
3669 return;
3670}
3671
3672static void
3673qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3674 struct qlnx_rx_queue *rxq,
3675 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
3676{
3677 struct sw_rx_data *sw_rx_data;
3678 int i;
3679 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
3680 struct mbuf *mp;
3681 uint32_t agg_index;
3682 device_t dev;
3683
3684 dev = ha->pci_dev;
3685
3686 QL_DPRINT7(ha, (dev, "%s[%d]: enter\n "
3687 "\t type = 0x%x\n"
3688 "\t tpa_agg_index = 0x%x\n"
3689 "\t len_list[0] = 0x%x\n"
3690 "\t len_list[1] = 0x%x\n"
3691 "\t len_list[2] = 0x%x\n"
3692 "\t len_list[3] = 0x%x\n"
3693 "\t len_list[4] = 0x%x\n"
3694 "\t len_list[5] = 0x%x\n",
3695 __func__, fp->rss_id, cqe->type, cqe->tpa_agg_index,
3696 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
3697 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]));
3698
3699 agg_index = cqe->tpa_agg_index;
3700
3701 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3702 QL_DPRINT7(ha, (dev, "%s[%d]: 0\n ", __func__, fp->rss_id));
3703 fp->err_rx_tpa_invalid_agg_num++;
3704 return;
3705 }
3706
3707
3708 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
3709
3710 QL_DPRINT7(ha, (dev, "%s[%d]: 1\n ", __func__, fp->rss_id));
3711
3712 if (cqe->len_list[i] == 0)
3713 break;
3714
3715 if (rxq->tpa_info[agg_index].agg_state !=
3716 QLNX_AGG_STATE_START) {
3717 qlnx_reuse_rx_data(rxq);
3718 continue;
3719 }
3720
3721 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3722 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3723 BUS_DMASYNC_POSTREAD);
3724
3725 mpc = sw_rx_data->data;
3726
3727 if (mpc == NULL) {
3728
3729 QL_DPRINT7(ha, (dev, "%s[%d]: mpc = NULL\n",
3730 __func__, fp->rss_id));
3731
3732 fp->err_rx_mp_null++;
3733 if (mpf != NULL)
3734 m_freem(mpf);
3735 mpf = mpl = NULL;
3736 rxq->tpa_info[agg_index].agg_state =
3737 QLNX_AGG_STATE_ERROR;
3738 ecore_chain_consume(&rxq->rx_bd_ring);
3739 rxq->sw_rx_cons =
3740 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3741 continue;
3742 }
3743
3744 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3745
3746 QL_DPRINT7(ha, (dev,
3747 "%s[%d]: New buffer allocation failed, dropping"
3748 " incoming packet and reusing its buffer\n",
3749 __func__, fp->rss_id));
3750
3751 qlnx_reuse_rx_data(rxq);
3752
3753 if (mpf != NULL)
3754 m_freem(mpf);
3755 mpf = mpl = NULL;
3756
3757 rxq->tpa_info[agg_index].agg_state =

--- 19 unchanged lines hidden (view full) ---

3777 mpl = mpc;
3778 }
3779
3780 ecore_chain_consume(&rxq->rx_bd_ring);
3781 rxq->sw_rx_cons =
3782 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3783 }
3784
3785 QL_DPRINT7(ha, (dev, "%s[%d]: 2\n" "\tmpf = %p mpl = %p\n",
3786 __func__, fp->rss_id, mpf, mpl));
3787
3788 if (mpf != NULL) {
3789 mp = rxq->tpa_info[agg_index].mpl;
3790 mp->m_len = ha->rx_buf_size;
3791 mp->m_next = mpf;
3792 rxq->tpa_info[agg_index].mpl = mpl;
3793 }
3794

--- 11 unchanged lines hidden (view full) ---

3806 struct mbuf *mp;
3807 uint32_t agg_index;
3808 uint32_t len = 0;
3809 struct ifnet *ifp = ha->ifp;
3810 device_t dev;
3811
3812 dev = ha->pci_dev;
3813
3814 QL_DPRINT7(ha, (dev, "%s[%d]: enter\n "
3815 "\t type = 0x%x\n"
3816 "\t tpa_agg_index = 0x%x\n"
3817 "\t total_packet_len = 0x%x\n"
3818 "\t num_of_bds = 0x%x\n"
3819 "\t end_reason = 0x%x\n"
3820 "\t num_of_coalesced_segs = 0x%x\n"
3821 "\t ts_delta = 0x%x\n"
3822 "\t len_list[0] = 0x%x\n"
3823 "\t len_list[1] = 0x%x\n"
3824 "\t len_list[2] = 0x%x\n"
3825 "\t len_list[3] = 0x%x\n",
3826 __func__, fp->rss_id, cqe->type, cqe->tpa_agg_index,
3827 cqe->total_packet_len, cqe->num_of_bds,
3828 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
3829 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
3830 cqe->len_list[3]));
3831
3832 agg_index = cqe->tpa_agg_index;
3833
3834 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
3835
3836 QL_DPRINT7(ha, (dev, "%s[%d]: 0\n ", __func__, fp->rss_id));
3837
3838 fp->err_rx_tpa_invalid_agg_num++;
3839 return (0);
3840 }
3841
3842
3843 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
3844
3845 QL_DPRINT7(ha, (dev, "%s[%d]: 1\n ", __func__, fp->rss_id));
3846
3847 if (cqe->len_list[i] == 0)
3848 break;
3849
3850 if (rxq->tpa_info[agg_index].agg_state !=
3851 QLNX_AGG_STATE_START) {
3852
3853 QL_DPRINT7(ha, (dev, "%s[%d]: 2\n ", __func__,
3854 fp->rss_id));
3855
3856 qlnx_reuse_rx_data(rxq);
3857 continue;
3858 }
3859
3860 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3861 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3862 BUS_DMASYNC_POSTREAD);
3863
3864 mpc = sw_rx_data->data;
3865
3866 if (mpc == NULL) {
3867
3868 QL_DPRINT7(ha, (dev, "%s[%d]: mpc = NULL\n",
3869 __func__, fp->rss_id));
3870
3871 fp->err_rx_mp_null++;
3872 if (mpf != NULL)
3873 m_freem(mpf);
3874 mpf = mpl = NULL;
3875 rxq->tpa_info[agg_index].agg_state =
3876 QLNX_AGG_STATE_ERROR;
3877 ecore_chain_consume(&rxq->rx_bd_ring);
3878 rxq->sw_rx_cons =
3879 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3880 continue;
3881 }
3882
3883 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3884 QL_DPRINT7(ha, (dev,
3885 "%s[%d]: New buffer allocation failed, dropping"
3886 " incoming packet and reusing its buffer\n",
3887 __func__, fp->rss_id));
3888
3889 qlnx_reuse_rx_data(rxq);
3890
3891 if (mpf != NULL)
3892 m_freem(mpf);
3893 mpf = mpl = NULL;
3894
3895 rxq->tpa_info[agg_index].agg_state =

--- 19 unchanged lines hidden (view full) ---

3915 mpl = mpc;
3916 }
3917
3918 ecore_chain_consume(&rxq->rx_bd_ring);
3919 rxq->sw_rx_cons =
3920 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3921 }
3922
3923 QL_DPRINT7(ha, (dev, "%s[%d]: 5\n ", __func__, fp->rss_id));
3924
3925 if (mpf != NULL) {
3926
3927 QL_DPRINT7(ha, (dev, "%s[%d]: 6\n ", __func__, fp->rss_id));
3928
3929 mp = rxq->tpa_info[agg_index].mpl;
3930 mp->m_len = ha->rx_buf_size;
3931 mp->m_next = mpf;
3932 }
3933
3934 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
3935
3936 QL_DPRINT7(ha, (dev, "%s[%d]: 7\n ", __func__, fp->rss_id));
3937
3938 if (rxq->tpa_info[agg_index].mpf != NULL)
3939 m_freem(rxq->tpa_info[agg_index].mpf);
3940 rxq->tpa_info[agg_index].mpf = NULL;
3941 rxq->tpa_info[agg_index].mpl = NULL;
3942 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
3943 return (0);
3944 }

--- 16 unchanged lines hidden (view full) ---

3961 mpl = rxq->tpa_info[agg_index].mpl;
3962 mpl->m_len += (cqe->total_packet_len - len);
3963 }
3964 }
3965
3966 QLNX_INC_IPACKETS(ifp);
3967 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));
3968
3969 QL_DPRINT7(ha, (dev, "%s[%d]: 8 csum_data = 0x%x csum_flags = 0x%lx\n "
3970 "m_len = 0x%x m_pkthdr_len = 0x%x\n",
3971 __func__, fp->rss_id, mp->m_pkthdr.csum_data,
3972 mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len));
3973
3974 (*ifp->if_input)(ifp, mp);
3975
3976 rxq->tpa_info[agg_index].mpf = NULL;
3977 rxq->tpa_info[agg_index].mpl = NULL;
3978 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
3979
3980 return (cqe->num_of_coalesced_segs);

--- 41 unchanged lines hidden (view full) ---

4022#endif /* #if __FreeBSD_version >= 1100000 */
4023
4024 /* Get the CQE from the completion ring */
4025 cqe = (union eth_rx_cqe *)
4026 ecore_chain_consume(&rxq->rx_comp_ring);
4027 cqe_type = cqe->fast_path_regular.type;
4028
4029 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
4030 QL_DPRINT3(ha, (ha->pci_dev, "Got a slowpath CQE\n"));
4031
4032 ecore_eth_cqe_completion(p_hwfn,
4033 (struct eth_slow_path_rx_cqe *)cqe);
4034 goto next_cqe;
4035 }
4036
4037 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
4038

--- 24 unchanged lines hidden (view full) ---

4063 goto next_cqe;
4064 }
4065
4066 /* Get the data from the SW ring */
4067 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4068 mp = sw_rx_data->data;
4069
4070 if (mp == NULL) {
4071 QL_DPRINT1(ha, (ha->pci_dev, "%s: mp = NULL\n",
4072 __func__));
4073 fp->err_rx_mp_null++;
4074 rxq->sw_rx_cons =
4075 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4076 goto next_cqe;
4077 }
4078 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4079 BUS_DMASYNC_POSTREAD);
4080
4081 /* non GRO */
4082 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
4083 len = le16toh(fp_cqe->pkt_len);
4084 pad = fp_cqe->placement_offset;
4085
4086 QL_DPRINT3(ha,
4087 (ha->pci_dev, "CQE type = %x, flags = %x, vlan = %x,"
4088 " len %u, parsing flags = %d pad = %d\n",
4089 cqe_type, fp_cqe->bitfields,
4090 le16toh(fp_cqe->vlan_tag),
4091 len, le16toh(fp_cqe->pars_flags.flags), pad));
4092
4093 data = mtod(mp, uint8_t *);
4094 data = data + pad;
4095
4096 if (0)
4097 qlnx_dump_buf8(ha, __func__, data, len);
4098
4099 /* For every Rx BD consumed, we allocate a new BD so the BD ring
4100 * always keeps a fixed size. If allocation fails, we take the
4101 * consumed BD and return it to the ring in the PROD position.
4102 * The packet that was received on that BD will be dropped (and
4103 * not passed to the upper stack).
4104 */
4105 /* If this is an error packet then drop it */
4106 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4107 CQE_FLAGS_ERR) {
4108
4109 QL_DPRINT1(ha, (ha->pci_dev,
4110 "CQE in CONS = %u has error, flags = %x,"
4111 " dropping incoming packet\n", sw_comp_cons,
4112 le16toh(cqe->fast_path_regular.pars_flags.flags)));
4113
4114 fp->err_rx_hw_errors++;
4115
4116 qlnx_reuse_rx_data(rxq);
4117
4118 QLNX_INC_IERRORS(ifp);
4119
4120 goto next_cqe;
4121 }
4122
4123 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4124
4125 QL_DPRINT1(ha, (ha->pci_dev,
4126 "New buffer allocation failed, dropping"
4127 " incoming packet and reusing its buffer\n"));
4128
4129 qlnx_reuse_rx_data(rxq);
4130
4131 fp->err_rx_alloc_errors++;
4132
4133 QLNX_INC_IQDROPS(ifp);
4134
4135 goto next_cqe;
4136 }
4137
4138 ecore_chain_consume(&rxq->rx_bd_ring);
4139
4140 len_on_first_bd = fp_cqe->len_on_first_bd;
4141 m_adj(mp, pad);
4142 mp->m_pkthdr.len = len;
4143
4144 QL_DPRINT1(ha,
4145 (ha->pci_dev, "%s: len = %d len_on_first_bd = %d\n",
4146 __func__, len, len_on_first_bd));
4147
4148 if ((len > 60 ) && (len > len_on_first_bd)) {
4149
4150 mp->m_len = len_on_first_bd;
4151
4152 if (qlnx_rx_jumbo_chain(ha, fp, mp,
4153 (len - len_on_first_bd)) != 0) {
4154
4155 m_freem(mp);

--- 134 unchanged lines hidden (view full) ---
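
The Rx path above implements the fixed-size BD ring policy spelled out in the comment before the error checks: a consumed buffer is handed up the stack only if a replacement can be allocated; otherwise the old buffer is re-posted at the producer slot and the packet is dropped. A userspace sketch of that policy, assuming a power-of-two ring as the driver's (idx + 1) & (RX_RING_SIZE - 1) arithmetic implies (sizes are illustrative):

	#include <stdlib.h>

	#define RING_SIZE 8		/* assumed; the driver uses RX_RING_SIZE */
	#define BUF_SIZE  2048

	struct rx_ring {
		void		*buf[RING_SIZE];
		unsigned int	cons, prod;
	};

	/* Returns the received buffer, or NULL if the packet had to be dropped. */
	void *
	rx_consume(struct rx_ring *r)
	{
		void *pkt = r->buf[r->cons & (RING_SIZE - 1)];
		void *fresh = malloc(BUF_SIZE);

		if (fresh == NULL) {
			/* Re-post the consumed buffer; the packet is dropped. */
			r->buf[r->prod & (RING_SIZE - 1)] = pkt;
			r->prod++; r->cons++;
			return (NULL);
		}
		r->buf[r->prod & (RING_SIZE - 1)] = fresh;
		r->prod++; r->cons++;
		return (pkt);
	}
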

4290
4291 if (ha->state != QLNX_STATE_OPEN) {
4292 return;
4293 }
4294
4295 idx = ivec->rss_idx;
4296
4297 if (idx >= ha->num_rss) {
4298 QL_DPRINT1(ha, (ha->pci_dev, "%s: illegal interrupt[%d]\n",
4299 __func__, idx));
4300 ha->err_illegal_intr++;
4301 return;
4302 }
4303 fp = &ha->fp_array[idx];
4304
4305 if (fp == NULL) {
4306 QL_DPRINT1(ha, (ha->pci_dev, "%s: fp_array[%d] NULL\n",
4307 __func__, idx));
4308 ha->err_fp_null++;
4309 } else {
4310 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
4311 if (fp->fp_taskqueue != NULL)
4312 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
4313 }
4314
4315 return;

--- 11 unchanged lines hidden (view full) ---

4327 qlnx_host_t *ha;
4328
4329 p_hwfn = arg;
4330
4331 ha = (qlnx_host_t *)p_hwfn->p_dev;
4332
4333 ha->sp_interrupts++;
4334
4335 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
4336
4337 ecore_int_sp_dpc(p_hwfn);
4338
4339 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
4340
4341 return;
4342}
4343
4344/*****************************************************************************
4345 * Support Functions for DMA'able Memory
4346 *****************************************************************************/
4347

--- 31 unchanged lines hidden (view full) ---

4379 dma_buf->size, /* maxsize */
4380 1, /* nsegments */
4381 dma_buf->size, /* maxsegsize */
4382 0, /* flags */
4383 NULL, NULL, /* lockfunc, lockarg */
4384 &dma_buf->dma_tag);
4385
4386 if (ret) {
4387 QL_DPRINT1(ha,
4388 (dev, "%s: could not create dma tag\n", __func__));
4389 goto qlnx_alloc_dmabuf_exit;
4390 }
4391 ret = bus_dmamem_alloc(dma_buf->dma_tag,
4392 (void **)&dma_buf->dma_b,
4393 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
4394 &dma_buf->dma_map);
4395 if (ret) {
4396 bus_dma_tag_destroy(dma_buf->dma_tag);
4397 QL_DPRINT1(ha,
4398 (dev, "%s: bus_dmamem_alloc failed\n", __func__));
4399 goto qlnx_alloc_dmabuf_exit;
4400 }
4401
4402 ret = bus_dmamap_load(dma_buf->dma_tag,
4403 dma_buf->dma_map,
4404 dma_buf->dma_b,
4405 dma_buf->size,
4406 qlnx_dmamap_callback,

--- 45 unchanged lines hidden (view full) ---

4452 return (NULL);
4453 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size);
4454
4455 *phys = dma_buf.dma_addr;
4456
4457 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size);
4458
4459 memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t));
4460
4461 QL_DPRINT5(ha, (dev, "%s: [%p %p %p %p 0x%08x ]\n", __func__,
4462 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag,
4463 dma_buf.dma_b, (void *)dma_buf.dma_addr, size));
4464
4465 return (dma_buf.dma_b);
4466}
4467
4468void
4469qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys,
4470 uint32_t size)
4471{
4472 qlnx_dma_t dma_buf, *dma_p;

--- 4 unchanged lines hidden (view full) ---

4477 dev = ha->pci_dev;
4478
4479 if (v_addr == NULL)
4480 return;
4481
4482 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4483
4484 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size);
4485
4486 QL_DPRINT5(ha, (dev, "%s: [%p %p %p %p 0x%08x ]\n", __func__,
4487 (void *)dma_p->dma_map, (void *)dma_p->dma_tag,
4488 dma_p->dma_b, (void *)dma_p->dma_addr, size));
4489
4490 dma_buf = *dma_p;
4491
4492 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf);
4493 return;
4494}
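
The coherent DMA alloc/free pair above shares its bookkeeping through a trailer: the alloc path copies the qlnx_dma_t descriptor just past the requested bytes, and the free path (which rounds the size up to a page boundary before computing the offset, implying the alloc side did the same) rebuilds the descriptor from the virtual address alone. A malloc-based sketch of the trick; struct meta is an illustrative stand-in for qlnx_dma_t:

	#include <stdlib.h>
	#include <string.h>

	struct meta {			/* stand-in for qlnx_dma_t */
		size_t	size;
	};

	static void *
	alloc_with_trailer(size_t size)
	{
		struct meta m = { .size = size };
		unsigned char *p;

		if ((p = malloc(size + sizeof(m))) == NULL)
			return (NULL);
		memcpy(p + size, &m, sizeof(m));	/* descriptor lives past the payload */
		return (p);
	}

	static void
	free_with_trailer(void *v, size_t size)
	{
		struct meta m;

		memcpy(&m, (unsigned char *)v + size, sizeof(m));	/* recover it */
		free(v);	/* the driver hands the recovered qlnx_dma_t to bus_dma teardown */
	}
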
4495
4496static int
4497qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)

--- 15 unchanged lines hidden (view full) ---

4513 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
4514 0, /* nsegments */
4515 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
4516 0, /* flags */
4517 NULL, NULL, /* lockfunc, lockarg */
4518 &ha->parent_tag);
4519
4520 if (ret) {
4521 QL_DPRINT1(ha, (dev, "%s: could not create parent dma tag\n",
4522 __func__));
4523 return (-1);
4524 }
4525
4526 ha->flags.parent_tag = 1;
4527
4528 return (0);
4529}
4530

--- 18 unchanged lines hidden (view full) ---

4549 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */
4550 QLNX_MAX_SEGMENTS, /* nsegments */
4551 (PAGE_SIZE * 4), /* maxsegsize */
4552 BUS_DMA_ALLOCNOW, /* flags */
4553 NULL, /* lockfunc */
4554 NULL, /* lockfuncarg */
4555 &ha->tx_tag)) {
4556
4557 QL_DPRINT1(ha, (ha->pci_dev, "%s: tx_tag alloc failed\n",
4558 __func__));
4559 return (-1);
4560 }
4561
4562 return (0);
4563}
4564
4565static void
4566qlnx_free_tx_dma_tag(qlnx_host_t *ha)

--- 16 unchanged lines hidden (view full) ---

4583 MJUM9BYTES, /* maxsize */
4584 1, /* nsegments */
4585 MJUM9BYTES, /* maxsegsize */
4586 BUS_DMA_ALLOCNOW, /* flags */
4587 NULL, /* lockfunc */
4588 NULL, /* lockfuncarg */
4589 &ha->rx_tag)) {
4590
4591 QL_DPRINT1(ha, (ha->pci_dev, "%s: rx_tag alloc failed\n",
4592 __func__));
4593
4594 return (-1);
4595 }
4596 return (0);
4597}
4598
4599static void
4600qlnx_free_rx_dma_tag(qlnx_host_t *ha)

--- 73 unchanged lines hidden (view full) ---

4674 pci_reg, reg_value, 4);
4675 return;
4676}
4677
4678
4679int
4680qlnx_pci_find_capability(void *ecore_dev, int cap)
4681{
4682 int reg;
4683
4684 if (pci_find_cap(((qlnx_host_t *)ecore_dev)->pci_dev, PCIY_EXPRESS,
4685 &reg) == 0)
4686 return reg;
4687 else {
4688 QL_DPRINT1(((qlnx_host_t *)ecore_dev),
4689 (((qlnx_host_t *)ecore_dev)->pci_dev,
4690 "%s: failed\n", __func__));
4691 return 0;
4692 }
4693}
4694
4695uint32_t
4696qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
4697{
4698 uint32_t data32;

--- 370 unchanged lines hidden (view full) ---

5069}
5070
5071void
5072qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
5073{
5074 enum ecore_mcp_protocol_type type;
5075 union ecore_mcp_protocol_stats *stats;
5076 struct ecore_eth_stats eth_stats;
5077 device_t dev;
5078
5079 dev = ((qlnx_host_t *)cdev)->pci_dev;
5080 stats = proto_stats;
5081 type = proto_type;
5082
5083 switch (type) {
5084 case ECORE_MCP_LAN_STATS:
5085 ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats);
5086 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
5087 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
5088 stats->lan_stats.fcs_err = -1;
5089 break;
5090
5091 default:
5092 ((qlnx_host_t *)cdev)->err_get_proto_invalid_type++;
5093
5094 QL_DPRINT1(((qlnx_host_t *)cdev),
5095 (dev, "%s: invalid protocol type 0x%x\n", __func__,
5096 type));
5097 break;
5098 }
5099 return;
5100}
5101
5102static int
5103qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
5104{
5105 struct ecore_hwfn *p_hwfn;
5106 struct ecore_ptt *p_ptt;
5107
5108 p_hwfn = &ha->cdev.hwfns[0];
5109 p_ptt = ecore_ptt_acquire(p_hwfn);
5110
5111 if (p_ptt == NULL) {
5112 QL_DPRINT1(ha, (ha->pci_dev,
5113 "%s : ecore_ptt_acquire failed\n", __func__));
5114 return (-1);
5115 }
5116 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);
5117
5118 ecore_ptt_release(p_hwfn, p_ptt);
5119
5120 return (0);
5121}
5122
5123static int
5124qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
5125{
5126 struct ecore_hwfn *p_hwfn;
5127 struct ecore_ptt *p_ptt;
5128
5129 p_hwfn = &ha->cdev.hwfns[0];
5130 p_ptt = ecore_ptt_acquire(p_hwfn);
5131
5132 if (p_ptt == NULL) {
5133 QL_DPRINT1(ha, (ha->pci_dev,
5134 "%s : ecore_ptt_acquire failed\n", __func__));
5135 return (-1);
5136 }
5137 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);
5138
5139 ecore_ptt_release(p_hwfn, p_ptt);
5140
5141 return (0);
5142}

--- 86 unchanged lines hidden (view full) ---

5229 struct ecore_hwfn *p_hwfn;
5230 int hwfn_index, rc;
5231 u16 rel_sb_id;
5232
5233 hwfn_index = sb_id % cdev->num_hwfns;
5234 p_hwfn = &cdev->hwfns[hwfn_index];
5235 rel_sb_id = sb_id / cdev->num_hwfns;
5236
5237 QL_DPRINT2(((qlnx_host_t *)cdev), (((qlnx_host_t *)cdev)->pci_dev,
5238 "%s: hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x "
5239 "sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n",
5240 __func__, hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info,
5241 sb_virt_addr, (void *)sb_phy_addr));
5242
5243 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
5244 sb_virt_addr, sb_phy_addr, rel_sb_id);
5245
5246 return rc;
5247}
5248
5249/* This function allocates fast-path status block memory */

--- 7 unchanged lines hidden (view full) ---

5257 struct ecore_dev *cdev;
5258
5259 cdev = &ha->cdev;
5260
5261 size = sizeof(*sb_virt);
5262 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size);
5263
5264 if (!sb_virt) {
5265 QL_DPRINT1(ha, (ha->pci_dev,
5266 "%s: Status block allocation failed\n", __func__));
5267 return -ENOMEM;
5268 }
5269
5270 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id);
5271 if (rc) {
5272 QL_DPRINT1(ha, (ha->pci_dev, "%s: failed\n", __func__));
5273 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size);
5274 }
5275
5276 return rc;
5277}
5278
5279static void
5280qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)

--- 79 unchanged lines hidden (view full) ---

5360
5361 cdev = &ha->cdev;
5362
5363 rx_buf_size = rxq->rx_buf_size;
5364
5365 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
5366
5367 if (mp == NULL) {
5368 QL_DPRINT1(ha, (ha->pci_dev,
5369 "%s : Failed to allocate Rx data\n", __func__));
5370 return -ENOMEM;
5371 }
5372
5373 mp->m_len = mp->m_pkthdr.len = rx_buf_size;
5374
5375 map = (bus_dmamap_t)0;
5376
5377 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
5378 BUS_DMA_NOWAIT);
5379 dma_addr = segs[0].ds_addr;
5380
5381 if (ret || !dma_addr || (nsegs != 1)) {
5382 m_freem(mp);
5383 QL_DPRINT1(ha, (ha->pci_dev,
5384 "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
5385 __func__, ret, (long long unsigned int)dma_addr,
5386 nsegs));
5387 return -ENOMEM;
5388 }
5389
5390 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
5391 sw_rx_data->data = mp;
5392 sw_rx_data->dma_addr = dma_addr;
5393 sw_rx_data->map = map;
5394

--- 18 unchanged lines hidden (view full) ---

5413 bus_dma_segment_t segs[1];
5414 int nsegs;
5415 int ret;
5416 struct sw_rx_data *rx_buf;
5417
5418 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size);
5419
5420 if (mp == NULL) {
5421 QL_DPRINT1(ha, (ha->pci_dev,
5422 "%s : Failed to allocate Rx data\n", __func__));
5423 return -ENOMEM;
5424 }
5425
5426 mp->m_len = mp->m_pkthdr.len = rx_buf_size;
5427
5428 map = (bus_dmamap_t)0;
5429
5430 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
5431 BUS_DMA_NOWAIT);
5432 dma_addr = segs[0].ds_addr;
5433
5434 if (ret || !dma_addr || (nsegs != 1)) {
5435 m_freem(mp);
5436 QL_DPRINT1(ha, (ha->pci_dev,
5437 "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
5438 __func__, ret, (long long unsigned int)dma_addr,
5439 nsegs));
5440 return -ENOMEM;
5441 }
5442
5443 rx_buf = &tpa->rx_buf;
5444
5445 memset(rx_buf, 0, sizeof (struct sw_rx_data));
5446
5447 rx_buf->data = mp;

--- 80 unchanged lines hidden (view full) ---

5528
5529 for (i = 0; i < rxq->num_rx_buffers; i++) {
5530 rc = qlnx_alloc_rx_buffer(ha, rxq);
5531 if (rc)
5532 break;
5533 }
5534 num_allocated = i;
5535 if (!num_allocated) {
5536 QL_DPRINT1(ha, (ha->pci_dev,
5537 "%s: Rx buffers allocation failed\n", __func__));
5538 goto err;
5539 } else if (num_allocated < rxq->num_rx_buffers) {
5540 QL_DPRINT1(ha, (ha->pci_dev,
5541 "%s: Allocated less buffers than"
5542 " desired (%d allocated)\n", __func__, num_allocated));
5543 }
5544
5545#ifdef QLNX_SOFT_LRO
5546
5547 {
5548 struct lro_ctrl *lro;
5549
5550 lro = &rxq->lro;
5551
5552#if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
5553 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) {
5554 QL_DPRINT1(ha, (ha->pci_dev,
5555 "%s: tcp_lro_init[%d] failed\n",
5556 __func__, rxq->rxq_id));
5557 goto err;
5558 }
5559#else
5560 if (tcp_lro_init(lro)) {
5561 QL_DPRINT1(ha, (ha->pci_dev,
5562 "%s: tcp_lro_init[%d] failed\n",
5563 __func__, rxq->rxq_id));
5564 goto err;
5565 }
5566#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
5567
5568 lro->ifp = ha->ifp;
5569 }
5570#endif /* #ifdef QLNX_SOFT_LRO */
5571 return 0;

--- 107 unchanged lines hidden (view full) ---

5679 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
5680 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);
5681
5682 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
5683
5684 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF,
5685 M_NOWAIT, &fp->tx_mtx);
5686 if (fp->tx_br == NULL) {
5687 QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for"
5688 " fp[%d, %d]\n", ha->dev_unit, fp->rss_id));
5689 return -ENOMEM;
5690 }
5691 return 0;
5692}
5693
5694static int
5695qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
5696{
5697 int rc, tc;

--- 90 unchanged lines hidden (view full) ---

5788 vport_start_params.tpa_mode =
5789 (hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE);
5790 vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS;
5791
5792 vport_start_params.vport_id = vport_id;
5793 vport_start_params.mtu = mtu;
5794
5795
5796 QL_DPRINT2(ha, (ha->pci_dev, "%s: setting mtu to %d\n", __func__, mtu));
5797
5798 for_each_hwfn(cdev, i) {
5799 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5800
5801 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
5802 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
5803
5804 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params);
5805
5806 if (rc) {
5807 QL_DPRINT1(ha, (ha->pci_dev,
5808 "%s: Failed to start VPORT V-PORT %d "
5809 "with MTU %d\n", __func__, vport_id, mtu));
5810 return -ENOMEM;
5811 }
5812
5813 ecore_hw_start_fastpath(p_hwfn);
5814
5815 QL_DPRINT2(ha, (ha->pci_dev,
5816 "%s: Started V-PORT %d with MTU %d\n",
5817 __func__, vport_id, mtu));
5818 }
5819 return 0;
5820}
5821
5822
5823static int
5824qlnx_update_vport(struct ecore_dev *cdev,
5825 struct qlnx_update_vport_params *params)

--- 21 unchanged lines hidden (view full) ---

5847 params->update_inner_vlan_removal_flg;
5848 sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
5849
5850 sp_params.sge_tpa_params = params->sge_tpa_params;
5851
5852 /* RSS is a bit tricky, since the upper layer isn't familiar with hwfns.
5853 * We need to re-fix the rss values per engine for CMT.
5854 */
5855
5856 sp_params.rss_params = params->rss_params;
5857
5858 for_each_hwfn(cdev, i) {
5859
5860 p_hwfn = &cdev->hwfns[i];
5861
5862 if ((cdev->num_hwfns > 1) &&
5863 params->rss_params->update_rss_config &&
5864 params->rss_params->rss_enable) {

--- 5 unchanged lines hidden (view full) ---

5870 fp_index = ((cdev->num_hwfns * j) + i) %
5871 ha->num_rss;
5872
5873 fp = &ha->fp_array[fp_index];
5874 rss->rss_ind_table[j] = fp->rxq->handle;
5875 }
5876
5877 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
5878 QL_DPRINT3(ha, (ha->pci_dev,
5879 "%p %p %p %p %p %p %p %p \n",
5880 rss->rss_ind_table[j],
5881 rss->rss_ind_table[j+1],
5882 rss->rss_ind_table[j+2],
5883 rss->rss_ind_table[j+3],
5884 rss->rss_ind_table[j+4],
5885 rss->rss_ind_table[j+5],
5886 rss->rss_ind_table[j+6],
5887 rss->rss_ind_table[j+7]));
5888 j += 8;
5889 }
5890 }
5891
5892 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
5893 rc = ecore_sp_vport_update(p_hwfn, &sp_params,
5894 ECORE_SPQ_MODE_EBLOCK, NULL);
5895 if (rc) {
5896 QL_DPRINT1(ha, (ha->pci_dev,
5897 "%s:Failed to update VPORT\n", __func__));
5898 return rc;
5899 }
5900
5901 QL_DPRINT2(ha, (ha->pci_dev,
5902 "%s: Updated V-PORT %d: tx_active_flag %d,"
5903 "rx_active_flag %d [tx_update %d], [rx_update %d]\n",
5904 __func__,
5905 params->vport_id, params->vport_active_tx_flg,
5906 params->vport_active_rx_flg,
5907 params->update_vport_active_tx_flg,
5908 params->update_vport_active_rx_flg));
5909 }
5910
5911 return 0;
5912}
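
For CMT parts (cdev->num_hwfns > 1), the loop above rebuilds each engine's RSS indirection table: entry j on engine i points at fastpath ((num_hwfns * j) + i) % num_rss, so when num_rss is a multiple of num_hwfns each engine's table only references fastpaths that map back to that engine. A tiny worked example showing the stride:

	#include <stdio.h>

	int
	main(void)
	{
		int num_hwfns = 2, num_rss = 4;	/* example CMT configuration */
		int i, j;

		for (i = 0; i < num_hwfns; i++) {
			printf("hwfn %d:", i);
			for (j = 0; j < 8; j++)	/* first 8 of the 128 entries */
				printf(" %d", ((num_hwfns * j) + i) % num_rss);
			printf("\n");
		}
		/* prints: hwfn 0: 0 2 0 2 0 2 0 2
		 *         hwfn 1: 1 3 1 3 1 3 1 3 */
		return (0);
	}
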
5913
5914static void
5915qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
5916{

--- 77 unchanged lines hidden (view full) ---

5994 struct ifnet *ifp;
5995 struct ecore_hwfn *p_hwfn;
5996 struct ecore_sge_tpa_params tpa_params;
5997 struct ecore_queue_start_common_params qparams;
5998 struct qlnx_fastpath *fp;
5999
6000 ifp = ha->ifp;
6001
6002 if (!ha->num_rss) {
6003 QL_DPRINT1(ha, (ha->pci_dev,
6004 "%s: Cannot update V-VPORT as active as there"
6005 " are no Rx queues\n", __func__));
6006 return -EINVAL;
6007 }
6008
6009#ifndef QLNX_SOFT_LRO
6010 hw_lro_enable = ifp->if_capenable & IFCAP_LRO;
6011#endif /* #ifndef QLNX_SOFT_LRO */
6012
6013 rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg,
6014 vlan_removal_en, tx_switching, hw_lro_enable);
6015
6016 if (rc) {
6017 QL_DPRINT1(ha, (ha->pci_dev,
6018 "%s: Start V-PORT failed %d\n", __func__, rc));
6019 return rc;
6020 }
6021
6022 QL_DPRINT2(ha, (ha->pci_dev,
6023 "%s: Start vport ramrod passed,"
6024 " vport_id = %d, MTU = %d, vlan_removal_en = %d\n", __func__,
6025 vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en));
6026
6027 for_each_rss(i) {
6028 struct ecore_rxq_start_ret_params rx_ret_params;
6029 struct ecore_txq_start_ret_params tx_ret_params;
6030
6031 fp = &ha->fp_array[i];
6032 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];
6033

--- 16 unchanged lines hidden (view full) ---

6050 fp->rxq->rx_bd_ring.p_phys_addr,
6051 /* cqe_pbl_addr */
6052 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
6053 /* cqe_pbl_size */
6054 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
6055 &rx_ret_params);
6056
6057 if (rc) {
6058 QL_DPRINT1(ha, (ha->pci_dev,
6059 "%s: Start RXQ #%d failed %d\n", __func__,
6060 i, rc));
6061 return rc;
6062 }
6063
6064 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod;
6065 fp->rxq->handle = rx_ret_params.p_handle;
6066 fp->rxq->hw_cons_ptr =
6067 &fp->sb_info->sb_virt->pi_array[RX_PI];
6068

--- 17 unchanged lines hidden (view full) ---

6086 p_hwfn->hw_info.opaque_fid,
6087 &qparams, tc,
6088 /* bd_chain_phys_addr */
6089 ecore_chain_get_pbl_phys(&txq->tx_pbl),
6090 ecore_chain_get_page_cnt(&txq->tx_pbl),
6091 &tx_ret_params);
6092
6093 if (rc) {
6094 QL_DPRINT1(ha, (ha->pci_dev,
6095 "%s: Start TXQ #%d failed %d\n",
6096 __func__, txq->index, rc));
6097 return rc;
6098 }
6099
6100 txq->doorbell_addr = tx_ret_params.p_doorbell;
6101 txq->handle = tx_ret_params.p_handle;
6102
6103 txq->hw_cons_ptr =
6104 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
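			/*
			 * Tx completions are tracked the same way, via one
			 * protocol index per traffic class in the status
			 * block.
			 */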

--- 63 unchanged lines hidden (view full) ---

6168 tpa_params.tpa_min_size_to_start = ifp->if_mtu/2;
6169 tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2;
6170
6171 vport_update_params.sge_tpa_params = &tpa_params;
6172 }
6173
6174 rc = qlnx_update_vport(cdev, &vport_update_params);
6175 if (rc) {
6176 QL_DPRINT1(ha, (ha->pci_dev,
6177 "%s: Update V-PORT failed %d\n", __func__, rc));
6178 return rc;
6179 }
6180
6181 return 0;
6182}
6183
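/*
 * qlnx_drain_txq() waits for a Tx queue to empty: while the hardware BD
 * consumer (read from the status block) is ahead of the chain's consumer
 * index, completed buffers are reclaimed via qlnx_tx_int() under the Tx
 * mutex, with a 2ms delay between passes.
 */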
6184static int
6185qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6186 struct qlnx_tx_queue *txq)
6187{
6188 uint16_t hw_bd_cons;
6189 uint16_t ecore_cons_idx;
6190
6191 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
6192
6193 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6194
6195 while (hw_bd_cons !=
6196 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
6197
6198 mtx_lock(&fp->tx_mtx);
6199
6200 (void)qlnx_tx_int(ha, fp, txq);
6201
6202 mtx_unlock(&fp->tx_mtx);
6203
6204 qlnx_mdelay(__func__, 2);
6205
6206 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6207 }
6208
6209 QL_DPRINT2(ha, (ha->pci_dev, "%s[%d, %d]: done\n", __func__,
6210 fp->rss_id, txq->index));
6211
6212 return 0;
6213}
6214
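/*
 * qlnx_stop_queues() tears down in roughly the reverse order of
 * qlnx_start_queues(): a vport update first deactivates Rx/Tx and turns
 * off RSS, each ring's Tx queues are then drained and stopped, its Rx
 * queue is stopped, and finally a vport-stop ramrod is issued on every
 * hardware function.
 */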
6215static int
6216qlnx_stop_queues(qlnx_host_t *ha)
6217{
6218 struct qlnx_update_vport_params vport_update_params;

--- 13 unchanged lines hidden (view full) ---

6232 vport_update_params.update_vport_active_rx_flg = 1;
6233 vport_update_params.vport_active_rx_flg = 0;
6234 vport_update_params.rss_params = &ha->rss_params;
6235 vport_update_params.rss_params->update_rss_config = 0;
6236 vport_update_params.rss_params->rss_enable = 0;
6237 vport_update_params.update_inner_vlan_removal_flg = 0;
6238 vport_update_params.inner_vlan_removal_flg = 0;
6239
6240 rc = qlnx_update_vport(cdev, &vport_update_params);
6241 if (rc) {
6242	QL_DPRINT1(ha, (ha->pci_dev, "%s: Failed to update vport\n",
6243 __func__));
6244 return rc;
6245 }
6246
6247 /* Flush Tx queues. If needed, request drain from MCP */
6248 for_each_rss(i) {
6249 fp = &ha->fp_array[i];
6250
6251 for (tc = 0; tc < ha->num_tc; tc++) {

--- 16 unchanged lines hidden (view full) ---

6268 for (tc = 0; tc < ha->num_tc; tc++) {
6269 int tx_queue_id;
6270
6271 tx_queue_id = tc * ha->num_rss + i;
6272 rc = ecore_eth_tx_queue_stop(p_hwfn,
6273 fp->txq[tc]->handle);
6274
6275 if (rc) {
6276 QL_DPRINT1(ha, (ha->pci_dev,
6277 "%s: Failed to stop TXQ #%d\n",
6278 __func__, tx_queue_id));
6279 return rc;
6280 }
6281 }
6282
6283		/* Stop the Rx Queue */
6284 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
6285 false);
6286 if (rc) {
6287 QL_DPRINT1(ha, (ha->pci_dev,
6288 "%s: Failed to stop RXQ #%d\n", __func__, i));
6289 return rc;
6290 }
6291 }
6292
6293 /* Stop the vport */
6294 for_each_hwfn(cdev, i) {
6295
6296 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6297
6298 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);
6299
6300 if (rc) {
6301 QL_DPRINT1(ha, (ha->pci_dev,
6302				"%s: Failed to stop V-PORT\n", __func__));
6303 return rc;
6304 }
6305 }
6306
6307 return rc;
6308}
6309
6310static int

--- 262 unchanged lines hidden (view full) ---

6573 int i;
6574 int rc = 0;
6575 struct ecore_dev *cdev;
6576 device_t dev;
6577
6578 cdev = &ha->cdev;
6579 dev = ha->pci_dev;
6580
6581 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
6582
6583 rc = qlnx_alloc_mem_arrays(ha);
6584 if (rc)
6585 goto qlnx_load_exit0;
6586
6587 qlnx_init_fp(ha);
6588
6589 rc = qlnx_alloc_mem_load(ha);
6590 if (rc)
6591 goto qlnx_load_exit1;
6592
6593 QL_DPRINT2(ha, (dev, "%s: Allocated %d RSS queues on %d TC/s\n",
6594 __func__, ha->num_rss, ha->num_tc));
6595
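	/*
	 * Hook one MSI-X vector per RSS ring to the fastpath ISR and
	 * spread the vectors round-robin across the available CPUs.
	 */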
6596 for (i = 0; i < ha->num_rss; i++) {
6597
6598 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
6599 (INTR_TYPE_NET | INTR_MPSAFE),
6600 NULL, qlnx_fp_isr, &ha->irq_vec[i],
6601 &ha->irq_vec[i].handle))) {
6602
6603			QL_DPRINT1(ha, (dev, "%s: could not set up interrupt\n", __func__));
6604
6605 goto qlnx_load_exit2;
6606 }
6607
6608 QL_DPRINT2(ha, (dev, "%s: rss_id = %d irq_rid %d"
6609 " irq %p handle %p\n", __func__, i,
6610 ha->irq_vec[i].irq_rid,
6611 ha->irq_vec[i].irq, ha->irq_vec[i].handle));
6612
6613 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
6614 }
6615
6616 rc = qlnx_start_queues(ha);
6617 if (rc)
6618 goto qlnx_load_exit2;
6619
6620 QL_DPRINT2(ha, (dev, "%s: Start VPORT, RXQ and TXQ succeeded\n",
6621 __func__));
6622
6623 /* Add primary mac and set Rx filters */
6624 rc = qlnx_set_rx_mode(ha);
6625 if (rc)
6626 goto qlnx_load_exit2;
6627
6628 /* Ask for link-up using current configuration */
6629 qlnx_set_link(ha, true);

--- 9 unchanged lines hidden (view full) ---

6639
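/*
 * Error unwind: the labels below fall through, so a failure at a later
 * stage also runs the cleanup for each earlier stage.
 */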
6640qlnx_load_exit2:
6641 qlnx_free_mem_load(ha);
6642
6643qlnx_load_exit1:
6644 ha->num_rss = 0;
6645
6646qlnx_load_exit0:
6647 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit [%d]\n", __func__, rc));
6648 return rc;
6649}
6650
6651static void
6652qlnx_drain_soft_lro(qlnx_host_t *ha)
6653{
6654#ifdef QLNX_SOFT_LRO
6655

--- 40 unchanged lines hidden (view full) ---

6696{
6697 struct ecore_dev *cdev;
6698 device_t dev;
6699 int i;
6700
6701 cdev = &ha->cdev;
6702 dev = ha->pci_dev;
6703
6704 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
6705
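	/*
	 * Orderly shutdown: drop the link, flush the Rx filters, stop the
	 * vport queues, and only then halt the fastpath engine.
	 */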
6706 if (ha->state == QLNX_STATE_OPEN) {
6707
6708 qlnx_set_link(ha, false);
6709 qlnx_clean_filters(ha);
6710 qlnx_stop_queues(ha);
6711 ecore_hw_stop_fastpath(cdev);
6712

--- 13 unchanged lines hidden (view full) ---

6726
6727 if (ha->flags.callout_init)
6728 callout_drain(&ha->qlnx_callout);
6729
6730 qlnx_mdelay(__func__, 1000);
6731
6732 ha->state = QLNX_STATE_CLOSED;
6733
6734 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
6735 return;
6736}
6737
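/*
 * qlnx_grc_dumpsize() queries how large (in dwords) a GRC register dump
 * of the given hardware function would be; a PTT window is held across
 * the query and released before returning.
 */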
6738static int
6739qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
6740{
6741 int rval = -1;
6742 struct ecore_hwfn *p_hwfn;
6743 struct ecore_ptt *p_ptt;
6744
6745 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
6746
6747 p_hwfn = &ha->cdev.hwfns[hwfn_index];
6748 p_ptt = ecore_ptt_acquire(p_hwfn);
6749
6750 if (!p_ptt) {
6751 QL_DPRINT1(ha, (ha->pci_dev, "%s: ecore_ptt_acquire failed\n",
6752 __func__));
6753 return (rval);
6754 }
6755
6756 rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
6757
6758 if (rval == DBG_STATUS_OK)
6759 rval = 0;
6760 else {
6761 QL_DPRINT1(ha, (ha->pci_dev,
6762		    "%s: ecore_dbg_grc_get_dump_buf_size failed [0x%x]\n",
6763 __func__, rval));
6764 }
6765
6766 ecore_ptt_release(p_hwfn, p_ptt);
6767
6768 return (rval);
6769}
6770
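/*
 * The idle-check variant below follows the same pattern as
 * qlnx_grc_dumpsize(), querying the buffer size for an idle-check dump
 * instead.
 */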
6771static int

--- 4 unchanged lines hidden (view full) ---

6776 struct ecore_ptt *p_ptt;
6777
6778 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
6779
6780 p_hwfn = &ha->cdev.hwfns[hwfn_index];
6781 p_ptt = ecore_ptt_acquire(p_hwfn);
6782
6783 if (!p_ptt) {
6784 QL_DPRINT1(ha, (ha->pci_dev, "%s: ecore_ptt_acquire failed\n",
6785 __func__));
6786 return (rval);
6787 }
6788
6789 rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);
6790
6791 if (rval == DBG_STATUS_OK)
6792 rval = 0;
6793 else {
6794		QL_DPRINT1(ha, (ha->pci_dev, "%s: "
6795 "ecore_dbg_idle_chk_get_dump_buf_size failed [0x%x]\n",
6796 __func__, rval));
6797 }
6798
6799 ecore_ptt_release(p_hwfn, p_ptt);
6800
6801 return (rval);
6802}
6803
6804

--- 264 unchanged lines hidden ---