/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Cavium, Inc.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD: stable/11/sys/dev/liquidio/lio_core.c 325618 2017-11-09 19:52:56Z sbruno $*/

#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_ctrl.h"
#include "lio_main.h"
#include "lio_rxtx.h"
#include "lio_network.h"

int
lio_set_feature(struct ifnet *ifp, int cmd, uint16_t param1)
{
	struct lio_ctrl_pkt	nctrl;
	struct lio		*lio = if_getsoftc(ifp);
	struct octeon_device	*oct = lio->oct_dev;
	int	ret = 0;

	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));

	nctrl.ncmd.cmd64 = 0;
	nctrl.ncmd.s.cmd = cmd;
	nctrl.ncmd.s.param1 = param1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.lio = lio;
	nctrl.cb_fn = lio_ctrl_cmd_completion;

	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		lio_dev_err(oct, "Feature change failed in core (ret: 0x%x)\n",
			    ret);
	}

	return (ret);
}
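
/*
 * Usage sketch (illustrative only, not part of the original file): callers
 * in the driver flip firmware features through lio_set_feature(). The
 * LIO_LROIPV4/LIO_LROIPV6 parameter bits below are assumed here for the
 * example.
 *
 *	if (lio_set_feature(ifp, LIO_CMD_LRO_ENABLE,
 *	    LIO_LROIPV4 | LIO_LROIPV6) < 0)
 *		if_printf(ifp, "HW LRO enable failed\n");
 */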

void
lio_ctrl_cmd_completion(void *nctrl_ptr)
{
	struct lio_ctrl_pkt	*nctrl = (struct lio_ctrl_pkt *)nctrl_ptr;
	struct lio		*lio;
	struct octeon_device	*oct;
	uint8_t	*mac;

	lio = nctrl->lio;

	if (lio->oct_dev == NULL)
		return;

	oct = lio->oct_dev;

	switch (nctrl->ncmd.s.cmd) {
	case LIO_CMD_CHANGE_DEVFLAGS:
	case LIO_CMD_SET_MULTI_LIST:
		break;

	case LIO_CMD_CHANGE_MACADDR:
		mac = ((uint8_t *)&nctrl->udd[0]) + 2;
		if (nctrl->ncmd.s.param1) {
			/* vfidx is 0 based, but vf_num (param1) is 1 based */
			int	vfidx = nctrl->ncmd.s.param1 - 1;
			bool	mac_is_admin_assigned = nctrl->ncmd.s.param2;

			if (mac_is_admin_assigned)
				lio_dev_info(oct, "MAC Address %02x:%02x:%02x:%02x:%02x:%02x is configured for VF %d\n",
					     mac[0], mac[1], mac[2], mac[3],
					     mac[4], mac[5], vfidx);
		} else {
			lio_dev_info(oct, "MAC Address changed to %02x:%02x:%02x:%02x:%02x:%02x\n",
				     mac[0], mac[1], mac[2], mac[3], mac[4],
				     mac[5]);
		}
		break;

	case LIO_CMD_GPIO_ACCESS:
		lio_dev_info(oct, "LED Flashing visual identification\n");
		break;

	case LIO_CMD_ID_ACTIVE:
		lio_dev_info(oct, "LED Flashing visual identification\n");
		break;

	case LIO_CMD_LRO_ENABLE:
		lio_dev_info(oct, "HW LRO Enabled\n");
		break;

	case LIO_CMD_LRO_DISABLE:
		lio_dev_info(oct, "HW LRO Disabled\n");
		break;

	case LIO_CMD_VERBOSE_ENABLE:
		lio_dev_info(oct, "Firmware debug enabled\n");
		break;

	case LIO_CMD_VERBOSE_DISABLE:
		lio_dev_info(oct, "Firmware debug disabled\n");
		break;

	case LIO_CMD_VLAN_FILTER_CTL:
		if (nctrl->ncmd.s.param1)
			lio_dev_info(oct, "VLAN filter enabled\n");
		else
			lio_dev_info(oct, "VLAN filter disabled\n");
		break;

	case LIO_CMD_ADD_VLAN_FILTER:
		lio_dev_info(oct, "VLAN filter %d added\n",
			     nctrl->ncmd.s.param1);
		break;

	case LIO_CMD_DEL_VLAN_FILTER:
		lio_dev_info(oct, "VLAN filter %d removed\n",
			     nctrl->ncmd.s.param1);
		break;

	case LIO_CMD_SET_SETTINGS:
		lio_dev_info(oct, "Settings changed\n");
		break;

		/*
		 * Case to handle "LIO_CMD_TNL_RX_CSUM_CTL"
		 * Command passed by NIC driver
		 */
	case LIO_CMD_TNL_RX_CSUM_CTL:
		if (nctrl->ncmd.s.param1 == LIO_CMD_RXCSUM_ENABLE) {
			lio_dev_info(oct, "RX Checksum Offload Enabled\n");
		} else if (nctrl->ncmd.s.param1 == LIO_CMD_RXCSUM_DISABLE) {
			lio_dev_info(oct, "RX Checksum Offload Disabled\n");
		}
		break;

		/*
		 * Case to handle "LIO_CMD_TNL_TX_CSUM_CTL"
		 * Command passed by NIC driver
		 */
	case LIO_CMD_TNL_TX_CSUM_CTL:
		if (nctrl->ncmd.s.param1 == LIO_CMD_TXCSUM_ENABLE) {
			lio_dev_info(oct, "TX Checksum Offload Enabled\n");
		} else if (nctrl->ncmd.s.param1 == LIO_CMD_TXCSUM_DISABLE) {
			lio_dev_info(oct, "TX Checksum Offload Disabled\n");
		}
		break;

		/*
		 * Case to handle "LIO_CMD_VXLAN_PORT_CONFIG"
		 * Command passed by NIC driver
		 */
	case LIO_CMD_VXLAN_PORT_CONFIG:
		if (nctrl->ncmd.s.more == LIO_CMD_VXLAN_PORT_ADD) {
			lio_dev_info(oct, "VxLAN Destination UDP PORT:%d ADDED\n",
				     nctrl->ncmd.s.param1);
		} else if (nctrl->ncmd.s.more == LIO_CMD_VXLAN_PORT_DEL) {
			lio_dev_info(oct, "VxLAN Destination UDP PORT:%d DELETED\n",
				     nctrl->ncmd.s.param1);
		}
		break;

	case LIO_CMD_SET_FLOW_CTL:
		lio_dev_info(oct, "Set RX/TX flow control parameters\n");
		break;

	case LIO_CMD_SET_FNV:
		if (nctrl->ncmd.s.param1 == LIO_CMD_FNV_ENABLE)
			lio_dev_info(oct, "FNV Enabled\n");
		else if (nctrl->ncmd.s.param1 == LIO_CMD_FNV_DISABLE)
			lio_dev_info(oct, "FNV Disabled\n");
		break;

	case LIO_CMD_PKT_STEERING_CTL:
		if (nctrl->ncmd.s.param1 == LIO_CMD_PKT_STEERING_ENABLE) {
			lio_dev_info(oct, "Packet Steering Enabled\n");
		} else if (nctrl->ncmd.s.param1 ==
			   LIO_CMD_PKT_STEERING_DISABLE) {
			lio_dev_info(oct, "Packet Steering Disabled\n");
		}
		break;

	case LIO_CMD_QUEUE_COUNT_CTL:
		lio_dev_info(oct, "Queue count updated to %d\n",
			     nctrl->ncmd.s.param1);
		break;

	default:
		lio_dev_err(oct, "%s Unknown cmd %d\n", __func__,
			    nctrl->ncmd.s.cmd);
	}
}
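
/*
 * Note: this completion callback fires once the firmware acknowledges a
 * control command; lio_set_feature() above installs it through nctrl.cb_fn
 * before handing the packet to lio_send_ctrl_pkt().
 */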

/*
 * \brief Setup output queue
 * @param oct octeon device
 * @param q_no which queue
 * @param num_descs how many descriptors
 * @param desc_size size of each descriptor
 * @param app_ctx application context
 */
static int
lio_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
	       int desc_size, void *app_ctx)
{
	int	ret_val = 0;

	lio_dev_dbg(oct, "Creating Droq: %d\n", q_no);
	/* droq creation and local register settings. */
	ret_val = lio_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
	if (ret_val < 0)
		return (ret_val);

	if (ret_val == 1) {
		lio_dev_dbg(oct, "Using default droq %d\n", q_no);
		return (0);
	}

	/*
	 * Send Credit for Octeon Output queues. Credits are always
	 * sent after the output queue is enabled.
	 */
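	/*
	 * Illustrative note (an assumption from how the ring is sized
	 * above): each credit advertises one free receive descriptor to
	 * the hardware, so posting max_count hands it the whole ring.
	 */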
	lio_write_csr32(oct, oct->droq[q_no]->pkts_credit_reg,
			oct->droq[q_no]->max_count);

	return (ret_val);
}

static void
lio_push_packet(void *m_buff, uint32_t len, union octeon_rh *rh, void *rxq,
		void *arg)
{
	struct mbuf	*mbuf = m_buff;
	struct ifnet	*ifp = arg;
	struct lio_droq	*droq = rxq;

	if (ifp != NULL) {
		struct lio	*lio = if_getsoftc(ifp);

		/* Do not proceed if the interface is not in RUNNING state. */
		if (!lio_ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
			lio_recv_buffer_free(mbuf);
			droq->stats.rx_dropped++;
			return;
		}

		if (rh->r_dh.has_hash) {
			uint32_t	hashtype, hashval;

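			/*
			 * Descriptive note: the RSS hash value and type sit
			 * in the last data words of the response header
			 * area. When a hardware timestamp is present it
			 * occupies the final 8-byte unit, shifting the hash
			 * words back by one BYTES_PER_DHLEN_UNIT (hence the
			 * len - 2 versus len - 1 offsets below).
			 */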
			if (rh->r_dh.has_hwtstamp) {
				hashval = htobe32(*(uint32_t *)
						  (((uint8_t *)mbuf->m_data) +
						   ((rh->r_dh.len - 2) *
						    BYTES_PER_DHLEN_UNIT)));
				hashtype =
				    htobe32(*(((uint32_t *)
					       (((uint8_t *)mbuf->m_data) +
						((rh->r_dh.len - 2) *
						 BYTES_PER_DHLEN_UNIT))) + 1));
			} else {
				hashval = htobe32(*(uint32_t *)
						  (((uint8_t *)mbuf->m_data) +
						   ((rh->r_dh.len - 1) *
						    BYTES_PER_DHLEN_UNIT)));
				hashtype =
				    htobe32(*(((uint32_t *)
					       (((uint8_t *)mbuf->m_data) +
						((rh->r_dh.len - 1) *
						 BYTES_PER_DHLEN_UNIT))) + 1));
			}

			mbuf->m_pkthdr.flowid = hashval;

			switch (hashtype) {
			case LIO_RSS_HASH_IPV4:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
				break;
			case LIO_RSS_HASH_TCP_IPV4:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
				break;
			case LIO_RSS_HASH_IPV6:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
				break;
			case LIO_RSS_HASH_TCP_IPV6:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
				break;
			case LIO_RSS_HASH_IPV6_EX:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6_EX);
				break;
			case LIO_RSS_HASH_TCP_IPV6_EX:
				M_HASHTYPE_SET(mbuf,
					       M_HASHTYPE_RSS_TCP_IPV6_EX);
				break;
			default:
				M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
			}

		} else {
			/*
			 * This case won't hit as FW will always set has_hash
			 * in rh.
			 */
			M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
			mbuf->m_pkthdr.flowid = droq->q_no;
		}

		m_adj(mbuf, rh->r_dh.len * 8);
		len -= rh->r_dh.len * 8;
		mbuf->m_flags |= M_PKTHDR;

		if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) &&
		    (rh->r_dh.priority || rh->r_dh.vlan)) {
			uint16_t	priority = rh->r_dh.priority;
			uint16_t	vid = rh->r_dh.vlan;
			uint16_t	vtag;

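			/*
			 * 802.1Q TCI layout: PCP in bits 15-13, VID in bits
			 * 11-0; the shift by 13 places the priority field.
			 */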
			vtag = priority << 13 | vid;
			mbuf->m_pkthdr.ether_vtag = vtag;
			mbuf->m_flags |= M_VLANTAG;
		}

		if (rh->r_dh.csum_verified & LIO_IPSUM_VERIFIED)
			mbuf->m_pkthdr.csum_flags |= (CSUM_L3_CALC |
						      CSUM_L3_VALID);

		if (rh->r_dh.csum_verified & LIO_L4SUM_VERIFIED) {
			mbuf->m_pkthdr.csum_flags |= (CSUM_L4_CALC |
						      CSUM_L4_VALID);
			mbuf->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
						      CSUM_PSEUDO_HDR);
			mbuf->m_pkthdr.csum_data = htons(0xffff);
		}

		mbuf->m_pkthdr.rcvif = ifp;
		mbuf->m_pkthdr.len = len;

		if ((lio_hwlro == 0) &&
		    (if_getcapenable(ifp) & IFCAP_LRO) &&
		    (mbuf->m_pkthdr.csum_flags &
		     (CSUM_L3_VALID | CSUM_L4_VALID | CSUM_DATA_VALID |
		      CSUM_PSEUDO_HDR)) == (CSUM_L3_VALID | CSUM_L4_VALID |
					    CSUM_DATA_VALID |
					    CSUM_PSEUDO_HDR)) {
			if (droq->lro.lro_cnt) {
				if (tcp_lro_rx(&droq->lro, mbuf, 0) == 0) {
					droq->stats.rx_bytes_received += len;
					droq->stats.rx_pkts_received++;
					return;
				}
			}
		}

		if_input(ifp, mbuf);

		droq->stats.rx_bytes_received += len;
		droq->stats.rx_pkts_received++;

	} else {
		lio_recv_buffer_free(mbuf);
		droq->stats.rx_dropped++;
	}
}

/*
 * \brief Setup input and output queues
 * @param octeon_dev octeon device
 * @param ifidx  Interface Index
 *
 * Note: Queues are with respect to the octeon device. Thus
 * an input queue is for egress packets, and output queues
 * are for ingress packets.
 */
int
lio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
		    uint32_t num_iqs, uint32_t num_oqs)
{
	struct lio_droq_ops	droq_ops;
	struct ifnet		*ifp;
	struct lio_droq		*droq;
	struct lio		*lio;
	static int		cpu_id, cpu_id_modulus;
	int	num_tx_descs, q, q_no, retval = 0;

	ifp = octeon_dev->props.ifp;

	lio = if_getsoftc(ifp);

	bzero(&droq_ops, sizeof(struct lio_droq_ops));

	droq_ops.fptr = lio_push_packet;
	droq_ops.farg = (void *)ifp;

	cpu_id = 0;
	cpu_id_modulus = mp_ncpus;
	/* set up DROQs. */
	for (q = 0; q < num_oqs; q++) {
		q_no = lio->linfo.rxpciq[q].s.q_no;
		lio_dev_dbg(octeon_dev, "lio_setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
			    q, q_no);
		retval = lio_setup_droq(octeon_dev, q_no,
					LIO_GET_NUM_RX_DESCS_NIC_IF_CFG(
						     lio_get_conf(octeon_dev),
								  lio->ifidx),
					LIO_GET_NUM_RX_BUF_SIZE_NIC_IF_CFG(
						     lio_get_conf(octeon_dev),
							   lio->ifidx), NULL);
		if (retval) {
			lio_dev_err(octeon_dev, "%s : Runtime DROQ(RxQ) creation failed.\n",
				    __func__);
			return (1);
		}

		droq = octeon_dev->droq[q_no];

		/* designate a CPU for this droq */
		droq->cpu_id = cpu_id;
		cpu_id++;
		if (cpu_id >= cpu_id_modulus)
			cpu_id = 0;

		lio_register_droq_ops(octeon_dev, q_no, &droq_ops);
	}

	/* set up IQs. */
	for (q = 0; q < num_iqs; q++) {
		num_tx_descs = LIO_GET_NUM_TX_DESCS_NIC_IF_CFG(
						     lio_get_conf(octeon_dev),
							       lio->ifidx);
		retval = lio_setup_iq(octeon_dev, ifidx, q,
				      lio->linfo.txpciq[q], num_tx_descs);
		if (retval) {
			lio_dev_err(octeon_dev, "%s : Runtime IQ(TxQ) creation failed.\n",
				    __func__);
			return (1);
		}
	}

	return (0);
}

/*
 * \brief DROQ packet processor scheduler
 * @param oct octeon device
 */
static void
lio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
	struct lio_droq	*droq;
	uint64_t	oq_no;

	if (oct->int_status & LIO_DEV_INTR_PKT_DATA) {
		for (oq_no = 0; oq_no < LIO_MAX_OUTPUT_QUEUES(oct); oq_no++) {
			if (!(oct->io_qmask.oq & BIT_ULL(oq_no)))
				continue;

			droq = oct->droq[oq_no];

			taskqueue_enqueue(droq->droq_taskqueue,
					  &droq->droq_task);
		}
	}
}

static void
lio_msix_intr_handler(void *vector)
{
	struct lio_ioq_vector	*ioq_vector = (struct lio_ioq_vector *)vector;
	struct octeon_device	*oct = ioq_vector->oct_dev;
	struct lio_droq		*droq = oct->droq[ioq_vector->droq_index];
	uint64_t		ret;

	ret = oct->fn_list.msix_interrupt_handler(ioq_vector);

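	/*
	 * Naming note: PO/PI are from the Octeon's perspective, so a
	 * "packet output" interrupt signals host-receive (DROQ) work and a
	 * "packet input" interrupt signals host-transmit (IQ) completions.
	 */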
	if ((ret & LIO_MSIX_PO_INT) || (ret & LIO_MSIX_PI_INT)) {
		struct lio_instr_queue *iq = oct->instr_queue[droq->q_no];
		int	reschedule, tx_done = 1;

		reschedule = lio_droq_process_packets(oct, droq, oct->rx_budget);

		if (atomic_load_acq_int(&iq->instr_pending))
			tx_done = lio_flush_iq(oct, iq, oct->tx_budget);

		if ((oct->props.ifp != NULL) && (iq->br != NULL)) {
			if (mtx_trylock(&iq->enq_lock)) {
				if (!drbr_empty(oct->props.ifp, iq->br))
					lio_mq_start_locked(oct->props.ifp,
							    iq);
				mtx_unlock(&iq->enq_lock);
			}
		}

		if (reschedule || !tx_done)
			taskqueue_enqueue(droq->droq_taskqueue, &droq->droq_task);
		else
			lio_enable_irq(droq, iq);
	}
}

static void
lio_intr_handler(void *dev)
{
	struct octeon_device	*oct = (struct octeon_device *)dev;

	/* Disable our interrupts for the duration of ISR */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	oct->fn_list.process_interrupt_regs(oct);

	lio_schedule_droq_pkt_handlers(oct);

	/* Re-enable our interrupts */
	if (atomic_load_acq_int(&oct->status) != LIO_DEV_IN_RESET)
		oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
}

int
lio_setup_interrupt(struct octeon_device *oct, uint32_t num_ioqs)
{
	device_t		device;
	struct lio_ioq_vector	*ioq_vector;
	int	cpu_id, err, i;
	int	num_alloc_ioq_vectors;
	int	num_ioq_vectors;
	int	res_id;

	if (!oct->msix_on)
		return (1);

	ioq_vector = oct->ioq_vector;

#ifdef RSS
	if (oct->sriov_info.num_pf_rings != rss_getnumbuckets()) {
		lio_dev_info(oct, "IOQ vector count (%d) does not match the number of RSS buckets (%d)\n",
			     oct->sriov_info.num_pf_rings, rss_getnumbuckets());
	}
#endif

	device = oct->device;

	oct->num_msix_irqs = num_ioqs;
	/* one non-ioq interrupt for handling sli_mac_pf_int_sum */
	oct->num_msix_irqs += 1;
	num_alloc_ioq_vectors = oct->num_msix_irqs;

	if (pci_alloc_msix(device, &num_alloc_ioq_vectors) ||
	    (num_alloc_ioq_vectors != oct->num_msix_irqs))
		goto err;

	num_ioq_vectors = oct->num_msix_irqs;

	/* For PF, there is one non-ioq interrupt handler */
	for (i = 0; i < num_ioq_vectors - 1; i++, ioq_vector++) {
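		/*
		 * MSI-X vectors show up as SYS_RES_IRQ resources with rids
		 * starting at 1, so queue vector i maps to rid i + 1.
		 */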
		res_id = i + 1;

		ioq_vector->msix_res =
		    bus_alloc_resource_any(device, SYS_RES_IRQ, &res_id,
					   RF_SHAREABLE | RF_ACTIVE);
		if (ioq_vector->msix_res == NULL) {
			lio_dev_err(oct,
				    "Unable to allocate bus res msix[%d]\n", i);
			goto err_1;
		}

		err = bus_setup_intr(device, ioq_vector->msix_res,
				     INTR_TYPE_NET | INTR_MPSAFE, NULL,
				     lio_msix_intr_handler, ioq_vector,
				     &ioq_vector->tag);
		if (err) {
			bus_release_resource(device, SYS_RES_IRQ, res_id,
					     ioq_vector->msix_res);
			ioq_vector->msix_res = NULL;
604			lio_dev_err(oct, "Failed to register intr handler");
			goto err_1;
		}

		bus_describe_intr(device, ioq_vector->msix_res, ioq_vector->tag,
				  "rxtx%u", i);
		ioq_vector->vector = res_id;

#ifdef RSS
		cpu_id = rss_getcpu(i % rss_getnumbuckets());
#else
		cpu_id = i % mp_ncpus;
#endif
		CPU_SETOF(cpu_id, &ioq_vector->affinity_mask);

		/* Setting the IRQ affinity. */
		err = bus_bind_intr(device, ioq_vector->msix_res, cpu_id);
		if (err)
622			lio_dev_err(oct, "bus bind interrupt fail");
#ifdef RSS
		lio_dev_dbg(oct, "Bound RSS bucket %d to CPU %d\n", i, cpu_id);
#else
		lio_dev_dbg(oct, "Bound Queue %d to CPU %d\n", i, cpu_id);
#endif
	}

	lio_dev_dbg(oct, "MSI-X enabled\n");

	res_id = num_ioq_vectors;
	oct->msix_res = bus_alloc_resource_any(device, SYS_RES_IRQ, &res_id,
					       RF_SHAREABLE | RF_ACTIVE);
	if (oct->msix_res == NULL) {
		lio_dev_err(oct, "Unable to allocate bus res msix for non-ioq interrupt\n");
		goto err_1;
	}

	err = bus_setup_intr(device, oct->msix_res, INTR_TYPE_NET | INTR_MPSAFE,
			     NULL, lio_intr_handler, oct, &oct->tag);
	if (err) {
		bus_release_resource(device, SYS_RES_IRQ, res_id,
				     oct->msix_res);
		oct->msix_res = NULL;
646		lio_dev_err(oct, "Failed to register intr handler");
		goto err_1;
	}

	bus_describe_intr(device, oct->msix_res, oct->tag, "aux");
	oct->aux_vector = res_id;

	return (0);
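
/*
 * Error unwind: release everything in reverse order of allocation,
 * tearing down each interrupt handler before freeing its bus resource.
 */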
err_1:
	if (oct->tag != NULL) {
		bus_teardown_intr(device, oct->msix_res, oct->tag);
		oct->tag = NULL;
	}

	while (i) {
		i--;
		ioq_vector--;

		if (ioq_vector->tag != NULL) {
			bus_teardown_intr(device, ioq_vector->msix_res,
					  ioq_vector->tag);
			ioq_vector->tag = NULL;
		}

		if (ioq_vector->msix_res != NULL) {
			bus_release_resource(device, SYS_RES_IRQ,
					     ioq_vector->vector,
					     ioq_vector->msix_res);
			ioq_vector->msix_res = NULL;
		}
	}

	if (oct->msix_res != NULL) {
		bus_release_resource(device, SYS_RES_IRQ, oct->aux_vector,
				     oct->msix_res);
		oct->msix_res = NULL;
	}
err:
	pci_release_msi(device);
	lio_dev_err(oct, "MSI-X disabled\n");
	return (1);
}