ethernet-rx.c revision 217212
1210311Sjmallett/*************************************************************************
2210311SjmallettCopyright (c) 2003-2007  Cavium Networks (support@cavium.com). All rights
3210311Sjmallettreserved.
4210311Sjmallett
5210311Sjmallett
6210311SjmallettRedistribution and use in source and binary forms, with or without
7210311Sjmallettmodification, are permitted provided that the following conditions are
8210311Sjmallettmet:
9210311Sjmallett
10210311Sjmallett    * Redistributions of source code must retain the above copyright
11210311Sjmallett      notice, this list of conditions and the following disclaimer.
12210311Sjmallett
13210311Sjmallett    * Redistributions in binary form must reproduce the above
14210311Sjmallett      copyright notice, this list of conditions and the following
15210311Sjmallett      disclaimer in the documentation and/or other materials provided
16210311Sjmallett      with the distribution.
17210311Sjmallett
18210311Sjmallett    * Neither the name of Cavium Networks nor the names of
19210311Sjmallett      its contributors may be used to endorse or promote products
20210311Sjmallett      derived from this software without specific prior written
21210311Sjmallett      permission.
22210311Sjmallett
23210311SjmallettThis Software, including technical data, may be subject to U.S. export  control laws, including the U.S. Export Administration Act and its  associated regulations, and may be subject to export or import  regulations in other countries.
24210311Sjmallett
25210311SjmallettTO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
26210311SjmallettAND WITH ALL FAULTS AND CAVIUM  NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
27210311Sjmallett
28210311Sjmallett*************************************************************************/
29210311Sjmallett
30210311Sjmallett#include <sys/cdefs.h>
31210311Sjmallett__FBSDID("$FreeBSD: head/sys/mips/cavium/octe/ethernet-rx.c 217212 2011-01-09 23:46:24Z jmallett $");
32210311Sjmallett
33210311Sjmallett#include <sys/param.h>
34210311Sjmallett#include <sys/systm.h>
35210311Sjmallett#include <sys/bus.h>
36210311Sjmallett#include <sys/endian.h>
37210311Sjmallett#include <sys/kernel.h>
38210311Sjmallett#include <sys/mbuf.h>
39210311Sjmallett#include <sys/socket.h>
40213156Sjmallett#include <sys/proc.h>
41213156Sjmallett#include <sys/sched.h>
42210311Sjmallett#include <sys/smp.h>
43210311Sjmallett#include <sys/taskqueue.h>
44210311Sjmallett
45210311Sjmallett#include <net/ethernet.h>
46210311Sjmallett#include <net/if.h>
47210311Sjmallett
48210311Sjmallett#include "wrapper-cvmx-includes.h"
49210311Sjmallett#include "ethernet-headers.h"
50210311Sjmallett
/* POW work queue group this driver receives packets on (set up elsewhere
   in the driver — see the interrupt acknowledge in cvm_oct_do_interrupt()). */
extern int pow_receive_group;
/* Per-port ifnet pointers, indexed by IPD port number (work->ipprt). */
extern struct ifnet *cvm_oct_device[];

/* RX task and the taskqueue it runs on; created in cvm_oct_rx_initialize(). */
static struct task cvm_oct_task;
static struct taskqueue *cvm_oct_taskq;

/* Nonzero while an RX task is scheduled or running; used with
   atomic_cmpset_int() to prevent enqueueing a second task. */
static int cvm_oct_rx_active;
58217212Sjmallett
59210311Sjmallett/**
60210311Sjmallett * Interrupt handler. The interrupt occurs whenever the POW
61210311Sjmallett * transitions from 0->1 packets in our group.
62210311Sjmallett *
63210311Sjmallett * @param cpl
64210311Sjmallett * @param dev_id
65210311Sjmallett * @param regs
66210311Sjmallett * @return
67210311Sjmallett */
68210311Sjmallettint cvm_oct_do_interrupt(void *dev_id)
69210311Sjmallett{
70210311Sjmallett	/* Acknowledge the interrupt */
71210311Sjmallett	if (INTERRUPT_LIMIT)
72210311Sjmallett		cvmx_write_csr(CVMX_POW_WQ_INT, 1<<pow_receive_group);
73210311Sjmallett	else
74210311Sjmallett		cvmx_write_csr(CVMX_POW_WQ_INT, 0x10001<<pow_receive_group);
75217212Sjmallett
76217212Sjmallett	/*
77217212Sjmallett	 * Schedule task if there isn't one running.
78217212Sjmallett	 */
79217212Sjmallett	if (atomic_cmpset_int(&cvm_oct_rx_active, 0, 1))
80217212Sjmallett		taskqueue_enqueue(cvm_oct_taskq, &cvm_oct_task);
81217212Sjmallett
82210311Sjmallett	return FILTER_HANDLED;
83210311Sjmallett}
84210311Sjmallett
85210311Sjmallett
/**
 * This is called on receive errors, and determines if the packet
 * can be dropped early-on in cvm_oct_tasklet_rx().
 *
 * @param work Work queue entry pointing to the packet.
 * @return Non-zero if the packet can be dropped, zero otherwise.
 *	When zero is returned the work entry may have been fixed up in
 *	place (preamble stripped, packet_ptr/len adjusted).
 */
static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
{
	if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) {
		/* Ignore length errors on min size packets. Some equipment
		   incorrectly pads packets to 64+4FCS instead of 60+4FCS.
		   Note these packets still get counted as frame errors. */
	} else
	if (USE_10MBPS_PREAMBLE_WORKAROUND && ((work->word2.snoip.err_code == 5) || (work->word2.snoip.err_code == 7))) {

		/* We received a packet with either an alignment error or a
		   FCS error. This may be signalling that we are running
		   10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK} off. If this is the
		   case we need to parse the packet to determine if we can
		   remove a non spec preamble and generate a correct packet */
		int interface = cvmx_helper_get_interface_num(work->ipprt);
		int index = cvmx_helper_get_interface_index_num(work->ipprt);
		cvmx_gmxx_rxx_frm_ctl_t gmxx_rxx_frm_ctl;
		gmxx_rxx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {

			uint8_t *ptr = cvmx_phys_to_ptr(work->packet_ptr.s.addr);
			int i = 0;

			/* Skip the leading run of 0x55 preamble bytes;
			   i counts how many were consumed. */
			while (i < work->len-1) {
				if (*ptr != 0x55)
					break;
				ptr++;
				i++;
			}

			if (*ptr == 0xd5) {
				/* Byte-aligned start-of-frame delimiter
				   found: drop the i preamble bytes plus the
				   SFD from the front, and the 4-byte FCS
				   from the length (i+1 front + 4 FCS = i+5). */
				/*
				DEBUGPRINT("Port %d received 0xd5 preamble\n", work->ipprt);
				*/
				work->packet_ptr.s.addr += i+1;
				work->len -= i+5;
			} else
			if ((*ptr & 0xf) == 0xd) {
				/* SFD appears shifted by half a byte: strip
				   the preamble and FCS, then shift the whole
				   payload left by one nibble to realign it. */
				/*
				DEBUGPRINT("Port %d received 0x?d preamble\n", work->ipprt);
				*/
				work->packet_ptr.s.addr += i;
				work->len -= i+4;
				for (i = 0; i < work->len; i++) {
					/* New byte = high nibble of this byte
					   plus low nibble of the next. */
					*ptr = ((*ptr&0xf0)>>4) | ((*(ptr+1)&0xf)<<4);
					ptr++;
				}
			} else {
				/* Preamble isn't recognizable; give up and
				   drop the packet. */
				DEBUGPRINT("Port %d unknown preamble, packet dropped\n", work->ipprt);
				/*
				cvmx_helper_dump_packet(work);
				*/
				cvm_oct_free_work(work);
				return 1;
			}
		}
	} else {
		/* Any other receive error: log it and drop the packet. */
		DEBUGPRINT("Port %d receive error code %d, packet dropped\n", work->ipprt, work->word2.snoip.err_code);
		cvm_oct_free_work(work);
		return 1;
	}

	return 0;
}
157210311Sjmallett
/**
 * Receive task, run from a taskqueue thread when cvm_oct_do_interrupt()
 * schedules it.  Drains work queue entries from the POW, converts each
 * into an mbuf (zero-copy when the packet fits in one buffer) and hands
 * it to the network stack via if_input.
 *
 * @param context	Task argument, unused.
 * @param pending	Taskqueue pending count, unused.
 */
void cvm_oct_tasklet_rx(void *context, int pending)
{
	int                 coreid;		/* core we are pinned to */
	uint64_t            old_group_mask;	/* POW group mask to restore */
	uint64_t            old_scratch;	/* saved scratchpad (IOBDMA mode) */
	int                 rx_count = 0;	/* packets handled this pass */
	int                 number_to_free;
	int                 num_freed;
	int                 packet_not_copied;	/* nonzero on the zero-copy path */

	/*
	 * Pin to the current CPU: the POW group mask and scratchpad used
	 * below are per-core resources.
	 */
	sched_pin();
	coreid = cvmx_get_core_num();

	/* Prefetch cvm_oct_device since we know we need it soon */
	CVMX_PREFETCH(cvm_oct_device, 0);

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
	}

	/* Only allow work for our group (and preserve priorities) */
	old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
		       (old_group_mask & ~0xFFFFull) | 1<<pow_receive_group);

	/* Prime the pipeline: request the first work entry asynchronously. */
	if (USE_ASYNC_IOBDMA)
		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);

	while (1) {
		struct mbuf *m = NULL;
		int mbuf_in_hw;
		cvmx_wqe_t *work;

		/*
		 * Collect the next work entry: in async mode it was
		 * requested on the previous iteration; otherwise fetch it
		 * synchronously now (or stop once the packet limit is hit).
		 */
		if (USE_ASYNC_IOBDMA) {
			work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
		} else {
			if ((INTERRUPT_LIMIT == 0) || (rx_count < MAX_RX_PACKETS))
				work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
			else
				work = NULL;
		}
		CVMX_PREFETCH(work, 0);
		if (work == NULL)
			break;

		/* Limit each core to processing MAX_RX_PACKETS packets without a break.
		   This way the RX can't starve the TX task. */
		if (USE_ASYNC_IOBDMA) {

			if ((INTERRUPT_LIMIT == 0) || (rx_count < MAX_RX_PACKETS))
				cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
			else {
				/* At the limit: seed the scratchpad so the
				   next response reads as "no work" and the
				   loop terminates (presumably bit 63 marks
				   an empty response — confirm against the
				   cvmx POW API). */
				cvmx_scratch_write64(CVMX_SCR_SCRATCH, 0x8000000000000000ull);
				cvmx_pow_tag_sw_null_nocheck();
			}
		}

		/*
		 * Single-buffer packets carry an mbuf pointer stored just
		 * before the packet buffer; multi-buffer packets must be
		 * copied out instead.
		 */
		mbuf_in_hw = work->word2.s.bufs == 1;
		if ((mbuf_in_hw)) {
			m = *(struct mbuf **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *));
			CVMX_PREFETCH(m, offsetof(struct mbuf, m_data));
			CVMX_PREFETCH(m, offsetof(struct mbuf, m_pkthdr));
		}
		CVMX_PREFETCH(cvm_oct_device[work->ipprt], 0);
		//CVMX_PREFETCH(m, 0);


		rx_count++;
		/* Immediately throw away all packets with receive errors */
		if ((work->word2.snoip.rcv_error)) {
			if (cvm_oct_check_rcv_error(work))
				continue;
		}

		/* We can only use the zero copy path if mbufs are in the FPA pool
		   and the packet fits in a single buffer */
		if ((mbuf_in_hw)) {
			CVMX_PREFETCH(m->m_data, 0);

			m->m_pkthdr.len = m->m_len = work->len;

			packet_not_copied = 1;

			/*
			 * Adjust the data pointer based on the offset
			 * of the packet within the buffer.
			 * (back is in 128-byte units, hence the << 7.)
			 */
			m->m_data += (work->packet_ptr.s.back << 7) + (work->packet_ptr.s.addr & 0x7f);
		} else {

			/* We have to copy the packet. First allocate an
			   mbuf for it */
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				DEBUGPRINT("Port %d failed to allocate mbuf, packet dropped\n", work->ipprt);
				cvm_oct_free_work(work);
				continue;
			}

			/* Check if we've received a packet that was entirely
			   stored in the work entry. This is untested */
			if ((work->word2.s.bufs == 0)) {
				uint8_t *ptr = work->packet_data;

				if (cvmx_likely(!work->word2.s.not_IP)) {
					/* The beginning of the packet moves
					   for IP packets */
					if (work->word2.s.is_v6)
						ptr += 2;
					else
						ptr += 6;
				}
				/* Copy-from-work-entry path is unfinished. */
				panic("%s: not yet implemented; copy in small packet.", __func__);
				/* No packet buffers to free */
			} else {
				/* Walk the hardware buffer chain, copying
				   each segment into the mbuf. */
				int segments = work->word2.s.bufs;
				cvmx_buf_ptr_t segment_ptr = work->packet_ptr;
				int len = work->len;

				while (segments--) {
					/* The pointer to the next segment is
					   stored 8 bytes before the segment
					   data. */
					cvmx_buf_ptr_t next_ptr = *(cvmx_buf_ptr_t *)cvmx_phys_to_ptr(segment_ptr.s.addr-8);
					/* Octeon Errata PKI-100: The segment
					   size is wrong. Until it is fixed,
					   calculate the segment size based on
					   the packet pool buffer size. When
					   it is fixed, the following line
					   should be replaced with this one:
					int segment_size = segment_ptr.s.size; */
					int segment_size = CVMX_FPA_PACKET_POOL_SIZE - (segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7));
					/* Don't copy more than what is left
					   in the packet */
					if (segment_size > len)
						segment_size = len;
					/* Copy the data into the packet */
					panic("%s: not yet implemented; copy in packet segments.", __func__);
#if 0
					memcpy(m_put(m, segment_size), cvmx_phys_to_ptr(segment_ptr.s.addr), segment_size);
#endif
					/* Reduce the amount of bytes left
					   to copy */
					len -= segment_size;
					segment_ptr = next_ptr;
				}
			}
			packet_not_copied = 0;
		}

		if (((work->ipprt < TOTAL_NUMBER_OF_PORTS) &&
		    cvm_oct_device[work->ipprt])) {
			struct ifnet *ifp = cvm_oct_device[work->ipprt];

			/* Only accept packets for devices
			   that are currently up */
			if ((ifp->if_flags & IFF_UP)) {
				m->m_pkthdr.rcvif = ifp;

				/* Translate the hardware checksum result
				   into mbuf csum flags when RXCSUM is on. */
				if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
					if ((work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error))
						m->m_pkthdr.csum_flags = 0; /* XXX */
					else {
						m->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
						m->m_pkthdr.csum_data = 0xffff;
					}
				} else {
					m->m_pkthdr.csum_flags = 0; /* XXX */
				}

				ifp->if_ipackets++;

				/* Hand the packet to the stack; it now owns m. */
				(*ifp->if_input)(ifp, m);
			} else {
				/* Drop any packet received for a device that isn't up */
				/*
				DEBUGPRINT("%s: Device not up, packet dropped\n",
					   if_name(ifp));
				*/
				m_freem(m);
			}
		} else {
			/* Drop any packet received for a device that
			   doesn't exist */
			DEBUGPRINT("Port %d not controlled by Linux, packet dropped\n", work->ipprt);
			m_freem(m);
		}

		/* Check to see if the mbuf and work share
		   the same packet buffer */
		if ((packet_not_copied)) {
			/* This buffer needs to be replaced, increment
			the number of buffers we need to free by one */
			cvmx_fau_atomic_add32(
				FAU_NUM_PACKET_BUFFERS_TO_FREE, 1);

			/* Only the work entry goes back to the FPA; the
			   packet buffer is still owned by the mbuf. */
			cvmx_fpa_free(work, CVMX_FPA_WQE_POOL,
				      DONT_WRITEBACK(1));
		} else
			cvm_oct_free_work(work);
	}

	/*
	 * If we hit our limit, schedule another task while we clean up.
	 */
	if (INTERRUPT_LIMIT != 0 && rx_count == MAX_RX_PACKETS) {
		taskqueue_enqueue(cvm_oct_taskq, &cvm_oct_task);
	} else {
		/*
		 * No more packets, all done.  The flag must still be set
		 * (we set it in cvm_oct_do_interrupt) or state is corrupt.
		 */
		if (!atomic_cmpset_int(&cvm_oct_rx_active, 1, 0))
			panic("%s: inconsistent rx active state.", __func__);
	}

	/* Restore the original POW group mask */
	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
	if (USE_ASYNC_IOBDMA) {
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
	}

	/* Refill the packet buffer pool */
	number_to_free =
	  cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	if (number_to_free > 0) {
		cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
				      -number_to_free);
		num_freed =
			cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
					     CVMX_FPA_PACKET_POOL_SIZE,
					     number_to_free);
		if (num_freed != number_to_free) {
			/* Could not allocate them all; credit the shortfall
			   back so a later pass retries. */
			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
					      number_to_free - num_freed);
		}
	}
	sched_unpin();
}
402210311Sjmallett
403210311Sjmallett
404210311Sjmallett
405210311Sjmallettvoid cvm_oct_rx_initialize(void)
406210311Sjmallett{
407210311Sjmallett	TASK_INIT(&cvm_oct_task, 0, cvm_oct_tasklet_rx, NULL);
408210311Sjmallett
409210311Sjmallett	cvm_oct_taskq = taskqueue_create_fast("oct_rx", M_NOWAIT,
410210311Sjmallett					      taskqueue_thread_enqueue,
411210311Sjmallett					      &cvm_oct_taskq);
412210311Sjmallett	taskqueue_start_threads(&cvm_oct_taskq, min(mp_ncpus, MAXCPU),
413210311Sjmallett				PI_NET, "octe taskq");
414210311Sjmallett}
415210311Sjmallett
/**
 * Tear down receive processing.  Deliberately unimplemented: panics if
 * ever reached.
 */
void cvm_oct_rx_shutdown(void)
{
	panic("%s: not yet implemented.", __func__);
}
420210311Sjmallett
421