1325618Ssbruno/*
2325618Ssbruno *   BSD LICENSE
3325618Ssbruno *
4325618Ssbruno *   Copyright(c) 2017 Cavium, Inc.. All rights reserved.
5325618Ssbruno *   All rights reserved.
6325618Ssbruno *
7325618Ssbruno *   Redistribution and use in source and binary forms, with or without
8325618Ssbruno *   modification, are permitted provided that the following conditions
9325618Ssbruno *   are met:
10325618Ssbruno *
11325618Ssbruno *     * Redistributions of source code must retain the above copyright
12325618Ssbruno *       notice, this list of conditions and the following disclaimer.
13325618Ssbruno *     * Redistributions in binary form must reproduce the above copyright
14325618Ssbruno *       notice, this list of conditions and the following disclaimer in
15325618Ssbruno *       the documentation and/or other materials provided with the
16325618Ssbruno *       distribution.
17325618Ssbruno *     * Neither the name of Cavium, Inc. nor the names of its
18325618Ssbruno *       contributors may be used to endorse or promote products derived
19325618Ssbruno *       from this software without specific prior written permission.
20325618Ssbruno *
21325618Ssbruno *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22325618Ssbruno *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23325618Ssbruno *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24325618Ssbruno *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25325618Ssbruno *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26325618Ssbruno *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27325618Ssbruno *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28325618Ssbruno *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29325618Ssbruno *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30325618Ssbruno *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31325618Ssbruno *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32325618Ssbruno */
33325618Ssbruno/*$FreeBSD: stable/11/sys/dev/liquidio/base/lio_request_manager.c 325618 2017-11-09 19:52:56Z sbruno $*/
34325618Ssbruno
35325618Ssbruno#include "lio_bsd.h"
36325618Ssbruno#include "lio_common.h"
37325618Ssbruno#include "lio_droq.h"
38325618Ssbruno#include "lio_iq.h"
39325618Ssbruno#include "lio_response_manager.h"
40325618Ssbruno#include "lio_device.h"
41325618Ssbruno#include "lio_main.h"
42325618Ssbruno#include "lio_network.h"
43325618Ssbruno#include "cn23xx_pf_device.h"
44325618Ssbruno#include "lio_rxtx.h"
45325618Ssbruno
/*
 * Result of posting one command to an instruction queue:
 * 'status' is LIO_IQ_SEND_OK, LIO_IQ_SEND_STOP (queue nearly full) or
 * LIO_IQ_SEND_FAILED; 'index' is the descriptor slot used, or -1 on failure.
 */
struct lio_iq_post_status {
	int	status;
	int	index;
};

/* Forward declarations for the doorbell-timeout task machinery below. */
static void	lio_check_db_timeout(void *arg, int pending);
static void	__lio_check_db_timeout(struct octeon_device *oct,
				       uint64_t iq_no);
54325618Ssbruno
/*
 * Allocate and initialize instruction queue 'txpciq.s.q_no': the TX DMA
 * tag, the descriptor ring, the host-side request list with one DMA map
 * per descriptor, the queue locks, the doorbell-timeout taskqueue, and
 * the transmit buf ring.
 *
 * Return 0 on success, 1 on failure.  On failure, resources already
 * allocated here are not released locally; the caller (lio_setup_iq())
 * invokes lio_delete_instr_queue() to clean up partial allocations.
 */
int
lio_init_instr_queue(struct octeon_device *oct, union octeon_txpciq txpciq,
		     uint32_t num_descs)
{
	struct lio_instr_queue	*iq;
	struct lio_iq_config	*conf = NULL;
	struct lio_tq		*db_tq;
	struct lio_request_list	*request_buf;
	bus_size_t		max_size;
	uint32_t		iq_no = (uint32_t)txpciq.s.q_no;
	uint32_t		q_size;
	int			error, i;

	/* Only the CN23XX PF provides an IQ configuration here. */
	if (LIO_CN23XX_PF(oct))
		conf = &(LIO_GET_IQ_CFG(LIO_CHIP_CONF(oct, cn23xx_pf)));
	if (conf == NULL) {
		lio_dev_err(oct, "Unsupported Chip %x\n", oct->chip_id);
		return (1);
	}

	/* Ring size in bytes: per-command size (32B or 64B) * descriptors. */
	q_size = (uint32_t)conf->instr_type * num_descs;
	iq = oct->instr_queue[iq_no];
	iq->oct_dev = oct;

	max_size = LIO_CN23XX_PKI_MAX_FRAME_SIZE * num_descs;

	/* DMA tag used to map outgoing mbufs for this queue. */
	error = bus_dma_tag_create(bus_get_dma_tag(oct->device),	/* parent */
				   1, 0,				/* alignment, bounds */
				   BUS_SPACE_MAXADDR,			/* lowaddr */
				   BUS_SPACE_MAXADDR,			/* highaddr */
				   NULL, NULL,				/* filter, filterarg */
				   max_size,				/* maxsize */
				   LIO_MAX_SG,				/* nsegments */
				   PAGE_SIZE,				/* maxsegsize */
				   0,					/* flags */
				   NULL,				/* lockfunc */
				   NULL,				/* lockfuncarg */
				   &iq->txtag);
	if (error) {
		lio_dev_err(oct, "Cannot allocate memory for instr queue %d\n",
			    iq_no);
		return (1);
	}

	/* Descriptor ring itself; the device reads commands from here. */
	iq->base_addr = lio_dma_alloc(q_size, (vm_paddr_t *)&iq->base_addr_dma);
	if (!iq->base_addr) {
		lio_dev_err(oct, "Cannot allocate memory for instr queue %d\n",
			    iq_no);
		return (1);
	}

	iq->max_count = num_descs;

	/*
	 * Initialize a list to hold requests that have been posted to
	 * Octeon but have yet to be fetched by Octeon.
	 */
	iq->request_list = malloc(sizeof(*iq->request_list) * num_descs,
				  M_DEVBUF, M_NOWAIT | M_ZERO);
	if (iq->request_list == NULL) {
		lio_dev_err(oct, "Alloc failed for IQ[%d] nr free list\n",
			    iq_no);
		return (1);
	}

	lio_dev_dbg(oct, "IQ[%d]: base: %p basedma: %llx count: %d\n",
		    iq_no, iq->base_addr, LIO_CAST64(iq->base_addr_dma),
		    iq->max_count);

	/* Create the descriptor buffer dma maps */
	request_buf = iq->request_list;
	for (i = 0; i < num_descs; i++, request_buf++) {
		error = bus_dmamap_create(iq->txtag, 0, &request_buf->map);
		if (error) {
			lio_dev_err(oct, "Unable to create TX DMA map\n");
			return (1);
		}
	}

	/* Reset all queue indices and bookkeeping state. */
	iq->txpciq.txpciq64 = txpciq.txpciq64;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->octeon_read_index = 0;
	iq->flush_index = 0;
	iq->last_db_time = 0;
	iq->db_timeout = (uint32_t)conf->db_timeout;
	atomic_store_rel_int(&iq->instr_pending, 0);

	/* Initialize the lock for this instruction queue */
	mtx_init(&iq->lock, "Tx_lock", NULL, MTX_DEF);
	mtx_init(&iq->post_lock, "iq_post_lock", NULL, MTX_DEF);
	mtx_init(&iq->enq_lock, "enq_lock", NULL, MTX_DEF);

	mtx_init(&iq->iq_flush_running_lock, "iq_flush_running_lock", NULL,
		 MTX_DEF);

	/* Mark this queue active in the device's IQ mask. */
	oct->io_qmask.iq |= BIT_ULL(iq_no);

	/* Set the 32B/64B mode for each input queue */
	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
	iq->iqcmd_64B = (conf->instr_type == 64);

	oct->fn_list.setup_iq_regs(oct, iq_no);

	/*
	 * Per-queue taskqueue that periodically rings the doorbell for
	 * commands that were posted without an immediate doorbell.
	 */
	db_tq = &oct->check_db_tq[iq_no];
	db_tq->tq = taskqueue_create("lio_check_db_timeout", M_WAITOK,
				     taskqueue_thread_enqueue, &db_tq->tq);
	if (db_tq->tq == NULL) {
		lio_dev_err(oct, "check db wq create failed for iq %d\n",
			    iq_no);
		return (1);
	}

	TIMEOUT_TASK_INIT(db_tq->tq, &db_tq->work, 0, lio_check_db_timeout,
			  (void *)db_tq);
	db_tq->ctxul = iq_no;
	db_tq->ctxptr = oct;

	taskqueue_start_threads(&db_tq->tq, 1, PI_NET,
				"lio%d_check_db_timeout:%d",
				oct->octeon_id, iq_no);
	taskqueue_enqueue_timeout(db_tq->tq, &db_tq->work, 1);

	/* Allocate a buf ring */
	oct->instr_queue[iq_no]->br =
		buf_ring_alloc(LIO_BR_SIZE, M_DEVBUF, M_WAITOK,
			       &oct->instr_queue[iq_no]->enq_lock);
	if (oct->instr_queue[iq_no]->br == NULL) {
		lio_dev_err(oct, "Critical Failure setting up buf ring\n");
		return (1);
	}

	return (0);
}
190325618Ssbruno
/*
 * Tear down instruction queue 'iq_no': stop and free the doorbell-timeout
 * taskqueue, release mbufs and DMA maps still attached to the request
 * list, then free the buf ring, the request list, the DMA tag and finally
 * the descriptor ring.
 *
 * Returns 0 when the descriptor ring was present and freed, 1 otherwise.
 */
int
lio_delete_instr_queue(struct octeon_device *oct, uint32_t iq_no)
{
	struct lio_instr_queue		*iq = oct->instr_queue[iq_no];
	struct lio_request_list		*request_buf;
	struct lio_mbuf_free_info	*finfo;
	uint64_t			desc_size = 0, q_size;
	int				i;

	lio_dev_dbg(oct, "%s[%d]\n", __func__, iq_no);

	/*
	 * Cancel any pending doorbell-timeout work; drain if a run is
	 * already in flight, then free the taskqueue.
	 */
	if (oct->check_db_tq[iq_no].tq != NULL) {
		while (taskqueue_cancel_timeout(oct->check_db_tq[iq_no].tq,
						&oct->check_db_tq[iq_no].work,
						NULL))
			taskqueue_drain_timeout(oct->check_db_tq[iq_no].tq,
						&oct->check_db_tq[iq_no].work);
		taskqueue_free(oct->check_db_tq[iq_no].tq);
		oct->check_db_tq[iq_no].tq = NULL;
	}

	if (LIO_CN23XX_PF(oct))
		desc_size =
		    LIO_GET_IQ_INSTR_TYPE_CFG(LIO_CHIP_CONF(oct, cn23xx_pf));

	/* Release buffers the hardware never fetched or completed. */
	request_buf = iq->request_list;
	for (i = 0; i < iq->max_count; i++, request_buf++) {
		if ((request_buf->reqtype == LIO_REQTYPE_NORESP_NET) ||
		    (request_buf->reqtype == LIO_REQTYPE_NORESP_NET_SG)) {
			if (request_buf->buf != NULL) {
				finfo = request_buf->buf;
				bus_dmamap_sync(iq->txtag, request_buf->map,
						BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(iq->txtag,
						  request_buf->map);
				m_freem(finfo->mb);
				request_buf->buf = NULL;
				if (request_buf->map != NULL) {
					bus_dmamap_destroy(iq->txtag,
							   request_buf->map);
					request_buf->map = NULL;
				}
			} else if (request_buf->map != NULL) {
				bus_dmamap_unload(iq->txtag, request_buf->map);
				bus_dmamap_destroy(iq->txtag, request_buf->map);
				request_buf->map = NULL;
			}
		}
	}

	if (iq->br != NULL) {
		buf_ring_free(iq->br, M_DEVBUF);
		iq->br = NULL;
	}

	if (iq->request_list != NULL) {
		free(iq->request_list, M_DEVBUF);
		iq->request_list = NULL;
	}

	if (iq->txtag != NULL) {
		bus_dma_tag_destroy(iq->txtag);
		iq->txtag = NULL;
	}

	if (iq->base_addr) {
		/* q_size must match the lio_dma_alloc() size in init. */
		q_size = iq->max_count * desc_size;
		lio_dma_free((uint32_t)q_size, iq->base_addr);

		oct->io_qmask.iq &= ~(1ULL << iq_no);
		bzero(oct->instr_queue[iq_no], sizeof(struct lio_instr_queue));
		oct->num_iqs--;

		return (0);
	}

	return (1);
}
269325618Ssbruno
270325618Ssbruno/* Return 0 on success, 1 on failure */
271325618Ssbrunoint
272325618Ssbrunolio_setup_iq(struct octeon_device *oct, int ifidx, int q_index,
273325618Ssbruno	     union octeon_txpciq txpciq, uint32_t num_descs)
274325618Ssbruno{
275325618Ssbruno	uint32_t	iq_no = (uint32_t)txpciq.s.q_no;
276325618Ssbruno
277325618Ssbruno	if (oct->instr_queue[iq_no]->oct_dev != NULL) {
278325618Ssbruno		lio_dev_dbg(oct, "IQ is in use. Cannot create the IQ: %d again\n",
279325618Ssbruno			    iq_no);
280325618Ssbruno		oct->instr_queue[iq_no]->txpciq.txpciq64 = txpciq.txpciq64;
281325618Ssbruno		return (0);
282325618Ssbruno	}
283325618Ssbruno
284325618Ssbruno	oct->instr_queue[iq_no]->q_index = q_index;
285325618Ssbruno	oct->instr_queue[iq_no]->ifidx = ifidx;
286325618Ssbruno
287325618Ssbruno	if (lio_init_instr_queue(oct, txpciq, num_descs)) {
288325618Ssbruno		lio_delete_instr_queue(oct, iq_no);
289325618Ssbruno		return (1);
290325618Ssbruno	}
291325618Ssbruno
292325618Ssbruno	oct->num_iqs++;
293325618Ssbruno	if (oct->fn_list.enable_io_queues(oct))
294325618Ssbruno		return (1);
295325618Ssbruno
296325618Ssbruno	return (0);
297325618Ssbruno}
298325618Ssbruno
/*
 * Poll all active instruction queues until their pending-instruction
 * counts drop to zero, flushing each non-empty queue along the way.
 * Retries up to ~1000 times with a 1 ms sleep between passes.
 *
 * Returns the total number of instructions still pending when polling
 * stopped (0 means all queues drained).
 */
int
lio_wait_for_instr_fetch(struct octeon_device *oct)
{
	int	i, retry = 1000, pending, instr_cnt = 0;

	do {
		instr_cnt = 0;

		for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) {
			/* Skip queues that were never initialized. */
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			pending = atomic_load_acq_int(
					&oct->instr_queue[i]->instr_pending);
			if (pending)
				__lio_check_db_timeout(oct, i);
			instr_cnt += pending;
		}

		if (instr_cnt == 0)
			break;

		lio_sleep_timeout(1);

	} while (retry-- && instr_cnt);

	return (instr_cnt);
}
326325618Ssbruno
327325618Ssbrunostatic inline void
328325618Ssbrunolio_ring_doorbell(struct octeon_device *oct, struct lio_instr_queue *iq)
329325618Ssbruno{
330325618Ssbruno
331325618Ssbruno	if (atomic_load_acq_int(&oct->status) == LIO_DEV_RUNNING) {
332325618Ssbruno		lio_write_csr32(oct, iq->doorbell_reg, iq->fill_cnt);
333325618Ssbruno		/* make sure doorbell write goes through */
334325618Ssbruno		__compiler_membar();
335325618Ssbruno		iq->fill_cnt = 0;
336325618Ssbruno		iq->last_db_time = ticks;
337325618Ssbruno		return;
338325618Ssbruno	}
339325618Ssbruno}
340325618Ssbruno
341325618Ssbrunostatic inline void
342325618Ssbruno__lio_copy_cmd_into_iq(struct lio_instr_queue *iq, uint8_t *cmd)
343325618Ssbruno{
344325618Ssbruno	uint8_t	*iqptr, cmdsize;
345325618Ssbruno
346325618Ssbruno	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
347325618Ssbruno	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);
348325618Ssbruno
349325618Ssbruno	memcpy(iqptr, cmd, cmdsize);
350325618Ssbruno}
351325618Ssbruno
/*
 * Post one command into the queue's descriptor ring.  Returns the post
 * status (OK / STOP when the ring is nearly full / FAILED when full)
 * and the descriptor index used (-1 on failure).  Caller must hold
 * iq->post_lock (see lio_send_command()).
 */
static inline struct lio_iq_post_status
__lio_post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
{
	struct lio_iq_post_status	st;

	st.status = LIO_IQ_SEND_OK;

	/*
	 * This ensures that the read index does not wrap around to the same
	 * position if queue gets full before Octeon could fetch any instr.
	 */
	if (atomic_load_acq_int(&iq->instr_pending) >=
	    (int32_t)(iq->max_count - 1)) {
		st.status = LIO_IQ_SEND_FAILED;
		st.index = -1;
		return (st);
	}

	/* One slot left after this post: tell the caller to stop sending. */
	if (atomic_load_acq_int(&iq->instr_pending) >=
	    (int32_t)(iq->max_count - 2))
		st.status = LIO_IQ_SEND_STOP;

	__lio_copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	st.index = iq->host_write_index;
	iq->host_write_index = lio_incr_index(iq->host_write_index, 1,
					      iq->max_count);
	iq->fill_cnt++;

	/*
	 * Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_add_int(&iq->instr_pending, 1);

	return (st);
}
392325618Ssbruno
393325618Ssbrunostatic inline void
394325618Ssbruno__lio_add_to_request_list(struct lio_instr_queue *iq, int idx, void *buf,
395325618Ssbruno			  int reqtype)
396325618Ssbruno{
397325618Ssbruno
398325618Ssbruno	iq->request_list[idx].buf = buf;
399325618Ssbruno	iq->request_list[idx].reqtype = reqtype;
400325618Ssbruno}
401325618Ssbruno
/*
 * Walk the request list from flush_index up to octeon_read_index,
 * completing each entry the hardware has fetched: free no-response
 * mbufs, queue response-expected soft commands on the ordered response
 * list, or invoke the soft command's callback.  Stops early once
 * 'budget' entries (if non-zero) have been processed.
 *
 * Returns the number of entries processed.  Caller must hold iq->lock
 * (see lio_flush_iq()).
 */
/* Can only run in process context */
int
lio_process_iq_request_list(struct octeon_device *oct,
			    struct lio_instr_queue *iq, uint32_t budget)
{
	struct lio_soft_command		*sc;
	struct octeon_instr_irh		*irh = NULL;
	struct lio_mbuf_free_info	*finfo;
	void				*buf;
	uint32_t			inst_count = 0;
	uint32_t			old = iq->flush_index;
	int				reqtype;

	while (old != iq->octeon_read_index) {
		reqtype = iq->request_list[old].reqtype;
		buf = iq->request_list[old].buf;
		finfo = buf;

		if (reqtype == LIO_REQTYPE_NONE)
			goto skip_this;

		switch (reqtype) {
		case LIO_REQTYPE_NORESP_NET:
			lio_free_mbuf(iq, buf);
			break;
		case LIO_REQTYPE_NORESP_NET_SG:
			lio_free_sgmbuf(iq, buf);
			break;
		case LIO_REQTYPE_RESP_NET:
		case LIO_REQTYPE_SOFT_COMMAND:
			sc = buf;
			if (LIO_CN23XX_PF(oct))
				irh = (struct octeon_instr_irh *)
					&sc->cmd.cmd3.irh;
			/*
			 * NOTE(review): irh stays NULL when the chip is not
			 * a CN23XX PF, so the dereference below relies on
			 * queue setup having rejected unsupported chips
			 * earlier (see lio_init_instr_queue) — confirm.
			 */
			if (irh->rflag) {
				/*
				 * We're expecting a response from Octeon.
				 * It's up to lio_process_ordered_list() to
				 * process  sc. Add sc to the ordered soft
				 * command response list because we expect
				 * a response from Octeon.
				 */
				mtx_lock(&oct->response_list
					 [LIO_ORDERED_SC_LIST].lock);
				atomic_add_int(&oct->response_list
					       [LIO_ORDERED_SC_LIST].
					       pending_req_count, 1);
				STAILQ_INSERT_TAIL(&oct->response_list
						   [LIO_ORDERED_SC_LIST].
						   head, &sc->node, entries);
				mtx_unlock(&oct->response_list
					   [LIO_ORDERED_SC_LIST].lock);
			} else {
				if (sc->callback != NULL) {
					/* This callback must not sleep */
					sc->callback(oct, LIO_REQUEST_DONE,
						     sc->callback_arg);
				}
			}

			break;
		default:
			lio_dev_err(oct, "%s Unknown reqtype: %d buf: %p at idx %d\n",
				    __func__, reqtype, buf, old);
		}

		/* Slot is done; clear it for reuse. */
		iq->request_list[old].buf = NULL;
		iq->request_list[old].reqtype = 0;

skip_this:
		inst_count++;
		old = lio_incr_index(old, 1, iq->max_count);

		if ((budget) && (inst_count >= budget))
			break;
	}

	iq->flush_index = old;

	return (inst_count);
}
483325618Ssbruno
/*
 * Flush completed entries from an instruction queue: refresh the
 * hardware read index, then process up to 'budget' request-list entries
 * (budget 0 means process everything outstanding in one pass).
 *
 * Returns 1 if the queue was fully drained (or another flush was already
 * running — the trylock below failed), 0 if the budget was exhausted
 * with work remaining.
 */
/* Can only be called from process context */
int
lio_flush_iq(struct octeon_device *oct, struct lio_instr_queue *iq,
	     uint32_t budget)
{
	uint32_t	inst_processed = 0;
	uint32_t	tot_inst_processed = 0;
	int		tx_done = 1;

	/* Only one flush at a time per queue; skip if one is in flight. */
	if (!mtx_trylock(&iq->iq_flush_running_lock))
		return (tx_done);

	mtx_lock(&iq->lock);

	iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);

	do {
		/* Process any outstanding IQ packets. */
		if (iq->flush_index == iq->octeon_read_index)
			break;

		if (budget)
			inst_processed =
				lio_process_iq_request_list(oct, iq,
							    budget -
							    tot_inst_processed);
		else
			inst_processed =
				lio_process_iq_request_list(oct, iq, 0);

		if (inst_processed) {
			atomic_subtract_int(&iq->instr_pending, inst_processed);
			iq->stats.instr_processed += inst_processed;
		}
		tot_inst_processed += inst_processed;
		inst_processed = 0;

	} while (tot_inst_processed < budget);

	if (budget && (tot_inst_processed >= budget))
		tx_done = 0;

	iq->last_db_time = ticks;

	mtx_unlock(&iq->lock);

	mtx_unlock(&iq->iq_flush_running_lock);

	return (tx_done);
}
534325618Ssbruno
/*
 * Process instruction queue after timeout.
 * This routine gets called from a taskqueue or when removing the module.
 *
 * If instructions are pending and the doorbell has been quiet for longer
 * than the queue's db_timeout, flush the queue and re-enable IRQs; then
 * opportunistically restart transmission from the buf ring.
 */
static void
__lio_check_db_timeout(struct octeon_device *oct, uint64_t iq_no)
{
	struct lio_instr_queue	*iq;
	uint64_t		next_time;

	if (oct == NULL)
		return;

	iq = oct->instr_queue[iq_no];
	if (iq == NULL)
		return;

	if (atomic_load_acq_int(&iq->instr_pending)) {
		/* If ticks - last_db_time < db_timeout do nothing  */
		next_time = iq->last_db_time + lio_ms_to_ticks(iq->db_timeout);
		if (!lio_check_timeout(ticks, next_time))
			return;

		iq->last_db_time = ticks;

		/* Flush the instruction queue */
		lio_flush_iq(oct, iq, 0);

		lio_enable_irq(NULL, iq);
	}

	/* Kick off any mbufs parked in the buf ring, without blocking. */
	if (oct->props.ifp != NULL && iq->br != NULL) {
		if (mtx_trylock(&iq->enq_lock)) {
			if (!drbr_empty(oct->props.ifp, iq->br))
				lio_mq_start_locked(oct->props.ifp, iq);

			mtx_unlock(&iq->enq_lock);
		}
	}
}
575325618Ssbruno
576325618Ssbruno/*
577325618Ssbruno * Called by the Poll thread at regular intervals to check the instruction
578325618Ssbruno * queue for commands to be posted and for commands that were fetched by Octeon.
579325618Ssbruno */
580325618Ssbrunostatic void
581325618Ssbrunolio_check_db_timeout(void *arg, int pending)
582325618Ssbruno{
583325618Ssbruno	struct lio_tq		*db_tq = (struct lio_tq *)arg;
584325618Ssbruno	struct octeon_device	*oct = db_tq->ctxptr;
585325618Ssbruno	uint64_t		iq_no = db_tq->ctxul;
586325618Ssbruno	uint32_t		delay = 10;
587325618Ssbruno
588325618Ssbruno	__lio_check_db_timeout(oct, iq_no);
589325618Ssbruno	taskqueue_enqueue_timeout(db_tq->tq, &db_tq->work,
590325618Ssbruno				  lio_ms_to_ticks(delay));
591325618Ssbruno}
592325618Ssbruno
/*
 * Post one command ('cmd') of request type 'reqtype' to instruction
 * queue 'iq_no', recording 'buf' for completion handling.  The doorbell
 * is rung when 'force_db' is set or the queue is nearly full.
 *
 * Returns the post status: LIO_IQ_SEND_OK, LIO_IQ_SEND_STOP or
 * LIO_IQ_SEND_FAILED.
 */
int
lio_send_command(struct octeon_device *oct, uint32_t iq_no,
		 uint32_t force_db, void *cmd, void *buf,
		 uint32_t datasize, uint32_t reqtype)
{
	struct lio_iq_post_status	st;
	struct lio_instr_queue		*iq = oct->instr_queue[iq_no];

	/*
	 * Get the lock and prevent other tasks and tx interrupt handler
	 * from running.
	 */
	mtx_lock(&iq->post_lock);

	st = __lio_post_command2(iq, cmd);

	if (st.status != LIO_IQ_SEND_FAILED) {
		__lio_add_to_request_list(iq, st.index, buf, reqtype);
		LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
		LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);

		if (force_db || (st.status == LIO_IQ_SEND_STOP))
			lio_ring_doorbell(oct, iq);
	} else {
		LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
	}

	mtx_unlock(&iq->post_lock);

	/*
	 * This is only done here to expedite packets being flushed for
	 * cases where there are no IQ completion interrupts.
	 */

	return (st.status);
}
629325618Ssbruno
630325618Ssbrunovoid
631325618Ssbrunolio_prepare_soft_command(struct octeon_device *oct, struct lio_soft_command *sc,
632325618Ssbruno			 uint8_t opcode, uint8_t subcode, uint32_t irh_ossp,
633325618Ssbruno			 uint64_t ossp0, uint64_t ossp1)
634325618Ssbruno{
635325618Ssbruno	struct lio_config		*lio_cfg;
636325618Ssbruno	struct octeon_instr_ih3		*ih3;
637325618Ssbruno	struct octeon_instr_pki_ih3	*pki_ih3;
638325618Ssbruno	struct octeon_instr_irh		*irh;
639325618Ssbruno	struct octeon_instr_rdp		*rdp;
640325618Ssbruno
641325618Ssbruno	KASSERT(opcode <= 15, ("%s, %d, opcode > 15", __func__, __LINE__));
642325618Ssbruno	KASSERT(subcode <= 127, ("%s, %d, opcode > 127", __func__, __LINE__));
643325618Ssbruno
644325618Ssbruno	lio_cfg = lio_get_conf(oct);
645325618Ssbruno
646325618Ssbruno	if (LIO_CN23XX_PF(oct)) {
647325618Ssbruno		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
648325618Ssbruno
649325618Ssbruno		ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;
650325618Ssbruno
651325618Ssbruno		pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
652325618Ssbruno
653325618Ssbruno		pki_ih3->w = 1;
654325618Ssbruno		pki_ih3->raw = 1;
655325618Ssbruno		pki_ih3->utag = 1;
656325618Ssbruno		pki_ih3->uqpg = oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
657325618Ssbruno		pki_ih3->utt = 1;
658325618Ssbruno		pki_ih3->tag = LIO_CONTROL;
659325618Ssbruno		pki_ih3->tagtype = LIO_ATOMIC_TAG;
660325618Ssbruno		pki_ih3->qpg = oct->instr_queue[sc->iq_no]->txpciq.s.qpg;
661325618Ssbruno		pki_ih3->pm = 0x7;
662325618Ssbruno		pki_ih3->sl = 8;
663325618Ssbruno
664325618Ssbruno		if (sc->datasize)
665325618Ssbruno			ih3->dlengsz = sc->datasize;
666325618Ssbruno
667325618Ssbruno		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
668325618Ssbruno		irh->opcode = opcode;
669325618Ssbruno		irh->subcode = subcode;
670325618Ssbruno
671325618Ssbruno		/* opcode/subcode specific parameters (ossp) */
672325618Ssbruno		irh->ossp = irh_ossp;
673325618Ssbruno		sc->cmd.cmd3.ossp[0] = ossp0;
674325618Ssbruno		sc->cmd.cmd3.ossp[1] = ossp1;
675325618Ssbruno
676325618Ssbruno		if (sc->rdatasize) {
677325618Ssbruno			rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
678325618Ssbruno			rdp->pcie_port = oct->pcie_port;
679325618Ssbruno			rdp->rlen = sc->rdatasize;
680325618Ssbruno
681325618Ssbruno			irh->rflag = 1;
682325618Ssbruno			/* PKI IH3 */
683325618Ssbruno			/* pki_ih3 irh+ossp[0]+ossp[1]+rdp+rptr = 48 bytes */
684325618Ssbruno			ih3->fsz = LIO_SOFTCMDRESP_IH3;
685325618Ssbruno		} else {
686325618Ssbruno			irh->rflag = 0;
687325618Ssbruno			/* PKI IH3 */
688325618Ssbruno			/* pki_h3 + irh + ossp[0] + ossp[1] = 32 bytes */
689325618Ssbruno			ih3->fsz = LIO_PCICMD_O3;
690325618Ssbruno		}
691325618Ssbruno	}
692325618Ssbruno}
693325618Ssbruno
/*
 * Post a prepared soft command to its instruction queue: wire up the
 * DMA data/response pointers, initialize the completion word, arm the
 * command timeout, then hand off to lio_send_command() with the
 * doorbell forced.  Returns the post status from lio_send_command().
 */
int
lio_send_soft_command(struct octeon_device *oct, struct lio_soft_command *sc)
{
	struct octeon_instr_ih3	*ih3;
	struct octeon_instr_irh	*irh;
	uint32_t		len = 0;

	if (LIO_CN23XX_PF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
		if (ih3->dlengsz) {
			KASSERT(sc->dmadptr, ("%s, %d, sc->dmadptr is NULL",
					      __func__, __LINE__));
			sc->cmd.cmd3.dptr = sc->dmadptr;
		}

		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		if (irh->rflag) {
			/* A response is expected: the response buffer must
			 * exist and its status word starts out "incomplete". */
			KASSERT(sc->dmarptr, ("%s, %d, sc->dmarptr is NULL",
					      __func__, __LINE__));
			KASSERT(sc->status_word, ("%s, %d, sc->status_word is NULL",
						  __func__, __LINE__));
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd3.rptr = sc->dmarptr;
		}
		len = (uint32_t)ih3->dlengsz;
	}
	if (sc->wait_time)
		sc->timeout = ticks + lio_ms_to_ticks(sc->wait_time);

	return (lio_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
				 len, LIO_REQTYPE_SOFT_COMMAND));
}
726325618Ssbruno
/*
 * Pre-allocate the pool of soft command buffers (LIO_MAX_SOFT_COMMAND_BUFFERS
 * DMA-able buffers of LIO_SOFT_COMMAND_BUFFER_SIZE each) and queue them
 * on oct->sc_buf_pool.  Returns 0 on success; on allocation failure the
 * partially built pool is freed and 1 is returned.
 */
int
lio_setup_sc_buffer_pool(struct octeon_device *oct)
{
	struct lio_soft_command	*sc;
	uint64_t		dma_addr;
	int			i;

	STAILQ_INIT(&oct->sc_buf_pool.head);
	mtx_init(&oct->sc_buf_pool.lock, "sc_pool_lock", NULL, MTX_DEF);
	atomic_store_rel_int(&oct->sc_buf_pool.alloc_buf_count, 0);

	for (i = 0; i < LIO_MAX_SOFT_COMMAND_BUFFERS; i++) {
		sc = (struct lio_soft_command *)
			lio_dma_alloc(LIO_SOFT_COMMAND_BUFFER_SIZE, (vm_paddr_t *)&dma_addr);
		if (sc == NULL) {
			lio_free_sc_buffer_pool(oct);
			return (1);
		}

		/* The command struct itself heads the DMA buffer. */
		sc->dma_addr = dma_addr;
		sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;

		STAILQ_INSERT_TAIL(&oct->sc_buf_pool.head, &sc->node, entries);
	}

	return (0);
}
754325618Ssbruno
755325618Ssbrunoint
756325618Ssbrunolio_free_sc_buffer_pool(struct octeon_device *oct)
757325618Ssbruno{
758325618Ssbruno	struct lio_stailq_node	*tmp, *tmp2;
759325618Ssbruno	struct lio_soft_command	*sc;
760325618Ssbruno
761325618Ssbruno	mtx_lock(&oct->sc_buf_pool.lock);
762325618Ssbruno
763325618Ssbruno	STAILQ_FOREACH_SAFE(tmp, &oct->sc_buf_pool.head, entries, tmp2) {
764325618Ssbruno		sc = LIO_STAILQ_FIRST_ENTRY(&oct->sc_buf_pool.head,
765325618Ssbruno					    struct lio_soft_command, node);
766325618Ssbruno
767325618Ssbruno		STAILQ_REMOVE_HEAD(&oct->sc_buf_pool.head, entries);
768325618Ssbruno
769325618Ssbruno		lio_dma_free(sc->size, sc);
770325618Ssbruno	}
771325618Ssbruno
772325618Ssbruno	STAILQ_INIT(&oct->sc_buf_pool.head);
773325618Ssbruno
774325618Ssbruno	mtx_unlock(&oct->sc_buf_pool.lock);
775325618Ssbruno
776325618Ssbruno	return (0);
777325618Ssbruno}
778325618Ssbruno
779325618Ssbrunostruct lio_soft_command *
780325618Ssbrunolio_alloc_soft_command(struct octeon_device *oct, uint32_t datasize,
781325618Ssbruno		       uint32_t rdatasize, uint32_t ctxsize)
782325618Ssbruno{
783325618Ssbruno	struct lio_soft_command	*sc = NULL;
784325618Ssbruno	struct lio_stailq_node	*tmp;
785325618Ssbruno	uint64_t		dma_addr;
786325618Ssbruno	uint32_t		size;
787325618Ssbruno	uint32_t		offset = sizeof(struct lio_soft_command);
788325618Ssbruno
789325618Ssbruno	KASSERT((offset + datasize + rdatasize + ctxsize) <=
790325618Ssbruno		LIO_SOFT_COMMAND_BUFFER_SIZE,
791325618Ssbruno		("%s, %d, offset + datasize + rdatasize + ctxsize > LIO_SOFT_COMMAND_BUFFER_SIZE",
792325618Ssbruno		 __func__, __LINE__));
793325618Ssbruno
794325618Ssbruno	mtx_lock(&oct->sc_buf_pool.lock);
795325618Ssbruno
796325618Ssbruno	if (STAILQ_EMPTY(&oct->sc_buf_pool.head)) {
797325618Ssbruno		mtx_unlock(&oct->sc_buf_pool.lock);
798325618Ssbruno		return (NULL);
799325618Ssbruno	}
800325618Ssbruno	tmp = STAILQ_LAST(&oct->sc_buf_pool.head, lio_stailq_node, entries);
801325618Ssbruno
802325618Ssbruno	STAILQ_REMOVE(&oct->sc_buf_pool.head, tmp, lio_stailq_node, entries);
803325618Ssbruno
804325618Ssbruno	atomic_add_int(&oct->sc_buf_pool.alloc_buf_count, 1);
805325618Ssbruno
806325618Ssbruno	mtx_unlock(&oct->sc_buf_pool.lock);
807325618Ssbruno
808325618Ssbruno	sc = (struct lio_soft_command *)tmp;
809325618Ssbruno
810325618Ssbruno	dma_addr = sc->dma_addr;
811325618Ssbruno	size = sc->size;
812325618Ssbruno
813325618Ssbruno	bzero(sc, sc->size);
814325618Ssbruno
815325618Ssbruno	sc->dma_addr = dma_addr;
816325618Ssbruno	sc->size = size;
817325618Ssbruno
818325618Ssbruno	if (ctxsize) {
819325618Ssbruno		sc->ctxptr = (uint8_t *)sc + offset;
820325618Ssbruno		sc->ctxsize = ctxsize;
821325618Ssbruno	}
822325618Ssbruno
823325618Ssbruno	/* Start data at 128 byte boundary */
824325618Ssbruno	offset = (offset + ctxsize + 127) & 0xffffff80;
825325618Ssbruno
826325618Ssbruno	if (datasize) {
827325618Ssbruno		sc->virtdptr = (uint8_t *)sc + offset;
828325618Ssbruno		sc->dmadptr = dma_addr + offset;
829325618Ssbruno		sc->datasize = datasize;
830325618Ssbruno	}
831325618Ssbruno	/* Start rdata at 128 byte boundary */
832325618Ssbruno	offset = (offset + datasize + 127) & 0xffffff80;
833325618Ssbruno
834325618Ssbruno	if (rdatasize) {
835325618Ssbruno		KASSERT(rdatasize >= 16, ("%s, %d, rdatasize < 16", __func__,
836325618Ssbruno					  __LINE__));
837325618Ssbruno		sc->virtrptr = (uint8_t *)sc + offset;
838325618Ssbruno		sc->dmarptr = dma_addr + offset;
839325618Ssbruno		sc->rdatasize = rdatasize;
840325618Ssbruno		sc->status_word = (uint64_t *)((uint8_t *)(sc->virtrptr) +
841325618Ssbruno					       rdatasize - 8);
842325618Ssbruno	}
843325618Ssbruno	return (sc);
844325618Ssbruno}
845325618Ssbruno
846325618Ssbrunovoid
847325618Ssbrunolio_free_soft_command(struct octeon_device *oct,
848325618Ssbruno		      struct lio_soft_command *sc)
849325618Ssbruno{
850325618Ssbruno
851325618Ssbruno	mtx_lock(&oct->sc_buf_pool.lock);
852325618Ssbruno
853325618Ssbruno	STAILQ_INSERT_TAIL(&oct->sc_buf_pool.head, &sc->node, entries);
854325618Ssbruno
855325618Ssbruno	atomic_subtract_int(&oct->sc_buf_pool.alloc_buf_count, 1);
856325618Ssbruno
857325618Ssbruno	mtx_unlock(&oct->sc_buf_pool.lock);
858325618Ssbruno}
859