/*-
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2015 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * This software was developed by Edward Tomasz Napierala under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * cxgbei implementation of iSCSI Common Layer kobj(9) interface.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/uio.h>
#include <machine/bus.h>
#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>

#include <dev/iscsi/icl.h>
#include <dev/iscsi/iscsi_proto.h>
#include <icl_conn_if.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_error.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_ioctl.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_xpt.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/cam_compat.h>
#include <cam/scsi/scsi_message.h>

#include "common/common.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom.h"
#include "cxgbei.h"

SYSCTL_NODE(_kern_icl, OID_AUTO, cxgbei, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Chelsio iSCSI offload");
static int coalesce = 1;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, coalesce, CTLFLAG_RWTUN,
    &coalesce, 0, "Try to coalesce PDUs before sending");
static int partial_receive_len = 128 * 1024;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, partial_receive_len, CTLFLAG_RWTUN,
    &partial_receive_len, 0, "Minimum read size for partially received "
    "data segment");
static int sendspace = 1048576;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, sendspace, CTLFLAG_RWTUN,
    &sendspace, 0, "Default send socket buffer size");
static int recvspace = 1048576;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, recvspace, CTLFLAG_RWTUN,
    &recvspace, 0, "Default receive socket buffer size");

static uma_zone_t prsv_zone;
static volatile u_int icl_cxgbei_ncons;

#define ICL_CONN_LOCK(X)		mtx_lock(X->ic_lock)
#define ICL_CONN_UNLOCK(X)		mtx_unlock(X->ic_lock)
#define ICL_CONN_LOCK_ASSERT(X)		mtx_assert(X->ic_lock, MA_OWNED)
#define ICL_CONN_LOCK_ASSERT_NOT(X)	mtx_assert(X->ic_lock, MA_NOTOWNED)

static icl_conn_new_pdu_t	icl_cxgbei_conn_new_pdu;
static icl_conn_pdu_data_segment_length_t
				    icl_cxgbei_conn_pdu_data_segment_length;
static icl_conn_pdu_append_data_t	icl_cxgbei_conn_pdu_append_data;
static icl_conn_pdu_get_data_t	icl_cxgbei_conn_pdu_get_data;
static icl_conn_pdu_queue_t	icl_cxgbei_conn_pdu_queue;
static icl_conn_handoff_t	icl_cxgbei_conn_handoff;
static icl_conn_free_t		icl_cxgbei_conn_free;
static icl_conn_close_t		icl_cxgbei_conn_close;
static icl_conn_task_setup_t	icl_cxgbei_conn_task_setup;
static icl_conn_task_done_t	icl_cxgbei_conn_task_done;
static icl_conn_transfer_setup_t	icl_cxgbei_conn_transfer_setup;
static icl_conn_transfer_done_t	icl_cxgbei_conn_transfer_done;

static kobj_method_t icl_cxgbei_methods[] = {
	KOBJMETHOD(icl_conn_new_pdu, icl_cxgbei_conn_new_pdu),
	KOBJMETHOD(icl_conn_pdu_free, icl_cxgbei_conn_pdu_free),
	KOBJMETHOD(icl_conn_pdu_data_segment_length,
	    icl_cxgbei_conn_pdu_data_segment_length),
	KOBJMETHOD(icl_conn_pdu_append_data, icl_cxgbei_conn_pdu_append_data),
	KOBJMETHOD(icl_conn_pdu_get_data, icl_cxgbei_conn_pdu_get_data),
	KOBJMETHOD(icl_conn_pdu_queue, icl_cxgbei_conn_pdu_queue),
	KOBJMETHOD(icl_conn_handoff, icl_cxgbei_conn_handoff),
	KOBJMETHOD(icl_conn_free, icl_cxgbei_conn_free),
	KOBJMETHOD(icl_conn_close, icl_cxgbei_conn_close),
	KOBJMETHOD(icl_conn_task_setup, icl_cxgbei_conn_task_setup),
	KOBJMETHOD(icl_conn_task_done, icl_cxgbei_conn_task_done),
	KOBJMETHOD(icl_conn_transfer_setup, icl_cxgbei_conn_transfer_setup),
	KOBJMETHOD(icl_conn_transfer_done, icl_cxgbei_conn_transfer_done),
	{ 0, 0 }
};

DEFINE_CLASS(icl_cxgbei, icl_cxgbei_methods, sizeof(struct icl_cxgbei_conn));

void
icl_cxgbei_conn_pdu_free(struct icl_conn *ic, struct icl_pdu *ip)
{
#ifdef INVARIANTS
	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
#endif

	MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
	MPASS(ic == ip->ip_conn);
	MPASS(ip->ip_bhs_mbuf != NULL);

	m_freem(ip->ip_ahs_mbuf);
	m_freem(ip->ip_data_mbuf);
	m_freem(ip->ip_bhs_mbuf);	/* storage for icl_cxgbei_pdu itself */

#ifdef DIAGNOSTIC
	if (__predict_true(ic != NULL))
		refcount_release(&ic->ic_outstanding_pdus);
#endif
}

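/*
 * A PDU and the BHS it describes are both carved out of the data area of the
 * single mbuf allocated below: the icl_cxgbei_pdu goes first and the BHS
 * right after it, each at a suitable alignment, so freeing ip_bhs_mbuf (as
 * icl_cxgbei_conn_pdu_free above does) releases both.
 */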
struct icl_pdu *
icl_cxgbei_new_pdu(int flags)
{
	struct icl_cxgbei_pdu *icp;
	struct icl_pdu *ip;
	struct mbuf *m;
	uintptr_t a;

	m = m_gethdr(flags, MT_DATA);
	if (__predict_false(m == NULL))
		return (NULL);

	a = roundup2(mtod(m, uintptr_t), _Alignof(struct icl_cxgbei_pdu));
	icp = (struct icl_cxgbei_pdu *)a;
	bzero(icp, sizeof(*icp));

	icp->icp_signature = CXGBEI_PDU_SIGNATURE;
	ip = &icp->ip;
	ip->ip_bhs_mbuf = m;

	a = roundup2((uintptr_t)(icp + 1), _Alignof(struct iscsi_bhs *));
	ip->ip_bhs = (struct iscsi_bhs *)a;
#ifdef INVARIANTS
	/* Everything must fit entirely in the mbuf. */
	a = (uintptr_t)(ip->ip_bhs + 1);
	MPASS(a <= (uintptr_t)m + MSIZE);
#endif
	bzero(ip->ip_bhs, sizeof(*ip->ip_bhs));

	m->m_data = (void *)ip->ip_bhs;
	m->m_len = sizeof(struct iscsi_bhs);
	m->m_pkthdr.len = m->m_len;

	return (ip);
}

void
icl_cxgbei_new_pdu_set_conn(struct icl_pdu *ip, struct icl_conn *ic)
{

	ip->ip_conn = ic;
#ifdef DIAGNOSTIC
	refcount_acquire(&ic->ic_outstanding_pdus);
#endif
}

/*
 * Allocate an icl_pdu with an empty BHS for the caller to fill in.
 */
static struct icl_pdu *
icl_cxgbei_conn_new_pdu(struct icl_conn *ic, int flags)
{
	struct icl_pdu *ip;

	ip = icl_cxgbei_new_pdu(flags);
	if (__predict_false(ip == NULL))
		return (NULL);
	icl_cxgbei_new_pdu_set_conn(ip, ic);

	return (ip);
}

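/*
 * The DataSegmentLength in the BHS is a 24-bit big-endian integer; assemble
 * it one byte at a time.
 */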
static size_t
icl_pdu_data_segment_length(const struct icl_pdu *request)
{
	uint32_t len = 0;

	len += request->ip_bhs->bhs_data_segment_len[0];
	len <<= 8;
	len += request->ip_bhs->bhs_data_segment_len[1];
	len <<= 8;
	len += request->ip_bhs->bhs_data_segment_len[2];

	return (len);
}

size_t
icl_cxgbei_conn_pdu_data_segment_length(struct icl_conn *ic,
    const struct icl_pdu *request)
{

	return (icl_pdu_data_segment_length(request));
}

static struct mbuf *
finalize_pdu(struct icl_cxgbei_conn *icc, struct icl_cxgbei_pdu *icp)
{
	struct icl_pdu *ip = &icp->ip;
	uint8_t ulp_submode, padding;
	struct mbuf *m, *last;
	struct iscsi_bhs *bhs;

	/*
	 * Fix up the data segment mbuf first.
	 */
	m = ip->ip_data_mbuf;
	ulp_submode = icc->ulp_submode;
	if (m) {
		last = m_last(m);

		/*
		 * Round up the data segment to a 4B boundary.  Pad with 0 if
		 * necessary.  There will definitely be room in the mbuf.
		 */
		padding = roundup2(ip->ip_data_len, 4) - ip->ip_data_len;
		if (padding) {
			bzero(mtod(last, uint8_t *) + last->m_len, padding);
			last->m_len += padding;
		}
	} else {
		MPASS(ip->ip_data_len == 0);
		ulp_submode &= ~ULP_CRC_DATA;
		padding = 0;
	}

	/*
	 * Now the header mbuf that has the BHS.
	 */
	m = ip->ip_bhs_mbuf;
	MPASS(m->m_pkthdr.len == sizeof(struct iscsi_bhs));
	MPASS(m->m_len == sizeof(struct iscsi_bhs));

	bhs = ip->ip_bhs;
	bhs->bhs_data_segment_len[2] = ip->ip_data_len;
	bhs->bhs_data_segment_len[1] = ip->ip_data_len >> 8;
	bhs->bhs_data_segment_len[0] = ip->ip_data_len >> 16;

	/* "Convert" PDU to mbuf chain.  Do not use icp/ip after this. */
	m->m_pkthdr.len = sizeof(struct iscsi_bhs) + ip->ip_data_len + padding;
	m->m_next = ip->ip_data_mbuf;
	set_mbuf_ulp_submode(m, ulp_submode);
#ifdef INVARIANTS
	bzero(icp, sizeof(*icp));
#endif
#ifdef DIAGNOSTIC
	refcount_release(&icc->ic.ic_outstanding_pdus);
#endif

	return (m);
}

int
icl_cxgbei_conn_pdu_append_data(struct icl_conn *ic, struct icl_pdu *ip,
    const void *addr, size_t len, int flags)
{
	struct mbuf *m;
#ifdef INVARIANTS
	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
#endif

	MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
	MPASS(ic == ip->ip_conn);
	KASSERT(len > 0, ("%s: len is %jd", __func__, (intmax_t)len));

	m = ip->ip_data_mbuf;
	if (m == NULL) {
		m = m_getjcl(M_NOWAIT, MT_DATA, 0, MJUM16BYTES);
		if (__predict_false(m == NULL))
			return (ENOMEM);

		ip->ip_data_mbuf = m;
	}

	if (__predict_true(m_append(m, len, addr) != 0)) {
		ip->ip_data_len += len;
		MPASS(ip->ip_data_len <= ic->ic_max_data_segment_length);
		return (0);
	} else {
		if (flags & M_WAITOK) {
			CXGBE_UNIMPLEMENTED("fail safe append");
		}
		ip->ip_data_len = m_length(m, NULL);
		return (1);
	}
}

void
icl_cxgbei_conn_pdu_get_data(struct icl_conn *ic, struct icl_pdu *ip,
    size_t off, void *addr, size_t len)
{
	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);

	if (icp->icp_flags & ICPF_RX_DDP)
		return; /* data is DDP'ed, no need to copy */
	m_copydata(ip->ip_data_mbuf, off, len, addr);
}

void
icl_cxgbei_conn_pdu_queue(struct icl_conn *ic, struct icl_pdu *ip)
{
	struct epoch_tracker et;
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
	struct socket *so = ic->ic_socket;
	struct toepcb *toep = icc->toep;
	struct inpcb *inp;
	struct mbuf *m;

	MPASS(ic == ip->ip_conn);
	MPASS(ip->ip_bhs_mbuf != NULL);
	/* The kernel doesn't generate PDUs with AHS. */
	MPASS(ip->ip_ahs_mbuf == NULL && ip->ip_ahs_len == 0);

	ICL_CONN_LOCK_ASSERT(ic);
	/* NOTE: sowriteable without so_snd lock is a mostly harmless race. */
	if (ic->ic_disconnecting || so == NULL || !sowriteable(so)) {
		icl_cxgbei_conn_pdu_free(ic, ip);
		return;
	}

	m = finalize_pdu(icc, icp);
	M_ASSERTPKTHDR(m);
	MPASS((m->m_pkthdr.len & 3) == 0);

	/*
	 * Do not get inp from toep->inp as the toepcb might have detached
	 * already.
	 */
	inp = sotoinpcb(so);
	CURVNET_SET(toep->vnet);
	NET_EPOCH_ENTER(et);
	INP_WLOCK(inp);
	if (__predict_false(inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) ||
	    __predict_false((toep->flags & TPF_ATTACHED) == 0))
		m_freem(m);
	else {
		mbufq_enqueue(&toep->ulp_pduq, m);
		t4_push_pdus(icc->sc, toep, 0);
	}
	INP_WUNLOCK(inp);
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();
}

static struct icl_conn *
icl_cxgbei_new_conn(const char *name, struct mtx *lock)
{
	struct icl_cxgbei_conn *icc;
	struct icl_conn *ic;

	refcount_acquire(&icl_cxgbei_ncons);

	icc = (struct icl_cxgbei_conn *)kobj_create(&icl_cxgbei_class, M_CXGBE,
	    M_WAITOK | M_ZERO);
	icc->icc_signature = CXGBEI_CONN_SIGNATURE;
	STAILQ_INIT(&icc->rcvd_pdus);

	ic = &icc->ic;
	ic->ic_lock = lock;

	/* XXXNP: review.  Most of these icl_conn fields aren't really used */
	STAILQ_INIT(&ic->ic_to_send);
	cv_init(&ic->ic_send_cv, "icl_cxgbei_tx");
	cv_init(&ic->ic_receive_cv, "icl_cxgbei_rx");
#ifdef DIAGNOSTIC
	refcount_init(&ic->ic_outstanding_pdus, 0);
#endif
	/* This is a stop-gap value that will be corrected during handoff. */
	ic->ic_max_data_segment_length = 16384;
	ic->ic_name = name;
	ic->ic_offload = "cxgbei";
	ic->ic_unmapped = false;

	CTR2(KTR_CXGBE, "%s: icc %p", __func__, icc);

	return (ic);
}

void
icl_cxgbei_conn_free(struct icl_conn *ic)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);

	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);

	CTR2(KTR_CXGBE, "%s: icc %p", __func__, icc);

	cv_destroy(&ic->ic_send_cv);
	cv_destroy(&ic->ic_receive_cv);

	kobj_delete((struct kobj *)icc, M_CXGBE);
	refcount_release(&icl_cxgbei_ncons);
}

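/*
 * Size the socket buffers to hold at least one full PDU in each direction
 * (with auto-sizing enabled on top of that) and disable the Nagle algorithm.
 */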
static int
icl_cxgbei_setsockopt(struct icl_conn *ic, struct socket *so, int sspace,
    int rspace)
{
	struct sockopt opt;
	int error, one = 1, ss, rs;

	ss = max(sendspace, sspace);
	rs = max(recvspace, rspace);

	error = soreserve(so, ss, rs);
	if (error != 0) {
		icl_cxgbei_conn_close(ic);
		return (error);
	}
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_flags |= SB_AUTOSIZE;
	SOCKBUF_UNLOCK(&so->so_snd);
	SOCKBUF_LOCK(&so->so_rcv);
	so->so_rcv.sb_flags |= SB_AUTOSIZE;
	SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * Disable Nagle.
	 */
	bzero(&opt, sizeof(opt));
	opt.sopt_dir = SOPT_SET;
	opt.sopt_level = IPPROTO_TCP;
	opt.sopt_name = TCP_NODELAY;
	opt.sopt_val = &one;
	opt.sopt_valsize = sizeof(one);
	error = sosetopt(so, &opt);
	if (error != 0) {
		icl_cxgbei_conn_close(ic);
		return (error);
	}

	return (0);
}

/*
 * Request/response structure used to find out the adapter offloading a socket.
 */
struct find_ofld_adapter_rr {
	struct socket *so;
	struct adapter *sc;	/* result */
};

static void
find_offload_adapter(struct adapter *sc, void *arg)
{
	struct find_ofld_adapter_rr *fa = arg;
	struct socket *so = fa->so;
	struct tom_data *td = sc->tom_softc;
	struct tcpcb *tp;
	struct inpcb *inp;

	/* Non-TCP sockets were filtered out earlier. */
	MPASS(so->so_proto->pr_protocol == IPPROTO_TCP);

	if (fa->sc != NULL)
		return;	/* Found already. */

	if (td == NULL)
		return;	/* TOE not enabled on this adapter. */

	inp = sotoinpcb(so);
	INP_WLOCK(inp);
	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
		tp = intotcpcb(inp);
		if (tp->t_flags & TF_TOE && tp->tod == &td->tod)
			fa->sc = sc;	/* Found. */
	}
	INP_WUNLOCK(inp);
}

/* XXXNP: move this to t4_tom. */
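/*
 * Send a FLOWC work request that caps the firmware's TX data length for this
 * connection (TXDATAPLEN_MAX) at the maximum PDU length, and account for the
 * tx credits and send descriptor the request consumes.
 */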
static void
send_iscsi_flowc_wr(struct adapter *sc, struct toepcb *toep, int maxlen)
{
	struct wrqe *wr;
	struct fw_flowc_wr *flowc;
	const u_int nparams = 1;
	u_int flowclen;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

	flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);

	wr = alloc_wrqe(roundup2(flowclen, 16), &toep->ofld_txq->wrq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	flowc = wrtod(wr);
	memset(flowc, 0, wr->wr_len);

	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
	    V_FW_FLOWC_WR_NPARAMS(nparams));
	flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
	    V_FW_WR_FLOWID(toep->tid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
	flowc->mnemval[0].val = htobe32(maxlen);

	txsd->tx_credits = howmany(flowclen, 16);
	txsd->plen = 0;
	KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
	    ("%s: not enough credits (%d)", __func__, toep->tx_credits));
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;

	t4_wrq_tx(sc, wr);
}

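/*
 * Program the connection's TCB to switch it to the iSCSI ULP mode, with the
 * given submode selecting header and/or data digest (CRC32c) offload.
 */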
static void
set_ulp_mode_iscsi(struct adapter *sc, struct toepcb *toep, u_int ulp_submode)
{
	uint64_t val;

	CTR3(KTR_CXGBE, "%s: tid %u, ULP_MODE_ISCSI, submode=%#x",
	    __func__, toep->tid, ulp_submode);

	val = V_TCB_ULP_TYPE(ULP_MODE_ISCSI) | V_TCB_ULP_RAW(ulp_submode);
	t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_ULP_TYPE,
	    V_TCB_ULP_TYPE(M_TCB_ULP_TYPE) | V_TCB_ULP_RAW(M_TCB_ULP_RAW), val,
	    0, 0);
}

/*
 * XXXNP: Who is responsible for cleaning up the socket if this returns with an
 * error?  Review all error paths.
 *
 * XXXNP: What happens to the socket's fd reference if the operation is
 * successful, and how does that affect the socket's life cycle?
 */
int
icl_cxgbei_conn_handoff(struct icl_conn *ic, int fd)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct cxgbei_data *ci;
	struct find_ofld_adapter_rr fa;
	struct file *fp;
	struct socket *so;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct toepcb *toep;
	cap_rights_t rights;
	int error;

	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
	ICL_CONN_LOCK_ASSERT_NOT(ic);

	/*
	 * Steal the socket from userland.
	 */
	error = fget(curthread, fd,
	    cap_rights_init_one(&rights, CAP_SOCK_CLIENT), &fp);
	if (error != 0)
		return (error);
	if (fp->f_type != DTYPE_SOCKET) {
		fdrop(fp, curthread);
		return (EINVAL);
	}
	so = fp->f_data;
	if (so->so_type != SOCK_STREAM ||
	    so->so_proto->pr_protocol != IPPROTO_TCP) {
		fdrop(fp, curthread);
		return (EINVAL);
	}

	ICL_CONN_LOCK(ic);
	if (ic->ic_socket != NULL) {
		ICL_CONN_UNLOCK(ic);
		fdrop(fp, curthread);
		return (EBUSY);
	}
	ic->ic_disconnecting = false;
	ic->ic_socket = so;
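	/*
	 * Dissociate the file from the socket: point the file at badfileops
	 * and clear f_data so that neither the fdrop() below nor a later
	 * close() of the fd by userland will close the socket, which now
	 * belongs to this connection.
	 */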
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	fdrop(fp, curthread);
	ICL_CONN_UNLOCK(ic);

	/* Find the adapter offloading this socket. */
	fa.sc = NULL;
	fa.so = so;
	t4_iterate(find_offload_adapter, &fa);
	if (fa.sc == NULL)
		return (EINVAL);
	icc->sc = fa.sc;
	ci = icc->sc->iscsi_ulp_softc;

	inp = sotoinpcb(so);
	INP_WLOCK(inp);
	tp = intotcpcb(inp);
	if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT))
		error = EBUSY;
	else {
		/*
		 * The socket could not have been "unoffloaded" if we got here.
		 */
		MPASS(tp->t_flags & TF_TOE);
		MPASS(tp->tod != NULL);
		MPASS(tp->t_toe != NULL);
		toep = tp->t_toe;
		MPASS(toep->vi->adapter == icc->sc);
		icc->toep = toep;
		icc->cwt = cxgbei_select_worker_thread(icc);

		/*
		 * We maintain the _send_ DSL in this field just to have a
		 * convenient way to assert that the kernel never sends
		 * oversized PDUs.  This field is otherwise unused in the driver
		 * or the kernel.
		 */
		ic->ic_max_data_segment_length = ci->max_tx_pdu_len -
		    ISCSI_BHS_SIZE;

		icc->ulp_submode = 0;
		if (ic->ic_header_crc32c) {
			icc->ulp_submode |= ULP_CRC_HEADER;
			ic->ic_max_data_segment_length -=
			    ISCSI_HEADER_DIGEST_SIZE;
		}
		if (ic->ic_data_crc32c) {
			icc->ulp_submode |= ULP_CRC_DATA;
			ic->ic_max_data_segment_length -=
			    ISCSI_DATA_DIGEST_SIZE;
		}
		so->so_options |= SO_NO_DDP;
		toep->params.ulp_mode = ULP_MODE_ISCSI;
		toep->ulpcb = icc;

		send_iscsi_flowc_wr(icc->sc, toep, ci->max_tx_pdu_len);
		set_ulp_mode_iscsi(icc->sc, toep, icc->ulp_submode);
		error = 0;
	}
	INP_WUNLOCK(inp);

	if (error == 0) {
		error = icl_cxgbei_setsockopt(ic, so, ci->max_tx_pdu_len,
		    ci->max_rx_pdu_len);
	}

	return (error);
}

void
icl_cxgbei_conn_close(struct icl_conn *ic)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct icl_pdu *ip;
	struct socket *so;
	struct sockbuf *sb;
	struct inpcb *inp;
	struct toepcb *toep = icc->toep;

	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
	ICL_CONN_LOCK_ASSERT_NOT(ic);

	ICL_CONN_LOCK(ic);
	so = ic->ic_socket;
	if (ic->ic_disconnecting || so == NULL) {
		CTR4(KTR_CXGBE, "%s: icc %p (disconnecting = %d), so %p",
		    __func__, icc, ic->ic_disconnecting, so);
		ICL_CONN_UNLOCK(ic);
		return;
	}
	ic->ic_disconnecting = true;

	/* These are unused in this driver right now. */
	MPASS(STAILQ_EMPTY(&ic->ic_to_send));
	MPASS(ic->ic_receive_pdu == NULL);

#ifdef DIAGNOSTIC
	KASSERT(ic->ic_outstanding_pdus == 0,
	    ("destroying session with %d outstanding PDUs",
	     ic->ic_outstanding_pdus));
#endif
	ICL_CONN_UNLOCK(ic);

	CTR3(KTR_CXGBE, "%s: tid %d, icc %p", __func__, toep ? toep->tid : -1,
	    icc);
	inp = sotoinpcb(so);
	sb = &so->so_rcv;
	INP_WLOCK(inp);
	if (toep != NULL) {	/* NULL if connection was never offloaded. */
		toep->ulpcb = NULL;
		mbufq_drain(&toep->ulp_pduq);
		SOCKBUF_LOCK(sb);
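		/*
		 * If a receive worker thread is still busy with this
		 * connection, drop the locks and wait for it to finish
		 * before reclaiming the queued PDUs.
		 */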
		if (icc->rx_flags & RXF_ACTIVE) {
			volatile u_int *p = &icc->rx_flags;

			SOCKBUF_UNLOCK(sb);
			INP_WUNLOCK(inp);

			while (*p & RXF_ACTIVE)
				pause("conclo", 1);

			INP_WLOCK(inp);
			SOCKBUF_LOCK(sb);
		}

		while (!STAILQ_EMPTY(&icc->rcvd_pdus)) {
			ip = STAILQ_FIRST(&icc->rcvd_pdus);
			STAILQ_REMOVE_HEAD(&icc->rcvd_pdus, ip_next);
			icl_cxgbei_conn_pdu_free(ic, ip);
		}
		SOCKBUF_UNLOCK(sb);
	}
	INP_WUNLOCK(inp);

	ICL_CONN_LOCK(ic);
	ic->ic_socket = NULL;
	ICL_CONN_UNLOCK(ic);

	/*
	 * XXXNP: we should send RST instead of FIN when PDUs held in various
	 * queues were purged instead of delivered reliably, but soabort isn't
	 * really general purpose and wouldn't do the right thing here.
	 */
	soclose(so);
}

int
icl_cxgbei_conn_task_setup(struct icl_conn *ic, struct icl_pdu *ip,
    struct ccb_scsiio *csio, uint32_t *ittp, void **arg)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct toepcb *toep = icc->toep;
	struct adapter *sc = icc->sc;
	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
	struct ppod_region *pr = &ci->pr;
	struct ppod_reservation *prsv;
	uint32_t itt;
	int rc = 0;

	/* This is for the offload driver's state.  Must not be set already. */
	MPASS(arg != NULL);
	MPASS(*arg == NULL);

	if ((csio->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_IN ||
	    csio->dxfer_len < ci->ddp_threshold) {
no_ddp:
		/*
		 * No DDP for this I/O.  Allocate an ITT (based on the one
		 * passed in) that cannot be a valid hardware DDP tag in the
		 * iSCSI region.
		 */
		itt = *ittp & M_PPOD_TAG;
		itt = V_PPOD_TAG(itt) | pr->pr_invalid_bit;
		*ittp = htobe32(itt);
		MPASS(*arg == NULL);	/* State is maintained for DDP only. */
		if (rc != 0)
			counter_u64_add(ci->ddp_setup_error, 1);
		return (0);
	}

	/*
	 * Reserve resources for DDP, update the itt that should be used in the
	 * PDU, and save DDP specific state for this I/O in *arg.
	 */

	prsv = uma_zalloc(prsv_zone, M_NOWAIT);
	if (prsv == NULL) {
		rc = ENOMEM;
		goto no_ddp;
	}

	/* XXX add support for all CAM_DATA_ types */
	MPASS((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR);
	rc = t4_alloc_page_pods_for_buf(pr, (vm_offset_t)csio->data_ptr,
	    csio->dxfer_len, prsv);
	if (rc != 0) {
		uma_zfree(prsv_zone, prsv);
		goto no_ddp;
	}

	rc = t4_write_page_pods_for_buf(sc, &toep->ofld_txq->wrq, toep->tid,
	    prsv, (vm_offset_t)csio->data_ptr, csio->dxfer_len);
	if (rc != 0) {
		t4_free_page_pods(prsv);
		uma_zfree(prsv_zone, prsv);
		goto no_ddp;
	}

	*ittp = htobe32(prsv->prsv_tag);
	*arg = prsv;
	counter_u64_add(ci->ddp_setup_ok, 1);
	return (0);
}

void
icl_cxgbei_conn_task_done(struct icl_conn *ic, void *arg)
{

	if (arg != NULL) {
		struct ppod_reservation *prsv = arg;

		t4_free_page_pods(prsv);
		uma_zfree(prsv_zone, prsv);
	}
}

/* XXXNP: PDU should be passed in as parameter, like on the initiator. */
#define io_to_request_pdu(io) ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr)
#define io_to_ppod_reservation(io) ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr)

int
icl_cxgbei_conn_transfer_setup(struct icl_conn *ic, union ctl_io *io,
    uint32_t *tttp, void **arg)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct toepcb *toep = icc->toep;
	struct ctl_scsiio *ctsio = &io->scsiio;
	struct adapter *sc = icc->sc;
	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
	struct ppod_region *pr = &ci->pr;
	struct ppod_reservation *prsv;
	uint32_t ttt;
	int xferlen, rc = 0, alias;

	/* This is for the offload driver's state.  Must not be set already. */
	MPASS(arg != NULL);
	MPASS(*arg == NULL);

	if (ctsio->ext_data_filled == 0) {
		int first_burst;
		struct icl_pdu *ip = io_to_request_pdu(io);
		vm_offset_t buf;
#ifdef INVARIANTS
		struct icl_cxgbei_pdu *icp = ip_to_icp(ip);

		MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
		MPASS(ic == ip->ip_conn);
		MPASS(ip->ip_bhs_mbuf != NULL);
#endif
		first_burst = icl_pdu_data_segment_length(ip);

		/*
		 * Note that ICL calls conn_transfer_setup even if the first
		 * burst had everything and there's nothing left to transfer.
		 */
		MPASS(ctsio->kern_data_len >= first_burst);
		xferlen = ctsio->kern_data_len;
		if (xferlen - first_burst < ci->ddp_threshold) {
no_ddp:
			/*
			 * No DDP for this transfer.  Allocate a TTT (based on
			 * the one passed in) that cannot be a valid hardware
			 * DDP tag in the iSCSI region.
			 */
			ttt = *tttp & M_PPOD_TAG;
			ttt = V_PPOD_TAG(ttt) | pr->pr_invalid_bit;
			*tttp = htobe32(ttt);
			MPASS(io_to_ppod_reservation(io) == NULL);
			if (rc != 0)
				counter_u64_add(ci->ddp_setup_error, 1);
			return (0);
		}

		if (ctsio->kern_sg_entries == 0)
			buf = (vm_offset_t)ctsio->kern_data_ptr;
		else if (ctsio->kern_sg_entries == 1) {
			struct ctl_sg_entry *sgl = (void *)ctsio->kern_data_ptr;

			MPASS(sgl->len == xferlen);
			buf = (vm_offset_t)sgl->addr;
		} else {
			rc = EAGAIN;	/* XXX implement */
			goto no_ddp;
		}

		/*
		 * Reserve resources for DDP, update the ttt that should be used
		 * in the PDU, and save DDP specific state for this I/O.
		 */

		MPASS(io_to_ppod_reservation(io) == NULL);
		prsv = uma_zalloc(prsv_zone, M_NOWAIT);
		if (prsv == NULL) {
			rc = ENOMEM;
			goto no_ddp;
		}

		rc = t4_alloc_page_pods_for_buf(pr, buf, xferlen, prsv);
		if (rc != 0) {
			uma_zfree(prsv_zone, prsv);
			goto no_ddp;
		}

		rc = t4_write_page_pods_for_buf(sc, &toep->ofld_txq->wrq,
		    toep->tid, prsv, buf, xferlen);
		if (rc != 0) {
			t4_free_page_pods(prsv);
			uma_zfree(prsv_zone, prsv);
			goto no_ddp;
		}

		*tttp = htobe32(prsv->prsv_tag);
		io_to_ppod_reservation(io) = prsv;
		*arg = ctsio;
		counter_u64_add(ci->ddp_setup_ok, 1);
		return (0);
	}

	/*
	 * In the middle of an I/O.  A non-NULL page pod reservation indicates
	 * that a DDP buffer is being used for the I/O.
	 */

	prsv = io_to_ppod_reservation(ctsio);
	if (prsv == NULL)
		goto no_ddp;

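	/*
	 * Bump the tag's alias bits so that the TTT for this round of the
	 * transfer differs from the one(s) handed out earlier for the same
	 * page pod reservation.
	 */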
	alias = (prsv->prsv_tag & pr->pr_alias_mask) >> pr->pr_alias_shift;
	alias++;
	prsv->prsv_tag &= ~pr->pr_alias_mask;
	prsv->prsv_tag |= (alias << pr->pr_alias_shift) & pr->pr_alias_mask;

	*tttp = htobe32(prsv->prsv_tag);
	*arg = ctsio;

	return (0);
}

void
icl_cxgbei_conn_transfer_done(struct icl_conn *ic, void *arg)
{
	struct ctl_scsiio *ctsio = arg;

	if (ctsio != NULL && ctsio->kern_data_len == ctsio->ext_data_filled) {
		struct ppod_reservation *prsv;

		prsv = io_to_ppod_reservation(ctsio);
		MPASS(prsv != NULL);

		t4_free_page_pods(prsv);
		uma_zfree(prsv_zone, prsv);
	}
}


static void
cxgbei_limits(struct adapter *sc, void *arg)
{
	struct icl_drv_limits *idl = arg;
	struct cxgbei_data *ci;
	int max_dsl;

	if (begin_synchronized_op(sc, NULL, HOLD_LOCK, "t4lims") != 0)
		return;

	if (uld_active(sc, ULD_ISCSI)) {
		ci = sc->iscsi_ulp_softc;
		MPASS(ci != NULL);

		/*
		 * AHS is not supported by the kernel, so we don't account for
		 * it in our PDU len -> data segment len conversions either.
		 */

		max_dsl = ci->max_rx_pdu_len - ISCSI_BHS_SIZE -
		    ISCSI_HEADER_DIGEST_SIZE - ISCSI_DATA_DIGEST_SIZE;
		if (idl->idl_max_recv_data_segment_length > max_dsl)
			idl->idl_max_recv_data_segment_length = max_dsl;

		max_dsl = ci->max_tx_pdu_len - ISCSI_BHS_SIZE -
		    ISCSI_HEADER_DIGEST_SIZE - ISCSI_DATA_DIGEST_SIZE;
		if (idl->idl_max_send_data_segment_length > max_dsl)
			idl->idl_max_send_data_segment_length = max_dsl;
	}

	end_synchronized_op(sc, LOCK_HELD);
}

static int
icl_cxgbei_limits(struct icl_drv_limits *idl)
{

	/* Maximum allowed by the RFC.  cxgbei_limits will clip them. */
	idl->idl_max_recv_data_segment_length = (1 << 24) - 1;
	idl->idl_max_send_data_segment_length = (1 << 24) - 1;

	/* These are somewhat arbitrary. */
	idl->idl_max_burst_length = 2 * 1024 * 1024;
	idl->idl_first_burst_length = 8192;

	t4_iterate(cxgbei_limits, idl);

	return (0);
}

int
icl_cxgbei_mod_load(void)
{
	int rc;

	/*
	 * Space to track pagepod reservations.
	 */
	prsv_zone = uma_zcreate("Pagepod reservations",
	    sizeof(struct ppod_reservation), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);

	refcount_init(&icl_cxgbei_ncons, 0);

	rc = icl_register("cxgbei", false, -100, icl_cxgbei_limits,
	    icl_cxgbei_new_conn);

	return (rc);
}

int
icl_cxgbei_mod_unload(void)
{

	if (icl_cxgbei_ncons != 0)
		return (EBUSY);

	icl_unregister("cxgbei", false);

	uma_zdestroy(prsv_zone);

	return (0);
}
#endif