Deleted Added
full compact
t4_tom.c (237263) t4_tom.c (239344)
1/*-
2 * Copyright (c) 2012 Chelsio Communications, Inc.
3 * All rights reserved.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2012 Chelsio Communications, Inc.
3 * All rights reserved.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: head/sys/dev/cxgbe/tom/t4_tom.c 237263 2012-06-19 07:34:13Z np $");
29__FBSDID("$FreeBSD: head/sys/dev/cxgbe/tom/t4_tom.c 239344 2012-08-17 00:49:29Z np $");
30
31#include "opt_inet.h"
32
33#include <sys/param.h>
34#include <sys/types.h>
35#include <sys/systm.h>
36#include <sys/kernel.h>
37#include <sys/ktr.h>
38#include <sys/module.h>
39#include <sys/protosw.h>
40#include <sys/domain.h>
41#include <sys/socket.h>
42#include <sys/socketvar.h>
43#include <netinet/in.h>
44#include <netinet/in_pcb.h>
45#include <netinet/ip.h>
46#include <netinet/tcp_var.h>
47#define TCPSTATES
48#include <netinet/tcp_fsm.h>
49#include <netinet/toecore.h>
50
51#ifdef TCP_OFFLOAD
52#include "common/common.h"
53#include "common/t4_msg.h"
54#include "common/t4_regs.h"
55#include "tom/t4_tom_l2t.h"
56#include "tom/t4_tom.h"
57
30
31#include "opt_inet.h"
32
33#include <sys/param.h>
34#include <sys/types.h>
35#include <sys/systm.h>
36#include <sys/kernel.h>
37#include <sys/ktr.h>
38#include <sys/module.h>
39#include <sys/protosw.h>
40#include <sys/domain.h>
41#include <sys/socket.h>
42#include <sys/socketvar.h>
43#include <netinet/in.h>
44#include <netinet/in_pcb.h>
45#include <netinet/ip.h>
46#include <netinet/tcp_var.h>
47#define TCPSTATES
48#include <netinet/tcp_fsm.h>
49#include <netinet/toecore.h>
50
51#ifdef TCP_OFFLOAD
52#include "common/common.h"
53#include "common/t4_msg.h"
54#include "common/t4_regs.h"
55#include "tom/t4_tom_l2t.h"
56#include "tom/t4_tom.h"
57
58static struct protosw ddp_protosw;
59static struct pr_usrreqs ddp_usrreqs;
60
58/* Module ops */
59static int t4_tom_mod_load(void);
60static int t4_tom_mod_unload(void);
61static int t4_tom_modevent(module_t, int, void *);
62
63/* ULD ops and helpers */
64static int t4_tom_activate(struct adapter *);
65static int t4_tom_deactivate(struct adapter *);
66
67static struct uld_info tom_uld_info = {
68 .uld_id = ULD_TOM,
69 .activate = t4_tom_activate,
70 .deactivate = t4_tom_deactivate,
71};
72
73static void queue_tid_release(struct adapter *, int);
74static void release_offload_resources(struct toepcb *);
75static int alloc_tid_tabs(struct tid_info *);
76static void free_tid_tabs(struct tid_info *);
77static void free_tom_data(struct adapter *, struct tom_data *);
78
79struct toepcb *
80alloc_toepcb(struct port_info *pi, int txqid, int rxqid, int flags)
81{
82 struct adapter *sc = pi->adapter;
83 struct toepcb *toep;
84 int tx_credits, txsd_total, len;
85
86 /*
87 * The firmware counts tx work request credits in units of 16 bytes
88 * each. Reserve room for an ABORT_REQ so the driver never has to worry
89 * about tx credits if it wants to abort a connection.
90 */
91 tx_credits = sc->params.ofldq_wr_cred;
92 tx_credits -= howmany(sizeof(struct cpl_abort_req), 16);
93
94 /*
95 * Shortest possible tx work request is a fw_ofld_tx_data_wr + 1 byte
96 * immediate payload, and firmware counts tx work request credits in
97 * units of 16 byte. Calculate the maximum work requests possible.
98 */
99 txsd_total = tx_credits /
100 howmany((sizeof(struct fw_ofld_tx_data_wr) + 1), 16);
101
102 if (txqid < 0)
103 txqid = (arc4random() % pi->nofldtxq) + pi->first_ofld_txq;
104 KASSERT(txqid >= pi->first_ofld_txq &&
105 txqid < pi->first_ofld_txq + pi->nofldtxq,
106 ("%s: txqid %d for port %p (first %d, n %d)", __func__, txqid, pi,
107 pi->first_ofld_txq, pi->nofldtxq));
108
109 if (rxqid < 0)
110 rxqid = (arc4random() % pi->nofldrxq) + pi->first_ofld_rxq;
111 KASSERT(rxqid >= pi->first_ofld_rxq &&
112 rxqid < pi->first_ofld_rxq + pi->nofldrxq,
113 ("%s: rxqid %d for port %p (first %d, n %d)", __func__, rxqid, pi,
114 pi->first_ofld_rxq, pi->nofldrxq));
115
116 len = offsetof(struct toepcb, txsd) +
117 txsd_total * sizeof(struct ofld_tx_sdesc);
118
119 toep = malloc(len, M_CXGBE, M_ZERO | flags);
120 if (toep == NULL)
121 return (NULL);
122
123 toep->td = sc->tom_softc;
124 toep->port = pi;
125 toep->tx_credits = tx_credits;
126 toep->ofld_txq = &sc->sge.ofld_txq[txqid];
127 toep->ofld_rxq = &sc->sge.ofld_rxq[rxqid];
128 toep->ctrlq = &sc->sge.ctrlq[pi->port_id];
129 toep->txsd_total = txsd_total;
130 toep->txsd_avail = txsd_total;
131 toep->txsd_pidx = 0;
132 toep->txsd_cidx = 0;
133
134 return (toep);
135}
136
137void
138free_toepcb(struct toepcb *toep)
139{
140
141 KASSERT(toepcb_flag(toep, TPF_ATTACHED) == 0,
142 ("%s: attached to an inpcb", __func__));
143 KASSERT(toepcb_flag(toep, TPF_CPL_PENDING) == 0,
144 ("%s: CPL pending", __func__));
145
146 free(toep, M_CXGBE);
147}
148
149/*
150 * Set up the socket for TCP offload.
151 */
152void
153offload_socket(struct socket *so, struct toepcb *toep)
154{
155 struct tom_data *td = toep->td;
156 struct inpcb *inp = sotoinpcb(so);
157 struct tcpcb *tp = intotcpcb(inp);
158 struct sockbuf *sb;
159
160 INP_WLOCK_ASSERT(inp);
161
162 /* Update socket */
163 sb = &so->so_snd;
164 SOCKBUF_LOCK(sb);
165 sb->sb_flags |= SB_NOCOALESCE;
166 SOCKBUF_UNLOCK(sb);
167 sb = &so->so_rcv;
168 SOCKBUF_LOCK(sb);
169 sb->sb_flags |= SB_NOCOALESCE;
61/* Module ops */
62static int t4_tom_mod_load(void);
63static int t4_tom_mod_unload(void);
64static int t4_tom_modevent(module_t, int, void *);
65
66/* ULD ops and helpers */
67static int t4_tom_activate(struct adapter *);
68static int t4_tom_deactivate(struct adapter *);
69
70static struct uld_info tom_uld_info = {
71 .uld_id = ULD_TOM,
72 .activate = t4_tom_activate,
73 .deactivate = t4_tom_deactivate,
74};
75
76static void queue_tid_release(struct adapter *, int);
77static void release_offload_resources(struct toepcb *);
78static int alloc_tid_tabs(struct tid_info *);
79static void free_tid_tabs(struct tid_info *);
80static void free_tom_data(struct adapter *, struct tom_data *);
81
82struct toepcb *
83alloc_toepcb(struct port_info *pi, int txqid, int rxqid, int flags)
84{
85 struct adapter *sc = pi->adapter;
86 struct toepcb *toep;
87 int tx_credits, txsd_total, len;
88
89 /*
90 * The firmware counts tx work request credits in units of 16 bytes
91 * each. Reserve room for an ABORT_REQ so the driver never has to worry
92 * about tx credits if it wants to abort a connection.
93 */
94 tx_credits = sc->params.ofldq_wr_cred;
95 tx_credits -= howmany(sizeof(struct cpl_abort_req), 16);
96
97 /*
98 * Shortest possible tx work request is a fw_ofld_tx_data_wr + 1 byte
99 * immediate payload, and firmware counts tx work request credits in
100 * units of 16 byte. Calculate the maximum work requests possible.
101 */
102 txsd_total = tx_credits /
103 howmany((sizeof(struct fw_ofld_tx_data_wr) + 1), 16);
104
105 if (txqid < 0)
106 txqid = (arc4random() % pi->nofldtxq) + pi->first_ofld_txq;
107 KASSERT(txqid >= pi->first_ofld_txq &&
108 txqid < pi->first_ofld_txq + pi->nofldtxq,
109 ("%s: txqid %d for port %p (first %d, n %d)", __func__, txqid, pi,
110 pi->first_ofld_txq, pi->nofldtxq));
111
112 if (rxqid < 0)
113 rxqid = (arc4random() % pi->nofldrxq) + pi->first_ofld_rxq;
114 KASSERT(rxqid >= pi->first_ofld_rxq &&
115 rxqid < pi->first_ofld_rxq + pi->nofldrxq,
116 ("%s: rxqid %d for port %p (first %d, n %d)", __func__, rxqid, pi,
117 pi->first_ofld_rxq, pi->nofldrxq));
118
119 len = offsetof(struct toepcb, txsd) +
120 txsd_total * sizeof(struct ofld_tx_sdesc);
121
122 toep = malloc(len, M_CXGBE, M_ZERO | flags);
123 if (toep == NULL)
124 return (NULL);
125
126 toep->td = sc->tom_softc;
127 toep->port = pi;
128 toep->tx_credits = tx_credits;
129 toep->ofld_txq = &sc->sge.ofld_txq[txqid];
130 toep->ofld_rxq = &sc->sge.ofld_rxq[rxqid];
131 toep->ctrlq = &sc->sge.ctrlq[pi->port_id];
132 toep->txsd_total = txsd_total;
133 toep->txsd_avail = txsd_total;
134 toep->txsd_pidx = 0;
135 toep->txsd_cidx = 0;
136
137 return (toep);
138}
139
140void
141free_toepcb(struct toepcb *toep)
142{
143
144 KASSERT(toepcb_flag(toep, TPF_ATTACHED) == 0,
145 ("%s: attached to an inpcb", __func__));
146 KASSERT(toepcb_flag(toep, TPF_CPL_PENDING) == 0,
147 ("%s: CPL pending", __func__));
148
149 free(toep, M_CXGBE);
150}
151
152/*
153 * Set up the socket for TCP offload.
154 */
155void
156offload_socket(struct socket *so, struct toepcb *toep)
157{
158 struct tom_data *td = toep->td;
159 struct inpcb *inp = sotoinpcb(so);
160 struct tcpcb *tp = intotcpcb(inp);
161 struct sockbuf *sb;
162
163 INP_WLOCK_ASSERT(inp);
164
165 /* Update socket */
166 sb = &so->so_snd;
167 SOCKBUF_LOCK(sb);
168 sb->sb_flags |= SB_NOCOALESCE;
169 SOCKBUF_UNLOCK(sb);
170 sb = &so->so_rcv;
171 SOCKBUF_LOCK(sb);
172 sb->sb_flags |= SB_NOCOALESCE;
173 if (toep->ulp_mode == ULP_MODE_TCPDDP)
174 so->so_proto = &ddp_protosw;
170 SOCKBUF_UNLOCK(sb);
171
172 /* Update TCP PCB */
173 tp->tod = &td->tod;
174 tp->t_toe = toep;
175 tp->t_flags |= TF_TOE;
176
177 /* Install an extra hold on inp */
178 toep->inp = inp;
179 toepcb_set_flag(toep, TPF_ATTACHED);
180 in_pcbref(inp);
181
182 /* Add the TOE PCB to the active list */
183 mtx_lock(&td->toep_list_lock);
184 TAILQ_INSERT_HEAD(&td->toep_list, toep, link);
185 mtx_unlock(&td->toep_list_lock);
186}
187
188/* This is _not_ the normal way to "unoffload" a socket. */
189void
190undo_offload_socket(struct socket *so)
191{
192 struct inpcb *inp = sotoinpcb(so);
193 struct tcpcb *tp = intotcpcb(inp);
194 struct toepcb *toep = tp->t_toe;
195 struct tom_data *td = toep->td;
196 struct sockbuf *sb;
197
198 INP_WLOCK_ASSERT(inp);
199
200 sb = &so->so_snd;
201 SOCKBUF_LOCK(sb);
202 sb->sb_flags &= ~SB_NOCOALESCE;
203 SOCKBUF_UNLOCK(sb);
204 sb = &so->so_rcv;
205 SOCKBUF_LOCK(sb);
206 sb->sb_flags &= ~SB_NOCOALESCE;
207 SOCKBUF_UNLOCK(sb);
208
209 tp->tod = NULL;
210 tp->t_toe = NULL;
211 tp->t_flags &= ~TF_TOE;
212
213 toep->inp = NULL;
214 toepcb_clr_flag(toep, TPF_ATTACHED);
215 if (in_pcbrele_wlocked(inp))
216 panic("%s: inp freed.", __func__);
217
218 mtx_lock(&td->toep_list_lock);
219 TAILQ_REMOVE(&td->toep_list, toep, link);
220 mtx_unlock(&td->toep_list_lock);
221}
222
223static void
224release_offload_resources(struct toepcb *toep)
225{
226 struct tom_data *td = toep->td;
227 struct adapter *sc = td_adapter(td);
228 int tid = toep->tid;
229
230 KASSERT(toepcb_flag(toep, TPF_CPL_PENDING) == 0,
231 ("%s: %p has CPL pending.", __func__, toep));
232 KASSERT(toepcb_flag(toep, TPF_ATTACHED) == 0,
233 ("%s: %p is still attached.", __func__, toep));
234
235 CTR4(KTR_CXGBE, "%s: toep %p (tid %d, l2te %p)",
236 __func__, toep, tid, toep->l2te);
237
175 SOCKBUF_UNLOCK(sb);
176
177 /* Update TCP PCB */
178 tp->tod = &td->tod;
179 tp->t_toe = toep;
180 tp->t_flags |= TF_TOE;
181
182 /* Install an extra hold on inp */
183 toep->inp = inp;
184 toepcb_set_flag(toep, TPF_ATTACHED);
185 in_pcbref(inp);
186
187 /* Add the TOE PCB to the active list */
188 mtx_lock(&td->toep_list_lock);
189 TAILQ_INSERT_HEAD(&td->toep_list, toep, link);
190 mtx_unlock(&td->toep_list_lock);
191}
192
193/* This is _not_ the normal way to "unoffload" a socket. */
194void
195undo_offload_socket(struct socket *so)
196{
197 struct inpcb *inp = sotoinpcb(so);
198 struct tcpcb *tp = intotcpcb(inp);
199 struct toepcb *toep = tp->t_toe;
200 struct tom_data *td = toep->td;
201 struct sockbuf *sb;
202
203 INP_WLOCK_ASSERT(inp);
204
205 sb = &so->so_snd;
206 SOCKBUF_LOCK(sb);
207 sb->sb_flags &= ~SB_NOCOALESCE;
208 SOCKBUF_UNLOCK(sb);
209 sb = &so->so_rcv;
210 SOCKBUF_LOCK(sb);
211 sb->sb_flags &= ~SB_NOCOALESCE;
212 SOCKBUF_UNLOCK(sb);
213
214 tp->tod = NULL;
215 tp->t_toe = NULL;
216 tp->t_flags &= ~TF_TOE;
217
218 toep->inp = NULL;
219 toepcb_clr_flag(toep, TPF_ATTACHED);
220 if (in_pcbrele_wlocked(inp))
221 panic("%s: inp freed.", __func__);
222
223 mtx_lock(&td->toep_list_lock);
224 TAILQ_REMOVE(&td->toep_list, toep, link);
225 mtx_unlock(&td->toep_list_lock);
226}
227
228static void
229release_offload_resources(struct toepcb *toep)
230{
231 struct tom_data *td = toep->td;
232 struct adapter *sc = td_adapter(td);
233 int tid = toep->tid;
234
235 KASSERT(toepcb_flag(toep, TPF_CPL_PENDING) == 0,
236 ("%s: %p has CPL pending.", __func__, toep));
237 KASSERT(toepcb_flag(toep, TPF_ATTACHED) == 0,
238 ("%s: %p is still attached.", __func__, toep));
239
240 CTR4(KTR_CXGBE, "%s: toep %p (tid %d, l2te %p)",
241 __func__, toep, tid, toep->l2te);
242
243 if (toep->ulp_mode == ULP_MODE_TCPDDP)
244 release_ddp_resources(toep);
245
238 if (toep->l2te)
239 t4_l2t_release(toep->l2te);
240
241 if (tid >= 0) {
242 remove_tid(sc, tid);
243 release_tid(sc, tid, toep->ctrlq);
244 }
245
246 mtx_lock(&td->toep_list_lock);
247 TAILQ_REMOVE(&td->toep_list, toep, link);
248 mtx_unlock(&td->toep_list_lock);
249
250 free_toepcb(toep);
251}
252
253/*
254 * The kernel is done with the TCP PCB and this is our opportunity to unhook the
255 * toepcb hanging off of it. If the TOE driver is also done with the toepcb (no
256 * pending CPL) then it is time to release all resources tied to the toepcb.
257 *
258 * Also gets called when an offloaded active open fails and the TOM wants the
259 * kernel to take the TCP PCB back.
260 */
261static void
262t4_pcb_detach(struct toedev *tod __unused, struct tcpcb *tp)
263{
264#if defined(KTR) || defined(INVARIANTS)
265 struct inpcb *inp = tp->t_inpcb;
266#endif
267 struct toepcb *toep = tp->t_toe;
268
269 INP_WLOCK_ASSERT(inp);
270
271 KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
272 KASSERT(toepcb_flag(toep, TPF_ATTACHED),
273 ("%s: not attached", __func__));
274
275#ifdef KTR
276 if (tp->t_state == TCPS_SYN_SENT) {
277 CTR6(KTR_CXGBE, "%s: atid %d, toep %p (0x%x), inp %p (0x%x)",
278 __func__, toep->tid, toep, toep->flags, inp,
279 inp->inp_flags);
280 } else {
281 CTR6(KTR_CXGBE,
282 "t4_pcb_detach: tid %d (%s), toep %p (0x%x), inp %p (0x%x)",
283 toep->tid, tcpstates[tp->t_state], toep, toep->flags, inp,
284 inp->inp_flags);
285 }
286#endif
287
288 tp->t_toe = NULL;
289 tp->t_flags &= ~TF_TOE;
290 toepcb_clr_flag(toep, TPF_ATTACHED);
291
292 if (toepcb_flag(toep, TPF_CPL_PENDING) == 0)
293 release_offload_resources(toep);
294}
295
296/*
297 * The TOE driver will not receive any more CPLs for the tid associated with the
298 * toepcb; release the hold on the inpcb.
299 */
300void
301final_cpl_received(struct toepcb *toep)
302{
303 struct inpcb *inp = toep->inp;
304
305 KASSERT(inp != NULL, ("%s: inp is NULL", __func__));
306 INP_WLOCK_ASSERT(inp);
307 KASSERT(toepcb_flag(toep, TPF_CPL_PENDING),
308 ("%s: CPL not pending already?", __func__));
309
310 CTR6(KTR_CXGBE, "%s: tid %d, toep %p (0x%x), inp %p (0x%x)",
311 __func__, toep->tid, toep, toep->flags, inp, inp->inp_flags);
312
313 toep->inp = NULL;
314 toepcb_clr_flag(toep, TPF_CPL_PENDING);
315
316 if (toepcb_flag(toep, TPF_ATTACHED) == 0)
317 release_offload_resources(toep);
318
319 if (!in_pcbrele_wlocked(inp))
320 INP_WUNLOCK(inp);
321}
322
323void
324insert_tid(struct adapter *sc, int tid, void *ctx)
325{
326 struct tid_info *t = &sc->tids;
327
328 t->tid_tab[tid] = ctx;
329 atomic_add_int(&t->tids_in_use, 1);
330}
331
332void *
333lookup_tid(struct adapter *sc, int tid)
334{
335 struct tid_info *t = &sc->tids;
336
337 return (t->tid_tab[tid]);
338}
339
340void
341update_tid(struct adapter *sc, int tid, void *ctx)
342{
343 struct tid_info *t = &sc->tids;
344
345 t->tid_tab[tid] = ctx;
346}
347
348void
349remove_tid(struct adapter *sc, int tid)
350{
351 struct tid_info *t = &sc->tids;
352
353 t->tid_tab[tid] = NULL;
354 atomic_subtract_int(&t->tids_in_use, 1);
355}
356
357void
358release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq)
359{
360 struct wrqe *wr;
361 struct cpl_tid_release *req;
362
363 wr = alloc_wrqe(sizeof(*req), ctrlq);
364 if (wr == NULL) {
365 queue_tid_release(sc, tid); /* defer */
366 return;
367 }
368 req = wrtod(wr);
369
370 INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
371
372 t4_wrq_tx(sc, wr);
373}
374
375static void
376queue_tid_release(struct adapter *sc, int tid)
377{
378
379 CXGBE_UNIMPLEMENTED("deferred tid release");
380}
381
382/*
383 * What mtu_idx to use, given a 4-tuple and/or an MSS cap
384 */
385int
386find_best_mtu_idx(struct adapter *sc, struct in_conninfo *inc, int pmss)
387{
388 unsigned short *mtus = &sc->params.mtus[0];
389 int i = 0, mss;
390
391 KASSERT(inc != NULL || pmss > 0,
392 ("%s: at least one of inc/pmss must be specified", __func__));
393
394 mss = inc ? tcp_mssopt(inc) : pmss;
395 if (pmss > 0 && mss > pmss)
396 mss = pmss;
397
398 while (i < NMTUS - 1 && mtus[i + 1] <= mss + 40)
399 ++i;
400
401 return (i);
402}
403
404/*
405 * Determine the receive window size for a socket.
406 */
407u_long
408select_rcv_wnd(struct socket *so)
409{
410 unsigned long wnd;
411
412 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
413
414 wnd = sbspace(&so->so_rcv);
415 if (wnd < MIN_RCV_WND)
416 wnd = MIN_RCV_WND;
417
418 return min(wnd, MAX_RCV_WND);
419}
420
421int
422select_rcv_wscale(void)
423{
424 int wscale = 0;
425 unsigned long space = sb_max;
426
427 if (space > MAX_RCV_WND)
428 space = MAX_RCV_WND;
429
430 while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < space)
431 wscale++;
432
433 return (wscale);
434}
435
436extern int always_keepalive;
437#define VIID_SMACIDX(v) (((unsigned int)(v) & 0x7f) << 1)
438
439/*
440 * socket so could be a listening socket too.
441 */
442uint64_t
443calc_opt0(struct socket *so, struct port_info *pi, struct l2t_entry *e,
444 int mtu_idx, int rscale, int rx_credits, int ulp_mode)
445{
446 uint64_t opt0;
447
448 KASSERT(rx_credits <= M_RCV_BUFSIZ,
449 ("%s: rcv_bufsiz too high", __func__));
450
451 opt0 = F_TCAM_BYPASS | V_WND_SCALE(rscale) | V_MSS_IDX(mtu_idx) |
452 V_ULP_MODE(ulp_mode) | V_RCV_BUFSIZ(rx_credits);
453
454 if (so != NULL) {
455 struct inpcb *inp = sotoinpcb(so);
456 struct tcpcb *tp = intotcpcb(inp);
457 int keepalive = always_keepalive ||
458 so_options_get(so) & SO_KEEPALIVE;
459
460 opt0 |= V_NAGLE((tp->t_flags & TF_NODELAY) == 0);
461 opt0 |= V_KEEP_ALIVE(keepalive != 0);
462 }
463
464 if (e != NULL)
465 opt0 |= V_L2T_IDX(e->idx);
466
467 if (pi != NULL) {
468 opt0 |= V_SMAC_SEL(VIID_SMACIDX(pi->viid));
469 opt0 |= V_TX_CHAN(pi->tx_chan);
470 }
471
472 return htobe64(opt0);
473}
474
475#define FILTER_SEL_WIDTH_P_FC (3 + 1)
476#define FILTER_SEL_WIDTH_VIN_P_FC (6 + 7 + FILTER_SEL_WIDTH_P_FC)
477#define FILTER_SEL_WIDTH_TAG_P_FC (3 + FILTER_SEL_WIDTH_VIN_P_FC)
478#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
479#define VLAN_NONE 0xfff
480#define FILTER_SEL_VLAN_NONE 0xffff
481
482uint32_t
483select_ntuple(struct port_info *pi, struct l2t_entry *e, uint32_t filter_mode)
484{
485 uint16_t viid = pi->viid;
486 uint32_t ntuple = 0;
487
488 if (filter_mode == HW_TPL_FR_MT_PR_IV_P_FC) {
489 if (e->vlan == VLAN_NONE)
490 ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
491 else {
492 ntuple |= e->vlan << FILTER_SEL_WIDTH_P_FC;
493 ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
494 }
495 ntuple |= e->lport << S_PORT;
496 ntuple |= IPPROTO_TCP << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
497 } else if (filter_mode == HW_TPL_FR_MT_PR_OV_P_FC) {
498 ntuple |= G_FW_VIID_VIN(viid) << FILTER_SEL_WIDTH_P_FC;
499 ntuple |= G_FW_VIID_PFN(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
500 ntuple |= G_FW_VIID_VIVLD(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
501 ntuple |= e->lport << S_PORT;
502 ntuple |= IPPROTO_TCP << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
503 }
504
505 return (htobe32(ntuple));
506}
507
508static int
509alloc_tid_tabs(struct tid_info *t)
510{
511 size_t size;
512 unsigned int i;
513
514 size = t->ntids * sizeof(*t->tid_tab) +
515 t->natids * sizeof(*t->atid_tab) +
516 t->nstids * sizeof(*t->stid_tab);
517
518 t->tid_tab = malloc(size, M_CXGBE, M_ZERO | M_NOWAIT);
519 if (t->tid_tab == NULL)
520 return (ENOMEM);
521
522 mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF);
523 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
524 t->afree = t->atid_tab;
525 t->atids_in_use = 0;
526 for (i = 1; i < t->natids; i++)
527 t->atid_tab[i - 1].next = &t->atid_tab[i];
528 t->atid_tab[t->natids - 1].next = NULL;
529
530 mtx_init(&t->stid_lock, "stid lock", NULL, MTX_DEF);
531 t->stid_tab = (union serv_entry *)&t->atid_tab[t->natids];
532 t->sfree = t->stid_tab;
533 t->stids_in_use = 0;
534 for (i = 1; i < t->nstids; i++)
535 t->stid_tab[i - 1].next = &t->stid_tab[i];
536 t->stid_tab[t->nstids - 1].next = NULL;
537
538 atomic_store_rel_int(&t->tids_in_use, 0);
539
540 return (0);
541}
542
543static void
544free_tid_tabs(struct tid_info *t)
545{
546 KASSERT(t->tids_in_use == 0,
547 ("%s: %d tids still in use.", __func__, t->tids_in_use));
548 KASSERT(t->atids_in_use == 0,
549 ("%s: %d atids still in use.", __func__, t->atids_in_use));
550 KASSERT(t->stids_in_use == 0,
551 ("%s: %d tids still in use.", __func__, t->stids_in_use));
552
553 free(t->tid_tab, M_CXGBE);
554 t->tid_tab = NULL;
555
556 if (mtx_initialized(&t->atid_lock))
557 mtx_destroy(&t->atid_lock);
558 if (mtx_initialized(&t->stid_lock))
559 mtx_destroy(&t->stid_lock);
560}
561
562static void
563free_tom_data(struct adapter *sc, struct tom_data *td)
564{
565 KASSERT(TAILQ_EMPTY(&td->toep_list),
566 ("%s: TOE PCB list is not empty.", __func__));
567 KASSERT(td->lctx_count == 0,
568 ("%s: lctx hash table is not empty.", __func__));
569
570 t4_uninit_l2t_cpl_handlers(sc);
246 if (toep->l2te)
247 t4_l2t_release(toep->l2te);
248
249 if (tid >= 0) {
250 remove_tid(sc, tid);
251 release_tid(sc, tid, toep->ctrlq);
252 }
253
254 mtx_lock(&td->toep_list_lock);
255 TAILQ_REMOVE(&td->toep_list, toep, link);
256 mtx_unlock(&td->toep_list_lock);
257
258 free_toepcb(toep);
259}
260
261/*
262 * The kernel is done with the TCP PCB and this is our opportunity to unhook the
263 * toepcb hanging off of it. If the TOE driver is also done with the toepcb (no
264 * pending CPL) then it is time to release all resources tied to the toepcb.
265 *
266 * Also gets called when an offloaded active open fails and the TOM wants the
267 * kernel to take the TCP PCB back.
268 */
269static void
270t4_pcb_detach(struct toedev *tod __unused, struct tcpcb *tp)
271{
272#if defined(KTR) || defined(INVARIANTS)
273 struct inpcb *inp = tp->t_inpcb;
274#endif
275 struct toepcb *toep = tp->t_toe;
276
277 INP_WLOCK_ASSERT(inp);
278
279 KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
280 KASSERT(toepcb_flag(toep, TPF_ATTACHED),
281 ("%s: not attached", __func__));
282
283#ifdef KTR
284 if (tp->t_state == TCPS_SYN_SENT) {
285 CTR6(KTR_CXGBE, "%s: atid %d, toep %p (0x%x), inp %p (0x%x)",
286 __func__, toep->tid, toep, toep->flags, inp,
287 inp->inp_flags);
288 } else {
289 CTR6(KTR_CXGBE,
290 "t4_pcb_detach: tid %d (%s), toep %p (0x%x), inp %p (0x%x)",
291 toep->tid, tcpstates[tp->t_state], toep, toep->flags, inp,
292 inp->inp_flags);
293 }
294#endif
295
296 tp->t_toe = NULL;
297 tp->t_flags &= ~TF_TOE;
298 toepcb_clr_flag(toep, TPF_ATTACHED);
299
300 if (toepcb_flag(toep, TPF_CPL_PENDING) == 0)
301 release_offload_resources(toep);
302}
303
304/*
305 * The TOE driver will not receive any more CPLs for the tid associated with the
306 * toepcb; release the hold on the inpcb.
307 */
308void
309final_cpl_received(struct toepcb *toep)
310{
311 struct inpcb *inp = toep->inp;
312
313 KASSERT(inp != NULL, ("%s: inp is NULL", __func__));
314 INP_WLOCK_ASSERT(inp);
315 KASSERT(toepcb_flag(toep, TPF_CPL_PENDING),
316 ("%s: CPL not pending already?", __func__));
317
318 CTR6(KTR_CXGBE, "%s: tid %d, toep %p (0x%x), inp %p (0x%x)",
319 __func__, toep->tid, toep, toep->flags, inp, inp->inp_flags);
320
321 toep->inp = NULL;
322 toepcb_clr_flag(toep, TPF_CPL_PENDING);
323
324 if (toepcb_flag(toep, TPF_ATTACHED) == 0)
325 release_offload_resources(toep);
326
327 if (!in_pcbrele_wlocked(inp))
328 INP_WUNLOCK(inp);
329}
330
331void
332insert_tid(struct adapter *sc, int tid, void *ctx)
333{
334 struct tid_info *t = &sc->tids;
335
336 t->tid_tab[tid] = ctx;
337 atomic_add_int(&t->tids_in_use, 1);
338}
339
340void *
341lookup_tid(struct adapter *sc, int tid)
342{
343 struct tid_info *t = &sc->tids;
344
345 return (t->tid_tab[tid]);
346}
347
348void
349update_tid(struct adapter *sc, int tid, void *ctx)
350{
351 struct tid_info *t = &sc->tids;
352
353 t->tid_tab[tid] = ctx;
354}
355
356void
357remove_tid(struct adapter *sc, int tid)
358{
359 struct tid_info *t = &sc->tids;
360
361 t->tid_tab[tid] = NULL;
362 atomic_subtract_int(&t->tids_in_use, 1);
363}
364
365void
366release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq)
367{
368 struct wrqe *wr;
369 struct cpl_tid_release *req;
370
371 wr = alloc_wrqe(sizeof(*req), ctrlq);
372 if (wr == NULL) {
373 queue_tid_release(sc, tid); /* defer */
374 return;
375 }
376 req = wrtod(wr);
377
378 INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
379
380 t4_wrq_tx(sc, wr);
381}
382
/* Deferred tid release is not implemented yet; this panics if reached. */
static void
queue_tid_release(struct adapter *sc, int tid)
{

	CXGBE_UNIMPLEMENTED("deferred tid release");
}
389
390/*
391 * What mtu_idx to use, given a 4-tuple and/or an MSS cap
392 */
393int
394find_best_mtu_idx(struct adapter *sc, struct in_conninfo *inc, int pmss)
395{
396 unsigned short *mtus = &sc->params.mtus[0];
397 int i = 0, mss;
398
399 KASSERT(inc != NULL || pmss > 0,
400 ("%s: at least one of inc/pmss must be specified", __func__));
401
402 mss = inc ? tcp_mssopt(inc) : pmss;
403 if (pmss > 0 && mss > pmss)
404 mss = pmss;
405
406 while (i < NMTUS - 1 && mtus[i + 1] <= mss + 40)
407 ++i;
408
409 return (i);
410}
411
412/*
413 * Determine the receive window size for a socket.
414 */
415u_long
416select_rcv_wnd(struct socket *so)
417{
418 unsigned long wnd;
419
420 SOCKBUF_LOCK_ASSERT(&so->so_rcv);
421
422 wnd = sbspace(&so->so_rcv);
423 if (wnd < MIN_RCV_WND)
424 wnd = MIN_RCV_WND;
425
426 return min(wnd, MAX_RCV_WND);
427}
428
429int
430select_rcv_wscale(void)
431{
432 int wscale = 0;
433 unsigned long space = sb_max;
434
435 if (space > MAX_RCV_WND)
436 space = MAX_RCV_WND;
437
438 while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < space)
439 wscale++;
440
441 return (wscale);
442}
443
444extern int always_keepalive;
445#define VIID_SMACIDX(v) (((unsigned int)(v) & 0x7f) << 1)
446
447/*
448 * socket so could be a listening socket too.
449 */
450uint64_t
451calc_opt0(struct socket *so, struct port_info *pi, struct l2t_entry *e,
452 int mtu_idx, int rscale, int rx_credits, int ulp_mode)
453{
454 uint64_t opt0;
455
456 KASSERT(rx_credits <= M_RCV_BUFSIZ,
457 ("%s: rcv_bufsiz too high", __func__));
458
459 opt0 = F_TCAM_BYPASS | V_WND_SCALE(rscale) | V_MSS_IDX(mtu_idx) |
460 V_ULP_MODE(ulp_mode) | V_RCV_BUFSIZ(rx_credits);
461
462 if (so != NULL) {
463 struct inpcb *inp = sotoinpcb(so);
464 struct tcpcb *tp = intotcpcb(inp);
465 int keepalive = always_keepalive ||
466 so_options_get(so) & SO_KEEPALIVE;
467
468 opt0 |= V_NAGLE((tp->t_flags & TF_NODELAY) == 0);
469 opt0 |= V_KEEP_ALIVE(keepalive != 0);
470 }
471
472 if (e != NULL)
473 opt0 |= V_L2T_IDX(e->idx);
474
475 if (pi != NULL) {
476 opt0 |= V_SMAC_SEL(VIID_SMACIDX(pi->viid));
477 opt0 |= V_TX_CHAN(pi->tx_chan);
478 }
479
480 return htobe64(opt0);
481}
482
483#define FILTER_SEL_WIDTH_P_FC (3 + 1)
484#define FILTER_SEL_WIDTH_VIN_P_FC (6 + 7 + FILTER_SEL_WIDTH_P_FC)
485#define FILTER_SEL_WIDTH_TAG_P_FC (3 + FILTER_SEL_WIDTH_VIN_P_FC)
486#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
487#define VLAN_NONE 0xfff
488#define FILTER_SEL_VLAN_NONE 0xffff
489
490uint32_t
491select_ntuple(struct port_info *pi, struct l2t_entry *e, uint32_t filter_mode)
492{
493 uint16_t viid = pi->viid;
494 uint32_t ntuple = 0;
495
496 if (filter_mode == HW_TPL_FR_MT_PR_IV_P_FC) {
497 if (e->vlan == VLAN_NONE)
498 ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
499 else {
500 ntuple |= e->vlan << FILTER_SEL_WIDTH_P_FC;
501 ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
502 }
503 ntuple |= e->lport << S_PORT;
504 ntuple |= IPPROTO_TCP << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
505 } else if (filter_mode == HW_TPL_FR_MT_PR_OV_P_FC) {
506 ntuple |= G_FW_VIID_VIN(viid) << FILTER_SEL_WIDTH_P_FC;
507 ntuple |= G_FW_VIID_PFN(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
508 ntuple |= G_FW_VIID_VIVLD(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
509 ntuple |= e->lport << S_PORT;
510 ntuple |= IPPROTO_TCP << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
511 }
512
513 return (htobe32(ntuple));
514}
515
516static int
517alloc_tid_tabs(struct tid_info *t)
518{
519 size_t size;
520 unsigned int i;
521
522 size = t->ntids * sizeof(*t->tid_tab) +
523 t->natids * sizeof(*t->atid_tab) +
524 t->nstids * sizeof(*t->stid_tab);
525
526 t->tid_tab = malloc(size, M_CXGBE, M_ZERO | M_NOWAIT);
527 if (t->tid_tab == NULL)
528 return (ENOMEM);
529
530 mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF);
531 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
532 t->afree = t->atid_tab;
533 t->atids_in_use = 0;
534 for (i = 1; i < t->natids; i++)
535 t->atid_tab[i - 1].next = &t->atid_tab[i];
536 t->atid_tab[t->natids - 1].next = NULL;
537
538 mtx_init(&t->stid_lock, "stid lock", NULL, MTX_DEF);
539 t->stid_tab = (union serv_entry *)&t->atid_tab[t->natids];
540 t->sfree = t->stid_tab;
541 t->stids_in_use = 0;
542 for (i = 1; i < t->nstids; i++)
543 t->stid_tab[i - 1].next = &t->stid_tab[i];
544 t->stid_tab[t->nstids - 1].next = NULL;
545
546 atomic_store_rel_int(&t->tids_in_use, 0);
547
548 return (0);
549}
550
551static void
552free_tid_tabs(struct tid_info *t)
553{
554 KASSERT(t->tids_in_use == 0,
555 ("%s: %d tids still in use.", __func__, t->tids_in_use));
556 KASSERT(t->atids_in_use == 0,
557 ("%s: %d atids still in use.", __func__, t->atids_in_use));
558 KASSERT(t->stids_in_use == 0,
559 ("%s: %d tids still in use.", __func__, t->stids_in_use));
560
561 free(t->tid_tab, M_CXGBE);
562 t->tid_tab = NULL;
563
564 if (mtx_initialized(&t->atid_lock))
565 mtx_destroy(&t->atid_lock);
566 if (mtx_initialized(&t->stid_lock))
567 mtx_destroy(&t->stid_lock);
568}
569
570static void
571free_tom_data(struct adapter *sc, struct tom_data *td)
572{
573 KASSERT(TAILQ_EMPTY(&td->toep_list),
574 ("%s: TOE PCB list is not empty.", __func__));
575 KASSERT(td->lctx_count == 0,
576 ("%s: lctx hash table is not empty.", __func__));
577
578 t4_uninit_l2t_cpl_handlers(sc);
579 t4_uninit_cpl_io_handlers(sc);
580 t4_uninit_ddp(sc, td);
571
572 if (td->listen_mask != 0)
573 hashdestroy(td->listen_hash, M_CXGBE, td->listen_mask);
574
575 if (mtx_initialized(&td->lctx_hash_lock))
576 mtx_destroy(&td->lctx_hash_lock);
577 if (mtx_initialized(&td->toep_list_lock))
578 mtx_destroy(&td->toep_list_lock);
579
580 free_tid_tabs(&sc->tids);
581 free(td, M_CXGBE);
582}
583
584/*
585 * Ground control to Major TOM
586 * Commencing countdown, engines on
587 */
588static int
589t4_tom_activate(struct adapter *sc)
590{
591 struct tom_data *td;
592 struct toedev *tod;
593 int i, rc;
594
595 ADAPTER_LOCK_ASSERT_OWNED(sc); /* for sc->flags */
596
597 /* per-adapter softc for TOM */
598 td = malloc(sizeof(*td), M_CXGBE, M_ZERO | M_NOWAIT);
599 if (td == NULL)
600 return (ENOMEM);
601
602 /* List of TOE PCBs and associated lock */
603 mtx_init(&td->toep_list_lock, "PCB list lock", NULL, MTX_DEF);
604 TAILQ_INIT(&td->toep_list);
605
606 /* Listen context */
607 mtx_init(&td->lctx_hash_lock, "lctx hash lock", NULL, MTX_DEF);
608 td->listen_hash = hashinit_flags(LISTEN_HASH_SIZE, M_CXGBE,
609 &td->listen_mask, HASH_NOWAIT);
610
611 /* TID tables */
612 rc = alloc_tid_tabs(&sc->tids);
613 if (rc != 0)
614 goto done;
615
581
582 if (td->listen_mask != 0)
583 hashdestroy(td->listen_hash, M_CXGBE, td->listen_mask);
584
585 if (mtx_initialized(&td->lctx_hash_lock))
586 mtx_destroy(&td->lctx_hash_lock);
587 if (mtx_initialized(&td->toep_list_lock))
588 mtx_destroy(&td->toep_list_lock);
589
590 free_tid_tabs(&sc->tids);
591 free(td, M_CXGBE);
592}
593
594/*
595 * Ground control to Major TOM
596 * Commencing countdown, engines on
597 */
598static int
599t4_tom_activate(struct adapter *sc)
600{
601 struct tom_data *td;
602 struct toedev *tod;
603 int i, rc;
604
605 ADAPTER_LOCK_ASSERT_OWNED(sc); /* for sc->flags */
606
607 /* per-adapter softc for TOM */
608 td = malloc(sizeof(*td), M_CXGBE, M_ZERO | M_NOWAIT);
609 if (td == NULL)
610 return (ENOMEM);
611
612 /* List of TOE PCBs and associated lock */
613 mtx_init(&td->toep_list_lock, "PCB list lock", NULL, MTX_DEF);
614 TAILQ_INIT(&td->toep_list);
615
616 /* Listen context */
617 mtx_init(&td->lctx_hash_lock, "lctx hash lock", NULL, MTX_DEF);
618 td->listen_hash = hashinit_flags(LISTEN_HASH_SIZE, M_CXGBE,
619 &td->listen_mask, HASH_NOWAIT);
620
621 /* TID tables */
622 rc = alloc_tid_tabs(&sc->tids);
623 if (rc != 0)
624 goto done;
625
626 t4_init_ddp(sc, td);
627
616 /* CPL handlers */
617 t4_init_connect_cpl_handlers(sc);
618 t4_init_l2t_cpl_handlers(sc);
619 t4_init_listen_cpl_handlers(sc);
620 t4_init_cpl_io_handlers(sc);
621
622 /* toedev ops */
623 tod = &td->tod;
624 init_toedev(tod);
625 tod->tod_softc = sc;
626 tod->tod_connect = t4_connect;
627 tod->tod_listen_start = t4_listen_start;
628 tod->tod_listen_stop = t4_listen_stop;
629 tod->tod_rcvd = t4_rcvd;
630 tod->tod_output = t4_tod_output;
631 tod->tod_send_rst = t4_send_rst;
632 tod->tod_send_fin = t4_send_fin;
633 tod->tod_pcb_detach = t4_pcb_detach;
634 tod->tod_l2_update = t4_l2_update;
635 tod->tod_syncache_added = t4_syncache_added;
636 tod->tod_syncache_removed = t4_syncache_removed;
637 tod->tod_syncache_respond = t4_syncache_respond;
638 tod->tod_offload_socket = t4_offload_socket;
639
640 for_each_port(sc, i)
641 TOEDEV(sc->port[i]->ifp) = &td->tod;
642
643 sc->tom_softc = td;
644 sc->flags |= TOM_INIT_DONE;
645 register_toedev(sc->tom_softc);
646
647done:
648 if (rc != 0)
649 free_tom_data(sc, td);
650 return (rc);
651}
652
653static int
654t4_tom_deactivate(struct adapter *sc)
655{
656 int rc = 0;
657 struct tom_data *td = sc->tom_softc;
658
659 ADAPTER_LOCK_ASSERT_OWNED(sc); /* for sc->flags */
660
661 if (td == NULL)
662 return (0); /* XXX. KASSERT? */
663
664 if (sc->offload_map != 0)
665 return (EBUSY); /* at least one port has IFCAP_TOE enabled */
666
667 mtx_lock(&td->toep_list_lock);
668 if (!TAILQ_EMPTY(&td->toep_list))
669 rc = EBUSY;
670 mtx_unlock(&td->toep_list_lock);
671
672 mtx_lock(&td->lctx_hash_lock);
673 if (td->lctx_count > 0)
674 rc = EBUSY;
675 mtx_unlock(&td->lctx_hash_lock);
676
677 if (rc == 0) {
678 unregister_toedev(sc->tom_softc);
679 free_tom_data(sc, td);
680 sc->tom_softc = NULL;
681 sc->flags &= ~TOM_INIT_DONE;
682 }
683
684 return (rc);
685}
686
687static int
688t4_tom_mod_load(void)
689{
690 int rc;
628 /* CPL handlers */
629 t4_init_connect_cpl_handlers(sc);
630 t4_init_l2t_cpl_handlers(sc);
631 t4_init_listen_cpl_handlers(sc);
632 t4_init_cpl_io_handlers(sc);
633
634 /* toedev ops */
635 tod = &td->tod;
636 init_toedev(tod);
637 tod->tod_softc = sc;
638 tod->tod_connect = t4_connect;
639 tod->tod_listen_start = t4_listen_start;
640 tod->tod_listen_stop = t4_listen_stop;
641 tod->tod_rcvd = t4_rcvd;
642 tod->tod_output = t4_tod_output;
643 tod->tod_send_rst = t4_send_rst;
644 tod->tod_send_fin = t4_send_fin;
645 tod->tod_pcb_detach = t4_pcb_detach;
646 tod->tod_l2_update = t4_l2_update;
647 tod->tod_syncache_added = t4_syncache_added;
648 tod->tod_syncache_removed = t4_syncache_removed;
649 tod->tod_syncache_respond = t4_syncache_respond;
650 tod->tod_offload_socket = t4_offload_socket;
651
652 for_each_port(sc, i)
653 TOEDEV(sc->port[i]->ifp) = &td->tod;
654
655 sc->tom_softc = td;
656 sc->flags |= TOM_INIT_DONE;
657 register_toedev(sc->tom_softc);
658
659done:
660 if (rc != 0)
661 free_tom_data(sc, td);
662 return (rc);
663}
664
665static int
666t4_tom_deactivate(struct adapter *sc)
667{
668 int rc = 0;
669 struct tom_data *td = sc->tom_softc;
670
671 ADAPTER_LOCK_ASSERT_OWNED(sc); /* for sc->flags */
672
673 if (td == NULL)
674 return (0); /* XXX. KASSERT? */
675
676 if (sc->offload_map != 0)
677 return (EBUSY); /* at least one port has IFCAP_TOE enabled */
678
679 mtx_lock(&td->toep_list_lock);
680 if (!TAILQ_EMPTY(&td->toep_list))
681 rc = EBUSY;
682 mtx_unlock(&td->toep_list_lock);
683
684 mtx_lock(&td->lctx_hash_lock);
685 if (td->lctx_count > 0)
686 rc = EBUSY;
687 mtx_unlock(&td->lctx_hash_lock);
688
689 if (rc == 0) {
690 unregister_toedev(sc->tom_softc);
691 free_tom_data(sc, td);
692 sc->tom_softc = NULL;
693 sc->flags &= ~TOM_INIT_DONE;
694 }
695
696 return (rc);
697}
698
699static int
700t4_tom_mod_load(void)
701{
702 int rc;
703 struct protosw *tcp_protosw;
691
704
705 tcp_protosw = pffindproto(PF_INET, IPPROTO_TCP, SOCK_STREAM);
706 if (tcp_protosw == NULL)
707 return (ENOPROTOOPT);
708
709 bcopy(tcp_protosw, &ddp_protosw, sizeof(ddp_protosw));
710 bcopy(tcp_protosw->pr_usrreqs, &ddp_usrreqs, sizeof(ddp_usrreqs));
711 ddp_usrreqs.pru_soreceive = t4_soreceive_ddp;
712 ddp_protosw.pr_usrreqs = &ddp_usrreqs;
713
692 rc = t4_register_uld(&tom_uld_info);
693 if (rc != 0)
694 t4_tom_mod_unload();
695
696 return (rc);
697}
698
699static void
700tom_uninit(struct adapter *sc, void *arg __unused)
701{
702 /* Try to free resources (works only if no port has IFCAP_TOE) */
703 ADAPTER_LOCK(sc);
704 if (sc->flags & TOM_INIT_DONE)
705 t4_deactivate_uld(sc, ULD_TOM);
706 ADAPTER_UNLOCK(sc);
707}
708
709static int
710t4_tom_mod_unload(void)
711{
712 t4_iterate(tom_uninit, NULL);
713
714 if (t4_unregister_uld(&tom_uld_info) == EBUSY)
715 return (EBUSY);
716
717 return (0);
718}
719#endif /* TCP_OFFLOAD */
720
721static int
722t4_tom_modevent(module_t mod, int cmd, void *arg)
723{
724 int rc = 0;
725
726#ifdef TCP_OFFLOAD
727 switch (cmd) {
728 case MOD_LOAD:
729 rc = t4_tom_mod_load();
730 break;
731
732 case MOD_UNLOAD:
733 rc = t4_tom_mod_unload();
734 break;
735
736 default:
737 rc = EINVAL;
738 }
739#else
740 printf("t4_tom: compiled without TCP_OFFLOAD support.\n");
741 rc = EOPNOTSUPP;
742#endif
743 return (rc);
744}
745
746static moduledata_t t4_tom_moddata= {
747 "t4_tom",
748 t4_tom_modevent,
749 0
750};
751
752MODULE_VERSION(t4_tom, 1);
753MODULE_DEPEND(t4_tom, toecore, 1, 1, 1);
754MODULE_DEPEND(t4_tom, t4nex, 1, 1, 1);
755DECLARE_MODULE(t4_tom, t4_tom_moddata, SI_SUB_EXEC, SI_ORDER_ANY);
714 rc = t4_register_uld(&tom_uld_info);
715 if (rc != 0)
716 t4_tom_mod_unload();
717
718 return (rc);
719}
720
721static void
722tom_uninit(struct adapter *sc, void *arg __unused)
723{
724 /* Try to free resources (works only if no port has IFCAP_TOE) */
725 ADAPTER_LOCK(sc);
726 if (sc->flags & TOM_INIT_DONE)
727 t4_deactivate_uld(sc, ULD_TOM);
728 ADAPTER_UNLOCK(sc);
729}
730
731static int
732t4_tom_mod_unload(void)
733{
734 t4_iterate(tom_uninit, NULL);
735
736 if (t4_unregister_uld(&tom_uld_info) == EBUSY)
737 return (EBUSY);
738
739 return (0);
740}
741#endif /* TCP_OFFLOAD */
742
743static int
744t4_tom_modevent(module_t mod, int cmd, void *arg)
745{
746 int rc = 0;
747
748#ifdef TCP_OFFLOAD
749 switch (cmd) {
750 case MOD_LOAD:
751 rc = t4_tom_mod_load();
752 break;
753
754 case MOD_UNLOAD:
755 rc = t4_tom_mod_unload();
756 break;
757
758 default:
759 rc = EINVAL;
760 }
761#else
762 printf("t4_tom: compiled without TCP_OFFLOAD support.\n");
763 rc = EOPNOTSUPP;
764#endif
765 return (rc);
766}
767
768static moduledata_t t4_tom_moddata= {
769 "t4_tom",
770 t4_tom_modevent,
771 0
772};
773
774MODULE_VERSION(t4_tom, 1);
775MODULE_DEPEND(t4_tom, toecore, 1, 1, 1);
776MODULE_DEPEND(t4_tom, t4nex, 1, 1, 1);
777DECLARE_MODULE(t4_tom, t4_tom_moddata, SI_SUB_EXEC, SI_ORDER_ANY);