t4_ddp.c revision 346913
1/*- 2 * Copyright (c) 2012 Chelsio Communications, Inc. 3 * All rights reserved. 4 * Written by: Navdeep Parhar <np@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28#include <sys/cdefs.h> 29__FBSDID("$FreeBSD: stable/11/sys/dev/cxgbe/tom/t4_ddp.c 346913 2019-04-29 19:15:50Z np $"); 30 31#include "opt_inet.h" 32 33#include <sys/param.h> 34#include <sys/aio.h> 35#include <sys/file.h> 36#include <sys/systm.h> 37#include <sys/kernel.h> 38#include <sys/ktr.h> 39#include <sys/module.h> 40#include <sys/protosw.h> 41#include <sys/proc.h> 42#include <sys/domain.h> 43#include <sys/socket.h> 44#include <sys/socketvar.h> 45#include <sys/taskqueue.h> 46#include <sys/uio.h> 47#include <netinet/in.h> 48#include <netinet/in_pcb.h> 49#include <netinet/ip.h> 50#include <netinet/tcp_var.h> 51#define TCPSTATES 52#include <netinet/tcp_fsm.h> 53#include <netinet/toecore.h> 54 55#include <vm/vm.h> 56#include <vm/vm_extern.h> 57#include <vm/vm_param.h> 58#include <vm/pmap.h> 59#include <vm/vm_map.h> 60#include <vm/vm_page.h> 61#include <vm/vm_object.h> 62 63#ifdef TCP_OFFLOAD 64#include "common/common.h" 65#include "common/t4_msg.h" 66#include "common/t4_regs.h" 67#include "common/t4_tcb.h" 68#include "tom/t4_tom.h" 69 70VNET_DECLARE(int, tcp_do_autorcvbuf); 71#define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf) 72VNET_DECLARE(int, tcp_autorcvbuf_inc); 73#define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc) 74VNET_DECLARE(int, tcp_autorcvbuf_max); 75#define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max) 76 77/* 78 * Use the 'backend3' field in AIO jobs to store the amount of data 79 * received by the AIO job so far. 80 */ 81#define aio_received backend3 82 83static void aio_ddp_requeue_task(void *context, int pending); 84static void ddp_complete_all(struct toepcb *toep, int error); 85static void t4_aio_cancel_active(struct kaiocb *job); 86static void t4_aio_cancel_queued(struct kaiocb *job); 87 88static TAILQ_HEAD(, pageset) ddp_orphan_pagesets; 89static struct mtx ddp_orphan_pagesets_lock; 90static struct task ddp_orphan_task; 91 92#define MAX_DDP_BUFFER_SIZE (M_TCB_RX_DDP_BUF0_LEN) 93 94/* 95 * A page set holds information about a buffer used for DDP. 
The page 96 * set holds resources such as the VM pages backing the buffer (either 97 * held or wired) and the page pods associated with the buffer. 98 * Recently used page sets are cached to allow for efficient reuse of 99 * buffers (avoiding the need to re-fault in pages, hold them, etc.). 100 * Note that cached page sets keep the backing pages wired. The 101 * number of wired pages is capped by only allowing for two wired 102 * pagesets per connection. This is not a perfect cap, but is a 103 * trade-off for performance. 104 * 105 * If an application ping-pongs two buffers for a connection via 106 * aio_read(2) then those buffers should remain wired and expensive VM 107 * fault lookups should be avoided after each buffer has been used 108 * once. If an application uses more than two buffers then this will 109 * fall back to doing expensive VM fault lookups for each operation. 110 */ 111static void 112free_pageset(struct tom_data *td, struct pageset *ps) 113{ 114 vm_page_t p; 115 int i; 116 117 if (ps->prsv.prsv_nppods > 0) 118 t4_free_page_pods(&ps->prsv); 119 120 if (ps->flags & PS_WIRED) { 121 for (i = 0; i < ps->npages; i++) { 122 p = ps->pages[i]; 123 vm_page_lock(p); 124 vm_page_unwire(p, PQ_INACTIVE); 125 vm_page_unlock(p); 126 } 127 } else 128 vm_page_unhold_pages(ps->pages, ps->npages); 129 mtx_lock(&ddp_orphan_pagesets_lock); 130 TAILQ_INSERT_TAIL(&ddp_orphan_pagesets, ps, link); 131 taskqueue_enqueue(taskqueue_thread, &ddp_orphan_task); 132 mtx_unlock(&ddp_orphan_pagesets_lock); 133} 134 135static void 136ddp_free_orphan_pagesets(void *context, int pending) 137{ 138 struct pageset *ps; 139 140 mtx_lock(&ddp_orphan_pagesets_lock); 141 while (!TAILQ_EMPTY(&ddp_orphan_pagesets)) { 142 ps = TAILQ_FIRST(&ddp_orphan_pagesets); 143 TAILQ_REMOVE(&ddp_orphan_pagesets, ps, link); 144 mtx_unlock(&ddp_orphan_pagesets_lock); 145 if (ps->vm) 146 vmspace_free(ps->vm); 147 free(ps, M_CXGBE); 148 mtx_lock(&ddp_orphan_pagesets_lock); 149 } 150 mtx_unlock(&ddp_orphan_pagesets_lock); 151} 152 153static void 154recycle_pageset(struct toepcb *toep, struct pageset *ps) 155{ 156 157 DDP_ASSERT_LOCKED(toep); 158 if (!(toep->ddp.flags & DDP_DEAD) && ps->flags & PS_WIRED) { 159 KASSERT(toep->ddp.cached_count + toep->ddp.active_count < 160 nitems(toep->ddp.db), ("too many wired pagesets")); 161 TAILQ_INSERT_HEAD(&toep->ddp.cached_pagesets, ps, link); 162 toep->ddp.cached_count++; 163 } else 164 free_pageset(toep->td, ps); 165} 166 167static void 168ddp_complete_one(struct kaiocb *job, int error) 169{ 170 long copied; 171 172 /* 173 * If this job had copied data out of the socket buffer before 174 * it was cancelled, report it as a short read rather than an 175 * error. 176 */ 177 copied = job->aio_received; 178 if (copied != 0 || error == 0) 179 aio_complete(job, copied, 0); 180 else 181 aio_complete(job, -1, error); 182} 183 184static void 185free_ddp_buffer(struct tom_data *td, struct ddp_buffer *db) 186{ 187 188 if (db->job) { 189 /* 190 * XXX: If we are un-offloading the socket then we 191 * should requeue these on the socket somehow. If we 192 * got a FIN from the remote end, then this completes 193 * any remaining requests with an EOF read. 
194 */ 195 if (!aio_clear_cancel_function(db->job)) 196 ddp_complete_one(db->job, 0); 197 } 198 199 if (db->ps) 200 free_pageset(td, db->ps); 201} 202 203void 204ddp_init_toep(struct toepcb *toep) 205{ 206 207 TAILQ_INIT(&toep->ddp.aiojobq); 208 TASK_INIT(&toep->ddp.requeue_task, 0, aio_ddp_requeue_task, toep); 209 toep->ddp.flags = DDP_OK; 210 toep->ddp.active_id = -1; 211 mtx_init(&toep->ddp.lock, "t4 ddp", NULL, MTX_DEF); 212} 213 214void 215ddp_uninit_toep(struct toepcb *toep) 216{ 217 218 mtx_destroy(&toep->ddp.lock); 219} 220 221void 222release_ddp_resources(struct toepcb *toep) 223{ 224 struct pageset *ps; 225 int i; 226 227 DDP_LOCK(toep); 228 toep->ddp.flags |= DDP_DEAD; 229 for (i = 0; i < nitems(toep->ddp.db); i++) { 230 free_ddp_buffer(toep->td, &toep->ddp.db[i]); 231 } 232 while ((ps = TAILQ_FIRST(&toep->ddp.cached_pagesets)) != NULL) { 233 TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link); 234 free_pageset(toep->td, ps); 235 } 236 ddp_complete_all(toep, 0); 237 DDP_UNLOCK(toep); 238} 239 240#ifdef INVARIANTS 241void 242ddp_assert_empty(struct toepcb *toep) 243{ 244 int i; 245 246 MPASS(!(toep->ddp.flags & DDP_TASK_ACTIVE)); 247 for (i = 0; i < nitems(toep->ddp.db); i++) { 248 MPASS(toep->ddp.db[i].job == NULL); 249 MPASS(toep->ddp.db[i].ps == NULL); 250 } 251 MPASS(TAILQ_EMPTY(&toep->ddp.cached_pagesets)); 252 MPASS(TAILQ_EMPTY(&toep->ddp.aiojobq)); 253} 254#endif 255 256static void 257complete_ddp_buffer(struct toepcb *toep, struct ddp_buffer *db, 258 unsigned int db_idx) 259{ 260 unsigned int db_flag; 261 262 toep->ddp.active_count--; 263 if (toep->ddp.active_id == db_idx) { 264 if (toep->ddp.active_count == 0) { 265 KASSERT(toep->ddp.db[db_idx ^ 1].job == NULL, 266 ("%s: active_count mismatch", __func__)); 267 toep->ddp.active_id = -1; 268 } else 269 toep->ddp.active_id ^= 1; 270#ifdef VERBOSE_TRACES 271 CTR2(KTR_CXGBE, "%s: ddp_active_id = %d", __func__, 272 toep->ddp.active_id); 273#endif 274 } else { 275 KASSERT(toep->ddp.active_count != 0 && 276 toep->ddp.active_id != -1, 277 ("%s: active count mismatch", __func__)); 278 } 279 280 db->cancel_pending = 0; 281 db->job = NULL; 282 recycle_pageset(toep, db->ps); 283 db->ps = NULL; 284 285 db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE; 286 KASSERT(toep->ddp.flags & db_flag, 287 ("%s: DDP buffer not active. toep %p, ddp_flags 0x%x", 288 __func__, toep, toep->ddp.flags)); 289 toep->ddp.flags &= ~db_flag; 290} 291 292/* XXX: handle_ddp_data code duplication */ 293void 294insert_ddp_data(struct toepcb *toep, uint32_t n) 295{ 296 struct inpcb *inp = toep->inp; 297 struct tcpcb *tp = intotcpcb(inp); 298 struct ddp_buffer *db; 299 struct kaiocb *job; 300 size_t placed; 301 long copied; 302 unsigned int db_flag, db_idx; 303 304 INP_WLOCK_ASSERT(inp); 305 DDP_ASSERT_LOCKED(toep); 306 307 tp->rcv_nxt += n; 308#ifndef USE_DDP_RX_FLOW_CONTROL 309 KASSERT(tp->rcv_wnd >= n, ("%s: negative window size", __func__)); 310 tp->rcv_wnd -= n; 311#endif 312#ifndef USE_DDP_RX_FLOW_CONTROL 313 toep->rx_credits += n; 314#endif 315 CTR2(KTR_CXGBE, "%s: placed %u bytes before falling out of DDP", 316 __func__, n); 317 while (toep->ddp.active_count > 0) { 318 MPASS(toep->ddp.active_id != -1); 319 db_idx = toep->ddp.active_id; 320 db_flag = db_idx == 1 ?
DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE; 321 MPASS((toep->ddp.flags & db_flag) != 0); 322 db = &toep->ddp.db[db_idx]; 323 job = db->job; 324 copied = job->aio_received; 325 placed = n; 326 if (placed > job->uaiocb.aio_nbytes - copied) 327 placed = job->uaiocb.aio_nbytes - copied; 328 if (placed > 0) 329 job->msgrcv = 1; 330 if (!aio_clear_cancel_function(job)) { 331 /* 332 * Update the copied length for when 333 * t4_aio_cancel_active() completes this 334 * request. 335 */ 336 job->aio_received += placed; 337 } else if (copied + placed != 0) { 338 CTR4(KTR_CXGBE, 339 "%s: completing %p (copied %ld, placed %lu)", 340 __func__, job, copied, placed); 341 /* XXX: This always completes if there is some data. */ 342 aio_complete(job, copied + placed, 0); 343 } else if (aio_set_cancel_function(job, t4_aio_cancel_queued)) { 344 TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list); 345 toep->ddp.waiting_count++; 346 } else 347 aio_cancel(job); 348 n -= placed; 349 complete_ddp_buffer(toep, db, db_idx); 350 } 351 352 MPASS(n == 0); 353} 354 355/* SET_TCB_FIELD sent as a ULP command looks like this */ 356#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \ 357 sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core)) 358 359/* RX_DATA_ACK sent as a ULP command looks like this */ 360#define LEN__RX_DATA_ACK_ULP (sizeof(struct ulp_txpkt) + \ 361 sizeof(struct ulptx_idata) + sizeof(struct cpl_rx_data_ack_core)) 362 363static inline void * 364mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep, 365 uint64_t word, uint64_t mask, uint64_t val) 366{ 367 struct ulptx_idata *ulpsc; 368 struct cpl_set_tcb_field_core *req; 369 370 ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0)); 371 ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16)); 372 373 ulpsc = (struct ulptx_idata *)(ulpmc + 1); 374 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); 375 ulpsc->len = htobe32(sizeof(*req)); 376 377 req = (struct cpl_set_tcb_field_core *)(ulpsc + 1); 378 OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, toep->tid)); 379 req->reply_ctrl = htobe16(V_NO_REPLY(1) | 380 V_QUEUENO(toep->ofld_rxq->iq.abs_id)); 381 req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0)); 382 req->mask = htobe64(mask); 383 req->val = htobe64(val); 384 385 ulpsc = (struct ulptx_idata *)(req + 1); 386 if (LEN__SET_TCB_FIELD_ULP % 16) { 387 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); 388 ulpsc->len = htobe32(0); 389 return (ulpsc + 1); 390 } 391 return (ulpsc); 392} 393 394static inline void * 395mk_rx_data_ack_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep) 396{ 397 struct ulptx_idata *ulpsc; 398 struct cpl_rx_data_ack_core *req; 399 400 ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0)); 401 ulpmc->len = htobe32(howmany(LEN__RX_DATA_ACK_ULP, 16)); 402 403 ulpsc = (struct ulptx_idata *)(ulpmc + 1); 404 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); 405 ulpsc->len = htobe32(sizeof(*req)); 406 407 req = (struct cpl_rx_data_ack_core *)(ulpsc + 1); 408 OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_RX_DATA_ACK, toep->tid)); 409 req->credit_dack = htobe32(F_RX_MODULATE_RX); 410 411 ulpsc = (struct ulptx_idata *)(req + 1); 412 if (LEN__RX_DATA_ACK_ULP % 16) { 413 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); 414 ulpsc->len = htobe32(0); 415 return (ulpsc + 1); 416 } 417 return (ulpsc); 418} 419 420static struct wrqe * 421mk_update_tcb_for_ddp(struct adapter *sc, struct toepcb *toep, int db_idx, 422 struct pageset *ps, int offset, uint64_t 
ddp_flags, uint64_t ddp_flags_mask) 423{ 424 struct wrqe *wr; 425 struct work_request_hdr *wrh; 426 struct ulp_txpkt *ulpmc; 427 int len; 428 429 KASSERT(db_idx == 0 || db_idx == 1, 430 ("%s: bad DDP buffer index %d", __func__, db_idx)); 431 432 /* 433 * We'll send a compound work request that has 3 SET_TCB_FIELDs and an 434 * RX_DATA_ACK (with RX_MODULATE to speed up delivery). 435 * 436 * The work request header is 16B and always ends at a 16B boundary. 437 * The ULPTX master commands that follow must all end at 16B boundaries 438 * too so we round up the size to 16. 439 */ 440 len = sizeof(*wrh) + 3 * roundup2(LEN__SET_TCB_FIELD_ULP, 16) + 441 roundup2(LEN__RX_DATA_ACK_ULP, 16); 442 443 wr = alloc_wrqe(len, toep->ctrlq); 444 if (wr == NULL) 445 return (NULL); 446 wrh = wrtod(wr); 447 INIT_ULPTX_WRH(wrh, len, 1, 0); /* atomic */ 448 ulpmc = (struct ulp_txpkt *)(wrh + 1); 449 450 /* Write the buffer's tag */ 451 ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, 452 W_TCB_RX_DDP_BUF0_TAG + db_idx, 453 V_TCB_RX_DDP_BUF0_TAG(M_TCB_RX_DDP_BUF0_TAG), 454 V_TCB_RX_DDP_BUF0_TAG(ps->prsv.prsv_tag)); 455 456 /* Update the current offset in the DDP buffer and its total length */ 457 if (db_idx == 0) 458 ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, 459 W_TCB_RX_DDP_BUF0_OFFSET, 460 V_TCB_RX_DDP_BUF0_OFFSET(M_TCB_RX_DDP_BUF0_OFFSET) | 461 V_TCB_RX_DDP_BUF0_LEN(M_TCB_RX_DDP_BUF0_LEN), 462 V_TCB_RX_DDP_BUF0_OFFSET(offset) | 463 V_TCB_RX_DDP_BUF0_LEN(ps->len)); 464 else 465 ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, 466 W_TCB_RX_DDP_BUF1_OFFSET, 467 V_TCB_RX_DDP_BUF1_OFFSET(M_TCB_RX_DDP_BUF1_OFFSET) | 468 V_TCB_RX_DDP_BUF1_LEN((u64)M_TCB_RX_DDP_BUF1_LEN << 32), 469 V_TCB_RX_DDP_BUF1_OFFSET(offset) | 470 V_TCB_RX_DDP_BUF1_LEN((u64)ps->len << 32)); 471 472 /* Update DDP flags */ 473 ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_RX_DDP_FLAGS, 474 ddp_flags_mask, ddp_flags); 475 476 /* Gratuitous RX_DATA_ACK with RX_MODULATE set to speed up delivery. */ 477 ulpmc = mk_rx_data_ack_ulp(ulpmc, toep); 478 479 return (wr); 480} 481 482static int 483handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len) 484{ 485 uint32_t report = be32toh(ddp_report); 486 unsigned int db_idx; 487 struct inpcb *inp = toep->inp; 488 struct ddp_buffer *db; 489 struct tcpcb *tp; 490 struct socket *so; 491 struct sockbuf *sb; 492 struct kaiocb *job; 493 long copied; 494 495 db_idx = report & F_DDP_BUF_IDX ? 1 : 0; 496 497 if (__predict_false(!(report & F_DDP_INV))) 498 CXGBE_UNIMPLEMENTED("DDP buffer still valid"); 499 500 INP_WLOCK(inp); 501 so = inp_inpcbtosocket(inp); 502 sb = &so->so_rcv; 503 DDP_LOCK(toep); 504 505 KASSERT(toep->ddp.active_id == db_idx, 506 ("completed DDP buffer (%d) != active_id (%d) for tid %d", db_idx, 507 toep->ddp.active_id, toep->tid)); 508 db = &toep->ddp.db[db_idx]; 509 job = db->job; 510 511 if (__predict_false(inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT))) { 512 /* 513 * This can happen due to an administrative tcpdrop(8). 514 * Just fail the request with ECONNRESET. 515 */ 516 CTR5(KTR_CXGBE, "%s: tid %u, seq 0x%x, len %d, inp_flags 0x%x", 517 __func__, toep->tid, be32toh(rcv_nxt), len, inp->inp_flags); 518 if (aio_clear_cancel_function(job)) 519 ddp_complete_one(job, ECONNRESET); 520 goto completed; 521 } 522 523 tp = intotcpcb(inp); 524 525 /* 526 * For RX_DDP_COMPLETE, len will be zero and rcv_nxt is the 527 * sequence number of the next byte to receive. The length of 528 * the data received for this message must be computed by 529 * comparing the new and old values of rcv_nxt. 
530 * 531 * For RX_DATA_DDP, len might be non-zero, but it is only the 532 * length of the most recent DMA. It does not include the 533 * total length of the data received since the previous update 534 * for this DDP buffer. rcv_nxt is the sequence number of the 535 * first received byte from the most recent DMA. 536 */ 537 len += be32toh(rcv_nxt) - tp->rcv_nxt; 538 tp->rcv_nxt += len; 539 tp->t_rcvtime = ticks; 540#ifndef USE_DDP_RX_FLOW_CONTROL 541 KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__)); 542 tp->rcv_wnd -= len; 543#endif 544#ifdef VERBOSE_TRACES 545 CTR4(KTR_CXGBE, "%s: DDP[%d] placed %d bytes (%#x)", __func__, db_idx, 546 len, report); 547#endif 548 549 /* receive buffer autosize */ 550 MPASS(toep->vnet == so->so_vnet); 551 CURVNET_SET(toep->vnet); 552 SOCKBUF_LOCK(sb); 553 if (sb->sb_flags & SB_AUTOSIZE && 554 V_tcp_do_autorcvbuf && 555 sb->sb_hiwat < V_tcp_autorcvbuf_max && 556 len > (sbspace(sb) / 8 * 7)) { 557 unsigned int hiwat = sb->sb_hiwat; 558 unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc, 559 V_tcp_autorcvbuf_max); 560 561 if (!sbreserve_locked(sb, newsize, so, NULL)) 562 sb->sb_flags &= ~SB_AUTOSIZE; 563 else 564 toep->rx_credits += newsize - hiwat; 565 } 566 SOCKBUF_UNLOCK(sb); 567 CURVNET_RESTORE(); 568 569#ifndef USE_DDP_RX_FLOW_CONTROL 570 toep->rx_credits += len; 571#endif 572 573 job->msgrcv = 1; 574 if (db->cancel_pending) { 575 /* 576 * Update the job's length but defer completion to the 577 * TCB_RPL callback. 578 */ 579 job->aio_received += len; 580 goto out; 581 } else if (!aio_clear_cancel_function(job)) { 582 /* 583 * Update the copied length for when 584 * t4_aio_cancel_active() completes this request. 585 */ 586 job->aio_received += len; 587 } else { 588 copied = job->aio_received; 589#ifdef VERBOSE_TRACES 590 CTR4(KTR_CXGBE, "%s: completing %p (copied %ld, placed %d)", 591 __func__, job, copied, len); 592#endif 593 aio_complete(job, copied + len, 0); 594 t4_rcvd(&toep->td->tod, tp); 595 } 596 597completed: 598 complete_ddp_buffer(toep, db, db_idx); 599 if (toep->ddp.waiting_count > 0) 600 ddp_queue_toep(toep); 601out: 602 DDP_UNLOCK(toep); 603 INP_WUNLOCK(inp); 604 605 return (0); 606} 607 608void 609handle_ddp_indicate(struct toepcb *toep) 610{ 611 612 DDP_ASSERT_LOCKED(toep); 613 MPASS(toep->ddp.active_count == 0); 614 MPASS((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0); 615 if (toep->ddp.waiting_count == 0) { 616 /* 617 * The pending requests that triggered the request for 618 * an indicate were cancelled. Those cancels should have 619 * already disabled DDP. Just ignore this as the data is 620 * going into the socket buffer anyway. 
621 */ 622 return; 623 } 624 CTR3(KTR_CXGBE, "%s: tid %d indicated (%d waiting)", __func__, 625 toep->tid, toep->ddp.waiting_count); 626 ddp_queue_toep(toep); 627} 628 629enum { 630 DDP_BUF0_INVALIDATED = 0x2, 631 DDP_BUF1_INVALIDATED 632}; 633 634CTASSERT(DDP_BUF0_INVALIDATED == CPL_COOKIE_DDP0); 635 636static int 637do_ddp_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 638{ 639 struct adapter *sc = iq->adapter; 640 const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1); 641 unsigned int tid = GET_TID(cpl); 642 unsigned int db_idx; 643 struct toepcb *toep; 644 struct inpcb *inp; 645 struct ddp_buffer *db; 646 struct kaiocb *job; 647 long copied; 648 649 if (cpl->status != CPL_ERR_NONE) 650 panic("XXX: tcp_rpl failed: %d", cpl->status); 651 652 toep = lookup_tid(sc, tid); 653 inp = toep->inp; 654 switch (cpl->cookie) { 655 case V_WORD(W_TCB_RX_DDP_FLAGS) | V_COOKIE(DDP_BUF0_INVALIDATED): 656 case V_WORD(W_TCB_RX_DDP_FLAGS) | V_COOKIE(DDP_BUF1_INVALIDATED): 657 /* 658 * XXX: This duplicates a lot of code with handle_ddp_data(). 659 */ 660 db_idx = G_COOKIE(cpl->cookie) - DDP_BUF0_INVALIDATED; 661 MPASS(db_idx < nitems(toep->ddp.db)); 662 INP_WLOCK(inp); 663 DDP_LOCK(toep); 664 db = &toep->ddp.db[db_idx]; 665 666 /* 667 * handle_ddp_data() should leave the job around until 668 * this callback runs once a cancel is pending. 669 */ 670 MPASS(db != NULL); 671 MPASS(db->job != NULL); 672 MPASS(db->cancel_pending); 673 674 /* 675 * XXX: It's not clear what happens if there is data 676 * placed when the buffer is invalidated. I suspect we 677 * need to read the TCB to see how much data was placed. 678 * 679 * For now this just pretends like nothing was placed. 680 * 681 * XXX: Note that if we did check the PCB we would need to 682 * also take care of updating the tp, etc. 683 */ 684 job = db->job; 685 copied = job->aio_received; 686 if (copied == 0) { 687 CTR2(KTR_CXGBE, "%s: cancelling %p", __func__, job); 688 aio_cancel(job); 689 } else { 690 CTR3(KTR_CXGBE, "%s: completing %p (copied %ld)", 691 __func__, job, copied); 692 aio_complete(job, copied, 0); 693 t4_rcvd(&toep->td->tod, intotcpcb(inp)); 694 } 695 696 complete_ddp_buffer(toep, db, db_idx); 697 if (toep->ddp.waiting_count > 0) 698 ddp_queue_toep(toep); 699 DDP_UNLOCK(toep); 700 INP_WUNLOCK(inp); 701 break; 702 default: 703 panic("XXX: unknown tcb_rpl offset %#x, cookie %#x", 704 G_WORD(cpl->cookie), G_COOKIE(cpl->cookie)); 705 } 706 707 return (0); 708} 709 710void 711handle_ddp_close(struct toepcb *toep, struct tcpcb *tp, __be32 rcv_nxt) 712{ 713 struct ddp_buffer *db; 714 struct kaiocb *job; 715 long copied; 716 unsigned int db_flag, db_idx; 717 int len, placed; 718 719 INP_WLOCK_ASSERT(toep->inp); 720 DDP_ASSERT_LOCKED(toep); 721 len = be32toh(rcv_nxt) - tp->rcv_nxt; 722 723 tp->rcv_nxt += len; 724#ifndef USE_DDP_RX_FLOW_CONTROL 725 toep->rx_credits += len; 726#endif 727 728 while (toep->ddp.active_count > 0) { 729 MPASS(toep->ddp.active_id != -1); 730 db_idx = toep->ddp.active_id; 731 db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE; 732 MPASS((toep->ddp.flags & db_flag) != 0); 733 db = &toep->ddp.db[db_idx]; 734 job = db->job; 735 copied = job->aio_received; 736 placed = len; 737 if (placed > job->uaiocb.aio_nbytes - copied) 738 placed = job->uaiocb.aio_nbytes - copied; 739 if (placed > 0) 740 job->msgrcv = 1; 741 if (!aio_clear_cancel_function(job)) { 742 /* 743 * Update the copied length for when 744 * t4_aio_cancel_active() completes this 745 * request. 
746 */ 747 job->aio_received += placed; 748 } else { 749 CTR4(KTR_CXGBE, "%s: tid %d completed buf %d len %d", 750 __func__, toep->tid, db_idx, placed); 751 aio_complete(job, copied + placed, 0); 752 } 753 len -= placed; 754 complete_ddp_buffer(toep, db, db_idx); 755 } 756 757 MPASS(len == 0); 758 ddp_complete_all(toep, 0); 759} 760 761#define DDP_ERR (F_DDP_PPOD_MISMATCH | F_DDP_LLIMIT_ERR | F_DDP_ULIMIT_ERR |\ 762 F_DDP_PPOD_PARITY_ERR | F_DDP_PADDING_ERR | F_DDP_OFFSET_ERR |\ 763 F_DDP_INVALID_TAG | F_DDP_COLOR_ERR | F_DDP_TID_MISMATCH |\ 764 F_DDP_INVALID_PPOD | F_DDP_HDRCRC_ERR | F_DDP_DATACRC_ERR) 765 766extern cpl_handler_t t4_cpl_handler[]; 767 768static int 769do_rx_data_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 770{ 771 struct adapter *sc = iq->adapter; 772 const struct cpl_rx_data_ddp *cpl = (const void *)(rss + 1); 773 unsigned int tid = GET_TID(cpl); 774 uint32_t vld; 775 struct toepcb *toep = lookup_tid(sc, tid); 776 777 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 778 KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__)); 779 KASSERT(!(toep->flags & TPF_SYNQE), 780 ("%s: toep %p claims to be a synq entry", __func__, toep)); 781 782 vld = be32toh(cpl->ddpvld); 783 if (__predict_false(vld & DDP_ERR)) { 784 panic("%s: DDP error 0x%x (tid %d, toep %p)", 785 __func__, vld, tid, toep); 786 } 787 788 if (toep->ulp_mode == ULP_MODE_ISCSI) { 789 t4_cpl_handler[CPL_RX_ISCSI_DDP](iq, rss, m); 790 return (0); 791 } 792 793 handle_ddp_data(toep, cpl->u.ddp_report, cpl->seq, be16toh(cpl->len)); 794 795 return (0); 796} 797 798static int 799do_rx_ddp_complete(struct sge_iq *iq, const struct rss_header *rss, 800 struct mbuf *m) 801{ 802 struct adapter *sc = iq->adapter; 803 const struct cpl_rx_ddp_complete *cpl = (const void *)(rss + 1); 804 unsigned int tid = GET_TID(cpl); 805 struct toepcb *toep = lookup_tid(sc, tid); 806 807 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 808 KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__)); 809 KASSERT(!(toep->flags & TPF_SYNQE), 810 ("%s: toep %p claims to be a synq entry", __func__, toep)); 811 812 handle_ddp_data(toep, cpl->ddp_report, cpl->rcv_nxt, 0); 813 814 return (0); 815} 816 817static void 818enable_ddp(struct adapter *sc, struct toepcb *toep) 819{ 820 821 KASSERT((toep->ddp.flags & (DDP_ON | DDP_OK | DDP_SC_REQ)) == DDP_OK, 822 ("%s: toep %p has bad ddp_flags 0x%x", 823 __func__, toep, toep->ddp.flags)); 824 825 CTR3(KTR_CXGBE, "%s: tid %u (time %u)", 826 __func__, toep->tid, time_uptime); 827 828 DDP_ASSERT_LOCKED(toep); 829 toep->ddp.flags |= DDP_SC_REQ; 830 t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_RX_DDP_FLAGS, 831 V_TF_DDP_OFF(1) | V_TF_DDP_INDICATE_OUT(1) | 832 V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1) | 833 V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_BUF1_VALID(1), 834 V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1), 0, 0); 835 t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS, 836 V_TF_RCV_COALESCE_ENABLE(1), 0, 0, 0); 837} 838 839static int 840calculate_hcf(int n1, int n2) 841{ 842 int a, b, t; 843 844 if (n1 <= n2) { 845 a = n1; 846 b = n2; 847 } else { 848 a = n2; 849 b = n1; 850 } 851 852 while (a != 0) { 853 t = a; 854 a = b % a; 855 b = t; 856 } 857 858 return (b); 859} 860 861static inline int 862pages_to_nppods(int npages, int ddp_page_shift) 863{ 864 865 MPASS(ddp_page_shift >= PAGE_SHIFT); 866 867 return (howmany(npages >> (ddp_page_shift - PAGE_SHIFT), PPOD_PAGES)); 868} 869 870static int 871alloc_page_pods(struct 
ppod_region *pr, u_int nppods, u_int pgsz_idx, 872 struct ppod_reservation *prsv) 873{ 874 vmem_addr_t addr; /* relative to start of region */ 875 876 if (vmem_alloc(pr->pr_arena, PPOD_SZ(nppods), M_NOWAIT | M_FIRSTFIT, 877 &addr) != 0) 878 return (ENOMEM); 879 880 CTR5(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d, pgsz %d", 881 __func__, pr->pr_arena, (uint32_t)addr & pr->pr_tag_mask, 882 nppods, 1 << pr->pr_page_shift[pgsz_idx]); 883 884 /* 885 * The hardware tagmask includes an extra invalid bit but the arena was 886 * seeded with valid values only. An allocation out of this arena will 887 * fit inside the tagmask but won't have the invalid bit set. 888 */ 889 MPASS((addr & pr->pr_tag_mask) == addr); 890 MPASS((addr & pr->pr_invalid_bit) == 0); 891 892 prsv->prsv_pr = pr; 893 prsv->prsv_tag = V_PPOD_PGSZ(pgsz_idx) | addr; 894 prsv->prsv_nppods = nppods; 895 896 return (0); 897} 898 899int 900t4_alloc_page_pods_for_ps(struct ppod_region *pr, struct pageset *ps) 901{ 902 int i, hcf, seglen, idx, nppods; 903 struct ppod_reservation *prsv = &ps->prsv; 904 905 KASSERT(prsv->prsv_nppods == 0, 906 ("%s: page pods already allocated", __func__)); 907 908 /* 909 * The DDP page size is unrelated to the VM page size. We combine 910 * contiguous physical pages into larger segments to get the best DDP 911 * page size possible. This is the largest of the four sizes in 912 * A_ULP_RX_TDDP_PSZ that evenly divides the HCF of the segment sizes in 913 * the page list. 914 */ 915 hcf = 0; 916 for (i = 0; i < ps->npages; i++) { 917 seglen = PAGE_SIZE; 918 while (i < ps->npages - 1 && 919 ps->pages[i]->phys_addr + PAGE_SIZE == 920 ps->pages[i + 1]->phys_addr) { 921 seglen += PAGE_SIZE; 922 i++; 923 } 924 925 hcf = calculate_hcf(hcf, seglen); 926 if (hcf < (1 << pr->pr_page_shift[1])) { 927 idx = 0; 928 goto have_pgsz; /* give up, short circuit */ 929 } 930 } 931 932#define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1) 933 MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */ 934 for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) { 935 if ((hcf & PR_PAGE_MASK(idx)) == 0) 936 break; 937 } 938#undef PR_PAGE_MASK 939 940have_pgsz: 941 MPASS(idx <= M_PPOD_PGSZ); 942 943 nppods = pages_to_nppods(ps->npages, pr->pr_page_shift[idx]); 944 if (alloc_page_pods(pr, nppods, idx, prsv) != 0) 945 return (0); 946 MPASS(prsv->prsv_nppods > 0); 947 948 return (1); 949} 950 951int 952t4_alloc_page_pods_for_buf(struct ppod_region *pr, vm_offset_t buf, int len, 953 struct ppod_reservation *prsv) 954{ 955 int hcf, seglen, idx, npages, nppods; 956 uintptr_t start_pva, end_pva, pva, p1; 957 958 MPASS(buf > 0); 959 MPASS(len > 0); 960 961 /* 962 * The DDP page size is unrelated to the VM page size. We combine 963 * contiguous physical pages into larger segments to get the best DDP 964 * page size possible. This is the largest of the four sizes in 965 * A_ULP_RX_ISCSI_PSZ that evenly divides the HCF of the segment sizes 966 * in the page list. 
967 */ 968 hcf = 0; 969 start_pva = trunc_page(buf); 970 end_pva = trunc_page(buf + len - 1); 971 pva = start_pva; 972 while (pva <= end_pva) { 973 seglen = PAGE_SIZE; 974 p1 = pmap_kextract(pva); 975 pva += PAGE_SIZE; 976 while (pva <= end_pva && p1 + seglen == pmap_kextract(pva)) { 977 seglen += PAGE_SIZE; 978 pva += PAGE_SIZE; 979 } 980 981 hcf = calculate_hcf(hcf, seglen); 982 if (hcf < (1 << pr->pr_page_shift[1])) { 983 idx = 0; 984 goto have_pgsz; /* give up, short circuit */ 985 } 986 } 987 988#define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1) 989 MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */ 990 for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) { 991 if ((hcf & PR_PAGE_MASK(idx)) == 0) 992 break; 993 } 994#undef PR_PAGE_MASK 995 996have_pgsz: 997 MPASS(idx <= M_PPOD_PGSZ); 998 999 npages = 1; 1000 npages += (end_pva - start_pva) >> pr->pr_page_shift[idx]; 1001 nppods = howmany(npages, PPOD_PAGES); 1002 if (alloc_page_pods(pr, nppods, idx, prsv) != 0) 1003 return (ENOMEM); 1004 MPASS(prsv->prsv_nppods > 0); 1005 1006 return (0); 1007} 1008 1009void 1010t4_free_page_pods(struct ppod_reservation *prsv) 1011{ 1012 struct ppod_region *pr = prsv->prsv_pr; 1013 vmem_addr_t addr; 1014 1015 MPASS(prsv != NULL); 1016 MPASS(prsv->prsv_nppods != 0); 1017 1018 addr = prsv->prsv_tag & pr->pr_tag_mask; 1019 MPASS((addr & pr->pr_invalid_bit) == 0); 1020 1021 CTR4(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d", __func__, 1022 pr->pr_arena, addr, prsv->prsv_nppods); 1023 1024 vmem_free(pr->pr_arena, addr, PPOD_SZ(prsv->prsv_nppods)); 1025 prsv->prsv_nppods = 0; 1026} 1027 1028#define NUM_ULP_TX_SC_IMM_PPODS (256 / PPOD_SIZE) 1029 1030int 1031t4_write_page_pods_for_ps(struct adapter *sc, struct sge_wrq *wrq, int tid, 1032 struct pageset *ps) 1033{ 1034 struct wrqe *wr; 1035 struct ulp_mem_io *ulpmc; 1036 struct ulptx_idata *ulpsc; 1037 struct pagepod *ppod; 1038 int i, j, k, n, chunk, len, ddp_pgsz, idx; 1039 u_int ppod_addr; 1040 uint32_t cmd; 1041 struct ppod_reservation *prsv = &ps->prsv; 1042 struct ppod_region *pr = prsv->prsv_pr; 1043 1044 KASSERT(!(ps->flags & PS_PPODS_WRITTEN), 1045 ("%s: page pods already written", __func__)); 1046 MPASS(prsv->prsv_nppods > 0); 1047 1048 cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE)); 1049 if (is_t4(sc)) 1050 cmd |= htobe32(F_ULP_MEMIO_ORDER); 1051 else 1052 cmd |= htobe32(F_T5_ULP_MEMIO_IMM); 1053 ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)]; 1054 ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask); 1055 for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) { 1056 1057 /* How many page pods are we writing in this cycle */ 1058 n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS); 1059 chunk = PPOD_SZ(n); 1060 len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16); 1061 1062 wr = alloc_wrqe(len, wrq); 1063 if (wr == NULL) 1064 return (ENOMEM); /* ok to just bail out */ 1065 ulpmc = wrtod(wr); 1066 1067 INIT_ULPTX_WR(ulpmc, len, 0, 0); 1068 ulpmc->cmd = cmd; 1069 ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32)); 1070 ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16)); 1071 ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5)); 1072 1073 ulpsc = (struct ulptx_idata *)(ulpmc + 1); 1074 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); 1075 ulpsc->len = htobe32(chunk); 1076 1077 ppod = (struct pagepod *)(ulpsc + 1); 1078 for (j = 0; j < n; i++, j++, ppod++) { 1079 ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID | 1080 V_PPOD_TID(tid) | 
prsv->prsv_tag); 1081 ppod->len_offset = htobe64(V_PPOD_LEN(ps->len) | 1082 V_PPOD_OFST(ps->offset)); 1083 ppod->rsvd = 0; 1084 idx = i * PPOD_PAGES * (ddp_pgsz / PAGE_SIZE); 1085 for (k = 0; k < nitems(ppod->addr); k++) { 1086 if (idx < ps->npages) { 1087 ppod->addr[k] = 1088 htobe64(ps->pages[idx]->phys_addr); 1089 idx += ddp_pgsz / PAGE_SIZE; 1090 } else 1091 ppod->addr[k] = 0; 1092#if 0 1093 CTR5(KTR_CXGBE, 1094 "%s: tid %d ppod[%d]->addr[%d] = %p", 1095 __func__, toep->tid, i, k, 1096 htobe64(ppod->addr[k])); 1097#endif 1098 } 1099 1100 } 1101 1102 t4_wrq_tx(sc, wr); 1103 } 1104 ps->flags |= PS_PPODS_WRITTEN; 1105 1106 return (0); 1107} 1108 1109int 1110t4_write_page_pods_for_buf(struct adapter *sc, struct sge_wrq *wrq, int tid, 1111 struct ppod_reservation *prsv, vm_offset_t buf, int buflen) 1112{ 1113 struct wrqe *wr; 1114 struct ulp_mem_io *ulpmc; 1115 struct ulptx_idata *ulpsc; 1116 struct pagepod *ppod; 1117 int i, j, k, n, chunk, len, ddp_pgsz; 1118 u_int ppod_addr, offset; 1119 uint32_t cmd; 1120 struct ppod_region *pr = prsv->prsv_pr; 1121 uintptr_t end_pva, pva, pa; 1122 1123 cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE)); 1124 if (is_t4(sc)) 1125 cmd |= htobe32(F_ULP_MEMIO_ORDER); 1126 else 1127 cmd |= htobe32(F_T5_ULP_MEMIO_IMM); 1128 ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)]; 1129 offset = buf & PAGE_MASK; 1130 ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask); 1131 pva = trunc_page(buf); 1132 end_pva = trunc_page(buf + buflen - 1); 1133 for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) { 1134 1135 /* How many page pods are we writing in this cycle */ 1136 n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS); 1137 MPASS(n > 0); 1138 chunk = PPOD_SZ(n); 1139 len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16); 1140 1141 wr = alloc_wrqe(len, wrq); 1142 if (wr == NULL) 1143 return (ENOMEM); /* ok to just bail out */ 1144 ulpmc = wrtod(wr); 1145 1146 INIT_ULPTX_WR(ulpmc, len, 0, 0); 1147 ulpmc->cmd = cmd; 1148 ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32)); 1149 ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16)); 1150 ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5)); 1151 1152 ulpsc = (struct ulptx_idata *)(ulpmc + 1); 1153 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); 1154 ulpsc->len = htobe32(chunk); 1155 1156 ppod = (struct pagepod *)(ulpsc + 1); 1157 for (j = 0; j < n; i++, j++, ppod++) { 1158 ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID | 1159 V_PPOD_TID(tid) | 1160 (prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ))); 1161 ppod->len_offset = htobe64(V_PPOD_LEN(buflen) | 1162 V_PPOD_OFST(offset)); 1163 ppod->rsvd = 0; 1164 1165 for (k = 0; k < nitems(ppod->addr); k++) { 1166 if (pva > end_pva) 1167 ppod->addr[k] = 0; 1168 else { 1169 pa = pmap_kextract(pva); 1170 ppod->addr[k] = htobe64(pa); 1171 pva += ddp_pgsz; 1172 } 1173#if 0 1174 CTR5(KTR_CXGBE, 1175 "%s: tid %d ppod[%d]->addr[%d] = %p", 1176 __func__, tid, i, k, 1177 htobe64(ppod->addr[k])); 1178#endif 1179 } 1180 1181 /* 1182 * Walk back 1 segment so that the first address in the 1183 * next pod is the same as the last one in the current 1184 * pod. 
1185 */ 1186 pva -= ddp_pgsz; 1187 } 1188 1189 t4_wrq_tx(sc, wr); 1190 } 1191 1192 MPASS(pva <= end_pva); 1193 1194 return (0); 1195} 1196 1197static void 1198wire_pageset(struct pageset *ps) 1199{ 1200 vm_page_t p; 1201 int i; 1202 1203 KASSERT(!(ps->flags & PS_WIRED), ("pageset already wired")); 1204 1205 for (i = 0; i < ps->npages; i++) { 1206 p = ps->pages[i]; 1207 vm_page_lock(p); 1208 vm_page_wire(p); 1209 vm_page_unhold(p); 1210 vm_page_unlock(p); 1211 } 1212 ps->flags |= PS_WIRED; 1213} 1214 1215/* 1216 * Prepare a pageset for DDP. This wires the pageset and sets up page 1217 * pods. 1218 */ 1219static int 1220prep_pageset(struct adapter *sc, struct toepcb *toep, struct pageset *ps) 1221{ 1222 struct tom_data *td = sc->tom_softc; 1223 1224 if (!(ps->flags & PS_WIRED)) 1225 wire_pageset(ps); 1226 if (ps->prsv.prsv_nppods == 0 && 1227 !t4_alloc_page_pods_for_ps(&td->pr, ps)) { 1228 return (0); 1229 } 1230 if (!(ps->flags & PS_PPODS_WRITTEN) && 1231 t4_write_page_pods_for_ps(sc, toep->ctrlq, toep->tid, ps) != 0) { 1232 return (0); 1233 } 1234 1235 return (1); 1236} 1237 1238int 1239t4_init_ppod_region(struct ppod_region *pr, struct t4_range *r, u_int psz, 1240 const char *name) 1241{ 1242 int i; 1243 1244 MPASS(pr != NULL); 1245 MPASS(r->size > 0); 1246 1247 pr->pr_start = r->start; 1248 pr->pr_len = r->size; 1249 pr->pr_page_shift[0] = 12 + G_HPZ0(psz); 1250 pr->pr_page_shift[1] = 12 + G_HPZ1(psz); 1251 pr->pr_page_shift[2] = 12 + G_HPZ2(psz); 1252 pr->pr_page_shift[3] = 12 + G_HPZ3(psz); 1253 1254 /* The SGL -> page pod algorithm requires the sizes to be in order. */ 1255 for (i = 1; i < nitems(pr->pr_page_shift); i++) { 1256 if (pr->pr_page_shift[i] <= pr->pr_page_shift[i - 1]) 1257 return (ENXIO); 1258 } 1259 1260 pr->pr_tag_mask = ((1 << fls(r->size)) - 1) & V_PPOD_TAG(M_PPOD_TAG); 1261 pr->pr_alias_mask = V_PPOD_TAG(M_PPOD_TAG) & ~pr->pr_tag_mask; 1262 if (pr->pr_tag_mask == 0 || pr->pr_alias_mask == 0) 1263 return (ENXIO); 1264 pr->pr_alias_shift = fls(pr->pr_tag_mask); 1265 pr->pr_invalid_bit = 1 << (pr->pr_alias_shift - 1); 1266 1267 pr->pr_arena = vmem_create(name, 0, pr->pr_len, PPOD_SIZE, 0, 1268 M_FIRSTFIT | M_NOWAIT); 1269 if (pr->pr_arena == NULL) 1270 return (ENOMEM); 1271 1272 return (0); 1273} 1274 1275void 1276t4_free_ppod_region(struct ppod_region *pr) 1277{ 1278 1279 MPASS(pr != NULL); 1280 1281 if (pr->pr_arena) 1282 vmem_destroy(pr->pr_arena); 1283 bzero(pr, sizeof(*pr)); 1284} 1285 1286static int 1287pscmp(struct pageset *ps, struct vmspace *vm, vm_offset_t start, int npages, 1288 int pgoff, int len) 1289{ 1290 1291 if (ps->start != start || ps->npages != npages || 1292 ps->offset != pgoff || ps->len != len) 1293 return (1); 1294 1295 return (ps->vm != vm || ps->vm_timestamp != vm->vm_map.timestamp); 1296} 1297 1298static int 1299hold_aio(struct toepcb *toep, struct kaiocb *job, struct pageset **pps) 1300{ 1301 struct vmspace *vm; 1302 vm_map_t map; 1303 vm_offset_t start, end, pgoff; 1304 struct pageset *ps; 1305 int n; 1306 1307 DDP_ASSERT_LOCKED(toep); 1308 1309 /* 1310 * The AIO subsystem will cancel and drain all requests before 1311 * permitting a process to exit or exec, so p_vmspace should 1312 * be stable here. 1313 */ 1314 vm = job->userproc->p_vmspace; 1315 map = &vm->vm_map; 1316 start = (uintptr_t)job->uaiocb.aio_buf; 1317 pgoff = start & PAGE_MASK; 1318 end = round_page(start + job->uaiocb.aio_nbytes); 1319 start = trunc_page(start); 1320 1321 if (end - start > MAX_DDP_BUFFER_SIZE) { 1322 /* 1323 * Truncate the request to a short read. 
1324 * Alternatively, we could DDP in chunks to the larger 1325 * buffer, but that would be quite a bit more work. 1326 * 1327 * When truncating, round the request down to avoid 1328 * crossing a cache line on the final transaction. 1329 */ 1330 end = rounddown2(start + MAX_DDP_BUFFER_SIZE, CACHE_LINE_SIZE); 1331#ifdef VERBOSE_TRACES 1332 CTR4(KTR_CXGBE, "%s: tid %d, truncating size from %lu to %lu", 1333 __func__, toep->tid, (unsigned long)job->uaiocb.aio_nbytes, 1334 (unsigned long)(end - (start + pgoff))); 1335#endif 1336 job->uaiocb.aio_nbytes = end - (start + pgoff); 1337 end = round_page(end); 1338 } 1339 1340 n = atop(end - start); 1341 1342 /* 1343 * Try to reuse a cached pageset. 1344 */ 1345 TAILQ_FOREACH(ps, &toep->ddp.cached_pagesets, link) { 1346 if (pscmp(ps, vm, start, n, pgoff, 1347 job->uaiocb.aio_nbytes) == 0) { 1348 TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link); 1349 toep->ddp.cached_count--; 1350 *pps = ps; 1351 return (0); 1352 } 1353 } 1354 1355 /* 1356 * If there are too many cached pagesets to create a new one, 1357 * free a pageset before creating a new one. 1358 */ 1359 KASSERT(toep->ddp.active_count + toep->ddp.cached_count <= 1360 nitems(toep->ddp.db), ("%s: too many wired pagesets", __func__)); 1361 if (toep->ddp.active_count + toep->ddp.cached_count == 1362 nitems(toep->ddp.db)) { 1363 KASSERT(toep->ddp.cached_count > 0, 1364 ("no cached pageset to free")); 1365 ps = TAILQ_LAST(&toep->ddp.cached_pagesets, pagesetq); 1366 TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link); 1367 toep->ddp.cached_count--; 1368 free_pageset(toep->td, ps); 1369 } 1370 DDP_UNLOCK(toep); 1371 1372 /* Create a new pageset. */ 1373 ps = malloc(sizeof(*ps) + n * sizeof(vm_page_t), M_CXGBE, M_WAITOK | 1374 M_ZERO); 1375 ps->pages = (vm_page_t *)(ps + 1); 1376 ps->vm_timestamp = map->timestamp; 1377 ps->npages = vm_fault_quick_hold_pages(map, start, end - start, 1378 VM_PROT_WRITE, ps->pages, n); 1379 1380 DDP_LOCK(toep); 1381 if (ps->npages < 0) { 1382 free(ps, M_CXGBE); 1383 return (EFAULT); 1384 } 1385 1386 KASSERT(ps->npages == n, ("hold_aio: page count mismatch: %d vs %d", 1387 ps->npages, n)); 1388 1389 ps->offset = pgoff; 1390 ps->len = job->uaiocb.aio_nbytes; 1391 atomic_add_int(&vm->vm_refcnt, 1); 1392 ps->vm = vm; 1393 ps->start = start; 1394 1395 CTR5(KTR_CXGBE, "%s: tid %d, new pageset %p for job %p, npages %d", 1396 __func__, toep->tid, ps, job, ps->npages); 1397 *pps = ps; 1398 return (0); 1399} 1400 1401static void 1402ddp_complete_all(struct toepcb *toep, int error) 1403{ 1404 struct kaiocb *job; 1405 1406 DDP_ASSERT_LOCKED(toep); 1407 while (!TAILQ_EMPTY(&toep->ddp.aiojobq)) { 1408 job = TAILQ_FIRST(&toep->ddp.aiojobq); 1409 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list); 1410 toep->ddp.waiting_count--; 1411 if (aio_clear_cancel_function(job)) 1412 ddp_complete_one(job, error); 1413 } 1414} 1415 1416static void 1417aio_ddp_cancel_one(struct kaiocb *job) 1418{ 1419 long copied; 1420 1421 /* 1422 * If this job had copied data out of the socket buffer before 1423 * it was cancelled, report it as a short read rather than an 1424 * error. 1425 */ 1426 copied = job->aio_received; 1427 if (copied != 0) 1428 aio_complete(job, copied, 0); 1429 else 1430 aio_cancel(job); 1431} 1432 1433/* 1434 * Called when the main loop wants to requeue a job to retry it later. 1435 * Deals with the race of the job being cancelled while it was being 1436 * examined. 
1437 */ 1438static void 1439aio_ddp_requeue_one(struct toepcb *toep, struct kaiocb *job) 1440{ 1441 1442 DDP_ASSERT_LOCKED(toep); 1443 if (!(toep->ddp.flags & DDP_DEAD) && 1444 aio_set_cancel_function(job, t4_aio_cancel_queued)) { 1445 TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list); 1446 toep->ddp.waiting_count++; 1447 } else 1448 aio_ddp_cancel_one(job); 1449} 1450 1451static void 1452aio_ddp_requeue(struct toepcb *toep) 1453{ 1454 struct adapter *sc = td_adapter(toep->td); 1455 struct socket *so; 1456 struct sockbuf *sb; 1457 struct inpcb *inp; 1458 struct kaiocb *job; 1459 struct ddp_buffer *db; 1460 size_t copied, offset, resid; 1461 struct pageset *ps; 1462 struct mbuf *m; 1463 uint64_t ddp_flags, ddp_flags_mask; 1464 struct wrqe *wr; 1465 int buf_flag, db_idx, error; 1466 1467 DDP_ASSERT_LOCKED(toep); 1468 1469restart: 1470 if (toep->ddp.flags & DDP_DEAD) { 1471 MPASS(toep->ddp.waiting_count == 0); 1472 MPASS(toep->ddp.active_count == 0); 1473 return; 1474 } 1475 1476 if (toep->ddp.waiting_count == 0 || 1477 toep->ddp.active_count == nitems(toep->ddp.db)) { 1478 return; 1479 } 1480 1481 job = TAILQ_FIRST(&toep->ddp.aiojobq); 1482 so = job->fd_file->f_data; 1483 sb = &so->so_rcv; 1484 SOCKBUF_LOCK(sb); 1485 1486 /* We will never get anything unless we are or were connected. */ 1487 if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) { 1488 SOCKBUF_UNLOCK(sb); 1489 ddp_complete_all(toep, ENOTCONN); 1490 return; 1491 } 1492 1493 KASSERT(toep->ddp.active_count == 0 || sbavail(sb) == 0, 1494 ("%s: pending sockbuf data and DDP is active", __func__)); 1495 1496 /* Abort if socket has reported problems. */ 1497 /* XXX: Wait for any queued DDP's to finish and/or flush them? */ 1498 if (so->so_error && sbavail(sb) == 0) { 1499 toep->ddp.waiting_count--; 1500 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list); 1501 if (!aio_clear_cancel_function(job)) { 1502 SOCKBUF_UNLOCK(sb); 1503 goto restart; 1504 } 1505 1506 /* 1507 * If this job has previously copied some data, report 1508 * a short read and leave the error to be reported by 1509 * a future request. 1510 */ 1511 copied = job->aio_received; 1512 if (copied != 0) { 1513 SOCKBUF_UNLOCK(sb); 1514 aio_complete(job, copied, 0); 1515 goto restart; 1516 } 1517 error = so->so_error; 1518 so->so_error = 0; 1519 SOCKBUF_UNLOCK(sb); 1520 aio_complete(job, -1, error); 1521 goto restart; 1522 } 1523 1524 /* 1525 * Door is closed. If there is pending data in the socket buffer, 1526 * deliver it. If there are pending DDP requests, wait for those 1527 * to complete. Once they have completed, return EOF reads. 1528 */ 1529 if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) { 1530 SOCKBUF_UNLOCK(sb); 1531 if (toep->ddp.active_count != 0) 1532 return; 1533 ddp_complete_all(toep, 0); 1534 return; 1535 } 1536 1537 /* 1538 * If DDP is not enabled and there is no pending socket buffer 1539 * data, try to enable DDP. 1540 */ 1541 if (sbavail(sb) == 0 && (toep->ddp.flags & DDP_ON) == 0) { 1542 SOCKBUF_UNLOCK(sb); 1543 1544 /* 1545 * Wait for the card to ACK that DDP is enabled before 1546 * queueing any buffers. Currently this waits for an 1547 * indicate to arrive. This could use a TCB_SET_FIELD_RPL 1548 * message to know that DDP was enabled instead of waiting 1549 * for the indicate which would avoid copying the indicate 1550 * if no data is pending. 1551 * 1552 * XXX: Might want to limit the indicate size to the size 1553 * of the first queued request. 
1554 */ 1555 if ((toep->ddp.flags & DDP_SC_REQ) == 0) 1556 enable_ddp(sc, toep); 1557 return; 1558 } 1559 SOCKBUF_UNLOCK(sb); 1560 1561 /* 1562 * If another thread is queueing a buffer for DDP, let it 1563 * drain any work and return. 1564 */ 1565 if (toep->ddp.queueing != NULL) 1566 return; 1567 1568 /* Take the next job to prep it for DDP. */ 1569 toep->ddp.waiting_count--; 1570 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list); 1571 if (!aio_clear_cancel_function(job)) 1572 goto restart; 1573 toep->ddp.queueing = job; 1574 1575 /* NB: This drops DDP_LOCK while it holds the backing VM pages. */ 1576 error = hold_aio(toep, job, &ps); 1577 if (error != 0) { 1578 ddp_complete_one(job, error); 1579 toep->ddp.queueing = NULL; 1580 goto restart; 1581 } 1582 1583 SOCKBUF_LOCK(sb); 1584 if (so->so_error && sbavail(sb) == 0) { 1585 copied = job->aio_received; 1586 if (copied != 0) { 1587 SOCKBUF_UNLOCK(sb); 1588 recycle_pageset(toep, ps); 1589 aio_complete(job, copied, 0); 1590 toep->ddp.queueing = NULL; 1591 goto restart; 1592 } 1593 1594 error = so->so_error; 1595 so->so_error = 0; 1596 SOCKBUF_UNLOCK(sb); 1597 recycle_pageset(toep, ps); 1598 aio_complete(job, -1, error); 1599 toep->ddp.queueing = NULL; 1600 goto restart; 1601 } 1602 1603 if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) { 1604 SOCKBUF_UNLOCK(sb); 1605 recycle_pageset(toep, ps); 1606 if (toep->ddp.active_count != 0) { 1607 /* 1608 * The door is closed, but there are still pending 1609 * DDP buffers. Requeue. These jobs will all be 1610 * completed once those buffers drain. 1611 */ 1612 aio_ddp_requeue_one(toep, job); 1613 toep->ddp.queueing = NULL; 1614 return; 1615 } 1616 ddp_complete_one(job, 0); 1617 ddp_complete_all(toep, 0); 1618 toep->ddp.queueing = NULL; 1619 return; 1620 } 1621 1622sbcopy: 1623 /* 1624 * If the toep is dead, there shouldn't be any data in the socket 1625 * buffer, so the above case should have handled this. 1626 */ 1627 MPASS(!(toep->ddp.flags & DDP_DEAD)); 1628 1629 /* 1630 * If there is pending data in the socket buffer (either 1631 * from before the requests were queued or a DDP indicate), 1632 * copy those mbufs out directly. 1633 */ 1634 copied = 0; 1635 offset = ps->offset + job->aio_received; 1636 MPASS(job->aio_received <= job->uaiocb.aio_nbytes); 1637 resid = job->uaiocb.aio_nbytes - job->aio_received; 1638 m = sb->sb_mb; 1639 KASSERT(m == NULL || toep->ddp.active_count == 0, 1640 ("%s: sockbuf data with active DDP", __func__)); 1641 while (m != NULL && resid > 0) { 1642 struct iovec iov[1]; 1643 struct uio uio; 1644 int error; 1645 1646 iov[0].iov_base = mtod(m, void *); 1647 iov[0].iov_len = m->m_len; 1648 if (iov[0].iov_len > resid) 1649 iov[0].iov_len = resid; 1650 uio.uio_iov = iov; 1651 uio.uio_iovcnt = 1; 1652 uio.uio_offset = 0; 1653 uio.uio_resid = iov[0].iov_len; 1654 uio.uio_segflg = UIO_SYSSPACE; 1655 uio.uio_rw = UIO_WRITE; 1656 error = uiomove_fromphys(ps->pages, offset + copied, 1657 uio.uio_resid, &uio); 1658 MPASS(error == 0 && uio.uio_resid == 0); 1659 copied += uio.uio_offset; 1660 resid -= uio.uio_offset; 1661 m = m->m_next; 1662 } 1663 if (copied != 0) { 1664 sbdrop_locked(sb, copied); 1665 job->aio_received += copied; 1666 job->msgrcv = 1; 1667 copied = job->aio_received; 1668 inp = sotoinpcb(so); 1669 if (!INP_TRY_WLOCK(inp)) { 1670 /* 1671 * The reference on the socket file descriptor in 1672 * the AIO job should keep 'sb' and 'inp' stable. 1673 * Our caller has a reference on the 'toep' that 1674 * keeps it stable. 
1675 */ 1676 SOCKBUF_UNLOCK(sb); 1677 DDP_UNLOCK(toep); 1678 INP_WLOCK(inp); 1679 DDP_LOCK(toep); 1680 SOCKBUF_LOCK(sb); 1681 1682 /* 1683 * If the socket has been closed, we should detect 1684 * that and complete this request if needed on 1685 * the next trip around the loop. 1686 */ 1687 } 1688 t4_rcvd_locked(&toep->td->tod, intotcpcb(inp)); 1689 INP_WUNLOCK(inp); 1690 if (resid == 0 || toep->ddp.flags & DDP_DEAD) { 1691 /* 1692 * We filled the entire buffer with socket 1693 * data, DDP is not being used, or the socket 1694 * is being shut down, so complete the 1695 * request. 1696 */ 1697 SOCKBUF_UNLOCK(sb); 1698 recycle_pageset(toep, ps); 1699 aio_complete(job, copied, 0); 1700 toep->ddp.queueing = NULL; 1701 goto restart; 1702 } 1703 1704 /* 1705 * If DDP is not enabled, requeue this request and restart. 1706 * This will either enable DDP or wait for more data to 1707 * arrive on the socket buffer. 1708 */ 1709 if ((toep->ddp.flags & (DDP_ON | DDP_SC_REQ)) != DDP_ON) { 1710 SOCKBUF_UNLOCK(sb); 1711 recycle_pageset(toep, ps); 1712 aio_ddp_requeue_one(toep, job); 1713 toep->ddp.queueing = NULL; 1714 goto restart; 1715 } 1716 1717 /* 1718 * An indicate might have arrived and been added to 1719 * the socket buffer while it was unlocked after the 1720 * copy to lock the INP. If so, restart the copy. 1721 */ 1722 if (sbavail(sb) != 0) 1723 goto sbcopy; 1724 } 1725 SOCKBUF_UNLOCK(sb); 1726 1727 if (prep_pageset(sc, toep, ps) == 0) { 1728 recycle_pageset(toep, ps); 1729 aio_ddp_requeue_one(toep, job); 1730 toep->ddp.queueing = NULL; 1731 1732 /* 1733 * XXX: Need to retry this later. Mostly need a trigger 1734 * when page pods are freed up. 1735 */ 1736 printf("%s: prep_pageset failed\n", __func__); 1737 return; 1738 } 1739 1740 /* Determine which DDP buffer to use. */ 1741 if (toep->ddp.db[0].job == NULL) { 1742 db_idx = 0; 1743 } else { 1744 MPASS(toep->ddp.db[1].job == NULL); 1745 db_idx = 1; 1746 } 1747 1748 ddp_flags = 0; 1749 ddp_flags_mask = 0; 1750 if (db_idx == 0) { 1751 ddp_flags |= V_TF_DDP_BUF0_VALID(1); 1752 if (so->so_state & SS_NBIO) 1753 ddp_flags |= V_TF_DDP_BUF0_FLUSH(1); 1754 ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE0(1) | 1755 V_TF_DDP_PUSH_DISABLE_0(1) | V_TF_DDP_PSHF_ENABLE_0(1) | 1756 V_TF_DDP_BUF0_FLUSH(1) | V_TF_DDP_BUF0_VALID(1); 1757 buf_flag = DDP_BUF0_ACTIVE; 1758 } else { 1759 ddp_flags |= V_TF_DDP_BUF1_VALID(1); 1760 if (so->so_state & SS_NBIO) 1761 ddp_flags |= V_TF_DDP_BUF1_FLUSH(1); 1762 ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE1(1) | 1763 V_TF_DDP_PUSH_DISABLE_1(1) | V_TF_DDP_PSHF_ENABLE_1(1) | 1764 V_TF_DDP_BUF1_FLUSH(1) | V_TF_DDP_BUF1_VALID(1); 1765 buf_flag = DDP_BUF1_ACTIVE; 1766 } 1767 MPASS((toep->ddp.flags & buf_flag) == 0); 1768 if ((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0) { 1769 MPASS(db_idx == 0); 1770 MPASS(toep->ddp.active_id == -1); 1771 MPASS(toep->ddp.active_count == 0); 1772 ddp_flags_mask |= V_TF_DDP_ACTIVE_BUF(1); 1773 } 1774 1775 /* 1776 * The TID for this connection should still be valid. If DDP_DEAD 1777 * is set, SBS_CANTRCVMORE should be set, so we shouldn't be 1778 * this far anyway. Even if the socket is closing on the other 1779 * end, the AIO job holds a reference on this end of the socket 1780 * which will keep it open and keep the TCP PCB attached until 1781 * after the job is completed. 
1782 */ 1783 wr = mk_update_tcb_for_ddp(sc, toep, db_idx, ps, job->aio_received, 1784 ddp_flags, ddp_flags_mask); 1785 if (wr == NULL) { 1786 recycle_pageset(toep, ps); 1787 aio_ddp_requeue_one(toep, job); 1788 toep->ddp.queueing = NULL; 1789 1790 /* 1791 * XXX: Need a way to kick a retry here. 1792 * 1793 * XXX: We know the fixed size needed and could 1794 * preallocate this using a blocking request at the 1795 * start of the task to avoid having to handle this 1796 * edge case. 1797 */ 1798 printf("%s: mk_update_tcb_for_ddp failed\n", __func__); 1799 return; 1800 } 1801 1802 if (!aio_set_cancel_function(job, t4_aio_cancel_active)) { 1803 free_wrqe(wr); 1804 recycle_pageset(toep, ps); 1805 aio_ddp_cancel_one(job); 1806 toep->ddp.queueing = NULL; 1807 goto restart; 1808 } 1809 1810#ifdef VERBOSE_TRACES 1811 CTR5(KTR_CXGBE, "%s: scheduling %p for DDP[%d] (flags %#lx/%#lx)", 1812 __func__, job, db_idx, ddp_flags, ddp_flags_mask); 1813#endif 1814 /* Give the chip the go-ahead. */ 1815 t4_wrq_tx(sc, wr); 1816 db = &toep->ddp.db[db_idx]; 1817 db->cancel_pending = 0; 1818 db->job = job; 1819 db->ps = ps; 1820 toep->ddp.queueing = NULL; 1821 toep->ddp.flags |= buf_flag; 1822 toep->ddp.active_count++; 1823 if (toep->ddp.active_count == 1) { 1824 MPASS(toep->ddp.active_id == -1); 1825 toep->ddp.active_id = db_idx; 1826 CTR2(KTR_CXGBE, "%s: ddp_active_id = %d", __func__, 1827 toep->ddp.active_id); 1828 } 1829 goto restart; 1830} 1831 1832void 1833ddp_queue_toep(struct toepcb *toep) 1834{ 1835 1836 DDP_ASSERT_LOCKED(toep); 1837 if (toep->ddp.flags & DDP_TASK_ACTIVE) 1838 return; 1839 toep->ddp.flags |= DDP_TASK_ACTIVE; 1840 hold_toepcb(toep); 1841 soaio_enqueue(&toep->ddp.requeue_task); 1842} 1843 1844static void 1845aio_ddp_requeue_task(void *context, int pending) 1846{ 1847 struct toepcb *toep = context; 1848 1849 DDP_LOCK(toep); 1850 aio_ddp_requeue(toep); 1851 toep->ddp.flags &= ~DDP_TASK_ACTIVE; 1852 DDP_UNLOCK(toep); 1853 1854 free_toepcb(toep); 1855} 1856 1857static void 1858t4_aio_cancel_active(struct kaiocb *job) 1859{ 1860 struct socket *so = job->fd_file->f_data; 1861 struct tcpcb *tp = so_sototcpcb(so); 1862 struct toepcb *toep = tp->t_toe; 1863 struct adapter *sc = td_adapter(toep->td); 1864 uint64_t valid_flag; 1865 int i; 1866 1867 DDP_LOCK(toep); 1868 if (aio_cancel_cleared(job)) { 1869 DDP_UNLOCK(toep); 1870 aio_ddp_cancel_one(job); 1871 return; 1872 } 1873 1874 for (i = 0; i < nitems(toep->ddp.db); i++) { 1875 if (toep->ddp.db[i].job == job) { 1876 /* Should only ever get one cancel request for a job. */ 1877 MPASS(toep->ddp.db[i].cancel_pending == 0); 1878 1879 /* 1880 * Invalidate this buffer. It will be 1881 * cancelled or partially completed once the 1882 * card ACKs the invalidate. 1883 */ 1884 valid_flag = i == 0 ? 
V_TF_DDP_BUF0_VALID(1) : 1885 V_TF_DDP_BUF1_VALID(1); 1886 t4_set_tcb_field(sc, toep->ctrlq, toep, 1887 W_TCB_RX_DDP_FLAGS, valid_flag, 0, 1, 1888 i + DDP_BUF0_INVALIDATED); 1889 toep->ddp.db[i].cancel_pending = 1; 1890 CTR2(KTR_CXGBE, "%s: request %p marked pending", 1891 __func__, job); 1892 break; 1893 } 1894 } 1895 DDP_UNLOCK(toep); 1896} 1897 1898static void 1899t4_aio_cancel_queued(struct kaiocb *job) 1900{ 1901 struct socket *so = job->fd_file->f_data; 1902 struct tcpcb *tp = so_sototcpcb(so); 1903 struct toepcb *toep = tp->t_toe; 1904 1905 DDP_LOCK(toep); 1906 if (!aio_cancel_cleared(job)) { 1907 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list); 1908 toep->ddp.waiting_count--; 1909 if (toep->ddp.waiting_count == 0) 1910 ddp_queue_toep(toep); 1911 } 1912 CTR2(KTR_CXGBE, "%s: request %p cancelled", __func__, job); 1913 DDP_UNLOCK(toep); 1914 1915 aio_ddp_cancel_one(job); 1916} 1917 1918int 1919t4_aio_queue_ddp(struct socket *so, struct kaiocb *job) 1920{ 1921 struct tcpcb *tp = so_sototcpcb(so); 1922 struct toepcb *toep = tp->t_toe; 1923 1924 1925 /* Ignore writes. */ 1926 if (job->uaiocb.aio_lio_opcode != LIO_READ) 1927 return (EOPNOTSUPP); 1928 1929 DDP_LOCK(toep); 1930 1931 /* 1932 * XXX: Think about possibly returning errors for ENOTCONN, 1933 * etc. Perhaps the caller would only queue the request 1934 * if it failed with EOPNOTSUPP? 1935 */ 1936 1937#ifdef VERBOSE_TRACES 1938 CTR2(KTR_CXGBE, "%s: queueing %p", __func__, job); 1939#endif 1940 if (!aio_set_cancel_function(job, t4_aio_cancel_queued)) 1941 panic("new job was cancelled"); 1942 TAILQ_INSERT_TAIL(&toep->ddp.aiojobq, job, list); 1943 toep->ddp.waiting_count++; 1944 toep->ddp.flags |= DDP_OK; 1945 1946 /* 1947 * Try to handle this request synchronously. If this has 1948 * to block because the task is running, it will just bail 1949 * and let the task handle it instead. 1950 */ 1951 aio_ddp_requeue(toep); 1952 DDP_UNLOCK(toep); 1953 return (0); 1954} 1955 1956void 1957t4_ddp_mod_load(void) 1958{ 1959 1960 t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, do_ddp_tcb_rpl, 1961 CPL_COOKIE_DDP0); 1962 t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, do_ddp_tcb_rpl, 1963 CPL_COOKIE_DDP1); 1964 t4_register_cpl_handler(CPL_RX_DATA_DDP, do_rx_data_ddp); 1965 t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_rx_ddp_complete); 1966 TAILQ_INIT(&ddp_orphan_pagesets); 1967 mtx_init(&ddp_orphan_pagesets_lock, "ddp orphans", NULL, MTX_DEF); 1968 TASK_INIT(&ddp_orphan_task, 0, ddp_free_orphan_pagesets, NULL); 1969} 1970 1971void 1972t4_ddp_mod_unload(void) 1973{ 1974 1975 taskqueue_drain(taskqueue_thread, &ddp_orphan_task); 1976 MPASS(TAILQ_EMPTY(&ddp_orphan_pagesets)); 1977 mtx_destroy(&ddp_orphan_pagesets_lock); 1978 t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, NULL, CPL_COOKIE_DDP0); 1979 t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, NULL, CPL_COOKIE_DDP1); 1980 t4_register_cpl_handler(CPL_RX_DATA_DDP, NULL); 1981 t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, NULL); 1982} 1983#endif 1984
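
The pageset-cache comment near the top of this file describes the two-buffer "ping-pong" aio_read(2) pattern that the driver's DDP path is tuned for. The sketch below is an illustrative userland companion, not part of the driver: it assumes a connected TCP socket (sock) on a TOE-capable cxgbe interface, and the buffer size, the ping_pong_read() name, and the error handling are placeholder choices. It relies only on aio_read(2) and the FreeBSD-specific aio_waitcomplete(2).

/*
 * Hypothetical userland sketch: ping-pong two fixed buffers with
 * aio_read(2) so the driver can cache and reuse their wired pagesets.
 */
#include <aio.h>
#include <err.h>
#include <stdlib.h>
#include <string.h>

#define READ_SZ	(128 * 1024)	/* placeholder; keep below MAX_DDP_BUFFER_SIZE to avoid truncation */

static void
ping_pong_read(int sock)
{
	struct aiocb cb[2], *done;
	char *buf[2];
	ssize_t n;
	int i;

	for (i = 0; i < 2; i++) {
		buf[i] = malloc(READ_SZ);
		if (buf[i] == NULL)
			err(1, "malloc");
		memset(&cb[i], 0, sizeof(cb[i]));
		cb[i].aio_fildes = sock;
		cb[i].aio_buf = buf[i];
		cb[i].aio_nbytes = READ_SZ;
		if (aio_read(&cb[i]) != 0)
			err(1, "aio_read");
	}

	for (;;) {
		/* Wait for whichever outstanding read finishes first. */
		n = aio_waitcomplete(&done, NULL);
		if (n <= 0)
			break;			/* EOF or error */
		i = (done == &cb[1]);
		/* ... consume buf[i][0 .. n) here ... */

		/* Resubmit the same buffer so its pageset stays cached. */
		if (aio_read(&cb[i]) != 0)
			err(1, "aio_read");
	}
}

Two buffers matches nitems(toep->ddp.db); an application that rotates through more buffers than that exceeds the per-connection pageset cache and falls back to faulting and holding pages for each request, as the comment above explains.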