/*
 * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $FreeBSD: stable/11/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h 355240 2019-11-30 19:21:29Z np $
 */
#ifndef __IW_CXGB4_H__
#define __IW_CXGB4_H__

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <sys/vmem.h>

#include <asm/byteorder.h>

#include <netinet/in.h>
#include <netinet/toecore.h>

#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>

#undef prefetch

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_tcb.h"
#include "t4_l2t.h"

#define DRV_NAME "iw_cxgbe"
#define MOD DRV_NAME ":"
#define KTR_IW_CXGBE KTR_SPARE3

extern int c4iw_debug;
extern int use_dsgl;
extern int inline_threshold;

#define PDBG(fmt, args...) \
do { \
        if (c4iw_debug) \
                printf(MOD fmt, ## args); \
} while (0)
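/*
 * Illustrative only (not part of the original header): PDBG is used like
 * printf(9), with MOD ("iw_cxgbe:") prepended and output gated on the
 * c4iw_debug knob, e.g.:
 *
 *      PDBG("%s: qpid 0x%x stag 0x%x\n", __func__, qpid, stag);
 */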
#include "t4.h"

static inline void *cplhdr(struct mbuf *m)
{
        return mtod(m, void*);
}

#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->adap->vres.pbl.start)
#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->adap->vres.rq.start)

#define C4IW_ID_TABLE_F_RANDOM 1        /* Pseudo-randomize the id's returned */
#define C4IW_ID_TABLE_F_EMPTY  2        /* Table is initially empty */

struct c4iw_id_table {
        u32 flags;
        u32 start;              /* logical minimal id */
        u32 last;               /* hint for find */
        u32 max;
        spinlock_t lock;
        unsigned long *table;
};

struct c4iw_resource {
        struct c4iw_id_table tpt_table;
        struct c4iw_id_table qid_table;
        struct c4iw_id_table pdid_table;
};

struct c4iw_qid_list {
        struct list_head entry;
        u32 qid;
};

struct c4iw_dev_ucontext {
        struct list_head qpids;
        struct list_head cqids;
        struct mutex lock;
};

enum c4iw_rdev_flags {
        T4_FATAL_ERROR = (1<<0),
        T4_STATUS_PAGE_DISABLED = (1<<1),
};

struct c4iw_stat {
        u64 total;
        u64 cur;
        u64 max;
        u64 fail;
};

struct c4iw_stats {
        struct mutex lock;
        struct c4iw_stat qid;
        struct c4iw_stat pd;
        struct c4iw_stat stag;
        struct c4iw_stat pbl;
        struct c4iw_stat rqt;
};

struct c4iw_hw_queue {
        int t4_eq_status_entries;
        int t4_max_eq_size;
        int t4_max_iq_size;
        int t4_max_rq_size;
        int t4_max_sq_size;
        int t4_max_qp_depth;
        int t4_max_cq_depth;
        int t4_stat_len;
};

struct c4iw_rdev {
        struct adapter *adap;
        struct c4iw_resource resource;
        unsigned long qpshift;
        u32 qpmask;
        unsigned long cqshift;
        u32 cqmask;
        struct c4iw_dev_ucontext uctx;
        vmem_t *rqt_arena;
        vmem_t *pbl_arena;
        u32 flags;
        struct c4iw_stats stats;
        struct c4iw_hw_queue hw_queue;
        struct t4_dev_status_page *status_page;
        unsigned long bar2_pa;
        void __iomem *bar2_kva;
        unsigned int bar2_len;
        struct workqueue_struct *free_workq;
};

static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
{
        return rdev->flags & T4_FATAL_ERROR;
}

static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
{
        return (int)(rdev->adap->vres.stag.size >> 5);
}

#define C4IW_WR_TO (60*HZ)

struct c4iw_wr_wait {
        int ret;
        struct completion completion;
};

static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
        wr_waitp->ret = 0;
        init_completion(&wr_waitp->completion);
}

static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
{
        wr_waitp->ret = ret;
        complete(&wr_waitp->completion);
}
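/*
 * Illustrative only (not part of the original header): the usual
 * c4iw_wr_wait pattern is to initialize the wait object, post a firmware
 * work request whose cookie points at it, and block until the reply path
 * calls c4iw_wake_up() with the firmware's return code, e.g.:
 *
 *      struct c4iw_wr_wait wr_wait;
 *
 *      c4iw_init_wr_wait(&wr_wait);
 *      ret = c4iw_ofld_send(rdev, m);          // WR cookie -> &wr_wait
 *      if (ret == 0)
 *              ret = c4iw_wait_for_reply(rdev, &wr_wait, hwtid, qpid,
 *                  so, __func__);
 */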
static inline int
c4iw_wait_for_reply(struct c4iw_rdev *rdev, struct c4iw_wr_wait *wr_waitp,
                u32 hwtid, u32 qpid, struct socket *so, const char *func)
{
        struct adapter *sc = rdev->adap;
        unsigned to = C4IW_WR_TO;
        int ret;
        int timedout = 0;
        struct timeval t1, t2;

        if (c4iw_fatal_error(rdev)) {
                wr_waitp->ret = -EIO;
                goto out;
        }

        getmicrotime(&t1);
        do {
                /* If waiting for reply in rdma_init()/rdma_fini() threads,
                 * then check if there are any connection errors.
                 */
                if (so && so->so_error) {
                        wr_waitp->ret = -ECONNRESET;
                        CTR5(KTR_IW_CXGBE, "%s - Connection ERROR %u for sock %p "
                            "tid %u qpid %u", func,
                            so->so_error, so, hwtid, qpid);
                        break;
                }

                ret = wait_for_completion_timeout(&wr_waitp->completion, to);
                if (!ret) {
                        getmicrotime(&t2);
                        timevalsub(&t2, &t1);
                        printf("%s - Device %s not responding after %ld.%06ld "
                            "seconds - tid %u qpid %u\n", func,
                            device_get_nameunit(sc->dev), t2.tv_sec, t2.tv_usec,
                            hwtid, qpid);
                        if (c4iw_fatal_error(rdev)) {
                                wr_waitp->ret = -EIO;
                                break;
                        }
                        to = to << 2;
                        timedout = 1;
                }
        } while (!ret);

out:
        if (timedout) {
                getmicrotime(&t2);
                timevalsub(&t2, &t1);
                printf("%s - Device %s reply after %ld.%06ld seconds - "
                    "tid %u qpid %u\n", func, device_get_nameunit(sc->dev),
                    t2.tv_sec, t2.tv_usec, hwtid, qpid);
        }
        if (wr_waitp->ret)
                CTR4(KTR_IW_CXGBE, "%p: FW reply %d tid %u qpid %u", sc,
                    wr_waitp->ret, hwtid, qpid);
        return (wr_waitp->ret);
}

struct c4iw_dev {
        struct ib_device ibdev;
        struct c4iw_rdev rdev;
        u32 device_cap_flags;
        struct idr cqidr;
        struct idr qpidr;
        struct idr mmidr;
        spinlock_t lock;
        struct dentry *debugfs_root;
        u32 avail_ird;
};

static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct c4iw_dev, ibdev);
}

static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
{
        return container_of(rdev, struct c4iw_dev, rdev);
}

static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
{
        return idr_find(&rhp->cqidr, cqid);
}

static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
{
        return idr_find(&rhp->qpidr, qpid);
}

static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
{
        return idr_find(&rhp->mmidr, mmid);
}
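/*
 * Illustrative only (not part of the original header): the three idr
 * tables map hardware IDs back to their driver objects.  A QP is made
 * visible at a fixed id with insert_handle() (below) and later looked up
 * from CQ/event context with get_qhp(), e.g.:
 *
 *      ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
 *      ...
 *      qhp = get_qhp(rhp, qpid);       // qpid extracted from the CQE
 */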
static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
                                 void *handle, u32 id, int lock)
{
        int ret;
        int newid;

        do {
                if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC))
                        return -ENOMEM;
                if (lock)
                        spin_lock_irq(&rhp->lock);
                ret = idr_get_new_above(idr, handle, id, &newid);
                BUG_ON(!ret && newid != id);
                if (lock)
                        spin_unlock_irq(&rhp->lock);
        } while (ret == -EAGAIN);

        return ret;
}

static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
                                void *handle, u32 id)
{
        return _insert_handle(rhp, idr, handle, id, 1);
}

static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
                                       void *handle, u32 id)
{
        return _insert_handle(rhp, idr, handle, id, 0);
}

static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
                                  u32 id, int lock)
{
        if (lock)
                spin_lock_irq(&rhp->lock);
        idr_remove(idr, id);
        if (lock)
                spin_unlock_irq(&rhp->lock);
}

static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
{
        _remove_handle(rhp, idr, id, 1);
}

static inline void remove_handle_nolock(struct c4iw_dev *rhp,
                                        struct idr *idr, u32 id)
{
        _remove_handle(rhp, idr, id, 0);
}

extern int c4iw_max_read_depth;

static inline int cur_max_read_depth(struct c4iw_dev *dev)
{
        return min(dev->rdev.adap->params.max_ordird_qp, c4iw_max_read_depth);
}

struct c4iw_pd {
        struct ib_pd ibpd;
        u32 pdid;
        struct c4iw_dev *rhp;
};

static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct c4iw_pd, ibpd);
}

struct tpt_attributes {
        u64 len;
        u64 va_fbo;
        enum fw_ri_mem_perms perms;
        u32 stag;
        u32 pdid;
        u32 qpid;
        u32 pbl_addr;
        u32 pbl_size;
        u32 state:1;
        u32 type:2;
        u32 rsvd:1;
        u32 remote_invaliate_disable:1;
        u32 zbva:1;
        u32 mw_bind_enable:1;
        u32 page_size:5;
};

struct c4iw_mr {
        struct ib_mr ibmr;
        struct ib_umem *umem;
        struct c4iw_dev *rhp;
        u64 kva;
        struct tpt_attributes attr;
        u64 *mpl;
        dma_addr_t mpl_addr;
        u32 max_mpl_len;
        u32 mpl_len;
};

static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct c4iw_mr, ibmr);
}

struct c4iw_mw {
        struct ib_mw ibmw;
        struct c4iw_dev *rhp;
        u64 kva;
        struct tpt_attributes attr;
};

static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
{
        return container_of(ibmw, struct c4iw_mw, ibmw);
}

struct c4iw_cq {
        struct ib_cq ibcq;
        struct c4iw_dev *rhp;
        struct t4_cq cq;
        spinlock_t lock;
        spinlock_t comp_handler_lock;
        atomic_t refcnt;
        wait_queue_head_t wait;
};

static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct c4iw_cq, ibcq);
}

struct c4iw_mpa_attributes {
        u8 initiator;
        u8 recv_marker_enabled;
        u8 xmit_marker_enabled;
        u8 crc_enabled;
        u8 enhanced_rdma_conn;
        u8 version;
        u8 p2p_type;
};
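/*
 * Illustrative only (not part of the original header): these fields
 * record the outcome of MPA negotiation.  For example, after parsing a
 * received MPA frame the CM code would set them roughly like:
 *
 *      mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) ? 1 : 0;
 *      mpa_attr.recv_marker_enabled = (mpa->flags & MPA_MARKERS) ? 1 : 0;
 *      mpa_attr.version = mpa->revision;
 *
 * (struct mpa_message, MPA_CRC, and MPA_MARKERS are defined further below.)
 */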
struct c4iw_qp_attributes {
        u32 scq;
        u32 rcq;
        u32 sq_num_entries;
        u32 rq_num_entries;
        u32 sq_max_sges;
        u32 sq_max_sges_rdma_write;
        u32 rq_max_sges;
        u32 state;
        u8 enable_rdma_read;
        u8 enable_rdma_write;
        u8 enable_bind;
        u8 enable_mmid0_fastreg;
        u32 max_ord;
        u32 max_ird;
        u32 pd;
        u32 next_state;
        char terminate_buffer[52];
        u32 terminate_msg_len;
        u8 is_terminate_local;
        struct c4iw_mpa_attributes mpa_attr;
        struct c4iw_ep *llp_stream_handle;
        u8 layer_etype;
        u8 ecode;
        u16 sq_db_inc;
        u16 rq_db_inc;
        u8 send_term;
};

struct c4iw_qp {
        struct ib_qp ibqp;
        struct c4iw_dev *rhp;
        struct c4iw_ep *ep;
        struct c4iw_qp_attributes attr;
        struct t4_wq wq;
        spinlock_t lock;
        struct mutex mutex;
        struct kref kref;
        wait_queue_head_t wait;
        struct timer_list timer;
        int sq_sig_all;
        struct work_struct free_work;
        struct c4iw_ucontext *ucontext;
};

static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct c4iw_qp, ibqp);
}

struct c4iw_ucontext {
        struct ib_ucontext ibucontext;
        struct c4iw_dev_ucontext uctx;
        u32 key;
        spinlock_t mmap_lock;
        struct list_head mmaps;
        struct kref kref;
};

static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
{
        return container_of(c, struct c4iw_ucontext, ibucontext);
}

void _c4iw_free_ucontext(struct kref *kref);

static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
{
        kref_put(&ucontext->kref, _c4iw_free_ucontext);
}

static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
{
        kref_get(&ucontext->kref);
}

struct c4iw_mm_entry {
        struct list_head entry;
        u64 addr;
        u32 key;
        unsigned len;
};

static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
                                                u32 key, unsigned len)
{
        struct list_head *pos, *nxt;
        struct c4iw_mm_entry *mm;

        spin_lock(&ucontext->mmap_lock);
        list_for_each_safe(pos, nxt, &ucontext->mmaps) {

                mm = list_entry(pos, struct c4iw_mm_entry, entry);
                if (mm->key == key && mm->len == len) {
                        list_del_init(&mm->entry);
                        spin_unlock(&ucontext->mmap_lock);
                        CTR4(KTR_IW_CXGBE, "%s key 0x%x addr 0x%llx len %d",
                            __func__, key, (unsigned long long) mm->addr,
                            mm->len);
                        return mm;
                }
        }
        spin_unlock(&ucontext->mmap_lock);
        return NULL;
}

static inline void insert_mmap(struct c4iw_ucontext *ucontext,
                               struct c4iw_mm_entry *mm)
{
        spin_lock(&ucontext->mmap_lock);
        CTR4(KTR_IW_CXGBE, "%s key 0x%x addr 0x%llx len %d", __func__, mm->key,
            (unsigned long long) mm->addr, mm->len);
        list_add_tail(&mm->entry, &ucontext->mmaps);
        spin_unlock(&ucontext->mmap_lock);
}

enum c4iw_qp_attr_mask {
        C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
        C4IW_QP_ATTR_SQ_DB = 1<<1,
        C4IW_QP_ATTR_RQ_DB = 1<<2,
        C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
        C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
        C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
        C4IW_QP_ATTR_MAX_ORD = 1 << 11,
        C4IW_QP_ATTR_MAX_IRD = 1 << 12,
        C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
        C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
        C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
        C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
        C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
                                     C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
                                     C4IW_QP_ATTR_MAX_ORD |
                                     C4IW_QP_ATTR_MAX_IRD |
                                     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
                                     C4IW_QP_ATTR_STREAM_MSG_BUFFER |
                                     C4IW_QP_ATTR_MPA_ATTR |
                                     C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
};

int c4iw_modify_qp(struct c4iw_dev *rhp,
                   struct c4iw_qp *qhp,
                   enum c4iw_qp_attr_mask mask,
                   struct c4iw_qp_attributes *attrs,
                   int internal);

enum c4iw_qp_state {
        C4IW_QP_STATE_IDLE,
        C4IW_QP_STATE_RTS,
        C4IW_QP_STATE_ERROR,
        C4IW_QP_STATE_TERMINATE,
        C4IW_QP_STATE_CLOSING,
        C4IW_QP_STATE_TOT
};
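/*
 * Illustrative only (not part of the original header): a state
 * transition is requested by setting next_state and passing
 * C4IW_QP_ATTR_NEXT_STATE in the mask, e.g. moving a QP to RTS once the
 * MPA exchange completes:
 *
 *      struct c4iw_qp_attributes attrs;
 *
 *      memset(&attrs, 0, sizeof(attrs));
 *      attrs.next_state = C4IW_QP_STATE_RTS;
 *      ret = c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
 *          &attrs, 1);         // internal == 1
 */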
/*
 * IW_CXGBE event bits.
 * These bits are used for handling all events for a particular 'ep' serially.
 */
#define C4IW_EVENT_SOCKET 0x0001
#define C4IW_EVENT_TIMEOUT 0x0002
#define C4IW_EVENT_TERM 0x0004

static inline int c4iw_convert_state(enum ib_qp_state ib_state)
{
        switch (ib_state) {
        case IB_QPS_RESET:
        case IB_QPS_INIT:
                return C4IW_QP_STATE_IDLE;
        case IB_QPS_RTS:
                return C4IW_QP_STATE_RTS;
        case IB_QPS_SQD:
                return C4IW_QP_STATE_CLOSING;
        case IB_QPS_SQE:
                return C4IW_QP_STATE_TERMINATE;
        case IB_QPS_ERR:
                return C4IW_QP_STATE_ERROR;
        default:
                return -1;
        }
}

static inline int to_ib_qp_state(int c4iw_qp_state)
{
        switch (c4iw_qp_state) {
        case C4IW_QP_STATE_IDLE:
                return IB_QPS_INIT;
        case C4IW_QP_STATE_RTS:
                return IB_QPS_RTS;
        case C4IW_QP_STATE_CLOSING:
                return IB_QPS_SQD;
        case C4IW_QP_STATE_TERMINATE:
                return IB_QPS_SQE;
        case C4IW_QP_STATE_ERROR:
                return IB_QPS_ERR;
        }
        return IB_QPS_ERR;
}

#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN

static inline u32 c4iw_ib_to_tpt_access(int a)
{
        return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
               (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
               (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
               FW_RI_MEM_ACCESS_LOCAL_READ;
}

static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
}

enum c4iw_mmid_state {
        C4IW_STAG_STATE_VALID,
        C4IW_STAG_STATE_INVALID
};

#define C4IW_NODE_DESC "iw_cxgbe Chelsio Communications"

#define MPA_KEY_REQ "MPA ID Req Frame"
#define MPA_KEY_REP "MPA ID Rep Frame"

#define MPA_MAX_PRIVATE_DATA 256
#define MPA_ENHANCED_RDMA_CONN 0x10
#define MPA_REJECT 0x20
#define MPA_CRC 0x40
#define MPA_MARKERS 0x80
#define MPA_FLAGS_MASK 0xE0

#define MPA_V2_PEER2PEER_MODEL 0x8000
#define MPA_V2_ZERO_LEN_FPDU_RTR 0x4000
#define MPA_V2_RDMA_WRITE_RTR 0x8000
#define MPA_V2_RDMA_READ_RTR 0x4000
#define MPA_V2_IRD_ORD_MASK 0x3FFF

#define c4iw_put_ep(ep) { \
        CTR4(KTR_IW_CXGBE, "put_ep (%s:%u) ep %p, refcnt %d", \
            __func__, __LINE__, ep, atomic_read(&(ep)->kref.refcount)); \
        WARN_ON(atomic_read(&(ep)->kref.refcount) < 1); \
        kref_put(&((ep)->kref), _c4iw_free_ep); \
}

#define c4iw_get_ep(ep) { \
        CTR4(KTR_IW_CXGBE, "get_ep (%s:%u) ep %p, refcnt %d", \
            __func__, __LINE__, ep, atomic_read(&(ep)->kref.refcount)); \
        kref_get(&((ep)->kref)); \
}

void _c4iw_free_ep(struct kref *kref);

struct mpa_message {
        u8 key[16];
        u8 flags;
        u8 revision;
        __be16 private_data_size;
        u8 private_data[0];
};

struct mpa_v2_conn_params {
        __be16 ird;
        __be16 ord;
};

struct terminate_message {
        u8 layer_etype;
        u8 ecode;
        __be16 hdrct_rsvd;
        u8 len_hdrs[0];
};

#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)

enum c4iw_layers_types {
        LAYER_RDMAP = 0x00,
        LAYER_DDP = 0x10,
        LAYER_MPA = 0x20,
        RDMAP_LOCAL_CATA = 0x00,
        RDMAP_REMOTE_PROT = 0x01,
        RDMAP_REMOTE_OP = 0x02,
        DDP_LOCAL_CATA = 0x00,
        DDP_TAGGED_ERR = 0x01,
        DDP_UNTAGGED_ERR = 0x02,
        DDP_LLP = 0x03
};
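/*
 * Illustrative only (not part of the original header): in an enhanced
 * MPA (v2) exchange the IRD/ORD fields of struct mpa_v2_conn_params
 * carry RTR control bits in their top two bits, so the CM code decodes
 * them along these lines:
 *
 *      u16 ird = ntohs(p->ird);
 *      u16 ord = ntohs(p->ord);
 *
 *      ep->ird = ird & MPA_V2_IRD_ORD_MASK;
 *      ep->ord = ord & MPA_V2_IRD_ORD_MASK;
 *      if ((ird & MPA_V2_PEER2PEER_MODEL) &&
 *          (ord & MPA_V2_RDMA_WRITE_RTR)) {
 *              ... peer wants a zero-length RDMA write as the RTR message
 *      }
 */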
enum c4iw_rdma_ecodes {
        RDMAP_INV_STAG = 0x00,
        RDMAP_BASE_BOUNDS = 0x01,
        RDMAP_ACC_VIOL = 0x02,
        RDMAP_STAG_NOT_ASSOC = 0x03,
        RDMAP_TO_WRAP = 0x04,
        RDMAP_INV_VERS = 0x05,
        RDMAP_INV_OPCODE = 0x06,
        RDMAP_STREAM_CATA = 0x07,
        RDMAP_GLOBAL_CATA = 0x08,
        RDMAP_CANT_INV_STAG = 0x09,
        RDMAP_UNSPECIFIED = 0xff
};

enum c4iw_ddp_ecodes {
        DDPT_INV_STAG = 0x00,
        DDPT_BASE_BOUNDS = 0x01,
        DDPT_STAG_NOT_ASSOC = 0x02,
        DDPT_TO_WRAP = 0x03,
        DDPT_INV_VERS = 0x04,
        DDPU_INV_QN = 0x01,
        DDPU_INV_MSN_NOBUF = 0x02,
        DDPU_INV_MSN_RANGE = 0x03,
        DDPU_INV_MO = 0x04,
        DDPU_MSG_TOOBIG = 0x05,
        DDPU_INV_VERS = 0x06
};

enum c4iw_mpa_ecodes {
        MPA_CRC_ERR = 0x02,
        MPA_MARKER_ERR = 0x03,
        MPA_LOCAL_CATA = 0x05,
        MPA_INSUFF_IRD = 0x06,
        MPA_NOMATCH_RTR = 0x07,
};

enum c4iw_ep_state {
        IDLE = 0,
        LISTEN,
        CONNECTING,
        MPA_REQ_WAIT,
        MPA_REQ_SENT,
        MPA_REQ_RCVD,
        MPA_REP_SENT,
        FPDU_MODE,
        ABORTING,
        CLOSING,
        MORIBUND,
        DEAD,
};

enum c4iw_ep_flags {
        PEER_ABORT_IN_PROGRESS = 0,
        ABORT_REQ_IN_PROGRESS = 1,
        RELEASE_RESOURCES = 2,
        CLOSE_SENT = 3,
        TIMEOUT = 4,
        QP_REFERENCED = 5,
        STOP_MPA_TIMER = 7,
};

enum c4iw_ep_history {
        ACT_OPEN_REQ = 0,
        ACT_OFLD_CONN = 1,
        ACT_OPEN_RPL = 2,
        ACT_ESTAB = 3,
        PASS_ACCEPT_REQ = 4,
        PASS_ESTAB = 5,
        ABORT_UPCALL = 6,
        ESTAB_UPCALL = 7,
        CLOSE_UPCALL = 8,
        ULP_ACCEPT = 9,
        ULP_REJECT = 10,
        TIMEDOUT = 11,
        PEER_ABORT = 12,
        PEER_CLOSE = 13,
        CONNREQ_UPCALL = 14,
        ABORT_CONN = 15,
        DISCONN_UPCALL = 16,
        EP_DISC_CLOSE = 17,
        EP_DISC_ABORT = 18,
        CONN_RPL_UPCALL = 19,
        ACT_RETRY_NOMEM = 20,
        ACT_RETRY_INUSE = 21,
        CLOSE_CON_RPL = 22,
        EP_DISC_FAIL = 24,
        QP_REFED = 25,
        QP_DEREFED = 26,
        CM_ID_REFED = 27,
        CM_ID_DEREFED = 28
};

struct c4iw_ep_common {
        TAILQ_ENTRY(c4iw_ep_common) entry;      /* Work queue attachment */
        struct iw_cm_id *cm_id;
        struct c4iw_qp *qp;
        struct c4iw_dev *dev;
        enum c4iw_ep_state state;
        struct kref kref;
        struct mutex mutex;
        struct sockaddr_storage local_addr;
        struct sockaddr_storage remote_addr;
        struct c4iw_wr_wait wr_wait;
        unsigned long flags;
        unsigned long history;
        int rpl_err;
        int rpl_done;
        struct thread *thread;
        struct socket *so;
        int ep_events;
};

struct c4iw_listen_ep {
        struct c4iw_ep_common com;
        unsigned int stid;
        int backlog;
        struct list_head listen_ep_list;        /* list of all listener ep's
                                                   bound to one port address */
};

struct c4iw_ep {
        struct c4iw_ep_common com;
        struct c4iw_listen_ep *parent_ep;
        struct timer_list timer;
        unsigned int atid;
        u32 hwtid;
        u32 snd_seq;
        u32 rcv_seq;
        struct l2t_entry *l2t;
        struct dst_entry *dst;
        struct c4iw_mpa_attributes mpa_attr;
        u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
        unsigned int mpa_pkt_len;
        u32 ird;
        u32 ord;
        u32 tx_chan;
        u32 mtu;
        u16 mss;
        u16 plen;
        u16 rss_qid;
        u16 txq_idx;
        u16 ctrlq_idx;
        u8 tos;
        u8 retry_with_mpa_v1;
        u8 tried_with_mpa_v1;
};

static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
{
        return cm_id->provider_data;
}

static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
{
        return cm_id->provider_data;
}

static inline int compute_wscale(int win)
{
        int wscale = 0;

        while (wscale < 14 && (65535 << wscale) < win)
                wscale++;
        return wscale;
}
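/*
 * Illustrative only (not part of the original header): compute_wscale()
 * returns the smallest TCP window-scale shift (capped at 14, per RFC
 * 7323) that lets the 16-bit window field cover 'win'.  For example, a
 * 1 MB receive window needs wscale 5:
 *
 *      65535 << 4 = 1048560 < 1048576, but
 *      65535 << 5 = 2097120 >= 1048576, so compute_wscale(1048576) == 5.
 */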
u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
                        u32 reserved, u32 flags);
void c4iw_id_table_free(struct c4iw_id_table *alloc);

typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct mbuf *m);

int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
                     struct l2t_entry *l2t);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_register_device(struct c4iw_dev *dev);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
void __exit c4iw_cm_term(void);
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
                               struct c4iw_dev_ucontext *uctx);
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
                            struct c4iw_dev_ucontext *uctx);
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                   struct ib_send_wr **bad_wr);
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                      struct ib_recv_wr **bad_wr);
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
int c4iw_destroy_listen(struct iw_cm_id *cm_id);
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
void c4iw_qp_add_ref(struct ib_qp *qp);
void c4iw_qp_rem_ref(struct ib_qp *qp);
struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
                            u32 max_num_sg);
int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
                   int sg_nents, unsigned int *sg_offset);
int c4iw_dealloc_mw(struct ib_mw *mw);
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
                            struct ib_udata *udata);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                               u64 virt, int acc, struct ib_udata *udata);
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
int c4iw_dereg_mr(struct ib_mr *ib_mr);
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
int c4iw_destroy_cq(struct ib_cq *ib_cq);
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
                             const struct ib_cq_init_attr *attr,
                             struct ib_ucontext *ib_context,
                             struct ib_udata *udata);
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int c4iw_destroy_qp(struct ib_qp *ib_qp);
struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
                             struct ib_qp_init_attr *attrs,
                             struct ib_udata *udata);
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask, struct ib_udata *udata);
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                     int attr_mask, struct ib_qp_init_attr *init_attr);
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
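/*
 * Illustrative only (not part of the original header): the PBL and RQT
 * pools are vmem arenas carved out of adapter memory; an allocation
 * returns an adapter address (0 on failure) and is paired with a free of
 * the same size, e.g. for a page-list of npages entries of 8 bytes each:
 *
 *      mhp->attr.pbl_size = npages;
 *      mhp->attr.pbl_addr = c4iw_pblpool_alloc(&rhp->rdev, npages << 3);
 *      ...
 *      c4iw_pblpool_free(&rhp->rdev, mhp->attr.pbl_addr,
 *          mhp->attr.pbl_size << 3);
 */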
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct mbuf *m);
void c4iw_flush_hw_cq(struct c4iw_cq *cq);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int __c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_flush_sq(struct c4iw_qp *qhp);
int c4iw_ev_handler(struct sge_iq *, const struct rsp_ctrl *);
u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx);
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
#endif