/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
30256694Snp * 31256694Snp * $FreeBSD: releng/10.3/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h 273246 2014-10-18 07:07:34Z hselasky $ 32256694Snp */ 33256694Snp#ifndef __IW_CXGB4_H__ 34256694Snp#define __IW_CXGB4_H__ 35256694Snp 36256694Snp#include <linux/list.h> 37256694Snp#include <linux/spinlock.h> 38256694Snp#include <linux/idr.h> 39256694Snp#include <linux/completion.h> 40256694Snp#include <linux/netdevice.h> 41256694Snp#include <linux/sched.h> 42256694Snp#include <linux/pci.h> 43256694Snp#include <linux/dma-mapping.h> 44256694Snp#include <linux/wait.h> 45256694Snp#include <linux/kref.h> 46256694Snp#include <linux/timer.h> 47256694Snp#include <linux/io.h> 48256694Snp 49256694Snp#include <asm/byteorder.h> 50256694Snp 51256694Snp#include <netinet/in.h> 52256694Snp#include <netinet/toecore.h> 53256694Snp 54256694Snp#include <rdma/ib_verbs.h> 55256694Snp#include <rdma/iw_cm.h> 56256694Snp 57256694Snp#undef prefetch 58256694Snp 59256694Snp#include "common/common.h" 60256694Snp#include "common/t4_msg.h" 61256694Snp#include "common/t4_regs.h" 62256694Snp#include "common/t4_tcb.h" 63256694Snp#include "t4_l2t.h" 64256694Snp 65256694Snp#define DRV_NAME "iw_cxgbe" 66256694Snp#define MOD DRV_NAME ":" 67256694Snp#define KTR_IW_CXGBE KTR_SPARE3 68256694Snp 69256694Snpextern int c4iw_debug; 70256694Snp#define PDBG(fmt, args...) 
\ 71256694Snpdo { \ 72256694Snp if (c4iw_debug) \ 73256694Snp printf(MOD fmt, ## args); \ 74256694Snp} while (0) 75256694Snp 76256694Snp#include "t4.h" 77256694Snp 78256694Snpstatic inline void *cplhdr(struct mbuf *m) 79256694Snp{ 80256694Snp return mtod(m, void*); 81256694Snp} 82256694Snp 83256694Snp#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->adap->vres.pbl.start) 84256694Snp#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->adap->vres.rq.start) 85256694Snp 86256694Snp#define C4IW_ID_TABLE_F_RANDOM 1 /* Pseudo-randomize the id's returned */ 87256694Snp#define C4IW_ID_TABLE_F_EMPTY 2 /* Table is initially empty */ 88256694Snp 89256694Snpstruct c4iw_id_table { 90256694Snp u32 flags; 91256694Snp u32 start; /* logical minimal id */ 92256694Snp u32 last; /* hint for find */ 93256694Snp u32 max; 94256694Snp spinlock_t lock; 95256694Snp unsigned long *table; 96256694Snp}; 97256694Snp 98256694Snpstruct c4iw_resource { 99256694Snp struct c4iw_id_table tpt_table; 100256694Snp struct c4iw_id_table qid_table; 101256694Snp struct c4iw_id_table pdid_table; 102256694Snp}; 103256694Snp 104256694Snpstruct c4iw_qid_list { 105256694Snp struct list_head entry; 106256694Snp u32 qid; 107256694Snp}; 108256694Snp 109256694Snpstruct c4iw_dev_ucontext { 110256694Snp struct list_head qpids; 111256694Snp struct list_head cqids; 112256694Snp struct mutex lock; 113256694Snp}; 114256694Snp 115256694Snpenum c4iw_rdev_flags { 116256694Snp T4_FATAL_ERROR = (1<<0), 117256694Snp}; 118256694Snp 119256694Snpstruct c4iw_stat { 120256694Snp u64 total; 121256694Snp u64 cur; 122256694Snp u64 max; 123256694Snp u64 fail; 124256694Snp}; 125256694Snp 126256694Snpstruct c4iw_stats { 127256694Snp struct mutex lock; 128256694Snp struct c4iw_stat qid; 129256694Snp struct c4iw_stat pd; 130256694Snp struct c4iw_stat stag; 131256694Snp struct c4iw_stat pbl; 132256694Snp struct c4iw_stat rqt; 133256694Snp u64 db_full; 134256694Snp u64 db_empty; 135256694Snp u64 db_drop; 136256694Snp u64 db_state_transitions; 137256694Snp}; 
138256694Snp 139256694Snpstruct c4iw_rdev { 140256694Snp struct adapter *adap; 141256694Snp struct c4iw_resource resource; 142256694Snp unsigned long qpshift; 143256694Snp u32 qpmask; 144256694Snp unsigned long cqshift; 145256694Snp u32 cqmask; 146256694Snp struct c4iw_dev_ucontext uctx; 147256694Snp struct gen_pool *pbl_pool; 148256694Snp struct gen_pool *rqt_pool; 149256694Snp u32 flags; 150256694Snp struct c4iw_stats stats; 151256694Snp}; 152256694Snp 153256694Snpstatic inline int c4iw_fatal_error(struct c4iw_rdev *rdev) 154256694Snp{ 155256694Snp return rdev->flags & T4_FATAL_ERROR; 156256694Snp} 157256694Snp 158256694Snpstatic inline int c4iw_num_stags(struct c4iw_rdev *rdev) 159256694Snp{ 160256694Snp return min((int)T4_MAX_NUM_STAG, (int)(rdev->adap->vres.stag.size >> 5)); 161256694Snp} 162256694Snp 163256694Snp#define C4IW_WR_TO (10*HZ) 164256694Snp 165256694Snpstruct c4iw_wr_wait { 166256694Snp int ret; 167256694Snp atomic_t completion; 168256694Snp}; 169256694Snp 170256694Snpstatic inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp) 171256694Snp{ 172256694Snp wr_waitp->ret = 0; 173256694Snp atomic_set(&wr_waitp->completion, 0); 174256694Snp} 175256694Snp 176256694Snpstatic inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret) 177256694Snp{ 178256694Snp wr_waitp->ret = ret; 179256694Snp atomic_set(&wr_waitp->completion, 1); 180256694Snp wakeup(wr_waitp); 181256694Snp} 182256694Snp 183256694Snpstatic inline int 184256694Snpc4iw_wait_for_reply(struct c4iw_rdev *rdev, struct c4iw_wr_wait *wr_waitp, 185256694Snp u32 hwtid, u32 qpid, const char *func) 186256694Snp{ 187256694Snp struct adapter *sc = rdev->adap; 188256694Snp unsigned to = C4IW_WR_TO; 189256694Snp 190256694Snp while (!atomic_read(&wr_waitp->completion)) { 191256694Snp tsleep(wr_waitp, 0, "c4iw_wait", to); 192256694Snp if (SIGPENDING(curthread)) { 193256694Snp printf("%s - Device %s not responding - " 194256694Snp "tid %u qpid %u\n", func, 195256694Snp 
device_get_nameunit(sc->dev), hwtid, qpid); 196256694Snp if (c4iw_fatal_error(rdev)) { 197256694Snp wr_waitp->ret = -EIO; 198256694Snp break; 199256694Snp } 200256694Snp to = to << 2; 201256694Snp } 202256694Snp } 203256694Snp if (wr_waitp->ret) 204256694Snp CTR4(KTR_IW_CXGBE, "%s: FW reply %d tid %u qpid %u", 205256694Snp device_get_nameunit(sc->dev), wr_waitp->ret, hwtid, qpid); 206256694Snp return (wr_waitp->ret); 207256694Snp} 208256694Snp 209256694Snpenum db_state { 210256694Snp NORMAL = 0, 211256694Snp FLOW_CONTROL = 1, 212256694Snp RECOVERY = 2 213256694Snp}; 214256694Snp 215256694Snpstruct c4iw_dev { 216256694Snp struct ib_device ibdev; 217256694Snp struct c4iw_rdev rdev; 218256694Snp u32 device_cap_flags; 219256694Snp struct idr cqidr; 220256694Snp struct idr qpidr; 221256694Snp struct idr mmidr; 222256694Snp spinlock_t lock; 223256694Snp struct dentry *debugfs_root; 224256694Snp enum db_state db_state; 225256694Snp int qpcnt; 226256694Snp}; 227256694Snp 228256694Snpstatic inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev) 229256694Snp{ 230256694Snp return container_of(ibdev, struct c4iw_dev, ibdev); 231256694Snp} 232256694Snp 233256694Snpstatic inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev) 234256694Snp{ 235256694Snp return container_of(rdev, struct c4iw_dev, rdev); 236256694Snp} 237256694Snp 238256694Snpstatic inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid) 239256694Snp{ 240256694Snp return idr_find(&rhp->cqidr, cqid); 241256694Snp} 242256694Snp 243256694Snpstatic inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid) 244256694Snp{ 245256694Snp return idr_find(&rhp->qpidr, qpid); 246256694Snp} 247256694Snp 248256694Snpstatic inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid) 249256694Snp{ 250256694Snp return idr_find(&rhp->mmidr, mmid); 251256694Snp} 252256694Snp 253256694Snpstatic inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr, 254256694Snp void *handle, u32 id, int lock) 
255256694Snp{ 256256694Snp int ret; 257256694Snp int newid; 258256694Snp 259256694Snp do { 260256694Snp if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC)) 261256694Snp return -ENOMEM; 262256694Snp if (lock) 263256694Snp spin_lock_irq(&rhp->lock); 264256694Snp ret = idr_get_new_above(idr, handle, id, &newid); 265256694Snp BUG_ON(!ret && newid != id); 266256694Snp if (lock) 267256694Snp spin_unlock_irq(&rhp->lock); 268256694Snp } while (ret == -EAGAIN); 269256694Snp 270256694Snp return ret; 271256694Snp} 272256694Snp 273256694Snpstatic inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr, 274256694Snp void *handle, u32 id) 275256694Snp{ 276256694Snp return _insert_handle(rhp, idr, handle, id, 1); 277256694Snp} 278256694Snp 279256694Snpstatic inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr, 280256694Snp void *handle, u32 id) 281256694Snp{ 282256694Snp return _insert_handle(rhp, idr, handle, id, 0); 283256694Snp} 284256694Snp 285256694Snpstatic inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr, 286256694Snp u32 id, int lock) 287256694Snp{ 288256694Snp if (lock) 289256694Snp spin_lock_irq(&rhp->lock); 290256694Snp idr_remove(idr, id); 291256694Snp if (lock) 292256694Snp spin_unlock_irq(&rhp->lock); 293256694Snp} 294256694Snp 295256694Snpstatic inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id) 296256694Snp{ 297256694Snp _remove_handle(rhp, idr, id, 1); 298256694Snp} 299256694Snp 300256694Snpstatic inline void remove_handle_nolock(struct c4iw_dev *rhp, 301256694Snp struct idr *idr, u32 id) 302256694Snp{ 303256694Snp _remove_handle(rhp, idr, id, 0); 304256694Snp} 305256694Snp 306256694Snpstruct c4iw_pd { 307256694Snp struct ib_pd ibpd; 308256694Snp u32 pdid; 309256694Snp struct c4iw_dev *rhp; 310256694Snp}; 311256694Snp 312256694Snpstatic inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd) 313256694Snp{ 314256694Snp return container_of(ibpd, struct c4iw_pd, ibpd); 315256694Snp} 316256694Snp 
317256694Snpstruct tpt_attributes { 318256694Snp u64 len; 319256694Snp u64 va_fbo; 320256694Snp enum fw_ri_mem_perms perms; 321256694Snp u32 stag; 322256694Snp u32 pdid; 323256694Snp u32 qpid; 324256694Snp u32 pbl_addr; 325256694Snp u32 pbl_size; 326256694Snp u32 state:1; 327256694Snp u32 type:2; 328256694Snp u32 rsvd:1; 329256694Snp u32 remote_invaliate_disable:1; 330256694Snp u32 zbva:1; 331256694Snp u32 mw_bind_enable:1; 332256694Snp u32 page_size:5; 333256694Snp}; 334256694Snp 335256694Snpstruct c4iw_mr { 336256694Snp struct ib_mr ibmr; 337256694Snp struct ib_umem *umem; 338256694Snp struct c4iw_dev *rhp; 339256694Snp u64 kva; 340256694Snp struct tpt_attributes attr; 341256694Snp}; 342256694Snp 343256694Snpstatic inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr) 344256694Snp{ 345256694Snp return container_of(ibmr, struct c4iw_mr, ibmr); 346256694Snp} 347256694Snp 348256694Snpstruct c4iw_mw { 349256694Snp struct ib_mw ibmw; 350256694Snp struct c4iw_dev *rhp; 351256694Snp u64 kva; 352256694Snp struct tpt_attributes attr; 353256694Snp}; 354256694Snp 355256694Snpstatic inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw) 356256694Snp{ 357256694Snp return container_of(ibmw, struct c4iw_mw, ibmw); 358256694Snp} 359256694Snp 360256694Snpstruct c4iw_fr_page_list { 361256694Snp struct ib_fast_reg_page_list ibpl; 362256694Snp DECLARE_PCI_UNMAP_ADDR(mapping); 363256694Snp dma_addr_t dma_addr; 364256694Snp struct c4iw_dev *dev; 365256694Snp int size; 366256694Snp}; 367256694Snp 368256694Snpstatic inline struct c4iw_fr_page_list *to_c4iw_fr_page_list( 369256694Snp struct ib_fast_reg_page_list *ibpl) 370256694Snp{ 371256694Snp return container_of(ibpl, struct c4iw_fr_page_list, ibpl); 372256694Snp} 373256694Snp 374256694Snpstruct c4iw_cq { 375256694Snp struct ib_cq ibcq; 376256694Snp struct c4iw_dev *rhp; 377256694Snp struct t4_cq cq; 378256694Snp spinlock_t lock; 379256694Snp spinlock_t comp_handler_lock; 380256694Snp atomic_t refcnt; 381256694Snp wait_queue_head_t 
wait; 382256694Snp}; 383256694Snp 384256694Snpstatic inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq) 385256694Snp{ 386256694Snp return container_of(ibcq, struct c4iw_cq, ibcq); 387256694Snp} 388256694Snp 389256694Snpstruct c4iw_mpa_attributes { 390256694Snp u8 initiator; 391256694Snp u8 recv_marker_enabled; 392256694Snp u8 xmit_marker_enabled; 393256694Snp u8 crc_enabled; 394256694Snp u8 enhanced_rdma_conn; 395256694Snp u8 version; 396256694Snp u8 p2p_type; 397256694Snp}; 398256694Snp 399256694Snpstruct c4iw_qp_attributes { 400256694Snp u32 scq; 401256694Snp u32 rcq; 402256694Snp u32 sq_num_entries; 403256694Snp u32 rq_num_entries; 404256694Snp u32 sq_max_sges; 405256694Snp u32 sq_max_sges_rdma_write; 406256694Snp u32 rq_max_sges; 407256694Snp u32 state; 408256694Snp u8 enable_rdma_read; 409256694Snp u8 enable_rdma_write; 410256694Snp u8 enable_bind; 411256694Snp u8 enable_mmid0_fastreg; 412256694Snp u32 max_ord; 413256694Snp u32 max_ird; 414256694Snp u32 pd; 415256694Snp u32 next_state; 416256694Snp char terminate_buffer[52]; 417256694Snp u32 terminate_msg_len; 418256694Snp u8 is_terminate_local; 419256694Snp struct c4iw_mpa_attributes mpa_attr; 420256694Snp struct c4iw_ep *llp_stream_handle; 421256694Snp u8 layer_etype; 422256694Snp u8 ecode; 423256694Snp u16 sq_db_inc; 424256694Snp u16 rq_db_inc; 425256694Snp}; 426256694Snp 427256694Snpstruct c4iw_qp { 428256694Snp struct ib_qp ibqp; 429256694Snp struct c4iw_dev *rhp; 430256694Snp struct c4iw_ep *ep; 431256694Snp struct c4iw_qp_attributes attr; 432256694Snp struct t4_wq wq; 433256694Snp spinlock_t lock; 434256694Snp struct mutex mutex; 435256694Snp atomic_t refcnt; 436256694Snp wait_queue_head_t wait; 437256694Snp struct timer_list timer; 438256694Snp}; 439256694Snp 440256694Snpstatic inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp) 441256694Snp{ 442256694Snp return container_of(ibqp, struct c4iw_qp, ibqp); 443256694Snp} 444256694Snp 445256694Snpstruct c4iw_ucontext { 446256694Snp struct ib_ucontext 
ibucontext; 447256694Snp struct c4iw_dev_ucontext uctx; 448256694Snp u32 key; 449256694Snp spinlock_t mmap_lock; 450256694Snp struct list_head mmaps; 451256694Snp}; 452256694Snp 453256694Snpstatic inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c) 454256694Snp{ 455256694Snp return container_of(c, struct c4iw_ucontext, ibucontext); 456256694Snp} 457256694Snp 458256694Snpstruct c4iw_mm_entry { 459256694Snp struct list_head entry; 460256694Snp u64 addr; 461256694Snp u32 key; 462256694Snp unsigned len; 463256694Snp}; 464256694Snp 465256694Snpstatic inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext, 466256694Snp u32 key, unsigned len) 467256694Snp{ 468256694Snp struct list_head *pos, *nxt; 469256694Snp struct c4iw_mm_entry *mm; 470256694Snp 471256694Snp spin_lock(&ucontext->mmap_lock); 472256694Snp list_for_each_safe(pos, nxt, &ucontext->mmaps) { 473256694Snp 474256694Snp mm = list_entry(pos, struct c4iw_mm_entry, entry); 475256694Snp if (mm->key == key && mm->len == len) { 476256694Snp list_del_init(&mm->entry); 477256694Snp spin_unlock(&ucontext->mmap_lock); 478256694Snp CTR4(KTR_IW_CXGBE, "%s key 0x%x addr 0x%llx len %d", 479256694Snp __func__, key, (unsigned long long) mm->addr, 480256694Snp mm->len); 481256694Snp return mm; 482256694Snp } 483256694Snp } 484256694Snp spin_unlock(&ucontext->mmap_lock); 485256694Snp return NULL; 486256694Snp} 487256694Snp 488256694Snpstatic inline void insert_mmap(struct c4iw_ucontext *ucontext, 489256694Snp struct c4iw_mm_entry *mm) 490256694Snp{ 491256694Snp spin_lock(&ucontext->mmap_lock); 492256694Snp CTR4(KTR_IW_CXGBE, "%s key 0x%x addr 0x%llx len %d", __func__, mm->key, 493256694Snp (unsigned long long) mm->addr, mm->len); 494256694Snp list_add_tail(&mm->entry, &ucontext->mmaps); 495256694Snp spin_unlock(&ucontext->mmap_lock); 496256694Snp} 497256694Snp 498256694Snpenum c4iw_qp_attr_mask { 499256694Snp C4IW_QP_ATTR_NEXT_STATE = 1 << 0, 500256694Snp C4IW_QP_ATTR_SQ_DB = 1<<1, 501256694Snp 
C4IW_QP_ATTR_RQ_DB = 1<<2, 502256694Snp C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7, 503256694Snp C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8, 504256694Snp C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9, 505256694Snp C4IW_QP_ATTR_MAX_ORD = 1 << 11, 506256694Snp C4IW_QP_ATTR_MAX_IRD = 1 << 12, 507256694Snp C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22, 508256694Snp C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23, 509256694Snp C4IW_QP_ATTR_MPA_ATTR = 1 << 24, 510256694Snp C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25, 511256694Snp C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ | 512256694Snp C4IW_QP_ATTR_ENABLE_RDMA_WRITE | 513256694Snp C4IW_QP_ATTR_MAX_ORD | 514256694Snp C4IW_QP_ATTR_MAX_IRD | 515256694Snp C4IW_QP_ATTR_LLP_STREAM_HANDLE | 516256694Snp C4IW_QP_ATTR_STREAM_MSG_BUFFER | 517256694Snp C4IW_QP_ATTR_MPA_ATTR | 518256694Snp C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE) 519256694Snp}; 520256694Snp 521256694Snpint c4iw_modify_qp(struct c4iw_dev *rhp, 522256694Snp struct c4iw_qp *qhp, 523256694Snp enum c4iw_qp_attr_mask mask, 524256694Snp struct c4iw_qp_attributes *attrs, 525256694Snp int internal); 526256694Snp 527256694Snpenum c4iw_qp_state { 528256694Snp C4IW_QP_STATE_IDLE, 529256694Snp C4IW_QP_STATE_RTS, 530256694Snp C4IW_QP_STATE_ERROR, 531256694Snp C4IW_QP_STATE_TERMINATE, 532256694Snp C4IW_QP_STATE_CLOSING, 533256694Snp C4IW_QP_STATE_TOT 534256694Snp}; 535256694Snp 536256694Snpstatic inline int c4iw_convert_state(enum ib_qp_state ib_state) 537256694Snp{ 538256694Snp switch (ib_state) { 539256694Snp case IB_QPS_RESET: 540256694Snp case IB_QPS_INIT: 541256694Snp return C4IW_QP_STATE_IDLE; 542256694Snp case IB_QPS_RTS: 543256694Snp return C4IW_QP_STATE_RTS; 544256694Snp case IB_QPS_SQD: 545256694Snp return C4IW_QP_STATE_CLOSING; 546256694Snp case IB_QPS_SQE: 547256694Snp return C4IW_QP_STATE_TERMINATE; 548256694Snp case IB_QPS_ERR: 549256694Snp return C4IW_QP_STATE_ERROR; 550256694Snp default: 551256694Snp return -1; 552256694Snp } 553256694Snp} 554256694Snp 555256694Snpstatic inline int 
to_ib_qp_state(int c4iw_qp_state) 556256694Snp{ 557256694Snp switch (c4iw_qp_state) { 558256694Snp case C4IW_QP_STATE_IDLE: 559256694Snp return IB_QPS_INIT; 560256694Snp case C4IW_QP_STATE_RTS: 561256694Snp return IB_QPS_RTS; 562256694Snp case C4IW_QP_STATE_CLOSING: 563256694Snp return IB_QPS_SQD; 564256694Snp case C4IW_QP_STATE_TERMINATE: 565256694Snp return IB_QPS_SQE; 566256694Snp case C4IW_QP_STATE_ERROR: 567256694Snp return IB_QPS_ERR; 568256694Snp } 569256694Snp return IB_QPS_ERR; 570256694Snp} 571256694Snp 572256694Snpstatic inline u32 c4iw_ib_to_tpt_access(int a) 573256694Snp{ 574256694Snp return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) | 575256694Snp (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) | 576256694Snp (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) | 577256694Snp FW_RI_MEM_ACCESS_LOCAL_READ; 578256694Snp} 579256694Snp 580256694Snpstatic inline u32 c4iw_ib_to_tpt_bind_access(int acc) 581256694Snp{ 582256694Snp return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) | 583256694Snp (acc & IB_ACCESS_REMOTE_READ ? 
FW_RI_MEM_ACCESS_REM_READ : 0); 584256694Snp} 585256694Snp 586256694Snpenum c4iw_mmid_state { 587256694Snp C4IW_STAG_STATE_VALID, 588256694Snp C4IW_STAG_STATE_INVALID 589256694Snp}; 590256694Snp 591256694Snp#define C4IW_NODE_DESC "iw_cxgbe Chelsio Communications" 592256694Snp 593256694Snp#define MPA_KEY_REQ "MPA ID Req Frame" 594256694Snp#define MPA_KEY_REP "MPA ID Rep Frame" 595256694Snp 596256694Snp#define MPA_MAX_PRIVATE_DATA 256 597256694Snp#define MPA_ENHANCED_RDMA_CONN 0x10 598256694Snp#define MPA_REJECT 0x20 599256694Snp#define MPA_CRC 0x40 600256694Snp#define MPA_MARKERS 0x80 601256694Snp#define MPA_FLAGS_MASK 0xE0 602256694Snp 603256694Snp#define MPA_V2_PEER2PEER_MODEL 0x8000 604256694Snp#define MPA_V2_ZERO_LEN_FPDU_RTR 0x4000 605256694Snp#define MPA_V2_RDMA_WRITE_RTR 0x8000 606256694Snp#define MPA_V2_RDMA_READ_RTR 0x4000 607256694Snp#define MPA_V2_IRD_ORD_MASK 0x3FFF 608256694Snp 609256694Snp/* Fixme: Use atomic_read for kref.count as same as Linux */ 610256694Snp#define c4iw_put_ep(ep) { \ 611256694Snp CTR4(KTR_IW_CXGBE, "put_ep (%s:%u) ep %p, refcnt %d", \ 612256694Snp __func__, __LINE__, ep, (ep)->kref.count); \ 613256694Snp WARN_ON((ep)->kref.count < 1); \ 614256694Snp kref_put(&((ep)->kref), _c4iw_free_ep); \ 615256694Snp} 616256694Snp 617256694Snp/* Fixme: Use atomic_read for kref.count as same as Linux */ 618256694Snp#define c4iw_get_ep(ep) { \ 619256694Snp CTR4(KTR_IW_CXGBE, "get_ep (%s:%u) ep %p, refcnt %d", \ 620256694Snp __func__, __LINE__, ep, (ep)->kref.count); \ 621256694Snp kref_get(&((ep)->kref)); \ 622256694Snp} 623256694Snp 624256694Snpvoid _c4iw_free_ep(struct kref *kref); 625256694Snp 626256694Snpstruct mpa_message { 627256694Snp u8 key[16]; 628256694Snp u8 flags; 629256694Snp u8 revision; 630256694Snp __be16 private_data_size; 631256694Snp u8 private_data[0]; 632256694Snp}; 633256694Snp 634256694Snpstruct mpa_v2_conn_params { 635256694Snp __be16 ird; 636256694Snp __be16 ord; 637256694Snp}; 638256694Snp 639256694Snpstruct 
terminate_message { 640256694Snp u8 layer_etype; 641256694Snp u8 ecode; 642256694Snp __be16 hdrct_rsvd; 643256694Snp u8 len_hdrs[0]; 644256694Snp}; 645256694Snp 646256694Snp#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28) 647256694Snp 648256694Snpenum c4iw_layers_types { 649256694Snp LAYER_RDMAP = 0x00, 650256694Snp LAYER_DDP = 0x10, 651256694Snp LAYER_MPA = 0x20, 652256694Snp RDMAP_LOCAL_CATA = 0x00, 653256694Snp RDMAP_REMOTE_PROT = 0x01, 654256694Snp RDMAP_REMOTE_OP = 0x02, 655256694Snp DDP_LOCAL_CATA = 0x00, 656256694Snp DDP_TAGGED_ERR = 0x01, 657256694Snp DDP_UNTAGGED_ERR = 0x02, 658256694Snp DDP_LLP = 0x03 659256694Snp}; 660256694Snp 661256694Snpenum c4iw_rdma_ecodes { 662256694Snp RDMAP_INV_STAG = 0x00, 663256694Snp RDMAP_BASE_BOUNDS = 0x01, 664256694Snp RDMAP_ACC_VIOL = 0x02, 665256694Snp RDMAP_STAG_NOT_ASSOC = 0x03, 666256694Snp RDMAP_TO_WRAP = 0x04, 667256694Snp RDMAP_INV_VERS = 0x05, 668256694Snp RDMAP_INV_OPCODE = 0x06, 669256694Snp RDMAP_STREAM_CATA = 0x07, 670256694Snp RDMAP_GLOBAL_CATA = 0x08, 671256694Snp RDMAP_CANT_INV_STAG = 0x09, 672256694Snp RDMAP_UNSPECIFIED = 0xff 673256694Snp}; 674256694Snp 675256694Snpenum c4iw_ddp_ecodes { 676256694Snp DDPT_INV_STAG = 0x00, 677256694Snp DDPT_BASE_BOUNDS = 0x01, 678256694Snp DDPT_STAG_NOT_ASSOC = 0x02, 679256694Snp DDPT_TO_WRAP = 0x03, 680256694Snp DDPT_INV_VERS = 0x04, 681256694Snp DDPU_INV_QN = 0x01, 682256694Snp DDPU_INV_MSN_NOBUF = 0x02, 683256694Snp DDPU_INV_MSN_RANGE = 0x03, 684256694Snp DDPU_INV_MO = 0x04, 685256694Snp DDPU_MSG_TOOBIG = 0x05, 686256694Snp DDPU_INV_VERS = 0x06 687256694Snp}; 688256694Snp 689256694Snpenum c4iw_mpa_ecodes { 690256694Snp MPA_CRC_ERR = 0x02, 691256694Snp MPA_MARKER_ERR = 0x03, 692256694Snp MPA_LOCAL_CATA = 0x05, 693256694Snp MPA_INSUFF_IRD = 0x06, 694256694Snp MPA_NOMATCH_RTR = 0x07, 695256694Snp}; 696256694Snp 697256694Snpenum c4iw_ep_state { 698256694Snp IDLE = 0, 699256694Snp LISTEN, 700256694Snp CONNECTING, 701256694Snp MPA_REQ_WAIT, 702256694Snp 
MPA_REQ_SENT, 703256694Snp MPA_REQ_RCVD, 704256694Snp MPA_REP_SENT, 705256694Snp FPDU_MODE, 706256694Snp ABORTING, 707256694Snp CLOSING, 708256694Snp MORIBUND, 709256694Snp DEAD, 710256694Snp}; 711256694Snp 712256694Snpenum c4iw_ep_flags { 713256694Snp PEER_ABORT_IN_PROGRESS = 0, 714256694Snp ABORT_REQ_IN_PROGRESS = 1, 715256694Snp RELEASE_RESOURCES = 2, 716256694Snp CLOSE_SENT = 3, 717256694Snp TIMEOUT = 4 718256694Snp}; 719256694Snp 720256694Snpenum c4iw_ep_history { 721256694Snp ACT_OPEN_REQ = 0, 722256694Snp ACT_OFLD_CONN = 1, 723256694Snp ACT_OPEN_RPL = 2, 724256694Snp ACT_ESTAB = 3, 725256694Snp PASS_ACCEPT_REQ = 4, 726256694Snp PASS_ESTAB = 5, 727256694Snp ABORT_UPCALL = 6, 728256694Snp ESTAB_UPCALL = 7, 729256694Snp CLOSE_UPCALL = 8, 730256694Snp ULP_ACCEPT = 9, 731256694Snp ULP_REJECT = 10, 732256694Snp TIMEDOUT = 11, 733256694Snp PEER_ABORT = 12, 734256694Snp PEER_CLOSE = 13, 735256694Snp CONNREQ_UPCALL = 14, 736256694Snp ABORT_CONN = 15, 737256694Snp DISCONN_UPCALL = 16, 738256694Snp EP_DISC_CLOSE = 17, 739256694Snp EP_DISC_ABORT = 18, 740256694Snp CONN_RPL_UPCALL = 19, 741256694Snp ACT_RETRY_NOMEM = 20, 742256694Snp ACT_RETRY_INUSE = 21 743256694Snp}; 744256694Snp 745256694Snpstruct c4iw_ep_common { 746256694Snp TAILQ_ENTRY(c4iw_ep_common) entry; /* Work queue attachment */ 747256694Snp struct iw_cm_id *cm_id; 748256694Snp struct c4iw_qp *qp; 749256694Snp struct c4iw_dev *dev; 750256694Snp enum c4iw_ep_state state; 751256694Snp struct kref kref; 752256694Snp struct mutex mutex; 753256694Snp struct sockaddr_in local_addr; 754256694Snp struct sockaddr_in remote_addr; 755256694Snp struct c4iw_wr_wait wr_wait; 756256694Snp unsigned long flags; 757256694Snp unsigned long history; 758256694Snp int rpl_err; 759256694Snp int rpl_done; 760256694Snp struct thread *thread; 761256694Snp struct socket *so; 762256694Snp}; 763256694Snp 764256694Snpstruct c4iw_listen_ep { 765256694Snp struct c4iw_ep_common com; 766256694Snp unsigned int stid; 767256694Snp int backlog; 
768256694Snp}; 769256694Snp 770256694Snpstruct c4iw_ep { 771256694Snp struct c4iw_ep_common com; 772256694Snp struct c4iw_ep *parent_ep; 773256694Snp struct timer_list timer; 774256694Snp struct list_head entry; 775256694Snp unsigned int atid; 776256694Snp u32 hwtid; 777256694Snp u32 snd_seq; 778256694Snp u32 rcv_seq; 779256694Snp struct l2t_entry *l2t; 780256694Snp struct dst_entry *dst; 781256694Snp struct c4iw_mpa_attributes mpa_attr; 782256694Snp u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA]; 783256694Snp unsigned int mpa_pkt_len; 784256694Snp u32 ird; 785256694Snp u32 ord; 786256694Snp u32 smac_idx; 787256694Snp u32 tx_chan; 788256694Snp u32 mtu; 789256694Snp u16 mss; 790256694Snp u16 emss; 791256694Snp u16 plen; 792256694Snp u16 rss_qid; 793256694Snp u16 txq_idx; 794256694Snp u16 ctrlq_idx; 795256694Snp u8 tos; 796256694Snp u8 retry_with_mpa_v1; 797256694Snp u8 tried_with_mpa_v1; 798256694Snp}; 799256694Snp 800256694Snpstatic inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id) 801256694Snp{ 802256694Snp return cm_id->provider_data; 803256694Snp} 804256694Snp 805256694Snpstatic inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id) 806256694Snp{ 807256694Snp return cm_id->provider_data; 808256694Snp} 809256694Snp 810256694Snpstatic inline int compute_wscale(int win) 811256694Snp{ 812256694Snp int wscale = 0; 813256694Snp 814256694Snp while (wscale < 14 && (65535<<wscale) < win) 815256694Snp wscale++; 816256694Snp return wscale; 817256694Snp} 818256694Snp 819256694Snpu32 c4iw_id_alloc(struct c4iw_id_table *alloc); 820256694Snpvoid c4iw_id_free(struct c4iw_id_table *alloc, u32 obj); 821256694Snpint c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num, 822256694Snp u32 reserved, u32 flags); 823256694Snpvoid c4iw_id_table_free(struct c4iw_id_table *alloc); 824256694Snp 825256694Snptypedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct mbuf *m); 826256694Snp 827256694Snpint c4iw_ep_redirect(void *ctx, struct 
dst_entry *old, struct dst_entry *new, 828256694Snp struct l2t_entry *l2t); 829256694Snpu32 c4iw_get_resource(struct c4iw_id_table *id_table); 830256694Snpvoid c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry); 831256694Snpint c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid); 832256694Snpint c4iw_init_ctrl_qp(struct c4iw_rdev *rdev); 833256694Snpint c4iw_pblpool_create(struct c4iw_rdev *rdev); 834256694Snpint c4iw_rqtpool_create(struct c4iw_rdev *rdev); 835256694Snpvoid c4iw_pblpool_destroy(struct c4iw_rdev *rdev); 836256694Snpvoid c4iw_rqtpool_destroy(struct c4iw_rdev *rdev); 837256694Snpvoid c4iw_destroy_resource(struct c4iw_resource *rscp); 838256694Snpint c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev); 839256694Snpint c4iw_register_device(struct c4iw_dev *dev); 840256694Snpvoid c4iw_unregister_device(struct c4iw_dev *dev); 841256694Snpint __init c4iw_cm_init(void); 842256694Snpvoid __exit c4iw_cm_term(void); 843256694Snpvoid c4iw_release_dev_ucontext(struct c4iw_rdev *rdev, 844256694Snp struct c4iw_dev_ucontext *uctx); 845256694Snpvoid c4iw_init_dev_ucontext(struct c4iw_rdev *rdev, 846256694Snp struct c4iw_dev_ucontext *uctx); 847256694Snpint c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); 848256694Snpint c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 849256694Snp struct ib_send_wr **bad_wr); 850256694Snpint c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, 851256694Snp struct ib_recv_wr **bad_wr); 852256694Snpint c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, 853256694Snp struct ib_mw_bind *mw_bind); 854256694Snpint c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param); 855256694Snpint c4iw_create_listen(struct iw_cm_id *cm_id, int backlog); 856256694Snpint c4iw_destroy_listen(struct iw_cm_id *cm_id); 857256694Snpint c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param); 858256694Snpint c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 
pdata_len); 859256694Snpvoid c4iw_qp_add_ref(struct ib_qp *qp); 860256694Snpvoid c4iw_qp_rem_ref(struct ib_qp *qp); 861256694Snpvoid c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list); 862256694Snpstruct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl( 863256694Snp struct ib_device *device, 864256694Snp int page_list_len); 865256694Snpstruct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth); 866256694Snpint c4iw_dealloc_mw(struct ib_mw *mw); 867256694Snpstruct ib_mw *c4iw_alloc_mw(struct ib_pd *pd); 868256694Snpstruct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 869256694Snp virt, int acc, struct ib_udata *udata, int mr_id); 870256694Snpstruct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc); 871256694Snpstruct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd, 872256694Snp struct ib_phys_buf *buffer_list, 873256694Snp int num_phys_buf, 874256694Snp int acc, 875256694Snp u64 *iova_start); 876256694Snpint c4iw_reregister_phys_mem(struct ib_mr *mr, 877256694Snp int mr_rereg_mask, 878256694Snp struct ib_pd *pd, 879256694Snp struct ib_phys_buf *buffer_list, 880256694Snp int num_phys_buf, 881256694Snp int acc, u64 *iova_start); 882256694Snpint c4iw_dereg_mr(struct ib_mr *ib_mr); 883256694Snpint c4iw_destroy_cq(struct ib_cq *ib_cq); 884256694Snpstruct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, 885256694Snp int vector, 886256694Snp struct ib_ucontext *ib_context, 887256694Snp struct ib_udata *udata); 888256694Snpint c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata); 889256694Snpint c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); 890256694Snpint c4iw_destroy_qp(struct ib_qp *ib_qp); 891256694Snpstruct ib_qp *c4iw_create_qp(struct ib_pd *pd, 892256694Snp struct ib_qp_init_attr *attrs, 893256694Snp struct ib_udata *udata); 894256694Snpint c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 895256694Snp int attr_mask, struct ib_udata *udata); 896256694Snpint 
c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 897256694Snp int attr_mask, struct ib_qp_init_attr *init_attr); 898256694Snpstruct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn); 899256694Snpu32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size); 900256694Snpvoid c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size); 901256694Snpu32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size); 902256694Snpvoid c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size); 903256694Snpint c4iw_ofld_send(struct c4iw_rdev *rdev, struct mbuf *m); 904256694Snpvoid c4iw_flush_hw_cq(struct t4_cq *cq); 905256694Snpvoid c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count); 906256694Snpvoid c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count); 907256694Snpint c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp); 908256694Snpint c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count); 909256694Snpint c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count); 910256694Snpint c4iw_ev_handler(struct sge_iq *, const struct rsp_ctrl *); 911256694Snpu16 c4iw_rqes_posted(struct c4iw_qp *qhp); 912256694Snpint c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe); 913256694Snpu32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx); 914256694Snpvoid c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid, 915256694Snp struct c4iw_dev_ucontext *uctx); 916256694Snpu32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx); 917256694Snpvoid c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid, 918256694Snp struct c4iw_dev_ucontext *uctx); 919256694Snpvoid c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe); 920256694Snp 921256694Snpextern struct cxgb4_client t4c_client; 922256694Snpextern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS]; 923256694Snpextern int c4iw_max_read_depth; 924256694Snp 925256694Snp#include <sys/blist.h> 926256694Snpstruct gen_pool { 927256694Snp blist_t gen_list; 
928256694Snp daddr_t gen_base; 929256694Snp int gen_chunk_shift; 930256694Snp struct mutex gen_lock; 931256694Snp}; 932256694Snp 933256694Snpstatic __inline struct gen_pool * 934256694Snpgen_pool_create(daddr_t base, u_int chunk_shift, u_int len) 935256694Snp{ 936256694Snp struct gen_pool *gp; 937256694Snp 938256694Snp gp = malloc(sizeof(struct gen_pool), M_DEVBUF, M_NOWAIT); 939256694Snp if (gp == NULL) 940256694Snp return (NULL); 941256694Snp 942256694Snp memset(gp, 0, sizeof(struct gen_pool)); 943256694Snp gp->gen_list = blist_create(len >> chunk_shift, M_NOWAIT); 944256694Snp if (gp->gen_list == NULL) { 945256694Snp free(gp, M_DEVBUF); 946256694Snp return (NULL); 947256694Snp } 948256694Snp blist_free(gp->gen_list, 0, len >> chunk_shift); 949256694Snp gp->gen_base = base; 950256694Snp gp->gen_chunk_shift = chunk_shift; 951256694Snp //mutex_init(&gp->gen_lock, "genpool", NULL, MTX_DUPOK|MTX_DEF); 952256694Snp mutex_init(&gp->gen_lock); 953256694Snp 954256694Snp return (gp); 955256694Snp} 956256694Snp 957256694Snpstatic __inline unsigned long 958256694Snpgen_pool_alloc(struct gen_pool *gp, int size) 959256694Snp{ 960256694Snp int chunks; 961256694Snp daddr_t blkno; 962256694Snp 963256694Snp chunks = (size + (1<<gp->gen_chunk_shift) - 1) >> gp->gen_chunk_shift; 964256694Snp mutex_lock(&gp->gen_lock); 965256694Snp blkno = blist_alloc(gp->gen_list, chunks); 966256694Snp mutex_unlock(&gp->gen_lock); 967256694Snp 968256694Snp if (blkno == SWAPBLK_NONE) 969256694Snp return (0); 970256694Snp 971256694Snp return (gp->gen_base + ((1 << gp->gen_chunk_shift) * blkno)); 972256694Snp} 973256694Snp 974256694Snpstatic __inline void 975256694Snpgen_pool_free(struct gen_pool *gp, daddr_t address, int size) 976256694Snp{ 977256694Snp int chunks; 978256694Snp daddr_t blkno; 979256694Snp 980256694Snp chunks = (size + (1<<gp->gen_chunk_shift) - 1) >> gp->gen_chunk_shift; 981256694Snp blkno = (address - gp->gen_base) / (1 << gp->gen_chunk_shift); 982256694Snp 
mutex_lock(&gp->gen_lock); 983256694Snp blist_free(gp->gen_list, blkno, chunks); 984256694Snp mutex_unlock(&gp->gen_lock); 985256694Snp} 986256694Snp 987256694Snpstatic __inline void 988256694Snpgen_pool_destroy(struct gen_pool *gp) 989256694Snp{ 990256694Snp blist_destroy(gp->gen_list); 991256694Snp free(gp, M_DEVBUF); 992256694Snp} 993256694Snp 994256694Snp#if defined(__i386__) || defined(__amd64__) 995256694Snp#define L1_CACHE_BYTES 128 996256694Snp#else 997256694Snp#define L1_CACHE_BYTES 32 998256694Snp#endif 999256694Snp 1000256694Snpstatic inline 1001256694Snpint idr_for_each(struct idr *idp, 1002256694Snp int (*fn)(int id, void *p, void *data), void *data) 1003256694Snp{ 1004256694Snp int n, id, max, error = 0; 1005256694Snp struct idr_layer *p; 1006256694Snp struct idr_layer *pa[MAX_LEVEL]; 1007256694Snp struct idr_layer **paa = &pa[0]; 1008256694Snp 1009256694Snp n = idp->layers * IDR_BITS; 1010256694Snp p = idp->top; 1011256694Snp max = 1 << n; 1012256694Snp 1013256694Snp id = 0; 1014256694Snp while (id < max) { 1015256694Snp while (n > 0 && p) { 1016256694Snp n -= IDR_BITS; 1017256694Snp *paa++ = p; 1018256694Snp p = p->ary[(id >> n) & IDR_MASK]; 1019256694Snp } 1020256694Snp 1021256694Snp if (p) { 1022256694Snp error = fn(id, (void *)p, data); 1023256694Snp if (error) 1024256694Snp break; 1025256694Snp } 1026256694Snp 1027256694Snp id += 1 << n; 1028256694Snp while (n < fls(id)) { 1029256694Snp n += IDR_BITS; 1030256694Snp p = *--paa; 1031256694Snp } 1032256694Snp } 1033256694Snp 1034256694Snp return error; 1035256694Snp} 1036256694Snp 1037256694Snpvoid c4iw_cm_init_cpl(struct adapter *); 1038256694Snpvoid c4iw_cm_term_cpl(struct adapter *); 1039256694Snp 1040256694Snpvoid your_reg_device(struct c4iw_dev *dev); 1041256694Snp 1042256694Snp#define SGE_CTRLQ_NUM 0 1043256694Snp 1044256694Snpextern int spg_creds;/* Status Page size in credit units(1 unit = 64) */ 1045256694Snp#endif 1046