--- iw_cxgbe.h (256694)
+++ iw_cxgbe.h (273135)
1/*
2 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 * - Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials
20 * provided with the distribution.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
26 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
27 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
28 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 * SOFTWARE.
30 *
- 31 * $FreeBSD: head/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h 256694 2013-10-17 18:37:25Z np $
+ 31 * $FreeBSD: head/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h 273135 2014-10-15 13:40:29Z hselasky $
32 */
33#ifndef __IW_CXGB4_H__
34#define __IW_CXGB4_H__
35
36#include <linux/list.h>
37#include <linux/spinlock.h>
38#include <linux/idr.h>
39#include <linux/completion.h>
40#include <linux/netdevice.h>
41#include <linux/sched.h>
42#include <linux/pci.h>
43#include <linux/dma-mapping.h>
- 44#include <linux/inet.h>
45#include <linux/wait.h>
46#include <linux/kref.h>
47#include <linux/timer.h>
48#include <linux/io.h>
49
50#include <asm/byteorder.h>
51
52#include <netinet/in.h>
53#include <netinet/toecore.h>
54
55#include <rdma/ib_verbs.h>
56#include <rdma/iw_cm.h>
57
58#undef prefetch
59
60#include "common/common.h"
61#include "common/t4_msg.h"
62#include "common/t4_regs.h"
63#include "common/t4_tcb.h"
64#include "t4_l2t.h"
65
66#define DRV_NAME "iw_cxgbe"
67#define MOD DRV_NAME ":"
68#define KTR_IW_CXGBE KTR_SPARE3
69
70extern int c4iw_debug;
71#define PDBG(fmt, args...) \
72do { \
73 if (c4iw_debug) \
74 printf(MOD fmt, ## args); \
75} while (0)
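/*
 * Example (illustrative): PDBG("%s: qpid 0x%x\n", __func__, qpid);
 * Output is gated at runtime by the global c4iw_debug flag declared above.
 */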
76
77#include "t4.h"
78
79static inline void *cplhdr(struct mbuf *m)
80{
81 return mtod(m, void*);
82}
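/*
 * Illustrative use of cplhdr() in a CPL handler (not part of this header;
 * cpl_act_establish and GET_TID come from common/t4_msg.h):
 *
 *	const struct cpl_act_establish *cpl = cplhdr(m);
 *	unsigned int tid = GET_TID(cpl);
 */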
83
84#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->adap->vres.pbl.start)
85#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->adap->vres.rq.start)
86
 87#define C4IW_ID_TABLE_F_RANDOM 1 /* Pseudo-randomize the ids returned */
88#define C4IW_ID_TABLE_F_EMPTY 2 /* Table is initially empty */
89
90struct c4iw_id_table {
91 u32 flags;
92 u32 start; /* logical minimal id */
93 u32 last; /* hint for find */
94 u32 max;
95 spinlock_t lock;
96 unsigned long *table;
97};
98
99struct c4iw_resource {
100 struct c4iw_id_table tpt_table;
101 struct c4iw_id_table qid_table;
102 struct c4iw_id_table pdid_table;
103};
104
105struct c4iw_qid_list {
106 struct list_head entry;
107 u32 qid;
108};
109
110struct c4iw_dev_ucontext {
111 struct list_head qpids;
112 struct list_head cqids;
113 struct mutex lock;
114};
115
116enum c4iw_rdev_flags {
117 T4_FATAL_ERROR = (1<<0),
118};
119
120struct c4iw_stat {
121 u64 total;
122 u64 cur;
123 u64 max;
124 u64 fail;
125};
126
127struct c4iw_stats {
128 struct mutex lock;
129 struct c4iw_stat qid;
130 struct c4iw_stat pd;
131 struct c4iw_stat stag;
132 struct c4iw_stat pbl;
133 struct c4iw_stat rqt;
134 u64 db_full;
135 u64 db_empty;
136 u64 db_drop;
137 u64 db_state_transitions;
138};
139
140struct c4iw_rdev {
141 struct adapter *adap;
142 struct c4iw_resource resource;
143 unsigned long qpshift;
144 u32 qpmask;
145 unsigned long cqshift;
146 u32 cqmask;
147 struct c4iw_dev_ucontext uctx;
148 struct gen_pool *pbl_pool;
149 struct gen_pool *rqt_pool;
150 u32 flags;
151 struct c4iw_stats stats;
152};
153
154static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
155{
156 return rdev->flags & T4_FATAL_ERROR;
157}
158
159static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
160{
161 return min((int)T4_MAX_NUM_STAG, (int)(rdev->adap->vres.stag.size >> 5));
162}
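/*
 * The >> 5 above assumes each on-chip TPT entry occupies 32 bytes, so
 * vres.stag.size >> 5 is the number of stags the region can hold, capped
 * at T4_MAX_NUM_STAG.
 */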
163
164#define C4IW_WR_TO (10*HZ)
165
166struct c4iw_wr_wait {
167 int ret;
168 atomic_t completion;
169};
170
171static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
172{
173 wr_waitp->ret = 0;
174 atomic_set(&wr_waitp->completion, 0);
175}
176
177static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
178{
179 wr_waitp->ret = ret;
180 atomic_set(&wr_waitp->completion, 1);
181 wakeup(wr_waitp);
182}
183
184static inline int
185c4iw_wait_for_reply(struct c4iw_rdev *rdev, struct c4iw_wr_wait *wr_waitp,
186 u32 hwtid, u32 qpid, const char *func)
187{
188 struct adapter *sc = rdev->adap;
189 unsigned to = C4IW_WR_TO;
190
191 while (!atomic_read(&wr_waitp->completion)) {
192 tsleep(wr_waitp, 0, "c4iw_wait", to);
193 if (SIGPENDING(curthread)) {
194 printf("%s - Device %s not responding - "
195 "tid %u qpid %u\n", func,
196 device_get_nameunit(sc->dev), hwtid, qpid);
197 if (c4iw_fatal_error(rdev)) {
198 wr_waitp->ret = -EIO;
199 break;
200 }
201 to = to << 2;
202 }
203 }
204 if (wr_waitp->ret)
205 CTR4(KTR_IW_CXGBE, "%s: FW reply %d tid %u qpid %u",
206 device_get_nameunit(sc->dev), wr_waitp->ret, hwtid, qpid);
207 return (wr_waitp->ret);
208}
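/*
 * Hedged sketch of the wr_wait protocol (the real call sites are in the
 * .c files): the submitter initializes the wait state, posts a firmware
 * work request whose cookie points at it, then blocks in
 * c4iw_wait_for_reply(); the reply handler completes it via c4iw_wake_up()
 * with the firmware return code.
 *
 *	struct c4iw_wr_wait wr_wait;
 *
 *	c4iw_init_wr_wait(&wr_wait);
 *	res_wr->cookie = (unsigned long)&wr_wait;
 *	ret = c4iw_ofld_send(rdev, m);
 *	if (ret == 0)
 *		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, qid, __func__);
 */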
209
210enum db_state {
211 NORMAL = 0,
212 FLOW_CONTROL = 1,
213 RECOVERY = 2
214};
215
216struct c4iw_dev {
217 struct ib_device ibdev;
218 struct c4iw_rdev rdev;
219 u32 device_cap_flags;
220 struct idr cqidr;
221 struct idr qpidr;
222 struct idr mmidr;
223 spinlock_t lock;
224 struct dentry *debugfs_root;
225 enum db_state db_state;
226 int qpcnt;
227};
228
229static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
230{
231 return container_of(ibdev, struct c4iw_dev, ibdev);
232}
233
234static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
235{
236 return container_of(rdev, struct c4iw_dev, rdev);
237}
238
239static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
240{
241 return idr_find(&rhp->cqidr, cqid);
242}
243
244static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
245{
246 return idr_find(&rhp->qpidr, qpid);
247}
248
249static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
250{
251 return idr_find(&rhp->mmidr, mmid);
252}
253
254static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
255 void *handle, u32 id, int lock)
256{
257 int ret;
258 int newid;
259
260 do {
261 if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC))
262 return -ENOMEM;
263 if (lock)
264 spin_lock_irq(&rhp->lock);
265 ret = idr_get_new_above(idr, handle, id, &newid);
266 BUG_ON(!ret && newid != id);
267 if (lock)
268 spin_unlock_irq(&rhp->lock);
269 } while (ret == -EAGAIN);
270
271 return ret;
272}
273
274static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
275 void *handle, u32 id)
276{
277 return _insert_handle(rhp, idr, handle, id, 1);
278}
279
280static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
281 void *handle, u32 id)
282{
283 return _insert_handle(rhp, idr, handle, id, 0);
284}
285
286static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
287 u32 id, int lock)
288{
289 if (lock)
290 spin_lock_irq(&rhp->lock);
291 idr_remove(idr, id);
292 if (lock)
293 spin_unlock_irq(&rhp->lock);
294}
295
296static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
297{
298 _remove_handle(rhp, idr, id, 1);
299}
300
301static inline void remove_handle_nolock(struct c4iw_dev *rhp,
302 struct idr *idr, u32 id)
303{
304 _remove_handle(rhp, idr, id, 0);
305}
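/*
 * Illustrative pairing of the handle helpers (assumed usage, mirroring
 * the QP/CQ code): a QP is published under its SQ qid and unpublished on
 * destroy.  The *_nolock variants are for callers already holding
 * rhp->lock.
 *
 *	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
 *	...
 *	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
 */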
306
307struct c4iw_pd {
308 struct ib_pd ibpd;
309 u32 pdid;
310 struct c4iw_dev *rhp;
311};
312
313static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
314{
315 return container_of(ibpd, struct c4iw_pd, ibpd);
316}
317
318struct tpt_attributes {
319 u64 len;
320 u64 va_fbo;
321 enum fw_ri_mem_perms perms;
322 u32 stag;
323 u32 pdid;
324 u32 qpid;
325 u32 pbl_addr;
326 u32 pbl_size;
327 u32 state:1;
328 u32 type:2;
329 u32 rsvd:1;
330 u32 remote_invaliate_disable:1;
331 u32 zbva:1;
332 u32 mw_bind_enable:1;
333 u32 page_size:5;
334};
335
336struct c4iw_mr {
337 struct ib_mr ibmr;
338 struct ib_umem *umem;
339 struct c4iw_dev *rhp;
340 u64 kva;
341 struct tpt_attributes attr;
342};
343
344static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
345{
346 return container_of(ibmr, struct c4iw_mr, ibmr);
347}
348
349struct c4iw_mw {
350 struct ib_mw ibmw;
351 struct c4iw_dev *rhp;
352 u64 kva;
353 struct tpt_attributes attr;
354};
355
356static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
357{
358 return container_of(ibmw, struct c4iw_mw, ibmw);
359}
360
361struct c4iw_fr_page_list {
362 struct ib_fast_reg_page_list ibpl;
363 DECLARE_PCI_UNMAP_ADDR(mapping);
364 dma_addr_t dma_addr;
365 struct c4iw_dev *dev;
366 int size;
367};
368
369static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
370 struct ib_fast_reg_page_list *ibpl)
371{
372 return container_of(ibpl, struct c4iw_fr_page_list, ibpl);
373}
374
375struct c4iw_cq {
376 struct ib_cq ibcq;
377 struct c4iw_dev *rhp;
378 struct t4_cq cq;
379 spinlock_t lock;
380 spinlock_t comp_handler_lock;
381 atomic_t refcnt;
382 wait_queue_head_t wait;
383};
384
385static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
386{
387 return container_of(ibcq, struct c4iw_cq, ibcq);
388}
389
390struct c4iw_mpa_attributes {
391 u8 initiator;
392 u8 recv_marker_enabled;
393 u8 xmit_marker_enabled;
394 u8 crc_enabled;
395 u8 enhanced_rdma_conn;
396 u8 version;
397 u8 p2p_type;
398};
399
400struct c4iw_qp_attributes {
401 u32 scq;
402 u32 rcq;
403 u32 sq_num_entries;
404 u32 rq_num_entries;
405 u32 sq_max_sges;
406 u32 sq_max_sges_rdma_write;
407 u32 rq_max_sges;
408 u32 state;
409 u8 enable_rdma_read;
410 u8 enable_rdma_write;
411 u8 enable_bind;
412 u8 enable_mmid0_fastreg;
413 u32 max_ord;
414 u32 max_ird;
415 u32 pd;
416 u32 next_state;
417 char terminate_buffer[52];
418 u32 terminate_msg_len;
419 u8 is_terminate_local;
420 struct c4iw_mpa_attributes mpa_attr;
421 struct c4iw_ep *llp_stream_handle;
422 u8 layer_etype;
423 u8 ecode;
424 u16 sq_db_inc;
425 u16 rq_db_inc;
426};
427
428struct c4iw_qp {
429 struct ib_qp ibqp;
430 struct c4iw_dev *rhp;
431 struct c4iw_ep *ep;
432 struct c4iw_qp_attributes attr;
433 struct t4_wq wq;
434 spinlock_t lock;
435 struct mutex mutex;
436 atomic_t refcnt;
437 wait_queue_head_t wait;
438 struct timer_list timer;
439};
440
441static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
442{
443 return container_of(ibqp, struct c4iw_qp, ibqp);
444}
445
446struct c4iw_ucontext {
447 struct ib_ucontext ibucontext;
448 struct c4iw_dev_ucontext uctx;
449 u32 key;
450 spinlock_t mmap_lock;
451 struct list_head mmaps;
452};
453
454static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
455{
456 return container_of(c, struct c4iw_ucontext, ibucontext);
457}
458
459struct c4iw_mm_entry {
460 struct list_head entry;
461 u64 addr;
462 u32 key;
463 unsigned len;
464};
465
466static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
467 u32 key, unsigned len)
468{
469 struct list_head *pos, *nxt;
470 struct c4iw_mm_entry *mm;
471
472 spin_lock(&ucontext->mmap_lock);
473 list_for_each_safe(pos, nxt, &ucontext->mmaps) {
474
475 mm = list_entry(pos, struct c4iw_mm_entry, entry);
476 if (mm->key == key && mm->len == len) {
477 list_del_init(&mm->entry);
478 spin_unlock(&ucontext->mmap_lock);
479 CTR4(KTR_IW_CXGBE, "%s key 0x%x addr 0x%llx len %d",
480 __func__, key, (unsigned long long) mm->addr,
481 mm->len);
482 return mm;
483 }
484 }
485 spin_unlock(&ucontext->mmap_lock);
486 return NULL;
487}
488
489static inline void insert_mmap(struct c4iw_ucontext *ucontext,
490 struct c4iw_mm_entry *mm)
491{
492 spin_lock(&ucontext->mmap_lock);
493 CTR4(KTR_IW_CXGBE, "%s key 0x%x addr 0x%llx len %d", __func__, mm->key,
494 (unsigned long long) mm->addr, mm->len);
495 list_add_tail(&mm->entry, &ucontext->mmaps);
496 spin_unlock(&ucontext->mmap_lock);
497}
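/*
 * The mmap bookkeeping above is a simple rendezvous: the provider
 * allocates a per-context key, records {key, addr, len}, and returns the
 * key to userspace, which passes it back as the mmap offset so the mmap
 * handler can claim the entry with remove_mmap().  A hedged sketch of the
 * provider side:
 *
 *	mm->key = ucontext->key;
 *	ucontext->key += PAGE_SIZE;
 *	mm->addr = ...;			(queue or doorbell bus address)
 *	mm->len = PAGE_SIZE;
 *	insert_mmap(ucontext, mm);
 */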
498
499enum c4iw_qp_attr_mask {
500 C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
501 C4IW_QP_ATTR_SQ_DB = 1<<1,
502 C4IW_QP_ATTR_RQ_DB = 1<<2,
503 C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
504 C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
505 C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
506 C4IW_QP_ATTR_MAX_ORD = 1 << 11,
507 C4IW_QP_ATTR_MAX_IRD = 1 << 12,
508 C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
509 C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
510 C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
511 C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
512 C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
513 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
514 C4IW_QP_ATTR_MAX_ORD |
515 C4IW_QP_ATTR_MAX_IRD |
516 C4IW_QP_ATTR_LLP_STREAM_HANDLE |
517 C4IW_QP_ATTR_STREAM_MSG_BUFFER |
518 C4IW_QP_ATTR_MPA_ATTR |
519 C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
520};
521
522int c4iw_modify_qp(struct c4iw_dev *rhp,
523 struct c4iw_qp *qhp,
524 enum c4iw_qp_attr_mask mask,
525 struct c4iw_qp_attributes *attrs,
526 int internal);
527
528enum c4iw_qp_state {
529 C4IW_QP_STATE_IDLE,
530 C4IW_QP_STATE_RTS,
531 C4IW_QP_STATE_ERROR,
532 C4IW_QP_STATE_TERMINATE,
533 C4IW_QP_STATE_CLOSING,
534 C4IW_QP_STATE_TOT
535};
536
537static inline int c4iw_convert_state(enum ib_qp_state ib_state)
538{
539 switch (ib_state) {
540 case IB_QPS_RESET:
541 case IB_QPS_INIT:
542 return C4IW_QP_STATE_IDLE;
543 case IB_QPS_RTS:
544 return C4IW_QP_STATE_RTS;
545 case IB_QPS_SQD:
546 return C4IW_QP_STATE_CLOSING;
547 case IB_QPS_SQE:
548 return C4IW_QP_STATE_TERMINATE;
549 case IB_QPS_ERR:
550 return C4IW_QP_STATE_ERROR;
551 default:
552 return -1;
553 }
554}
555
556static inline int to_ib_qp_state(int c4iw_qp_state)
557{
558 switch (c4iw_qp_state) {
559 case C4IW_QP_STATE_IDLE:
560 return IB_QPS_INIT;
561 case C4IW_QP_STATE_RTS:
562 return IB_QPS_RTS;
563 case C4IW_QP_STATE_CLOSING:
564 return IB_QPS_SQD;
565 case C4IW_QP_STATE_TERMINATE:
566 return IB_QPS_SQE;
567 case C4IW_QP_STATE_ERROR:
568 return IB_QPS_ERR;
569 }
570 return IB_QPS_ERR;
571}
572
573static inline u32 c4iw_ib_to_tpt_access(int a)
574{
575 return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
576 (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
577 (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
578 FW_RI_MEM_ACCESS_LOCAL_READ;
579}
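/*
 * For example, IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE maps to
 * FW_RI_MEM_ACCESS_LOCAL_WRITE | FW_RI_MEM_ACCESS_REM_WRITE |
 * FW_RI_MEM_ACCESS_LOCAL_READ; local read is always granted.
 */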
580
581static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
582{
583 return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
584 (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
585}
586
587enum c4iw_mmid_state {
588 C4IW_STAG_STATE_VALID,
589 C4IW_STAG_STATE_INVALID
590};
591
592#define C4IW_NODE_DESC "iw_cxgbe Chelsio Communications"
593
594#define MPA_KEY_REQ "MPA ID Req Frame"
595#define MPA_KEY_REP "MPA ID Rep Frame"
596
597#define MPA_MAX_PRIVATE_DATA 256
598#define MPA_ENHANCED_RDMA_CONN 0x10
599#define MPA_REJECT 0x20
600#define MPA_CRC 0x40
601#define MPA_MARKERS 0x80
602#define MPA_FLAGS_MASK 0xE0
603
604#define MPA_V2_PEER2PEER_MODEL 0x8000
605#define MPA_V2_ZERO_LEN_FPDU_RTR 0x4000
606#define MPA_V2_RDMA_WRITE_RTR 0x8000
607#define MPA_V2_RDMA_READ_RTR 0x4000
608#define MPA_V2_IRD_ORD_MASK 0x3FFF
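/*
 * In MPA v2 the top bits of each 16-bit field carry the RTR mode and the
 * low 14 bits carry IRD/ORD.  Illustrative decode using the masks above
 * (mpa_v2_conn_params is defined later in this header):
 *
 *	ird = ntohs(v2->ird) & MPA_V2_IRD_ORD_MASK;
 *	ord = ntohs(v2->ord) & MPA_V2_IRD_ORD_MASK;
 *	p2p = ntohs(v2->ird) & MPA_V2_PEER2PEER_MODEL;
 */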
609
 610/* FIXME: use atomic_read() for kref.count, the same as Linux does */
611#define c4iw_put_ep(ep) { \
612 CTR4(KTR_IW_CXGBE, "put_ep (%s:%u) ep %p, refcnt %d", \
613 __func__, __LINE__, ep, (ep)->kref.count); \
614 WARN_ON((ep)->kref.count < 1); \
615 kref_put(&((ep)->kref), _c4iw_free_ep); \
616}
617
 618/* FIXME: use atomic_read() for kref.count, the same as Linux does */
619#define c4iw_get_ep(ep) { \
620 CTR4(KTR_IW_CXGBE, "get_ep (%s:%u) ep %p, refcnt %d", \
621 __func__, __LINE__, ep, (ep)->kref.count); \
622 kref_get(&((ep)->kref)); \
623}
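/*
 * Refcount discipline: every c4iw_get_ep() must be balanced by a
 * c4iw_put_ep(); the final put releases the endpoint through
 * _c4iw_free_ep() via kref_put().
 */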
624
625void _c4iw_free_ep(struct kref *kref);
626
627struct mpa_message {
628 u8 key[16];
629 u8 flags;
630 u8 revision;
631 __be16 private_data_size;
632 u8 private_data[0];
633};
634
635struct mpa_v2_conn_params {
636 __be16 ird;
637 __be16 ord;
638};
639
640struct terminate_message {
641 u8 layer_etype;
642 u8 ecode;
643 __be16 hdrct_rsvd;
644 u8 len_hdrs[0];
645};
646
647#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
648
649enum c4iw_layers_types {
650 LAYER_RDMAP = 0x00,
651 LAYER_DDP = 0x10,
652 LAYER_MPA = 0x20,
653 RDMAP_LOCAL_CATA = 0x00,
654 RDMAP_REMOTE_PROT = 0x01,
655 RDMAP_REMOTE_OP = 0x02,
656 DDP_LOCAL_CATA = 0x00,
657 DDP_TAGGED_ERR = 0x01,
658 DDP_UNTAGGED_ERR = 0x02,
659 DDP_LLP = 0x03
660};
661
662enum c4iw_rdma_ecodes {
663 RDMAP_INV_STAG = 0x00,
664 RDMAP_BASE_BOUNDS = 0x01,
665 RDMAP_ACC_VIOL = 0x02,
666 RDMAP_STAG_NOT_ASSOC = 0x03,
667 RDMAP_TO_WRAP = 0x04,
668 RDMAP_INV_VERS = 0x05,
669 RDMAP_INV_OPCODE = 0x06,
670 RDMAP_STREAM_CATA = 0x07,
671 RDMAP_GLOBAL_CATA = 0x08,
672 RDMAP_CANT_INV_STAG = 0x09,
673 RDMAP_UNSPECIFIED = 0xff
674};
675
676enum c4iw_ddp_ecodes {
677 DDPT_INV_STAG = 0x00,
678 DDPT_BASE_BOUNDS = 0x01,
679 DDPT_STAG_NOT_ASSOC = 0x02,
680 DDPT_TO_WRAP = 0x03,
681 DDPT_INV_VERS = 0x04,
682 DDPU_INV_QN = 0x01,
683 DDPU_INV_MSN_NOBUF = 0x02,
684 DDPU_INV_MSN_RANGE = 0x03,
685 DDPU_INV_MO = 0x04,
686 DDPU_MSG_TOOBIG = 0x05,
687 DDPU_INV_VERS = 0x06
688};
689
690enum c4iw_mpa_ecodes {
691 MPA_CRC_ERR = 0x02,
692 MPA_MARKER_ERR = 0x03,
693 MPA_LOCAL_CATA = 0x05,
694 MPA_INSUFF_IRD = 0x06,
695 MPA_NOMATCH_RTR = 0x07,
696};
697
698enum c4iw_ep_state {
699 IDLE = 0,
700 LISTEN,
701 CONNECTING,
702 MPA_REQ_WAIT,
703 MPA_REQ_SENT,
704 MPA_REQ_RCVD,
705 MPA_REP_SENT,
706 FPDU_MODE,
707 ABORTING,
708 CLOSING,
709 MORIBUND,
710 DEAD,
711};
712
713enum c4iw_ep_flags {
714 PEER_ABORT_IN_PROGRESS = 0,
715 ABORT_REQ_IN_PROGRESS = 1,
716 RELEASE_RESOURCES = 2,
717 CLOSE_SENT = 3,
718 TIMEOUT = 4
719};
720
721enum c4iw_ep_history {
722 ACT_OPEN_REQ = 0,
723 ACT_OFLD_CONN = 1,
724 ACT_OPEN_RPL = 2,
725 ACT_ESTAB = 3,
726 PASS_ACCEPT_REQ = 4,
727 PASS_ESTAB = 5,
728 ABORT_UPCALL = 6,
729 ESTAB_UPCALL = 7,
730 CLOSE_UPCALL = 8,
731 ULP_ACCEPT = 9,
732 ULP_REJECT = 10,
733 TIMEDOUT = 11,
734 PEER_ABORT = 12,
735 PEER_CLOSE = 13,
736 CONNREQ_UPCALL = 14,
737 ABORT_CONN = 15,
738 DISCONN_UPCALL = 16,
739 EP_DISC_CLOSE = 17,
740 EP_DISC_ABORT = 18,
741 CONN_RPL_UPCALL = 19,
742 ACT_RETRY_NOMEM = 20,
743 ACT_RETRY_INUSE = 21
744};
745
746struct c4iw_ep_common {
747 TAILQ_ENTRY(c4iw_ep_common) entry; /* Work queue attachment */
748 struct iw_cm_id *cm_id;
749 struct c4iw_qp *qp;
750 struct c4iw_dev *dev;
751 enum c4iw_ep_state state;
752 struct kref kref;
753 struct mutex mutex;
754 struct sockaddr_in local_addr;
755 struct sockaddr_in remote_addr;
756 struct c4iw_wr_wait wr_wait;
757 unsigned long flags;
758 unsigned long history;
759 int rpl_err;
760 int rpl_done;
761 struct thread *thread;
762 struct socket *so;
763};
764
765struct c4iw_listen_ep {
766 struct c4iw_ep_common com;
767 unsigned int stid;
768 int backlog;
769};
770
771struct c4iw_ep {
772 struct c4iw_ep_common com;
773 struct c4iw_ep *parent_ep;
774 struct timer_list timer;
775 struct list_head entry;
776 unsigned int atid;
777 u32 hwtid;
778 u32 snd_seq;
779 u32 rcv_seq;
780 struct l2t_entry *l2t;
781 struct dst_entry *dst;
782 struct c4iw_mpa_attributes mpa_attr;
783 u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
784 unsigned int mpa_pkt_len;
785 u32 ird;
786 u32 ord;
787 u32 smac_idx;
788 u32 tx_chan;
789 u32 mtu;
790 u16 mss;
791 u16 emss;
792 u16 plen;
793 u16 rss_qid;
794 u16 txq_idx;
795 u16 ctrlq_idx;
796 u8 tos;
797 u8 retry_with_mpa_v1;
798 u8 tried_with_mpa_v1;
799};
800
801static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
802{
803 return cm_id->provider_data;
804}
805
806static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
807{
808 return cm_id->provider_data;
809}
810
811static inline int compute_wscale(int win)
812{
813 int wscale = 0;
814
815 while (wscale < 14 && (65535<<wscale) < win)
816 wscale++;
817 return wscale;
818}
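/*
 * Worked example: for a 256 KiB window, 65535 << 2 = 262140 is still less
 * than 262144, so compute_wscale(262144) returns 3.  The shift is capped
 * at 14, the maximum window scale allowed by RFC 1323.
 */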
819
820u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
821void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
822int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
823 u32 reserved, u32 flags);
824void c4iw_id_table_free(struct c4iw_id_table *alloc);
825
826typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct mbuf *m);
827
828int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
829 struct l2t_entry *l2t);
830u32 c4iw_get_resource(struct c4iw_id_table *id_table);
831void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
832int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
833int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
834int c4iw_pblpool_create(struct c4iw_rdev *rdev);
835int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
836void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
837void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
838void c4iw_destroy_resource(struct c4iw_resource *rscp);
839int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
840int c4iw_register_device(struct c4iw_dev *dev);
841void c4iw_unregister_device(struct c4iw_dev *dev);
842int __init c4iw_cm_init(void);
843void __exit c4iw_cm_term(void);
844void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
845 struct c4iw_dev_ucontext *uctx);
846void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
847 struct c4iw_dev_ucontext *uctx);
848int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
849int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
850 struct ib_send_wr **bad_wr);
851int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
852 struct ib_recv_wr **bad_wr);
853int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
854 struct ib_mw_bind *mw_bind);
855int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
856int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
857int c4iw_destroy_listen(struct iw_cm_id *cm_id);
858int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
859int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
860void c4iw_qp_add_ref(struct ib_qp *qp);
861void c4iw_qp_rem_ref(struct ib_qp *qp);
862void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list);
863struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
864 struct ib_device *device,
865 int page_list_len);
866struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
867int c4iw_dealloc_mw(struct ib_mw *mw);
868struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd);
869struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64
870 virt, int acc, struct ib_udata *udata, int mr_id);
871struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
872struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
873 struct ib_phys_buf *buffer_list,
874 int num_phys_buf,
875 int acc,
876 u64 *iova_start);
877int c4iw_reregister_phys_mem(struct ib_mr *mr,
878 int mr_rereg_mask,
879 struct ib_pd *pd,
880 struct ib_phys_buf *buffer_list,
881 int num_phys_buf,
882 int acc, u64 *iova_start);
883int c4iw_dereg_mr(struct ib_mr *ib_mr);
884int c4iw_destroy_cq(struct ib_cq *ib_cq);
885struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
886 int vector,
887 struct ib_ucontext *ib_context,
888 struct ib_udata *udata);
889int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
890int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
891int c4iw_destroy_qp(struct ib_qp *ib_qp);
892struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
893 struct ib_qp_init_attr *attrs,
894 struct ib_udata *udata);
895int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
896 int attr_mask, struct ib_udata *udata);
897int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
898 int attr_mask, struct ib_qp_init_attr *init_attr);
899struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
900u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
901void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
902u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
903void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
904int c4iw_ofld_send(struct c4iw_rdev *rdev, struct mbuf *m);
905void c4iw_flush_hw_cq(struct t4_cq *cq);
906void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
907void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
908int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
909int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
910int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
911int c4iw_ev_handler(struct sge_iq *, const struct rsp_ctrl *);
912u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
913int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
914u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
915void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
916 struct c4iw_dev_ucontext *uctx);
917u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
918void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
919 struct c4iw_dev_ucontext *uctx);
920void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
921
922extern struct cxgb4_client t4c_client;
923extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
924extern int c4iw_max_read_depth;
925
926#include <sys/blist.h>
927struct gen_pool {
928 blist_t gen_list;
929 daddr_t gen_base;
930 int gen_chunk_shift;
931 struct mutex gen_lock;
932};
933
934static __inline struct gen_pool *
935gen_pool_create(daddr_t base, u_int chunk_shift, u_int len)
936{
937 struct gen_pool *gp;
938
939 gp = malloc(sizeof(struct gen_pool), M_DEVBUF, M_NOWAIT);
940 if (gp == NULL)
941 return (NULL);
942
943 memset(gp, 0, sizeof(struct gen_pool));
944 gp->gen_list = blist_create(len >> chunk_shift, M_NOWAIT);
945 if (gp->gen_list == NULL) {
946 free(gp, M_DEVBUF);
947 return (NULL);
948 }
949 blist_free(gp->gen_list, 0, len >> chunk_shift);
950 gp->gen_base = base;
951 gp->gen_chunk_shift = chunk_shift;
952 //mutex_init(&gp->gen_lock, "genpool", NULL, MTX_DUPOK|MTX_DEF);
953 mutex_init(&gp->gen_lock);
954
955 return (gp);
956}
957
958static __inline unsigned long
959gen_pool_alloc(struct gen_pool *gp, int size)
960{
961 int chunks;
962 daddr_t blkno;
963
964 chunks = (size + (1<<gp->gen_chunk_shift) - 1) >> gp->gen_chunk_shift;
965 mutex_lock(&gp->gen_lock);
966 blkno = blist_alloc(gp->gen_list, chunks);
967 mutex_unlock(&gp->gen_lock);
968
969 if (blkno == SWAPBLK_NONE)
970 return (0);
971
972 return (gp->gen_base + ((1 << gp->gen_chunk_shift) * blkno));
973}
974
975static __inline void
976gen_pool_free(struct gen_pool *gp, daddr_t address, int size)
977{
978 int chunks;
979 daddr_t blkno;
980
981 chunks = (size + (1<<gp->gen_chunk_shift) - 1) >> gp->gen_chunk_shift;
982 blkno = (address - gp->gen_base) / (1 << gp->gen_chunk_shift);
983 mutex_lock(&gp->gen_lock);
984 blist_free(gp->gen_list, blkno, chunks);
985 mutex_unlock(&gp->gen_lock);
986}
987
988static __inline void
989gen_pool_destroy(struct gen_pool *gp)
990{
991 blist_destroy(gp->gen_list);
992 free(gp, M_DEVBUF);
993}
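/*
 * Hedged usage sketch of the gen_pool shim (the PBL/RQT pool helpers are
 * expected to use it roughly like this; the exact arguments live in the
 * resource code):
 *
 *	gp = gen_pool_create(rdev->adap->vres.pbl.start, shift,
 *	    rdev->adap->vres.pbl.size);
 *	addr = gen_pool_alloc(gp, size);	(returns 0 on failure)
 *	gen_pool_free(gp, addr, size);
 *	gen_pool_destroy(gp);
 */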
994
995#if defined(__i386__) || defined(__amd64__)
996#define L1_CACHE_BYTES 128
997#else
998#define L1_CACHE_BYTES 32
999#endif
1000
1001static inline
1002int idr_for_each(struct idr *idp,
1003 int (*fn)(int id, void *p, void *data), void *data)
1004{
1005 int n, id, max, error = 0;
1006 struct idr_layer *p;
1007 struct idr_layer *pa[MAX_LEVEL];
1008 struct idr_layer **paa = &pa[0];
1009
1010 n = idp->layers * IDR_BITS;
1011 p = idp->top;
1012 max = 1 << n;
1013
1014 id = 0;
1015 while (id < max) {
1016 while (n > 0 && p) {
1017 n -= IDR_BITS;
1018 *paa++ = p;
1019 p = p->ary[(id >> n) & IDR_MASK];
1020 }
1021
1022 if (p) {
1023 error = fn(id, (void *)p, data);
1024 if (error)
1025 break;
1026 }
1027
1028 id += 1 << n;
1029 while (n < fls(id)) {
1030 n += IDR_BITS;
1031 p = *--paa;
1032 }
1033 }
1034
1035 return error;
1036}
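/*
 * Illustrative idr_for_each() callback (assumed shape) that counts the
 * entries in an idr:
 *
 *	static int count_idrs(int id, void *p, void *data)
 *	{
 *		int *countp = data;
 *
 *		(*countp)++;
 *		return 0;	(nonzero would abort the walk)
 *	}
 *
 *	idr_for_each(&rhp->qpidr, count_idrs, &count);
 */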
1037
1038void c4iw_cm_init_cpl(struct adapter *);
1039void c4iw_cm_term_cpl(struct adapter *);
1040
1041void your_reg_device(struct c4iw_dev *dev);
1042
1043#define SGE_CTRLQ_NUM 0
1044
 1045 extern int spg_creds; /* Status Page size in credit units (1 unit = 64 bytes) */
1046#endif