ib_verbs.h: diff between revisions 270710 and 278886
1/*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.

--- 34 unchanged lines hidden (view full) ---

43#include <linux/device.h>
44#include <linux/mm.h>
45#include <linux/dma-mapping.h>
46#include <linux/kref.h>
47#include <linux/list.h>
48#include <linux/rwsem.h>
49#include <linux/scatterlist.h>
50#include <linux/workqueue.h>
51#include <linux/if_ether.h>
52#include <linux/mutex.h>
51
52#include <asm/uaccess.h>
53#include <linux/rbtree.h>
54#include <linux/mutex.h>
55
56extern struct workqueue_struct *ib_wq;
57
58union ib_gid {
59 u8 raw[16];
60 struct {
61 __be64 subnet_prefix;
62 __be64 interface_id;
63 } global;
64};
65
66enum rdma_node_type {
67 /* IB values map to NodeInfo:NodeType. */
68 RDMA_NODE_IB_CA = 1,
69 RDMA_NODE_IB_SWITCH,
70 RDMA_NODE_IB_ROUTER,
71 RDMA_NODE_RNIC
71 RDMA_NODE_RNIC,
72 RDMA_NODE_MIC
72};
73
74enum rdma_transport_type {
75 RDMA_TRANSPORT_IB,
76 RDMA_TRANSPORT_IWARP
77 RDMA_TRANSPORT_IWARP,
78 RDMA_TRANSPORT_SCIF
77};
78
79enum rdma_transport_type
80rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;
81
82enum rdma_link_layer {
83 IB_LINK_LAYER_UNSPECIFIED,
84 IB_LINK_LAYER_INFINIBAND,
85 IB_LINK_LAYER_ETHERNET,
88 IB_LINK_LAYER_SCIF
86};
87
88enum ib_device_cap_flags {
89 IB_DEVICE_RESIZE_MAX_WR = 1,
90 IB_DEVICE_BAD_PKEY_CNTR = (1<<1),
91 IB_DEVICE_BAD_QKEY_CNTR = (1<<2),
92 IB_DEVICE_RAW_MULTI = (1<<3),
93 IB_DEVICE_AUTO_PATH_MIG = (1<<4),

--- 21 unchanged lines hidden (view full) ---

115 IB_DEVICE_UD_TSO = (1<<19),
116 IB_DEVICE_XRC = (1<<20),
117 IB_DEVICE_MEM_MGT_EXTENSIONS = (1<<21),
118 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
119 IB_DEVICE_MR_ALLOCATE = (1<<23),
120 IB_DEVICE_SHARED_MR = (1<<24),
121 IB_DEVICE_QPG = (1<<25),
122 IB_DEVICE_UD_RSS = (1<<26),
123 IB_DEVICE_UD_TSS = (1<<27)
126 IB_DEVICE_UD_TSS = (1<<27),
127 IB_DEVICE_CROSS_CHANNEL = (1<<28),
128 IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29),
129 /*
130 * A device can set either IB_DEVICE_MEM_WINDOW_TYPE_2A or
131 * IB_DEVICE_MEM_WINDOW_TYPE_2B if it supports type 2A or type 2B
132 * memory windows. It can set neither to indicate it doesn't support
133 * type 2 windows at all.
134 */
135 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1<<30),
136 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<31),
137 IB_DEVICE_SIGNATURE_HANDOVER = (1LL<<32)
138};
139
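As a hedged illustration of the memory-window comment above (example, not part of the header; assumes a struct ib_device_attr already filled in by ib_query_device()):

static enum ib_mw_type pick_mw_type(const struct ib_device_attr *attr)
{
	/* Prefer a type 2 window when the device advertises either flavor. */
	if (attr->device_cap_flags & (IB_DEVICE_MEM_WINDOW_TYPE_2A |
				      IB_DEVICE_MEM_WINDOW_TYPE_2B))
		return IB_MW_TYPE_2;
	return IB_MW_TYPE_1;	/* no type 2 support */
}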
140enum ib_signature_prot_cap {
141 IB_PROT_T10DIF_TYPE_1 = 1,
142 IB_PROT_T10DIF_TYPE_2 = 1 << 1,
143 IB_PROT_T10DIF_TYPE_3 = 1 << 2,
144};
145
146enum ib_signature_guard_cap {
147 IB_GUARD_T10DIF_CRC = 1,
148 IB_GUARD_T10DIF_CSUM = 1 << 1,
149};
150
126enum ib_atomic_cap {
127 IB_ATOMIC_NONE,
128 IB_ATOMIC_HCA,
129 IB_ATOMIC_GLOB
130};
131
157enum ib_cq_create_flags {
158 IB_CQ_CREATE_CROSS_CHANNEL = 1 << 0,
159 IB_CQ_TIMESTAMP = 1 << 1,
160 IB_CQ_TIMESTAMP_TO_SYS_TIME = 1 << 2
161};
162
132struct ib_device_attr {
133 u64 fw_ver;
134 __be64 sys_image_guid;
135 u64 max_mr_size;
136 u64 page_size_cap;
137 u32 vendor_id;
138 u32 vendor_part_id;
139 u32 hw_ver;
140 int max_qp;
141 int max_qp_wr;
142 int device_cap_flags;
173 u64 device_cap_flags;
143 int max_sge;
144 int max_sge_rd;
145 int max_cq;
146 int max_cqe;
147 int max_mr;
148 int max_pd;
149 int max_qp_rd_atom;
150 int max_ee_rd_atom;

--- 15 unchanged lines hidden (view full) ---

166 int max_map_per_fmr;
167 int max_srq;
168 int max_srq_wr;
169 int max_srq_sge;
170 unsigned int max_fast_reg_page_list_len;
171 int max_rss_tbl_sz;
172 u16 max_pkeys;
173 u8 local_ca_ack_delay;
205 int comp_mask;
206 uint64_t timestamp_mask;
207 uint64_t hca_core_clock;
208 unsigned int sig_prot_cap;
209 unsigned int sig_guard_cap;
210};
211
212enum ib_device_attr_comp_mask {
213 IB_DEVICE_ATTR_WITH_TIMESTAMP_MASK = 1ULL << 1,
214 IB_DEVICE_ATTR_WITH_HCA_CORE_CLOCK = 1ULL << 2
215};
216
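A minimal sketch of the comp_mask contract introduced above: a consumer should only trust timestamp_mask and hca_core_clock when the corresponding bit is set (example, not part of the header):

static u64 query_core_clock(struct ib_device *device)
{
	struct ib_device_attr attr;

	if (ib_query_device(device, &attr))
		return 0;
	/* The field is only valid if the driver set the comp_mask bit. */
	if (!(attr.comp_mask & IB_DEVICE_ATTR_WITH_HCA_CORE_CLOCK))
		return 0;
	return attr.hca_core_clock;
}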
176enum ib_mtu {
177 IB_MTU_256 = 1,
178 IB_MTU_512 = 2,
179 IB_MTU_1024 = 3,
180 IB_MTU_2048 = 4,
181 IB_MTU_4096 = 5
182};
183

--- 10 unchanged lines hidden (view full) ---

194}
195
196enum ib_port_state {
197 IB_PORT_NOP = 0,
198 IB_PORT_DOWN = 1,
199 IB_PORT_INIT = 2,
200 IB_PORT_ARMED = 3,
201 IB_PORT_ACTIVE = 4,
202 IB_PORT_ACTIVE_DEFER = 5
243 IB_PORT_ACTIVE_DEFER = 5,
244 IB_PORT_DUMMY = -1 /* force enum signed */
203};
204
205enum ib_port_cap_flags {
206 IB_PORT_SM = 1 << 1,
207 IB_PORT_NOTICE_SUP = 1 << 2,
208 IB_PORT_TRAP_SUP = 1 << 3,
209 IB_PORT_OPT_IPD_SUP = 1 << 4,
210 IB_PORT_AUTO_MIGR_SUP = 1 << 5,

--- 110 unchanged lines hidden (view full) ---

321 u8 lmc;
322 u8 max_vl_num;
323 u8 sm_sl;
324 u8 subnet_timeout;
325 u8 init_type_reply;
326 u8 active_width;
327 u8 active_speed;
328 u8 phys_state;
329 enum rdma_link_layer link_layer;
330};
331
332enum ib_device_modify_flags {
333 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
334 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
335};
336
337struct ib_device_modify {

--- 30 unchanged lines hidden (view full) ---

368 IB_EVENT_SM_CHANGE,
369 IB_EVENT_SRQ_ERR,
370 IB_EVENT_SRQ_LIMIT_REACHED,
371 IB_EVENT_QP_LAST_WQE_REACHED,
372 IB_EVENT_CLIENT_REREGISTER,
373 IB_EVENT_GID_CHANGE,
374};
375
376enum ib_event_flags {
377 IB_XRC_QP_EVENT_FLAG = 0x80000000,
378};
379
380struct ib_event {
381 struct ib_device *device;
382 union {
383 struct ib_cq *cq;
384 struct ib_qp *qp;
385 struct ib_srq *srq;
386 u8 port_num;
387 u32 xrc_qp_num;
388 } element;
389 enum ib_event_type event;
390};
391
392struct ib_event_handler {
393 struct ib_device *device;
394 void (*handler)(struct ib_event_handler *, struct ib_event *);
395 struct list_head list;

--- 49 unchanged lines hidden (view full) ---

445 IB_RATE_112_GBPS = 13,
446 IB_RATE_168_GBPS = 14,
447 IB_RATE_25_GBPS = 15,
448 IB_RATE_100_GBPS = 16,
449 IB_RATE_200_GBPS = 17,
450 IB_RATE_300_GBPS = 18
451};
452
489enum ib_mr_create_flags {
490 IB_MR_SIGNATURE_EN = 1,
491};
492
493/**
494 * ib_mr_init_attr - Memory region init attributes passed to routine
495 * ib_create_mr.
496 * @max_reg_descriptors: max number of registration descriptors that
497 * may be used with registration work requests.
498 * @flags: MR creation flags bit mask.
499 */
500struct ib_mr_init_attr {
501 int max_reg_descriptors;
502 u32 flags;
503};
504
505/**
506 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
507 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be
508 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
509 * @rate: rate to convert.
510 */
511int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;
512
513/**
514 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
515 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
516 * @rate: rate to convert.
517 */
518int ib_rate_to_mbps(enum ib_rate rate) __attribute_const__;
519
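Worked examples of the conversions documented above:

	int mult = ib_rate_to_mult(IB_RATE_5_GBPS);	/* 2, i.e. 5 / 2.5 */
	int mbps = ib_rate_to_mbps(IB_RATE_2_5_GBPS);	/* 2500 */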
520struct ib_cq_init_attr {
521 int cqe;
522 int comp_vector;
523 u32 flags;
524};
525
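A hypothetical ib_cq_init_attr as a driver's create_cq method would receive it; the size and flag choice below are assumptions:

	struct ib_cq_init_attr cq_attr = {
		.cqe         = 256,
		.comp_vector = 0,
		.flags       = IB_CQ_TIMESTAMP,	/* see ib_cq_create_flags */
	};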
526enum ib_signature_type {
527 IB_SIG_TYPE_T10_DIF,
528};
529
530/**
531 * T10-DIF Signature types
532 * T10-DIF types are defined by SCSI
533 * specifications.
534 */
535enum ib_t10_dif_type {
536 IB_T10DIF_NONE,
537 IB_T10DIF_TYPE1,
538 IB_T10DIF_TYPE2,
539 IB_T10DIF_TYPE3
540};
541
542/**
543 * Signature T10-DIF block-guard types
544 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
545 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
546 */
547enum ib_t10_dif_bg_type {
548 IB_T10DIF_CRC,
549 IB_T10DIF_CSUM
550};
551
552/**
553 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
554 * domain.
555 * @type: T10-DIF type (0|1|2|3)
556 * @bg_type: T10-DIF block guard type (CRC|CSUM)
557 * @pi_interval: protection information interval.
558 * @bg: seed of guard computation.
559 * @app_tag: application tag of guard block
560 * @ref_tag: initial guard block reference tag.
561 * @type3_inc_reftag: T10-DIF type 3 does not define
562 * the reference tag; it is the user's
563 * choice whether to increment it or not.
564 */
565struct ib_t10_dif_domain {
566 enum ib_t10_dif_type type;
567 enum ib_t10_dif_bg_type bg_type;
568 u32 pi_interval;
569 u16 bg;
570 u16 app_tag;
571 u32 ref_tag;
572 bool type3_inc_reftag;
573};
574
575/**
576 * struct ib_sig_domain - Parameters for signature domain
577 * @sig_type: specific signature type
578 * @sig: union of all signature domain attributes that may
579 * be used to set domain layout.
580 */
581struct ib_sig_domain {
582 enum ib_signature_type sig_type;
583 union {
584 struct ib_t10_dif_domain dif;
585 } sig;
586};
587
588/**
589 * struct ib_sig_attrs - Parameters for signature handover operation
590 * @check_mask: bitmask for signature byte check (8 bytes)
591 * @mem: memory domain layout descriptor.
592 * @wire: wire domain layout descriptor.
593 */
594struct ib_sig_attrs {
595 u8 check_mask;
596 struct ib_sig_domain mem;
597 struct ib_sig_domain wire;
598};
599
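A sketch, under assumed values, of a signature handover layout: T10-DIF type 1 with a CRC block guard and a 512-byte protection interval in memory, and no protection on the wire:

	struct ib_sig_attrs sig_attrs = {
		.check_mask = 0xff,		/* check all signature bytes */
		.mem = {
			.sig_type = IB_SIG_TYPE_T10_DIF,
			.sig = { .dif = {
				.type        = IB_T10DIF_TYPE1,
				.bg_type     = IB_T10DIF_CRC,
				.pi_interval = 512,
				.ref_tag     = 0,	/* hypothetical initial block number */
			} },
		},
		.wire = {
			.sig_type = IB_SIG_TYPE_T10_DIF,
			.sig = { .dif = { .type = IB_T10DIF_NONE } },
		},
	};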
600enum ib_sig_err_type {
601 IB_SIG_BAD_GUARD,
602 IB_SIG_BAD_REFTAG,
603 IB_SIG_BAD_APPTAG,
604};
605
606/**
607 * struct ib_sig_err - signature error descriptor
608 */
609struct ib_sig_err {
610 enum ib_sig_err_type err_type;
611 u32 expected;
612 u32 actual;
613 u64 sig_err_offset;
614 u32 key;
615};
616
617enum ib_mr_status_check {
618 IB_MR_CHECK_SIG_STATUS = 1,
619};
620
621/**
622 * struct ib_mr_status - Memory region status container
623 *
624 * @fail_status: Bitmask of MR checks status. For each
625 * failed check a corresponding status bit is set.
626 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
627 * failure.
628 */
629struct ib_mr_status {
630 u32 fail_status;
631 struct ib_sig_err sig_err;
632};
633
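A hedged sketch of consuming ib_mr_status through the check_mr_status device method declared later in this header:

static void report_sig_err(struct ib_mr *mr)
{
	struct ib_mr_status st;

	if (mr->device->check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &st))
		return;	/* the query itself failed */
	if (st.fail_status & IB_MR_CHECK_SIG_STATUS)
		pr_warn("signature error type %d at offset %llu\n",
			st.sig_err.err_type,
			(unsigned long long)st.sig_err.sig_err_offset);
}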
634/**
635 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
636 * enum.
637 * @mult: multiple to convert.
638 */
639enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;
640
641struct ib_ah_attr {
642 struct ib_global_route grh;
643 u16 dlid;
644 u8 sl;
645 u8 src_path_bits;
646 u8 static_rate;
647 u8 ah_flags;
648 u8 port_num;
649 u8 dmac[6];
650 u16 vlan_id;
651};
652
653enum ib_wc_status {
654 IB_WC_SUCCESS,
655 IB_WC_LOC_LEN_ERR,
656 IB_WC_LOC_QP_OP_ERR,
657 IB_WC_LOC_EEC_OP_ERR,
658 IB_WC_LOC_PROT_ERR,

--- 36 unchanged lines hidden (view full) ---

695 IB_WC_RECV_RDMA_WITH_IMM
696};
697
698enum ib_wc_flags {
699 IB_WC_GRH = 1,
700 IB_WC_WITH_IMM = (1<<1),
701 IB_WC_WITH_INVALIDATE = (1<<2),
702 IB_WC_IP_CSUM_OK = (1<<3),
703 IB_WC_WITH_SL = (1<<4),
704 IB_WC_WITH_SLID = (1<<5),
705 IB_WC_WITH_TIMESTAMP = (1<<6),
706 IB_WC_WITH_SMAC = (1<<7),
707 IB_WC_WITH_VLAN = (1<<8),
708};
709
710struct ib_wc {
711 u64 wr_id;
712 enum ib_wc_status status;
713 enum ib_wc_opcode opcode;
714 u32 vendor_err;
715 u32 byte_len;

--- 5 unchanged lines hidden (view full) ---

721 u32 src_qp;
722 int wc_flags;
723 u16 pkey_index;
724 u16 slid;
725 u8 sl;
726 u8 dlid_path_bits;
727 u8 port_num; /* valid only for DR SMPs on switches */
728 int csum_ok;
729 struct {
730 uint64_t timestamp; /* timestamp = 0 indicates error*/
731 } ts;
732 u8 smac[6];
733 u16 vlan_id;
734};
735
736enum ib_cq_notify_flags {
737 IB_CQ_SOLICITED = 1 << 0,
738 IB_CQ_NEXT_COMP = 1 << 1,
739 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
740 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
741};

--- 49 unchanged lines hidden (view full) ---

791 * indices into a 2-entry table.
792 */
793 IB_QPT_SMI,
794 IB_QPT_GSI,
795
796 IB_QPT_RC,
797 IB_QPT_UC,
798 IB_QPT_UD,
621 IB_QPT_XRC,
622 IB_QPT_RAW_IPV6,
623 IB_QPT_RAW_ETHERTYPE,
624 IB_QPT_RAW_PACKET = 8,
625 IB_QPT_XRC_INI = 9,
626 IB_QPT_XRC_TGT,
804 IB_QPT_DC_INI,
805 IB_QPT_MAX,
806 /* Reserve a range for qp types internal to the low level driver.
807 * These qp types will not be visible at the IB core layer, so the
808 * IB_QPT_MAX usages should not be affected in the core layer
809 */
810 IB_QPT_RESERVED1 = 0x1000,
811 IB_QPT_RESERVED2,
812 IB_QPT_RESERVED3,
813 IB_QPT_RESERVED4,
814 IB_QPT_RESERVED5,
815 IB_QPT_RESERVED6,
816 IB_QPT_RESERVED7,
817 IB_QPT_RESERVED8,
818 IB_QPT_RESERVED9,
819 IB_QPT_RESERVED10,
820};
821
822enum ib_qp_create_flags {
823 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
824 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
633 IB_QP_CREATE_NETIF_QP = 1 << 2,
825 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
826 IB_QP_CREATE_MANAGED_SEND = 1 << 3,
827 IB_QP_CREATE_MANAGED_RECV = 1 << 4,
828 IB_QP_CREATE_NETIF_QP = 1 << 5,
829 IB_QP_CREATE_SIGNATURE_EN = 1 << 6,
830 /* reserve bits 26-31 for low level drivers' internal use */
831 IB_QP_CREATE_RESERVED_START = 1 << 26,
832 IB_QP_CREATE_RESERVED_END = 1 << 31,
833};
834
835enum ib_qpg_type {
836 IB_QPG_NONE = 0,
837 IB_QPG_PARENT = (1<<0),

--- 12 unchanged lines hidden (view full) ---

850 struct ib_cq *send_cq;
851 struct ib_cq *recv_cq;
852 struct ib_srq *srq;
853 struct ib_xrcd *xrcd; /* XRC TGT QPs only */
854 struct ib_qp_cap cap;
855 union {
856 struct ib_qp *qpg_parent; /* see qpg_type */
857 struct ib_qpg_init_attrib parent_attrib;
662 } pp;
858 };
859 enum ib_sig_type sq_sig_type;
860 enum ib_qp_type qp_type;
861 enum ib_qp_create_flags create_flags;
862 enum ib_qpg_type qpg_type;
863 u8 port_num; /* special QP types only */
864};
865
866enum {
867 IB_DCT_CREATE_FLAG_RCV_INLINE = 1 << 0,
868 IB_DCT_CREATE_FLAGS_MASK = IB_DCT_CREATE_FLAG_RCV_INLINE,
869};
870
871struct ib_dct_init_attr {
872 struct ib_pd *pd;
873 struct ib_cq *cq;
874 struct ib_srq *srq;
875 u64 dc_key;
876 u8 port;
877 u32 access_flags;
878 u8 min_rnr_timer;
879 u8 tclass;
880 u32 flow_label;
881 enum ib_mtu mtu;
882 u8 pkey_index;
883 u8 gid_index;
884 u8 hop_limit;
885 u32 create_flags;
886};
887
888struct ib_dct_attr {
889 u64 dc_key;
890 u8 port;
891 u32 access_flags;
892 u8 min_rnr_timer;
893 u8 tclass;
894 u32 flow_label;
895 enum ib_mtu mtu;
896 u8 pkey_index;
897 u8 gid_index;
898 u8 hop_limit;
899 u32 key_violations;
900 u8 state;
901};
902
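An illustrative ib_dct_init_attr for the experimental exp_create_dct method declared later in this header; every value below is an assumption, and pd/cq/srq are presumed already created:

	struct ib_dct_init_attr dct_attr = {
		.pd           = pd,
		.cq           = cq,
		.srq          = srq,
		.dc_key       = 0x1234,	/* hypothetical DC access key */
		.port         = 1,
		.access_flags = IB_ACCESS_REMOTE_WRITE,
		.mtu          = IB_MTU_2048,
		.hop_limit    = 64,
	};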
903struct ib_qp_open_attr {
904 void (*event_handler)(struct ib_event *, void *);
905 void *qp_context;
906 u32 qp_num;
907 enum ib_qp_type qp_type;
908};
909
910enum ib_rnr_timeout {

--- 48 unchanged lines hidden (view full) ---

959 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
960 IB_QP_ALT_PATH = (1<<14),
961 IB_QP_MIN_RNR_TIMER = (1<<15),
962 IB_QP_SQ_PSN = (1<<16),
963 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
964 IB_QP_PATH_MIG_STATE = (1<<18),
965 IB_QP_CAP = (1<<19),
966 IB_QP_DEST_QPN = (1<<20),
734 IB_QP_GROUP_RSS = (1<<21)
967 IB_QP_GROUP_RSS = (1<<21),
968 IB_QP_DC_KEY = (1<<22),
969 IB_QP_SMAC = (1<<23),
970 IB_QP_ALT_SMAC = (1<<24),
971 IB_QP_VID = (1<<25),
972 IB_QP_ALT_VID = (1<<26)
973};
974
975enum ib_qp_state {
976 IB_QPS_RESET,
977 IB_QPS_INIT,
978 IB_QPS_RTR,
979 IB_QPS_RTS,
980 IB_QPS_SQD,
981 IB_QPS_SQE,
744 IB_QPS_ERR
982 IB_QPS_ERR,
983 IB_QPS_DUMMY = -1 /* force enum signed */
984};
985
986enum ib_mig_state {
987 IB_MIG_MIGRATED,
988 IB_MIG_REARM,
989 IB_MIG_ARMED
990};
991
992enum ib_mw_type {
993 IB_MW_TYPE_1 = 1,
994 IB_MW_TYPE_2 = 2
995};
996
997struct ib_qp_attr {
998 enum ib_qp_state qp_state;
999 enum ib_qp_state cur_qp_state;
1000 enum ib_mtu path_mtu;
1001 enum ib_mig_state path_mig_state;
1002 u32 qkey;
1003 u32 rq_psn;
1004 u32 sq_psn;

--- 10 unchanged lines hidden (view full) ---

1015 u8 max_dest_rd_atomic;
1016 u8 min_rnr_timer;
1017 u8 port_num;
1018 u8 timeout;
1019 u8 retry_cnt;
1020 u8 rnr_retry;
1021 u8 alt_port_num;
1022 u8 alt_timeout;
1023 u8 smac[ETH_ALEN];
1024 u8 alt_smac[ETH_ALEN];
1025 u16 vlan_id;
1026 u16 alt_vlan_id;
1027
1028};
1029
1030struct ib_qp_attr_ex {
1031 enum ib_qp_state qp_state;
1032 enum ib_qp_state cur_qp_state;
1033 enum ib_mtu path_mtu;
1034 enum ib_mig_state path_mig_state;
1035 u32 qkey;
1036 u32 rq_psn;
1037 u32 sq_psn;
1038 u32 dest_qp_num;
1039 int qp_access_flags;
1040 struct ib_qp_cap cap;
1041 struct ib_ah_attr ah_attr;
1042 struct ib_ah_attr alt_ah_attr;
1043 u16 pkey_index;
1044 u16 alt_pkey_index;
1045 u8 en_sqd_async_notify;
1046 u8 sq_draining;
1047 u8 max_rd_atomic;
1048 u8 max_dest_rd_atomic;
1049 u8 min_rnr_timer;
1050 u8 port_num;
1051 u8 timeout;
1052 u8 retry_cnt;
1053 u8 rnr_retry;
1054 u8 alt_port_num;
1055 u8 alt_timeout;
1056 u64 dct_key;
1057};
1058
1059enum ib_wr_opcode {
1060 IB_WR_RDMA_WRITE,
1061 IB_WR_RDMA_WRITE_WITH_IMM,
1062 IB_WR_SEND,
1063 IB_WR_SEND_WITH_IMM,
1064 IB_WR_RDMA_READ,
1065 IB_WR_ATOMIC_CMP_AND_SWP,
1066 IB_WR_ATOMIC_FETCH_AND_ADD,
1067 IB_WR_LSO,
790 IB_WR_BIG_LSO,
791 IB_WR_SEND_WITH_INV,
792 IB_WR_RDMA_READ_WITH_INV,
793 IB_WR_LOCAL_INV,
794 IB_WR_FAST_REG_MR,
795 IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
796 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1074 IB_WR_BIND_MW,
1075 IB_WR_REG_SIG_MR,
1076 /* reserve values for low level drivers' internal use.
1077 * These values will not be used at all in the ib core layer.
1078 */
1079 IB_WR_RESERVED1 = 0xf0,
1080 IB_WR_RESERVED2,
1081 IB_WR_RESERVED3,
1082 IB_WR_RESERVED4,
1083 IB_WR_RESERVED5,
1084 IB_WR_RESERVED6,
1085 IB_WR_RESERVED7,
1086 IB_WR_RESERVED8,
1087 IB_WR_RESERVED9,
1088 IB_WR_RESERVED10,
1089};
1090
1091enum ib_send_flags {
1092 IB_SEND_FENCE = 1,
1093 IB_SEND_SIGNALED = (1<<1),
1094 IB_SEND_SOLICITED = (1<<2),
1095 IB_SEND_INLINE = (1<<3),
804 IB_SEND_IP_CSUM = (1<<4)
805};
1096 IB_SEND_IP_CSUM = (1<<4),
806
1097
807enum ib_flow_types {
808 IB_FLOW_ETH = 0,
809 IB_FLOW_IB_UC = 1,
810 IB_FLOW_IB_MC_IPV4 = 2,
811 IB_FLOW_IB_MC_IPV6 = 3
1098 /* reserve bits 26-31 for low level drivers' internal use */
1099 IB_SEND_RESERVED_START = (1 << 26),
1100 IB_SEND_RESERVED_END = (1 << 31),
1101 IB_SEND_UMR_UNREG = (1<<5)
812};
813
1102};
1103
814enum {
815 IB_FLOW_L4_NONE = 0,
816 IB_FLOW_L4_OTHER = 3,
817 IB_FLOW_L4_UDP = 5,
818 IB_FLOW_L4_TCP = 6
819};
820
1104struct ib_sge {
1105 u64 addr;
1106 u32 length;
1107 u32 lkey;
1108};
1109
1110struct ib_fast_reg_page_list {
1111 struct ib_device *device;
1112 u64 *page_list;
1113 unsigned int max_page_list_len;
1114};
1115
1116/**
1117 * struct ib_mw_bind_info - Parameters for a memory window bind operation.
1118 * @mr: A memory region to bind the memory window to.
1119 * @addr: The address where the memory window should begin.
1120 * @length: The length of the memory window, in bytes.
1121 * @mw_access_flags: Access flags from enum ib_access_flags for the window.
1122 *
1123 * This struct contains the shared parameters for type 1 and type 2
1124 * memory window bind operations.
1125 */
1126struct ib_mw_bind_info {
1127 struct ib_mr *mr;
1128 u64 addr;
1129 u64 length;
1130 int mw_access_flags;
1131};
1132
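For illustration, the shared bind parameters as a consumer might fill them in; 'mr' and the window geometry are assumptions:

	struct ib_mw_bind_info bind_info = {
		.mr              = mr,
		.addr            = 0x10000,	/* hypothetical window start */
		.length          = 4096,
		.mw_access_flags = IB_ACCESS_REMOTE_READ |
				   IB_ACCESS_REMOTE_WRITE,
	};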
1133struct ib_send_wr {
1134 struct ib_send_wr *next;
1135 u64 wr_id;
1136 struct ib_sge *sg_list;
1137 int num_sge;
1138 enum ib_wr_opcode opcode;
1139 int send_flags;
1140 union {

--- 28 unchanged lines hidden (view full) ---

1169 struct ib_fast_reg_page_list *page_list;
1170 unsigned int page_shift;
1171 unsigned int page_list_len;
1172 u32 length;
1173 int access_flags;
1174 u32 rkey;
1175 } fast_reg;
1176 struct {
877 struct ib_unpacked_lrh *lrh;
878 u32 eth_type;
879 u8 static_rate;
880 } raw_ety;
1177 int npages;
1178 int access_flags;
1179 u32 mkey;
1180 struct ib_pd *pd;
1181 u64 virt_addr;
1182 u64 length;
1183 int page_shift;
1184 } umr;
1185 struct {
1186 struct ib_mw *mw;
1187 /* The new rkey for the memory window. */
1188 u32 rkey;
1189 struct ib_mw_bind_info bind_info;
1190 } bind_mw;
1191 struct {
1192 struct ib_sig_attrs *sig_attrs;
1193 struct ib_mr *sig_mr;
1194 int access_flags;
1195 struct ib_sge *prot;
1196 } sig_handover;
1197 } wr;
1198 u32 xrc_remote_srq_num; /* XRC TGT QPs only */
1199};
1200
1201struct ib_recv_wr {
1202 struct ib_recv_wr *next;
1203 u64 wr_id;
1204 struct ib_sge *sg_list;
1205 int num_sge;
1206};
1207
1208enum ib_access_flags {
1209 IB_ACCESS_LOCAL_WRITE = 1,
1210 IB_ACCESS_REMOTE_WRITE = (1<<1),
1211 IB_ACCESS_REMOTE_READ = (1<<2),
1212 IB_ACCESS_REMOTE_ATOMIC = (1<<3),
1213 IB_ACCESS_MW_BIND = (1<<4),
1214 IB_ACCESS_ALLOCATE_MR = (1<<5),
899 IB_ACCESS_SHARED_MR_USER_READ = (1<<6),
900 IB_ACCESS_SHARED_MR_USER_WRITE = (1<<7),
901 IB_ACCESS_SHARED_MR_GROUP_READ = (1<<8),
902 IB_ACCESS_SHARED_MR_GROUP_WRITE = (1<<9),
903 IB_ACCESS_SHARED_MR_OTHER_READ = (1<<10),
904 IB_ACCESS_SHARED_MR_OTHER_WRITE = (1<<11)
905
1215 IB_ZERO_BASED = (1<<13)
1216};
1217
1218struct ib_phys_buf {
1219 u64 addr;
1220 u64 size;
1221};
1222
1223struct ib_mr_attr {

--- 6 unchanged lines hidden (view full) ---

1230};
1231
1232enum ib_mr_rereg_flags {
1233 IB_MR_REREG_TRANS = 1,
1234 IB_MR_REREG_PD = (1<<1),
1235 IB_MR_REREG_ACCESS = (1<<2)
1236};
1237
1238/**
1239 * struct ib_mw_bind - Parameters for a type 1 memory window bind operation.
1240 * @wr_id: Work request id.
1241 * @send_flags: Flags from ib_send_flags enum.
1242 * @bind_info: More parameters of the bind operation.
1243 */
1244struct ib_mw_bind {
929 struct ib_mr *mr;
1245 u64 wr_id;
931 u64 addr;
932 u32 length;
1246 int send_flags;
934 int mw_access_flags;
1247 struct ib_mw_bind_info bind_info;
1248};
1249
1250struct ib_fmr_attr {
1251 int max_pages;
1252 int max_maps;
1253 u8 page_shift;
1254};
1255
1256struct ib_ucontext {
1257 struct ib_device *device;
1258 struct list_head pd_list;
1259 struct list_head mr_list;
1260 struct list_head mw_list;
1261 struct list_head cq_list;
1262 struct list_head qp_list;
1263 struct list_head srq_list;
1264 struct list_head ah_list;
1265 struct list_head xrcd_list;
1266 struct list_head rule_list;
1267 struct list_head dct_list;
1268 int closing;
1269 void *peer_mem_private_data;
1270 char *peer_mem_name;
1271};
1272
1273struct ib_uobject {
1274 u64 user_handle; /* handle given to us by userspace */
1275 struct ib_ucontext *context; /* associated user context */
1276 void *object; /* containing object */
1277 struct list_head list; /* link to context's list */
1278 int id; /* index into kernel idr */
1279 struct kref ref;
1280 struct rw_semaphore mutex; /* protects .live */
1281 int live;
1282};
1283
1284struct ib_udata;
1285struct ib_udata_ops {
1286 int (*copy_from)(void *dest, struct ib_udata *udata,
1287 size_t len);
1288 int (*copy_to)(struct ib_udata *udata, void *src,
1289 size_t len);
1290};
1291
1292struct ib_udata {
1293 struct ib_udata_ops *ops;
1294 void __user *inbuf;
1295 void __user *outbuf;
1296 size_t inlen;
1297 size_t outlen;
1298};
1299
974struct ib_uxrc_rcv_object {
975 struct list_head list; /* link to context's list */
976 u32 qp_num;
977 u32 domain_handle;
978};
979
1300struct ib_pd {
1301 struct ib_device *device;
1302 struct ib_uobject *uobject;
1303 atomic_t usecnt; /* count all resources */
1304};
1305
1306struct ib_xrcd {
1307 struct ib_device *device;
988 struct ib_uobject *uobject;
989 atomic_t usecnt; /* count all exposed resources */
990 struct inode *inode;
991 struct rb_node node;
992
993 struct mutex tgt_qp_mutex;
994 struct list_head tgt_qp_list;
995};
996
997struct ib_ah {
998 struct ib_device *device;
999 struct ib_pd *pd;
1000 struct ib_uobject *uobject;
1001};
1002
1321enum ib_cq_attr_mask {
1322 IB_CQ_MODERATION = (1 << 0),
1323 IB_CQ_CAP_FLAGS = (1 << 1)
1324};
1325
1326enum ib_cq_cap_flags {
1327 IB_CQ_IGNORE_OVERRUN = (1 << 0)
1328};
1329
1330struct ib_cq_attr {
1331 struct {
1332 u16 cq_count;
1333 u16 cq_period;
1334 } moderation;
1335 u32 cq_cap_flags;
1336};
1337
1338typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1339
1340struct ib_cq {
1341 struct ib_device *device;
1342 struct ib_uobject *uobject;
1343 ib_comp_handler comp_handler;
1344 void (*event_handler)(struct ib_event *, void *);
1345 void *cq_context;

--- 22 unchanged lines hidden (view full) ---

1368struct ib_qp {
1369 struct ib_device *device;
1370 struct ib_pd *pd;
1371 struct ib_cq *send_cq;
1372 struct ib_cq *recv_cq;
1373 struct ib_srq *srq;
1374 struct ib_xrcd *xrcd; /* XRC TGT QPs only */
1375 struct list_head xrcd_list;
1041 atomic_t usecnt; /* count times opened, mcast attaches */
1376 /* count times opened, mcast attaches, flow attaches */
1377 atomic_t usecnt;
1378 struct list_head open_list;
1379 struct ib_qp *real_qp;
1380 struct ib_uobject *uobject;
1381 void (*event_handler)(struct ib_event *, void *);
1382 void *qp_context;
1383 u32 qp_num;
1384 enum ib_qp_type qp_type;
1385 enum ib_qpg_type qpg_type;
1386 u8 port_num;
1387};
1388
1389struct ib_dct {
1390 struct ib_device *device;
1391 struct ib_uobject *uobject;
1392 struct ib_pd *pd;
1393 struct ib_cq *cq;
1394 struct ib_srq *srq;
1395 u32 dct_num;
1396};
1397
1398struct ib_mr {
1399 struct ib_device *device;
1400 struct ib_pd *pd;
1401 struct ib_uobject *uobject;
1402 u32 lkey;
1403 u32 rkey;
1404 atomic_t usecnt; /* count number of MWs */
1405};
1406
1407struct ib_mw {
1408 struct ib_device *device;
1409 struct ib_pd *pd;
1410 struct ib_uobject *uobject;
1411 u32 rkey;
1412 enum ib_mw_type type;
1413};
1414
1415struct ib_fmr {
1416 struct ib_device *device;
1417 struct ib_pd *pd;
1418 struct list_head list;
1419 u32 lkey;
1420 u32 rkey;
1421};
1422
1076struct ib_flow_spec {
1077 enum ib_flow_types type;
1078 union {
1079 struct {
1080 __be16 ethertype;
1081 __be16 vlan;
1082 u8 vlan_present;
1083 u8 mac[6];
1084 u8 port;
1085 } eth;
1086 struct {
1087 __be32 qpn;
1088 } ib_uc;
1089 struct {
1090 u8 mgid[16];
1091 } ib_mc;
1092 } l2_id;
1423/* Supported steering options */
1424enum ib_flow_attr_type {
1425 /* steering according to rule specifications */
1426 IB_FLOW_ATTR_NORMAL = 0x0,
1427 /* default unicast and multicast rule -
1428 * receive all Eth traffic which isn't steered to any QP
1429 */
1430 IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
1431 /* default multicast rule -
1432 * receive all Eth multicast traffic which isn't steered to any QP
1433 */
1434 IB_FLOW_ATTR_MC_DEFAULT = 0x2,
1435 /* sniffer rule - receive all port traffic */
1436 IB_FLOW_ATTR_SNIFFER = 0x3
1437};
1438
1439/* Supported steering header types */
1440enum ib_flow_spec_type {
1441 /* L2 headers*/
1442 IB_FLOW_SPEC_ETH = 0x20,
1443 IB_FLOW_SPEC_IB = 0x21,
1444 /* L3 header*/
1445 IB_FLOW_SPEC_IPV4 = 0x30,
1446 /* L4 headers*/
1447 IB_FLOW_SPEC_TCP = 0x40,
1448 IB_FLOW_SPEC_UDP = 0x41
1449};
1450
1451#define IB_FLOW_SPEC_SUPPORT_LAYERS 4
1452
1453/* Flow steering rule priority is set according to its domain.
1454 * Lower domain value means higher priority.
1455 */
1456enum ib_flow_domain {
1457 IB_FLOW_DOMAIN_USER,
1458 IB_FLOW_DOMAIN_ETHTOOL,
1459 IB_FLOW_DOMAIN_RFS,
1460 IB_FLOW_DOMAIN_NIC,
1461 IB_FLOW_DOMAIN_NUM /* Must be last */
1462};
1463
1464enum ib_flow_flags {
1465 IB_FLOW_ATTR_FLAGS_ALLOW_LOOP_BACK = 1
1466};
1467
1468struct ib_flow_eth_filter {
1469 u8 dst_mac[6];
1470 u8 src_mac[6];
1471 __be16 ether_type;
1472 __be16 vlan_tag;
1473};
1474
1475struct ib_flow_spec_eth {
1476 enum ib_flow_spec_type type;
1477 u16 size;
1478 struct ib_flow_eth_filter val;
1479 struct ib_flow_eth_filter mask;
1480};
1481
1482struct ib_flow_ib_filter {
1483 __be32 l3_type_qpn;
1484 u8 dst_gid[16];
1485};
1486
1487struct ib_flow_spec_ib {
1488 enum ib_flow_spec_type type;
1489 u16 size;
1490 struct ib_flow_ib_filter val;
1491 struct ib_flow_ib_filter mask;
1492};
1493
1494struct ib_flow_ipv4_filter {
1495 __be32 src_ip;
1496 __be32 dst_ip;
1095 __be16 src_port;
1497};
1498
1499struct ib_flow_spec_ipv4 {
1500 enum ib_flow_spec_type type;
1501 u16 size;
1502 struct ib_flow_ipv4_filter val;
1503 struct ib_flow_ipv4_filter mask;
1504};
1505
1506struct ib_flow_tcp_udp_filter {
1507 __be16 dst_port;
1097 u8 l4_protocol;
1098 u8 block_mc_loopback;
1099 u8 rule_type;
1508 __be16 src_port;
1509};
1510
1511struct ib_flow_spec_tcp_udp {
1512 enum ib_flow_spec_type type;
1513 u16 size;
1514 struct ib_flow_tcp_udp_filter val;
1515 struct ib_flow_tcp_udp_filter mask;
1516};
1517
1518union ib_flow_spec {
1519 struct {
1520 enum ib_flow_spec_type type;
1521 u16 size;
1522 };
1523 struct ib_flow_spec_ib ib;
1524 struct ib_flow_spec_eth eth;
1525 struct ib_flow_spec_ipv4 ipv4;
1526 struct ib_flow_spec_tcp_udp tcp_udp;
1527};
1528
1529struct ib_flow_attr {
1530 enum ib_flow_attr_type type;
1531 u16 size;
1532 u16 priority;
1533 u8 num_of_specs;
1534 u8 port;
1535 u32 flags;
1536 /* Following are the optional layers according to user request
1537 * struct ib_flow_spec_xxx
1538 * struct ib_flow_spec_yyy
1539 */
1540};
1541
1542struct ib_flow {
1543 struct ib_qp *qp;
1544 struct ib_uobject *uobject;
1545};
1546
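A sketch of one steering rule in the layout the ib_flow_attr comment describes, with the attribute header followed contiguously by its specs; the MAC address and domain choice are assumptions:

	struct {
		struct ib_flow_attr	attr;
		struct ib_flow_spec_eth	eth;
	} rule = {
		.attr = {
			.type         = IB_FLOW_ATTR_NORMAL,
			.size         = sizeof(rule),
			.num_of_specs = 1,
			.port         = 1,
		},
		.eth = {
			.type = IB_FLOW_SPEC_ETH,
			.size = sizeof(struct ib_flow_spec_eth),
			.val.dst_mac  = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
			.mask.dst_mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		},
	};
	/* flow = qp->device->create_flow(qp, &rule.attr, IB_FLOW_DOMAIN_USER); */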
1547struct ib_mad;
1548struct ib_grh;
1549
1550enum ib_process_mad_flags {
1551 IB_MAD_IGNORE_MKEY = 1,
1552 IB_MAD_IGNORE_BKEY = 2,
1553 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
1554};

--- 10 unchanged lines hidden (view full) ---

1565struct ib_cache {
1566 rwlock_t lock;
1567 struct ib_event_handler event_handler;
1568 struct ib_pkey_cache **pkey_cache;
1569 struct ib_gid_cache **gid_cache;
1570 u8 *lmc_cache;
1571};
1572
1573enum verbs_values_mask {
1574 IBV_VALUES_HW_CLOCK = 1 << 0
1575};
1576
1577struct ib_device_values {
1578 int values_mask;
1579 uint64_t hwclock;
1580};
1581
1582struct ib_dma_mapping_ops {
1583 int (*mapping_error)(struct ib_device *dev,
1584 u64 dma_addr);
1585 u64 (*map_single)(struct ib_device *dev,
1586 void *ptr, size_t size,
1587 enum dma_data_direction direction);
1588 void (*unmap_single)(struct ib_device *dev,
1589 u64 addr, size_t size,

--- 28 unchanged lines hidden (view full) ---

1618 u64 *dma_handle,
1619 gfp_t flag);
1620 void (*free_coherent)(struct ib_device *dev,
1621 size_t size, void *cpu_addr,
1622 u64 dma_handle);
1623};
1624
1625struct iw_cm_verbs;
1626struct ib_exp_device_attr;
1627struct ib_exp_qp_init_attr;
1628
1629struct ib_device {
1630 struct device *dma_device;
1631
1632 char name[IB_DEVICE_NAME_MAX];
1633
1634 struct list_head event_handler_list;
1635 spinlock_t event_handler_lock;

--- 72 unchanged lines hidden (view full) ---

1708 struct ib_qp_init_attr *qp_init_attr);
1709 int (*destroy_qp)(struct ib_qp *qp);
1710 int (*post_send)(struct ib_qp *qp,
1711 struct ib_send_wr *send_wr,
1712 struct ib_send_wr **bad_send_wr);
1713 int (*post_recv)(struct ib_qp *qp,
1714 struct ib_recv_wr *recv_wr,
1715 struct ib_recv_wr **bad_recv_wr);
1260 struct ib_cq * (*create_cq)(struct ib_device *device, int cqe,
1261 int comp_vector,
1716 struct ib_cq * (*create_cq)(struct ib_device *device,
1717 struct ib_cq_init_attr *attr,
1718 struct ib_ucontext *context,
1719 struct ib_udata *udata);
1264 int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
1265 u16 cq_period);
1720 int (*modify_cq)(struct ib_cq *cq,
1721 struct ib_cq_attr *cq_attr,
1722 int cq_attr_mask);
1723 int (*destroy_cq)(struct ib_cq *cq);
1724 int (*resize_cq)(struct ib_cq *cq, int cqe,
1725 struct ib_udata *udata);
1726 int (*poll_cq)(struct ib_cq *cq, int num_entries,
1727 struct ib_wc *wc);
1728 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
1729 int (*req_notify_cq)(struct ib_cq *cq,
1730 enum ib_cq_notify_flags flags);

--- 10 unchanged lines hidden (view full) ---

1741 u64 start, u64 length,
1742 u64 virt_addr,
1743 int mr_access_flags,
1744 struct ib_udata *udata,
1745 int mr_id);
1746 int (*query_mr)(struct ib_mr *mr,
1747 struct ib_mr_attr *mr_attr);
1748 int (*dereg_mr)(struct ib_mr *mr);
1749 int (*destroy_mr)(struct ib_mr *mr);
1750 struct ib_mr * (*create_mr)(struct ib_pd *pd,
1751 struct ib_mr_init_attr *mr_init_attr);
1752 struct ib_mr * (*alloc_fast_reg_mr)(struct ib_pd *pd,
1753 int max_page_list_len);
1754 struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
1755 int page_list_len);
1756 void (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
1757 int (*rereg_phys_mr)(struct ib_mr *mr,
1758 int mr_rereg_mask,
1759 struct ib_pd *pd,
1760 struct ib_phys_buf *phys_buf_array,
1761 int num_phys_buf,
1762 int mr_access_flags,
1763 u64 *iova_start);
1304 struct ib_mw * (*alloc_mw)(struct ib_pd *pd);
1764 struct ib_mw * (*alloc_mw)(struct ib_pd *pd,
1765 enum ib_mw_type type);
1766 int (*bind_mw)(struct ib_qp *qp,
1767 struct ib_mw *mw,
1768 struct ib_mw_bind *mw_bind);
1769 int (*dealloc_mw)(struct ib_mw *mw);
1770 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd,
1771 int mr_access_flags,
1772 struct ib_fmr_attr *fmr_attr);
1773 int (*map_phys_fmr)(struct ib_fmr *fmr,

--- 9 unchanged lines hidden (view full) ---

1783 u16 lid);
1784 int (*process_mad)(struct ib_device *device,
1785 int process_mad_flags,
1786 u8 port_num,
1787 struct ib_wc *in_wc,
1788 struct ib_grh *in_grh,
1789 struct ib_mad *in_mad,
1790 struct ib_mad *out_mad);
1330 struct ib_srq * (*create_xrc_srq)(struct ib_pd *pd,
1331 struct ib_cq *xrc_cq,
1332 struct ib_xrcd *xrcd,
1333 struct ib_srq_init_attr *srq_init_attr,
1334 struct ib_udata *udata);
1791 struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device,
1792 struct ib_ucontext *ucontext,
1793 struct ib_udata *udata);
1794 int (*dealloc_xrcd)(struct ib_xrcd *xrcd);
1339 int (*create_xrc_rcv_qp)(struct ib_qp_init_attr *init_attr,
1340 u32 *qp_num);
1341 int (*modify_xrc_rcv_qp)(struct ib_xrcd *xrcd,
1342 u32 qp_num,
1343 struct ib_qp_attr *attr,
1344 int attr_mask);
1345 int (*query_xrc_rcv_qp)(struct ib_xrcd *xrcd,
1346 u32 qp_num,
1347 struct ib_qp_attr *attr,
1348 int attr_mask,
1349 struct ib_qp_init_attr *init_attr);
1350 int (*reg_xrc_rcv_qp)(struct ib_xrcd *xrcd,
1351 void *context,
1352 u32 qp_num);
1353 int (*unreg_xrc_rcv_qp)(struct ib_xrcd *xrcd,
1354 void *context,
1355 u32 qp_num);
1356 int (*attach_flow)(struct ib_qp *qp,
1357 struct ib_flow_spec *spec,
1358 int priority);
1359 int (*detach_flow)(struct ib_qp *qp,
1360 struct ib_flow_spec *spec,
1361 int priority);
1795 struct ib_flow * (*create_flow)(struct ib_qp *qp,
1796 struct ib_flow_attr
1797 *flow_attr,
1798 int domain);
1799 int (*destroy_flow)(struct ib_flow *flow_id);
1800 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
1801 struct ib_mr_status *mr_status);
1802
1803 unsigned long (*get_unmapped_area)(struct file *file,
1804 unsigned long addr,
1805 unsigned long len, unsigned long pgoff,
1806 unsigned long flags);
1807 int (*ioctl)(struct ib_ucontext *context,
1808 unsigned int cmd,
1809 unsigned long arg);
1810 int (*query_values)(struct ib_device *device,
1811 int q_values,
1812 struct ib_device_values *values);
1813 struct ib_dma_mapping_ops *dma_ops;
1814
1815 struct module *owner;
1816 struct device dev;
1817 struct kobject *ports_parent;
1818 struct list_head port_list;
1819
1820 enum {
1821 IB_DEV_UNINITIALIZED,
1822 IB_DEV_REGISTERED,
1823 IB_DEV_UNREGISTERED
1824 } reg_state;
1825
1826 int uverbs_abi_ver;
1827 u64 uverbs_cmd_mask;
1828 u64 uverbs_ex_cmd_mask;
1829
1830 char node_desc[64];
1831 __be64 node_guid;
1832 u32 local_dma_lkey;
1833 u8 node_type;
1834 u8 phys_port_cnt;
1388 struct rb_root ib_uverbs_xrcd_table;
1389 struct mutex xrcd_table_mutex;
1835 int cmd_perf;
1836 u64 cmd_avg;
1837 u32 cmd_n;
1838 spinlock_t cmd_perf_lock;
1839
1840 /*
1841 * Experimental data and functions
1842 */
1843 int (*exp_query_device)(struct ib_device *device,
1844 struct ib_exp_device_attr *device_attr);
1845 struct ib_qp * (*exp_create_qp)(struct ib_pd *pd,
1846 struct ib_exp_qp_init_attr *qp_init_attr,
1847 struct ib_udata *udata);
1848 struct ib_dct * (*exp_create_dct)(struct ib_pd *pd,
1849 struct ib_dct_init_attr *attr,
1850 struct ib_udata *udata);
1851 int (*exp_destroy_dct)(struct ib_dct *dct);
1852 int (*exp_query_dct)(struct ib_dct *dct, struct ib_dct_attr *attr);
1853
1854 u64 uverbs_exp_cmd_mask;
1855};
1856
1857struct ib_client {
1858 char *name;
1859 void (*add) (struct ib_device *);
1860 void (*remove)(struct ib_device *);
1861
1862 struct list_head list;

--- 11 unchanged lines hidden (view full) ---

1874void ib_unregister_client(struct ib_client *client);
1875
1876void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
1877void ib_set_client_data(struct ib_device *device, struct ib_client *client,
1878 void *data);
1879
1880static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
1881{
1417 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
1882 return udata->ops->copy_from(dest, udata, len);
1418}
1419
1420static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
1421{
1422 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
1887 return udata->ops->copy_to(udata, src, len);
1423}
1424
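A hedged example of a driver pulling its command structure through the ops-based wrappers above; struct my_cmd is hypothetical:

	struct my_cmd cmd;
	int err;

	err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
	if (err)
		return err;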
1425/**
1426 * ib_modify_qp_is_ok - Check that the supplied attribute mask
1427 * contains all required attributes and no attributes not allowed for
1428 * the given QP state transition.
1429 * @cur_state: Current QP state
1430 * @next_state: Next QP state
1431 * @type: QP type
1432 * @mask: Mask of supplied QP attributes
1898 * @ll : link layer of port
1433 *
1434 * This function is a helper function that a low-level driver's
1435 * modify_qp method can use to validate the consumer's input. It
1436 * checks that cur_state and next_state are valid QP states, that a
1437 * transition from cur_state to next_state is allowed by the IB spec,
1438 * and that the attribute mask supplied is allowed for the transition.
1439 */
1440int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1441 enum ib_qp_type type, enum ib_qp_attr_mask mask);
1907 enum ib_qp_type type, enum ib_qp_attr_mask mask,
1908 enum rdma_link_layer ll);
1442
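An example call under the extended signature, assuming the standard attribute-mask bits from the elided portion of enum ib_qp_attr_mask:

	if (!ib_modify_qp_is_ok(IB_QPS_INIT, IB_QPS_RTR, IB_QPT_RC,
				IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN,
				IB_LINK_LAYER_ETHERNET))
		return -EINVAL;	/* transition or mask not allowed */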
1443int ib_register_event_handler (struct ib_event_handler *event_handler);
1444int ib_unregister_event_handler(struct ib_event_handler *event_handler);
1445void ib_dispatch_event(struct ib_event *event);
1446
1447int ib_query_device(struct ib_device *device,
1448 struct ib_device_attr *device_attr);
1449

--- 97 unchanged lines hidden (view full) ---

1547
1548/**
1549 * ib_destroy_ah - Destroys an address handle.
1550 * @ah: The address handle to destroy.
1551 */
1552int ib_destroy_ah(struct ib_ah *ah);
1553
1554/**
1555 * ib_create_xrc_srq - Creates an XRC SRQ associated with the specified
1556 * protection domain, cq, and xrc domain.
1557 * @pd: The protection domain associated with the SRQ.
1558 * @xrc_cq: The cq to be associated with the XRC SRQ.
1559 * @xrcd: The XRC domain to be associated with the XRC SRQ.
1560 * @srq_init_attr: A list of initial attributes required to create the
1561 * XRC SRQ. If XRC SRQ creation succeeds, then the attributes are updated
1562 * to the actual capabilities of the created XRC SRQ.
1563 *
 1564 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
1565 * requested size of the XRC SRQ, and set to the actual values allocated
1566 * on return. If ib_create_xrc_srq() succeeds, then max_wr and max_sge
1567 * will always be at least as large as the requested values.
1568 */
1569struct ib_srq *ib_create_xrc_srq(struct ib_pd *pd,
1570 struct ib_cq *xrc_cq,
1571 struct ib_xrcd *xrcd,
1572 struct ib_srq_init_attr *srq_init_attr);
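/*
 * Usage sketch (illustrative only; pd, xrc_cq and xrcd are assumed to
 * exist already):
 *
 *	struct ib_srq_init_attr init_attr = {
 *		.attr = { .max_wr = 256, .max_sge = 1 },
 *	};
 *	struct ib_srq *srq = ib_create_xrc_srq(pd, xrc_cq, xrcd, &init_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 *	(init_attr.attr now holds the values actually granted)
 */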
1573
1574/**
1575 * ib_create_srq - Creates a SRQ associated with the specified protection
1576 * domain.
1577 * @pd: The protection domain associated with the SRQ.
1578 * @srq_init_attr: A list of initial attributes required to create the
1579 * SRQ. If SRQ creation succeeds, then the attributes are updated to
1580 * the actual capabilities of the created SRQ.
1581 *
 1582 * srq_attr->max_wr and srq_attr->max_sge are read to determine the

--- 144 unchanged lines hidden (view full) ---

1727 */
1728static inline int ib_post_recv(struct ib_qp *qp,
1729 struct ib_recv_wr *recv_wr,
1730 struct ib_recv_wr **bad_recv_wr)
1731{
1732 return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
1733}
1734
2022 * ib_create_srq - Creates a SRQ associated with the specified protection
2023 * domain.
2024 * @pd: The protection domain associated with the SRQ.
2025 * @srq_init_attr: A list of initial attributes required to create the
2026 * SRQ. If SRQ creation succeeds, then the attributes are updated to
2027 * the actual capabilities of the created SRQ.
2028 *
 2029 * srq_attr->max_wr and srq_attr->max_sge are read to determine the

--- 144 unchanged lines hidden (view full) ---

2174 */
2175static inline int ib_post_recv(struct ib_qp *qp,
2176 struct ib_recv_wr *recv_wr,
2177 struct ib_recv_wr **bad_recv_wr)
2178{
2179 return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
2180}
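/*
 * Usage sketch (illustrative only; dma_addr, buf_len, lkey and cookie
 * are assumed to come from the caller's buffer management):
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = buf_len,
 *		.lkey   = lkey,
 *	};
 *	struct ib_recv_wr wr = { .wr_id = cookie, .sg_list = &sge, .num_sge = 1 };
 *	struct ib_recv_wr *bad_wr;
 *
 *	if (ib_post_recv(qp, &wr, &bad_wr))
 *		(the request pointed to by bad_wr was not posted)
 */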
2181
1735/*
1736 * IB_CQ_VECTOR_LEAST_ATTACHED: The constant specifies that
1737 * the CQ will be attached to the completion vector that has
1738 * the least number of CQs already attached to it.
1739 */
1740#define IB_CQ_VECTOR_LEAST_ATTACHED 0xffffffff
1741
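/*
 * Usage sketch (illustrative only; this constant exists only on the old
 * side of this diff): spreading CQs over completion vectors by letting
 * the core pick the least loaded one.
 *
 *	cq = ib_create_cq(device, my_comp_handler, NULL, my_context,
 *			  cqe, IB_CQ_VECTOR_LEAST_ATTACHED);
 */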
1742/**
1743 * ib_create_cq - Creates a CQ on the specified device.
1744 * @device: The device on which to create the CQ.
1745 * @comp_handler: A user-specified callback that is invoked when a
1746 * completion event occurs on the CQ.
1747 * @event_handler: A user-specified callback that is invoked when an
1748 * asynchronous event not associated with a completion occurs on the CQ.
1749 * @cq_context: Context associated with the CQ returned to the user via

--- 14 unchanged lines hidden (view full) ---

1764 * @cq: The CQ to resize.
1765 * @cqe: The minimum size of the CQ.
1766 *
1767 * Users can examine the cq structure to determine the actual CQ size.
1768 */
1769int ib_resize_cq(struct ib_cq *cq, int cqe);
1770
1771/**
2182/**
2183 * ib_create_cq - Creates a CQ on the specified device.
2184 * @device: The device on which to create the CQ.
2185 * @comp_handler: A user-specified callback that is invoked when a
2186 * completion event occurs on the CQ.
2187 * @event_handler: A user-specified callback that is invoked when an
2188 * asynchronous event not associated with a completion occurs on the CQ.
2189 * @cq_context: Context associated with the CQ returned to the user via

--- 14 unchanged lines hidden (view full) ---

2204 * @cq: The CQ to resize.
2205 * @cqe: The minimum size of the CQ.
2206 *
2207 * Users can examine the cq structure to determine the actual CQ size.
2208 */
2209int ib_resize_cq(struct ib_cq *cq, int cqe);
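/*
 * Usage sketch (illustrative only): after a successful resize the actual
 * size can be read back from cq->cqe, which may exceed the requested
 * minimum.
 *
 *	if (!ib_resize_cq(cq, new_cqe))
 *		pr_info("CQ resized to %d entries\n", cq->cqe);
 */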
2210
2211/**
1772 * ib_modify_cq - Modifies moderation params of the CQ
 1772 * ib_modify_cq - Modifies moderation params of the CQ
 2212 * ib_modify_cq - Modifies the attributes of the specified CQ, such as
 2213 * its completion-event moderation parameters.
1773 * @cq: The CQ to modify.
2214 * @cq: The CQ to modify.
1774 * @cq_count: number of CQEs that will trigger an event
1775 * @cq_period: max period of time in usec before triggering an event
1776 *
2215 * @cq_attr: specifies the CQ attributes to modify.
2216 * @cq_attr_mask: A bit-mask used to specify which attributes of the CQ
2217 * are being modified.
1777 */
2218 */
1778int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2219int ib_modify_cq(struct ib_cq *cq,
2220 struct ib_cq_attr *cq_attr,
2221 int cq_attr_mask);
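/*
 * Usage sketch (illustrative only; assumes the ib_cq_attr layout used on
 * the new side of this diff, with a "moderation" sub-struct, and an
 * IB_CQ_MODERATION bit in the attribute mask): generate at most one
 * event per 16 completions or per 10 usec, whichever comes first.
 *
 *	struct ib_cq_attr attr = {
 *		.moderation = { .cq_count = 16, .cq_period = 10 },
 *	};
 *
 *	ret = ib_modify_cq(cq, &attr, IB_CQ_MODERATION);
 */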
1779
1780/**
1781 * ib_destroy_cq - Destroys the specified CQ.
1782 * @cq: The CQ to destroy.
1783 */
1784int ib_destroy_cq(struct ib_cq *cq);
1785
1786/**

--- 387 unchanged lines hidden (view full) ---

2174 * @mr_attr: The attributes of the specified memory region.
2175 */
2176int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
2177
2178/**
2179 * ib_dereg_mr - Deregisters a memory region and removes it from the
2180 * HCA translation table.
2181 * @mr: The memory region to deregister.
2222
2223/**
2224 * ib_destroy_cq - Destroys the specified CQ.
2225 * @cq: The CQ to destroy.
2226 */
2227int ib_destroy_cq(struct ib_cq *cq);
2228
2229/**

--- 387 unchanged lines hidden (view full) ---

2617 * @mr_attr: The attributes of the specified memory region.
2618 */
2619int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
2620
2621/**
2622 * ib_dereg_mr - Deregisters a memory region and removes it from the
2623 * HCA translation table.
2624 * @mr: The memory region to deregister.
2625 *
2626 * This function can fail, if the memory region has memory windows bound to it.
2182 */
2183int ib_dereg_mr(struct ib_mr *mr);
2184
2627 */
2628int ib_dereg_mr(struct ib_mr *mr);
2629
2630
2185/**
2631/**
2632 * ib_create_mr - Allocates a memory region that may be used for
2633 * signature handover operations.
2634 * @pd: The protection domain associated with the region.
2635 * @mr_init_attr: memory region init attributes.
2636 */
2637struct ib_mr *ib_create_mr(struct ib_pd *pd,
2638 struct ib_mr_init_attr *mr_init_attr);
2639
2640/**
2641 * ib_destroy_mr - Destroys a memory region that was created using
2642 * ib_create_mr and removes it from HW translation tables.
2643 * @mr: The memory region to destroy.
2644 *
2645 * This function can fail, if the memory region has memory windows bound to it.
2646 */
2647int ib_destroy_mr(struct ib_mr *mr);
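/*
 * Usage sketch (illustrative only; assumes the ib_mr_init_attr layout
 * with max_reg_descriptors and flags fields and the IB_MR_SIGNATURE_EN
 * flag): allocating a signature-enabled MR and releasing it again.
 *
 *	struct ib_mr_init_attr init = {
 *		.max_reg_descriptors = 2,
 *		.flags = IB_MR_SIGNATURE_EN,
 *	};
 *	struct ib_mr *sig_mr = ib_create_mr(pd, &init);
 *
 *	if (IS_ERR(sig_mr))
 *		return PTR_ERR(sig_mr);
 *	...
 *	ib_destroy_mr(sig_mr);
 */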
2648
2649/**
2186 * ib_alloc_fast_reg_mr - Allocates memory region usable with the
2187 * IB_WR_FAST_REG_MR send work request.
2188 * @pd: The protection domain associated with the region.
2189 * @max_page_list_len: requested max physical buffer list length to be
2190 * used with fast register work requests for this MR.
2191 */
2192struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
2193

--- 32 unchanged lines hidden (view full) ---

2226 */
2227static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
2228{
2229 mr->lkey = (mr->lkey & 0xffffff00) | newkey;
2230 mr->rkey = (mr->rkey & 0xffffff00) | newkey;
2231}
2232
2233/**
2650 * ib_alloc_fast_reg_mr - Allocates memory region usable with the
2651 * IB_WR_FAST_REG_MR send work request.
2652 * @pd: The protection domain associated with the region.
2653 * @max_page_list_len: requested max physical buffer list length to be
2654 * used with fast register work requests for this MR.
2655 */
2656struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
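/*
 * Usage sketch (illustrative only; ib_alloc_fast_reg_page_list() is
 * declared in a part of this header that the diff leaves hidden):
 *
 *	struct ib_mr *mr = ib_alloc_fast_reg_mr(pd, 32);
 *	struct ib_fast_reg_page_list *pl =
 *		ib_alloc_fast_reg_page_list(pd->device, 32);
 *
 *	(fill pl->page_list[] and post an IB_WR_FAST_REG_MR work request)
 */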
2657

--- 32 unchanged lines hidden (view full) ---

2690 */
2691static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
2692{
2693 mr->lkey = (mr->lkey & 0xffffff00) | newkey;
2694 mr->rkey = (mr->rkey & 0xffffff00) | newkey;
2695}
2696
2697/**
2698 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
2699 * for calculating a new rkey for type 2 memory windows.
 2700 * @rkey: the rkey to increment.
2701 */
2702static inline u32 ib_inc_rkey(u32 rkey)
2703{
2704 const u32 mask = 0x000000ff;
2705 return ((rkey + 1) & mask) | (rkey & ~mask);
2706}
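/*
 * Usage sketch (illustrative only): generating the next rkey before
 * re-binding a type 2 memory window, so that a stale rkey held by a
 * remote peer no longer grants access.
 *
 *	u32 new_rkey = ib_inc_rkey(mw->rkey);
 *	(use new_rkey in the subsequent bind work request)
 */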
2707
2708/**
2234 * ib_alloc_mw - Allocates a memory window.
2235 * @pd: The protection domain associated with the memory window.
2709 * ib_alloc_mw - Allocates a memory window.
2710 * @pd: The protection domain associated with the memory window.
2711 * @type: The type of the memory window (1 or 2).
2236 */
2712 */
2237struct ib_mw *ib_alloc_mw(struct ib_pd *pd);
2713struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
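/*
 * Usage sketch (illustrative only): allocating a type 2 memory window,
 * which is typically bound via a posted work request rather than
 * ib_bind_mw().
 *
 *	struct ib_mw *mw = ib_alloc_mw(pd, IB_MW_TYPE_2);
 *
 *	if (IS_ERR(mw))
 *		return PTR_ERR(mw);
 */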
2238
2239/**
2240 * ib_bind_mw - Posts a work request to the send queue of the specified
2241 * QP, which binds the memory window to the given address range and
2242 * remote access attributes.
2243 * @qp: QP to post the bind work request on.
2244 * @mw: The memory window to bind.
2245 * @mw_bind: Specifies information about the memory window, including
2246 * its address range, remote access rights, and associated memory region.
2714
2715/**
2716 * ib_bind_mw - Posts a work request to the send queue of the specified
2717 * QP, which binds the memory window to the given address range and
2718 * remote access attributes.
2719 * @qp: QP to post the bind work request on.
2720 * @mw: The memory window to bind.
2721 * @mw_bind: Specifies information about the memory window, including
2722 * its address range, remote access rights, and associated memory region.
2723 *
2724 * If there is no immediate error, the function will update the rkey member
2725 * of the mw parameter to its new value. The bind operation can still fail
2726 * asynchronously.
2247 */
2248static inline int ib_bind_mw(struct ib_qp *qp,
2249 struct ib_mw *mw,
2250 struct ib_mw_bind *mw_bind)
2251{
2252 /* XXX reference counting in corresponding MR? */
2253 return mw->device->bind_mw ?
2254 mw->device->bind_mw(qp, mw, mw_bind) :

--- 74 unchanged lines hidden (view full) ---

2329struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
2330
2331/**
2332 * ib_dealloc_xrcd - Deallocates an XRC domain.
2333 * @xrcd: The XRC domain to deallocate.
2334 */
2335int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
2336
2727 */
2728static inline int ib_bind_mw(struct ib_qp *qp,
2729 struct ib_mw *mw,
2730 struct ib_mw_bind *mw_bind)
2731{
2732 /* XXX reference counting in corresponding MR? */
2733 return mw->device->bind_mw ?
2734 mw->device->bind_mw(qp, mw, mw_bind) :

--- 74 unchanged lines hidden (view full) ---

2809struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
2810
2811/**
2812 * ib_dealloc_xrcd - Deallocates an XRC domain.
2813 * @xrcd: The XRC domain to deallocate.
2814 */
2815int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
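/*
 * Usage sketch (illustrative only): an XRC domain is allocated once per
 * device user and released when the last object referencing it is gone.
 *
 *	struct ib_xrcd *xrcd = ib_alloc_xrcd(device);
 *
 *	if (IS_ERR(xrcd))
 *		return PTR_ERR(xrcd);
 *	...
 *	ib_dealloc_xrcd(xrcd);
 */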
2816
2337int ib_attach_flow(struct ib_qp *qp, struct ib_flow_spec *spec, int priority);
2338int ib_detach_flow(struct ib_qp *qp, struct ib_flow_spec *spec, int priority);
2817struct ib_flow *ib_create_flow(struct ib_qp *qp,
2818 struct ib_flow_attr *flow_attr, int domain);
2819int ib_destroy_flow(struct ib_flow *flow_id);
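/*
 * Usage sketch (illustrative only; assumes the flow-steering layout
 * where an ib_flow_attr is followed in memory by its specs, here a
 * single ib_flow_spec_eth with val/mask filters): steering one
 * destination MAC address to a QP.
 *
 *	struct {
 *		struct ib_flow_attr	attr;
 *		struct ib_flow_spec_eth	eth;
 *	} rule = {
 *		.attr = {
 *			.type         = IB_FLOW_ATTR_NORMAL,
 *			.size         = sizeof(rule),
 *			.num_of_specs = 1,
 *			.port         = 1,
 *		},
 *		.eth = {
 *			.type = IB_FLOW_SPEC_ETH,
 *			.size = sizeof(rule.eth),
 *		},
 *	};
 *	struct ib_flow *flow;
 *
 *	(fill rule.eth.val.dst_mac, set rule.eth.mask.dst_mac to all ones)
 *	flow = ib_create_flow(qp, &rule.attr, IB_FLOW_DOMAIN_USER);
 */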
2339
2820
2821struct ib_dct *ib_create_dct(struct ib_pd *pd, struct ib_dct_init_attr *attr,
2822 struct ib_udata *udata);
2823int ib_destroy_dct(struct ib_dct *dct);
2824int ib_query_dct(struct ib_dct *dct, struct ib_dct_attr *attr);
2825
2826int ib_query_values(struct ib_device *device,
2827 int q_values, struct ib_device_values *values);
2828
2829static inline void ib_active_speed_enum_to_rate(u8 active_speed,
2830 int *rate,
2831 char **speed)
2832{
2833 switch (active_speed) {
2834 case IB_SPEED_DDR:
2835 *speed = " DDR";
2836 *rate = 50;
2837 break;
2838 case IB_SPEED_QDR:
2839 *speed = " QDR";
2840 *rate = 100;
2841 break;
2842 case IB_SPEED_FDR10:
2843 *speed = " FDR10";
2844 *rate = 100;
2845 break;
2846 case IB_SPEED_FDR:
2847 *speed = " FDR";
2848 *rate = 140;
2849 break;
2850 case IB_SPEED_EDR:
2851 *speed = " EDR";
2852 *rate = 250;
2853 break;
2854 case IB_SPEED_SDR:
 2855 default: /* default to SDR for invalid rates; *speed is left untouched */
2856 *rate = 25;
2857 break;
2858 }
2859
2860}
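/*
 * Usage sketch (illustrative only): the *rate reported above is the
 * per-lane signalling rate in units of 100 Mb/sec (i.e. Gb/sec x 10),
 * and *speed is only written for non-SDR values, so callers are expected
 * to pre-initialize it and to scale by the port width:
 *
 *	char *speed = "";
 *	int rate;
 *
 *	ib_active_speed_enum_to_rate(attr.active_speed, &rate, &speed);
 *	rate *= ib_width_enum_to_int(attr.active_width);
 *	printf("%d%s Gb/sec%s\n", rate / 10, rate % 10 ? ".5" : "", speed);
 */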
2861
2862static inline int ib_check_mr_access(int flags)
2863{
2864 /*
2865 * Local write permission is required if remote write or
2866 * remote atomic permission is also requested.
2867 */
2868 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
2869 !(flags & IB_ACCESS_LOCAL_WRITE))
2870 return -EINVAL;
2871
2872 return 0;
2873}
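/*
 * Usage sketch (illustrative only): a driver's memory-registration path
 * can reject inconsistent access flags up front.
 *
 *	ret = ib_check_mr_access(access_flags);
 *	if (ret)
 *		return ERR_PTR(ret);
 */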
2874
2875/**
2876 * ib_check_mr_status: lightweight check of MR status.
2877 * This routine may provide status checks on a selected
2878 * ib_mr. first use is for signature status check.
2879 *
2880 * @mr: A memory region.
2881 * @check_mask: Bitmask of which checks to perform from
2882 * ib_mr_status_check enumeration.
2883 * @mr_status: The container of relevant status checks.
 2884 * Failed checks will be indicated in the status bitmask
 2885 * and the relevant info will be in the error item.
2886 */
2887int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
2888 struct ib_mr_status *mr_status);
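/*
 * Usage sketch (illustrative only; assumes the IB_MR_CHECK_SIG_STATUS
 * check bit and an ib_mr_status containing a fail_status bitmask and a
 * sig_err item): verifying a signature-enabled MR after the data
 * transfer has completed.
 *
 *	struct ib_mr_status status;
 *
 *	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &status);
 *	if (!ret && (status.fail_status & IB_MR_CHECK_SIG_STATUS))
 *		(inspect status.sig_err for the failure details)
 */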
2889
2340#endif /* IB_VERBS_H */
2890#endif /* IB_VERBS_H */