#ifndef _RDS_IW_H
#define _RDS_IW_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_FASTREG_SIZE		20
#define RDS_FASTREG_POOL_SIZE		2048

#define RDS_IW_MAX_SGE			8
#define RDS_IW_RECV_SGE			2

#define RDS_IW_DEFAULT_RECV_WR		1024
#define RDS_IW_DEFAULT_SEND_WR		256

#define RDS_IW_SUPPORTED_PROTOCOLS	0x00000003	/* minor versions supported */

extern struct list_head rds_iw_devices;

/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try to minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
/* page offset of the final full frag that fits in the page */
#define RDS_PAGE_LAST_OFF (((PAGE_SIZE / RDS_FRAG_SIZE) - 1) * RDS_FRAG_SIZE)
struct rds_page_frag {
	struct list_head	f_item;
	struct page		*f_page;
	unsigned long		f_offset;
	dma_addr_t		f_mapped;
};
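/*
 * Worked example for the RDS_PAGE_LAST_OFF macro above (illustrative
 * only; the sizes below are assumptions, not values from rds.h): with
 * 64 KiB pages and a 4 KiB RDS_FRAG_SIZE,
 *
 *	PAGE_SIZE / RDS_FRAG_SIZE	= 16 frags per page
 *	RDS_PAGE_LAST_OFF		= (16 - 1) * 4096 = 61440
 *
 * so a fresh page is carved into frags at offsets 0, 4096, ..., 61440.
 * When PAGE_SIZE == RDS_FRAG_SIZE the macro collapses to 0 and each
 * frag simply occupies a whole page.
 */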
struct rds_iw_incoming {
	struct list_head	ii_frags;
	struct rds_incoming	ii_inc;
};

struct rds_iw_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32			dp_saddr;
	__be32			dp_daddr;
	u8			dp_protocol_major;
	u8			dp_protocol_minor;
	__be16			dp_protocol_minor_mask;	/* bitmask */
	__be32			dp_reserved1;
	__be64			dp_ack_seq;
	__be32			dp_credit;	/* non-zero enables flow ctl */
};

struct rds_iw_scatterlist {
	struct scatterlist	*list;
	unsigned int		len;
	int			dma_len;
	unsigned int		dma_npages;
	unsigned int		bytes;
};

struct rds_iw_mapping {
	spinlock_t		m_lock;	/* protect the mapping struct */
	struct list_head	m_list;
	struct rds_iw_mr	*m_mr;
	uint32_t		m_rkey;
	struct rds_iw_scatterlist m_sg;
};

struct rds_iw_send_work {
	struct rds_message	*s_rm;

	/* We should really put these into a union: */
	struct rds_rdma_op	*s_op;
	struct rds_iw_mapping	*s_mapping;
	struct ib_mr		*s_mr;
	struct ib_fast_reg_page_list *s_page_list;
	unsigned char		s_remap_count;

	struct ib_send_wr	s_wr;
	struct ib_sge		s_sge[RDS_IW_MAX_SGE];
	unsigned long		s_queued;
};

struct rds_iw_recv_work {
	struct rds_iw_incoming	*r_iwinc;
	struct rds_page_frag	*r_frag;
	struct ib_recv_wr	r_wr;
	struct ib_sge		r_sge[2];
};

struct rds_iw_work_ring {
	u32		w_nr;
	u32		w_alloc_ptr;
	u32		w_alloc_ctr;
	u32		w_free_ptr;
	atomic_t	w_free_ctr;
};

struct rds_iw_device;

struct rds_iw_connection {

	struct list_head	iw_node;
	struct rds_iw_device	*rds_iwdev;
	struct rds_connection	*conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id	*i_cm_id;
	struct ib_pd		*i_pd;
	struct ib_mr		*i_mr;
	struct ib_cq		*i_send_cq;
	struct ib_cq		*i_recv_cq;

	/* tx */
	struct rds_iw_work_ring	i_send_ring;
	struct rds_message	*i_rm;
	struct rds_header	*i_send_hdrs;
	u64			i_send_hdrs_dma;
	struct rds_iw_send_work *i_sends;

	/* rx */
	struct tasklet_struct	i_recv_tasklet;
	struct mutex		i_recv_mutex;
	struct rds_iw_work_ring	i_recv_ring;
	struct rds_iw_incoming	*i_iwinc;
	u32			i_recv_data_rem;
	struct rds_header	*i_recv_hdrs;
	u64			i_recv_hdrs_dma;
	struct rds_iw_recv_work *i_recvs;
	struct rds_page_frag	i_frag;
	u64			i_ack_recv;	/* last ACK received */

	/* sending acks */
	unsigned long		i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		i_ack_next;	/* next ACK to send */
#else
	spinlock_t		i_ack_lock;	/* protect i_ack_next */
	u64			i_ack_next;	/* next ACK to send */
#endif
	struct rds_header	*i_ack;
	struct ib_send_wr	i_ack_wr;
	struct ib_sge		i_ack_sge;
	u64			i_ack_dma;
	unsigned long		i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them using a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t		i_credits;

	/* Protocol version specific information */
	unsigned int		i_flowctl:1;	/* enable/disable flow ctl */
	unsigned int		i_dma_local_lkey:1;
	unsigned int		i_fastreg_posted:1; /* fastreg posted on this connection */
	/* Batched completions */
	unsigned int		i_unsignaled_wrs;
	long			i_unsignaled_bytes;
};

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)
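/*
 * A minimal sketch (not part of the original header) of the cmpxchg
 * update described in the i_credits comment above: atomically take up
 * to "wanted" send credits out of the packed credit word.  The
 * authoritative logic lives in ib_send.c (see
 * rds_iw_send_grab_credits() below); the helper name here is
 * hypothetical and exists only to illustrate the packing.
 */
static inline unsigned int rds_iw_grab_send_credits_sketch(atomic_t *credits,
							   unsigned int wanted)
{
	unsigned int oldval, newval, got;

	do {
		oldval = (unsigned int) atomic_read(credits);
		got = min_t(unsigned int, wanted, IB_GET_SEND_CREDITS(oldval));
		/* Send credits occupy the low 16 bits, so this subtraction
		 * cannot disturb the posted-credit half of the word. */
		newval = oldval - IB_SET_SEND_CREDITS(got);
	} while ((unsigned int) atomic_cmpxchg(credits, oldval, newval) != oldval);

	return got;
}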
struct rds_iw_cm_id {
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
};

struct rds_iw_device {
	struct list_head	list;
	struct list_head	cm_id_list;
	struct list_head	conn_list;
	struct ib_device	*dev;
	struct ib_pd		*pd;
	struct ib_mr		*mr;
	struct rds_iw_mr_pool	*mr_pool;
	int			max_sge;
	unsigned int		max_wrs;
	unsigned int		dma_local_lkey:1;
	spinlock_t		spinlock;	/* protect the above */
};

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1

/* Magic WR_ID for ACKs */
#define RDS_IW_ACK_WR_ID	((u64) 0xffffffffffffffffULL)
#define RDS_IW_FAST_REG_WR_ID	((u64) 0xefefefefefefefefULL)
#define RDS_IW_LOCAL_INV_WR_ID	((u64) 0xdfdfdfdfdfdfdfdfULL)

struct rds_iw_statistics {
	uint64_t	s_iw_connect_raced;
	uint64_t	s_iw_listen_closed_stale;
	uint64_t	s_iw_tx_cq_call;
	uint64_t	s_iw_tx_cq_event;
	uint64_t	s_iw_tx_ring_full;
	uint64_t	s_iw_tx_throttle;
	uint64_t	s_iw_tx_sg_mapping_failure;
	uint64_t	s_iw_tx_stalled;
	uint64_t	s_iw_tx_credit_updates;
	uint64_t	s_iw_rx_cq_call;
	uint64_t	s_iw_rx_cq_event;
	uint64_t	s_iw_rx_ring_empty;
	uint64_t	s_iw_rx_refill_from_cq;
	uint64_t	s_iw_rx_refill_from_thread;
	uint64_t	s_iw_rx_alloc_limit;
	uint64_t	s_iw_rx_credit_updates;
	uint64_t	s_iw_ack_sent;
	uint64_t	s_iw_ack_send_failure;
	uint64_t	s_iw_ack_send_delayed;
	uint64_t	s_iw_ack_send_piggybacked;
	uint64_t	s_iw_ack_received;
	uint64_t	s_iw_rdma_mr_alloc;
	uint64_t	s_iw_rdma_mr_free;
	uint64_t	s_iw_rdma_mr_used;
	uint64_t	s_iw_rdma_mr_pool_flush;
	uint64_t	s_iw_rdma_mr_pool_wait;
	uint64_t	s_iw_rdma_mr_pool_depleted;
};

extern struct workqueue_struct *rds_iw_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define it.
 */
static inline void rds_iw_dma_sync_sg_for_cpu(struct ib_device *dev,
		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_cpu(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_iw_dma_sync_sg_for_cpu

static inline void rds_iw_dma_sync_sg_for_device(struct ib_device *dev,
		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_device(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_iw_dma_sync_sg_for_device
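/*
 * Illustrative usage of the sync wrappers above (a sketch, not original
 * code; the helper name is hypothetical): bounce ownership of a
 * DMA-mapped scatterlist to the CPU before reading received data, then
 * back to the device before the buffer is reposted.
 */
static inline void rds_iw_sync_for_read_sketch(struct ib_device *dev,
					       struct rds_iw_scatterlist *s)
{
	ib_dma_sync_sg_for_cpu(dev, s->list, s->dma_len, DMA_FROM_DEVICE);
	/* ... the CPU may now safely read the received bytes ... */
	ib_dma_sync_sg_for_device(dev, s->list, s->dma_len, DMA_FROM_DEVICE);
}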
static inline u32 rds_iw_local_dma_lkey(struct rds_iw_connection *ic)
{
	return ic->i_dma_local_lkey ? ic->i_cm_id->device->local_dma_lkey : ic->i_mr->lkey;
}

/* ib.c */
extern struct rds_transport rds_iw_transport;
extern void rds_iw_add_one(struct ib_device *device);
extern void rds_iw_remove_one(struct ib_device *device);
extern struct ib_client rds_iw_client;

extern unsigned int fastreg_pool_size;
extern unsigned int fastreg_message_size;

extern spinlock_t iw_nodev_conns_lock;
extern struct list_head iw_nodev_conns;

/* ib_cm.c */
int rds_iw_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_iw_conn_free(void *arg);
int rds_iw_conn_connect(struct rds_connection *conn);
void rds_iw_conn_shutdown(struct rds_connection *conn);
void rds_iw_state_change(struct sock *sk);
int __init rds_iw_listen_init(void);
void rds_iw_listen_stop(void);
void __rds_iw_conn_error(struct rds_connection *conn, const char *, ...);
int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event);
int rds_iw_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_iw_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);

#define rds_iw_conn_error(conn, fmt...) \
	__rds_iw_conn_error(conn, KERN_WARNING "RDS/IW: " fmt)

/* ib_rdma.c */
int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id);
void rds_iw_add_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn);
void rds_iw_remove_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn);
void __rds_iw_destroy_conns(struct list_head *list, spinlock_t *list_lock);
static inline void rds_iw_destroy_nodev_conns(void)
{
	__rds_iw_destroy_conns(&iw_nodev_conns, &iw_nodev_conns_lock);
}
static inline void rds_iw_destroy_conns(struct rds_iw_device *rds_iwdev)
{
	__rds_iw_destroy_conns(&rds_iwdev->conn_list, &rds_iwdev->spinlock);
}
struct rds_iw_mr_pool *rds_iw_create_mr_pool(struct rds_iw_device *);
void rds_iw_get_mr_info(struct rds_iw_device *rds_iwdev,
			struct rds_info_rdma_connection *iinfo);
void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *);
void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret);
void rds_iw_sync_mr(void *trans_private, int dir);
void rds_iw_free_mr(void *trans_private, int invalidate);
void rds_iw_flush_mrs(void);
void rds_iw_remove_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id);

/* ib_recv.c */
int __init rds_iw_recv_init(void);
void rds_iw_recv_exit(void);
int rds_iw_recv(struct rds_connection *conn);
int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
		       gfp_t page_gfp, int prefill);
void rds_iw_inc_purge(struct rds_incoming *inc);
void rds_iw_inc_free(struct rds_incoming *inc);
int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
			    size_t size);
void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_iw_recv_tasklet_fn(unsigned long data);
void rds_iw_recv_init_ring(struct rds_iw_connection *ic);
void rds_iw_recv_clear_ring(struct rds_iw_connection *ic);
void rds_iw_recv_init_ack(struct rds_iw_connection *ic);
void rds_iw_attempt_ack(struct rds_iw_connection *ic);
void rds_iw_ack_send_complete(struct rds_iw_connection *ic);
u64 rds_iw_piggyb_ack(struct rds_iw_connection *ic);

/* ib_ring.c */
void rds_iw_ring_init(struct rds_iw_work_ring *ring, u32 nr);
void rds_iw_ring_resize(struct rds_iw_work_ring *ring, u32 nr);
u32 rds_iw_ring_alloc(struct rds_iw_work_ring *ring, u32 val, u32 *pos);
void rds_iw_ring_free(struct rds_iw_work_ring *ring, u32 val);
void rds_iw_ring_unalloc(struct rds_iw_work_ring *ring, u32 val);
int rds_iw_ring_empty(struct rds_iw_work_ring *ring);
int rds_iw_ring_low(struct rds_iw_work_ring *ring);
u32 rds_iw_ring_oldest(struct rds_iw_work_ring *ring);
u32 rds_iw_ring_completed(struct rds_iw_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_iw_ring_empty_wait;
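/*
 * Illustrative flow for the work ring API above (a sketch with a
 * hypothetical helper name, not original code): a sender grabs a span
 * of ring entries, posts work requests for them, and hands entries
 * back as completions arrive.
 */
static inline u32 rds_iw_ring_usage_sketch(struct rds_iw_work_ring *ring,
					   u32 want)
{
	u32 pos;
	u32 got = rds_iw_ring_alloc(ring, want, &pos);

	if (got == 0)
		return 0;	/* ring full: caller should throttle */

	/* ... build and post "got" work requests starting at slot "pos",
	 * calling rds_iw_ring_unalloc() for any entries that end up not
	 * being posted; the completion handlers later return finished
	 * entries with rds_iw_ring_free(). ... */

	return got;
}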
/* ib_send.c */
void rds_iw_xmit_complete(struct rds_connection *conn);
int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_iw_send_init_ring(struct rds_iw_connection *ic);
void rds_iw_send_clear_ring(struct rds_iw_connection *ic);
int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op);
void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_iw_send_grab_credits(struct rds_iw_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);

/* ib_stats.c */
DECLARE_PER_CPU(struct rds_iw_statistics, rds_iw_stats);
#define rds_iw_stats_inc(member) rds_stats_inc_which(rds_iw_stats, member)
unsigned int rds_iw_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);

/* ib_sysctl.c */
int __init rds_iw_sysctl_init(void);
void rds_iw_sysctl_exit(void);
extern unsigned long rds_iw_sysctl_max_send_wr;
extern unsigned long rds_iw_sysctl_max_recv_wr;
extern unsigned long rds_iw_sysctl_max_unsig_wrs;
extern unsigned long rds_iw_sysctl_max_unsig_bytes;
extern unsigned long rds_iw_sysctl_max_recv_allocation;
extern unsigned int rds_iw_sysctl_flow_control;
extern ctl_table rds_iw_sysctl_table[];

/*
 * Helper functions for getting/setting the header and data SGEs in
 * RDS packets (not RDMA)
 */
static inline struct ib_sge *
rds_iw_header_sge(struct rds_iw_connection *ic, struct ib_sge *sge)
{
	return &sge[0];
}

static inline struct ib_sge *
rds_iw_data_sge(struct rds_iw_connection *ic, struct ib_sge *sge)
{
	return &sge[1];
}

#endif