#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H

/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/ofed/include/rdma/rdmavt_qp.h 331772 2018-03-30 18:17:33Z hselasky $
 */

#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>

/*
 * Atomic bit definitions for r_aflags.
 */
#define RVT_R_WRID_VALID        0
#define RVT_R_REWIND_SGE        1

/*
 * Bit definitions for r_flags.
 */
#define RVT_R_REUSE_SGE 0x01
#define RVT_R_RDMAR_SEQ 0x02
#define RVT_R_RSP_NAK   0x04
#define RVT_R_RSP_SEND  0x08
#define RVT_R_COMM_EST  0x10
/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if QP send WRs must individually request
 *                       completion signaling
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                    before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                    before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for the RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process the next SWQE
 * RVT_S_WAIT_DMA - waiting for the send DMA queue to drain before generating
 *                  the next send completion entry not via send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_PIO_DRAIN - waiting for the QP to drain PIO packets
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for the ACK
 * RVT_S_ECN - a BECN was queued to the send engine
 */
#define RVT_S_SIGNAL_REQ_WR	0x0001
#define RVT_S_BUSY		0x0002
#define RVT_S_TIMER		0x0004
#define RVT_S_RESP_PENDING	0x0008
#define RVT_S_ACK_PENDING	0x0010
#define RVT_S_WAIT_FENCE	0x0020
#define RVT_S_WAIT_RDMAR	0x0040
#define RVT_S_WAIT_RNR		0x0080
#define RVT_S_WAIT_SSN_CREDIT	0x0100
#define RVT_S_WAIT_DMA		0x0200
#define RVT_S_WAIT_PIO		0x0400
#define RVT_S_WAIT_PIO_DRAIN	0x0800
#define RVT_S_WAIT_TX		0x1000
#define RVT_S_WAIT_DMA_DESC	0x2000
#define RVT_S_WAIT_KMEM		0x4000
#define RVT_S_WAIT_PSN		0x8000
#define RVT_S_WAIT_ACK		0x10000
#define RVT_S_SEND_ONE		0x20000
#define RVT_S_UNLIMITED_CREDIT	0x40000
#define RVT_S_AHG_VALID		0x80000
#define RVT_S_AHG_CLEAR		0x100000
#define RVT_S_ECN		0x200000

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO \
	(RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN | RVT_S_WAIT_TX | \
	 RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
	RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
	RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
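
/*
 * Example (a sketch of typical use; the exact check lives in each
 * driver's send engine): progress usually stops while any wait bit
 * is set, e.g.:
 *
 *	if (qp->s_flags & RVT_S_ANY_WAIT)
 *		goto bail;
 */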

/* Number of bits to pay attention to in the opcode for checking qp type */
#define RVT_OPCODE_QP_MASK 0xE0

/* Flags for checking QP state (see ib_rvt_state_ops[]) */
#define RVT_POST_SEND_OK		0x01
#define RVT_POST_RECV_OK		0x02
#define RVT_PROCESS_RECV_OK		0x04
#define RVT_PROCESS_SEND_OK		0x08
#define RVT_PROCESS_NEXT_SEND_OK	0x10
#define RVT_FLUSH_SEND			0x20
#define RVT_FLUSH_RECV			0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
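
/*
 * Example (a sketch): post-send paths gate work on the current QP
 * state through ib_rvt_state_ops[] (declared at the end of this file),
 * e.g.:
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *		return -EINVAL;
 */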

/*
 * Internal send flags
 */
#define RVT_SEND_RESERVE_USED		IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY	(IB_SEND_RESERVED_START << 1)

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
	union {
		struct ib_send_wr wr;   /* don't use wr.sg_list */
		struct ib_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn;                /* first packet sequence number */
	u32 lpsn;               /* last packet sequence number */
	u32 ssn;                /* send sequence number */
	u32 length;             /* total length of data in sg_list */
	struct rvt_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct rvt_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use rvt_get_rwqe_ptr() instead.
 */
struct rvt_rwq {
	u32 head;               /* new work requests posted to the head */
	u32 tail;               /* receives pull requests from here */
	struct rvt_rwqe wq[0];
};

struct rvt_rq {
	struct rvt_rwq *wq;
	u32 size;               /* size of RWQE array */
	u8 max_sge;
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
};

/*
 * This structure is used by rvt_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct rvt_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct rvt_ack_entry {
	struct rvt_sge rdma_sge;
	u64 atomic_data;
	u32 psn;
	u32 lpsn;
	u8 opcode;
	u8 sent;
};

#define	RC_QP_SCALING_INTERVAL	5

#define RVT_OPERATION_PRIV        0x00000001
#define RVT_OPERATION_ATOMIC      0x00000002
#define RVT_OPERATION_ATOMIC_SGE  0x00000004
#define RVT_OPERATION_LOCAL       0x00000008
#define RVT_OPERATION_USE_RESERVE 0x00000010

#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)

/**
 * rvt_operation_params - op table entry
 * @length - the length to copy into the swqe entry
 * @qpt_support - a bit mask indicating QP type support
 * @flags - RVT_OPERATION flags (see above)
 *
 * This supports table driven post send so that
 * each driver can supply its own, potentially
 * different, set of operations.
 **/
struct rvt_operation_params {
	size_t length;
	u32 qpt_support;
	u32 flags;
};
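
/*
 * Example (an illustrative sketch of how a driver might populate its
 * per-opcode table; the entry shown is hypothetical, not a requirement):
 *
 *	static const struct rvt_operation_params post_parms[RVT_OPERATION_MAX] = {
 *		[IB_WR_RDMA_WRITE] = {
 *			.length = sizeof(struct ib_rdma_wr),
 *			.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
 *		},
 *	};
 */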

/*
 * Common variables are protected by both r_rq.lock and s_lock, taken in
 * that order, which only happens in modify_qp() or when changing the
 * QP 'state'.
 */
struct rvt_qp {
	struct ib_qp ibqp;
	void *priv; /* Driver private data */
	/* read mostly fields above and below */
	struct ib_ah_attr remote_ah_attr;
	struct ib_ah_attr alt_ah_attr;
	struct rvt_qp __rcu *next;           /* linked list for QPN hash table */
	struct rvt_swqe *s_wq;  /* send work queue */
	struct rvt_mmap_info *ip;

	unsigned long timeout_jiffies;  /* computed from timeout */

	enum ib_mtu path_mtu;
	int srate_mbps;		/* s_srate (below) converted to Mbit/s */
	pid_t pid;		/* pid for user mode QPs */
	u32 remote_qpn;
	u32 qkey;               /* QKEY for this QP (for UD or RD) */
	u32 s_size;             /* send work queue size */
	u32 s_ahgpsn;           /* set to the psn in the copy of the header */

	u16 pmtu;		/* decoded from path_mtu */
	u8 log_pmtu;		/* shift for pmtu */
	u8 state;               /* QP state */
	u8 allowed_ops;		/* high order bits of allowed opcodes */
	u8 qp_access_flags;
	u8 alt_timeout;         /* Alternate path timeout for this QP */
	u8 timeout;             /* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;        /* PKEY index to use */
	u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;         /* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
	u8 s_max_sge;           /* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */
	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct rvt_ack_entry *s_ack_queue;
	struct rvt_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
	u32 r_psn;              /* expected rcv packet sequence number */
	unsigned long r_aflags;
	u64 r_wr_id;            /* ID for current receive WQE */
	u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
	u32 r_len;              /* total length of r_sge */
	u32 r_rcv_len;          /* receive data len processed */
	u32 r_msn;              /* message sequence number */

	u8 r_state;             /* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;    /* index into s_ack_queue[] */

	struct list_head rspwait;       /* link for waiting to respond */

	struct rvt_sge_state r_sge;     /* current receive data */
	struct rvt_rq r_rq;             /* receive work queue */

	/* post send line */
	spinlock_t s_hlock ____cacheline_aligned_in_smp;
	u32 s_head;             /* new entries added here */
	u32 s_next_psn;         /* PSN for next request */
	u32 s_avail;            /* number of entries avail */
	u32 s_ssn;              /* SSN of tail entry */
	atomic_t s_reserved_used; /* reserved entries in use */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	u32 s_flags;
	struct rvt_sge_state *s_cur_sge;
	struct rvt_swqe *s_wqe;
	struct rvt_sge_state s_sge;     /* current send request data */
	struct rvt_mregion *s_rdma_mr;
	u32 s_cur_size;         /* size of send packet in bytes */
	u32 s_len;              /* total length of s_sge */
	u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
	u32 s_last_psn;         /* last response PSN processed */
	u32 s_sending_psn;      /* lowest PSN that is being sent */
	u32 s_sending_hpsn;     /* highest PSN that is being sent */
	u32 s_psn;              /* current packet sequence number */
	u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
	u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
	u32 s_tail;             /* next entry to process */
	u32 s_cur;              /* current work queue entry */
	u32 s_acked;            /* last un-ACK'ed entry */
	u32 s_last;             /* last completed entry */
	u32 s_lsn;              /* limit sequence number (credit) */
	u16 s_hdrwords;         /* size of s_hdr in 32 bit words */
	u16 s_rdma_ack_cnt;
	s8 s_ahgidx;
	u8 s_state;             /* opcode of last packet sent */
	u8 s_ack_state;         /* opcode of packet to ACK */
	u8 s_nak_state;         /* non-zero if NAK is pending */
	u8 r_nak_state;         /* non-zero if NAK is pending */
	u8 s_retry;             /* requester retry counter */
	u8 s_rnr_retry;         /* requester RNR retry counter */
	u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;    /* index into s_ack_queue[] */

	struct rvt_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;

	atomic_t local_ops_pending; /* number of fast_reg/local_inv reqs */

	/*
	 * This sge list MUST be last. Do not add anything below here.
	 */
	struct rvt_sge r_sg_list[0] /* verified SGEs */
		____cacheline_aligned_in_smp;
};

struct rvt_srq {
	struct ib_srq ibsrq;
	struct rvt_rq rq;
	struct rvt_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

#define RVT_QPN_MAX                 BIT(24)
#define RVT_QPNMAP_ENTRIES          (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE           (PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK      (RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK		    0xFFFFFF

/*
 * QPN-map pages start out as NULL; they get allocated upon
 * first use and are never deallocated.  This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct rvt_qpn_map {
	void *page;
};

struct rvt_qpn_table {
	spinlock_t lock; /* protect changes to the qp table */
	unsigned flags;         /* flags for QP0/1 allocated for each port */
	u32 last;               /* last QP number allocated */
	u32 nmaps;              /* size of the map table */
	u16 limit;
	u8  incr;
	/* bit map of free QP numbers other than 0/1 */
	struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};
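
/*
 * Sketch of the QPN-to-bitmap mapping implied by the constants above
 * (the rdmavt QPN allocator performs the equivalent computation):
 *
 *	struct rvt_qpn_map *map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
 *	unsigned bit = qpn & RVT_BITS_PER_PAGE_MASK;
 *
 * where 'bit' indexes into the page pointed to by map->page.
 */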

struct rvt_qp_ibdev {
	u32 qp_table_size;
	u32 qp_table_bits;
	struct rvt_qp __rcu **qp_table;
	spinlock_t qpt_lock; /* qptable lock */
	struct rvt_qpn_table qpn_table;
};

/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct rvt_mcast_qp.
 */
struct rvt_mcast_qp {
	struct list_head list;
	struct rvt_qp *qp;
};

struct rvt_mcast {
	struct rb_node rb_node;
	union ib_gid mgid;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq.  This function does the array index computation.
 */
static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
						unsigned n)
{
	return (struct rvt_swqe *)((char *)qp->s_wq +
				     (sizeof(struct rvt_swqe) +
				      qp->s_max_sge *
				      sizeof(struct rvt_sge)) * n);
}
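
/*
 * Example (a sketch): the send engine walks s_wq by ring index, e.g.
 * fetching the entry at the tail:
 *
 *	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_tail);
 */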

/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq.  This function does the array index computation.
 */
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
	return (struct rvt_rwqe *)
		((char *)rq->wq->wq +
		 (sizeof(struct rvt_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}
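
/*
 * Illustrative consumer (a sketch, not part of the original API): pull
 * the next posted RWQE and advance the tail, wrapping at rq->size.
 * Assumes the caller holds rq->lock; the real consumer lives in the
 * rdmavt receive path.
 */
static inline struct rvt_rwqe *rvt_example_next_rwqe(struct rvt_rq *rq)
{
	struct rvt_rwq *wq = rq->wq;
	struct rvt_rwqe *wqe;

	if (wq->tail == wq->head)
		return NULL;		/* no receive work requests posted */
	wqe = rvt_get_rwqe_ptr(rq, wq->tail);
	if (++wq->tail >= rq->size)
		wq->tail = 0;		/* wrap the ring index */
	return wqe;
}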

/**
 * rvt_get_qp - get a QP reference
 * @qp - the QP to hold
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
	atomic_inc(&qp->refcount);
}

/**
 * rvt_put_qp - release a QP reference
 * @qp - the QP to release
 */
static inline void rvt_put_qp(struct rvt_qp *qp)
{
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
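
/*
 * Typical usage (a sketch): hold a reference for as long as the QP is
 * used outside the lookup lock; the final rvt_put_qp() wakes anyone
 * sleeping on qp->wait (e.g. a destroy path draining references):
 *
 *	rvt_get_qp(qp);
 *	... use qp ...
 *	rvt_put_qp(qp);
 */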

/**
 * rvt_qp_wqe_reserve - reserve operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This routine is used in post send to record
 * that a WQE uses a reserved operation.
 */
static inline void rvt_qp_wqe_reserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
	atomic_inc(&qp->s_reserved_used);
}

/**
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This decrements the reserve use count.
 *
 * This call MUST precede the change to
 * s_last to ensure that post send sees a stable
 * s_avail.
 *
 * An smp_mb__after_atomic() is used to ensure
 * the compiler does not reorder the update of the s_last
 * ring index and the decrement of s_reserved_used.
 */
static inline void rvt_qp_wqe_unreserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
		wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
		atomic_dec(&qp->s_reserved_used);
		/* ensure no compiler reordering up to the s_last change */
		smp_mb__after_atomic();
	}
}
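
/*
 * Pairing sketch: a driver marks a reserved operation when the WQE is
 * posted and releases it in its completion path, before advancing
 * qp->s_last:
 *
 *	rvt_qp_wqe_reserve(qp, wqe);	(at post send)
 *	...
 *	rvt_qp_wqe_unreserve(qp, wqe);	(then update qp->s_last)
 */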

extern const int ib_rvt_state_ops[];

struct rvt_dev_info;
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);

#endif          /* DEF_RDMAVT_INCQP_H */