/*
 * zfcp device driver
 *
 * Header file for zfcp qdio interface
 *
 * Copyright IBM Corporation 2010
 */

#ifndef ZFCP_QDIO_H
#define ZFCP_QDIO_H

#include <asm/qdio.h>

#define ZFCP_QDIO_SBALE_LEN	PAGE_SIZE

#define ZFCP_QDIO_MAX_SBALES_PER_SBAL	(QDIO_MAX_ELEMENTS_PER_BUFFER - 1)

#define ZFCP_QDIO_LAST_SBALE_PER_SBAL	(ZFCP_QDIO_MAX_SBALES_PER_SBAL - 1)

/* Max SBALs for chaining */
#define ZFCP_QDIO_MAX_SBALS_PER_REQ	36

/* Max. number of (data buffer) SBALEs in the largest SBAL chain; the
 * request ID and the QTCB occupy SBALEs 0 and 1 of the first SBAL in
 * the chain. */
#define ZFCP_QDIO_MAX_SBALES_PER_REQ     \
	(ZFCP_QDIO_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2)

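/*
 * Worked example of the limit above (a sketch, assuming the usual
 * QDIO_MAX_ELEMENTS_PER_BUFFER of 16 from <asm/qdio.h>):
 *
 *	ZFCP_QDIO_MAX_SBALES_PER_SBAL = 16 - 1 = 15
 *	ZFCP_QDIO_MAX_SBALES_PER_REQ  = 36 * 15 - 2 = 538
 *
 * i.e. a maximally chained request can carry up to 538 data SBALEs.
 */
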
/**
 * struct zfcp_qdio - basic qdio data structure
 * @res_q: response queue
 * @req_q: request queue
 * @req_q_idx: index of next free buffer
 * @req_q_free: number of free buffers in queue
 * @stat_lock: lock to protect req_q_util and req_q_time
 * @req_q_lock: lock to serialize access to request queue
 * @req_q_time: time of last fill level change
 * @req_q_util: used for accounting of the request queue fill level
 * @req_q_full: queue full incidents
 * @req_q_wq: used to wait for SBAL availability
 * @adapter: adapter used in conjunction with this qdio structure
 */
struct zfcp_qdio {
	struct qdio_buffer	*res_q[QDIO_MAX_BUFFERS_PER_Q];
	struct qdio_buffer	*req_q[QDIO_MAX_BUFFERS_PER_Q];
	u8			req_q_idx;
	atomic_t		req_q_free;
	spinlock_t		stat_lock;
	spinlock_t		req_q_lock;
	unsigned long long	req_q_time;
	u64			req_q_util;
	atomic_t		req_q_full;
	wait_queue_head_t	req_q_wq;
	struct zfcp_adapter	*adapter;
};

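/*
 * How the request queue ring is tracked (a conceptual sketch derived
 * from the fields above, not a verbatim excerpt of zfcp_qdio.c):
 * @req_q_idx and @req_q_free describe a circular buffer of
 * QDIO_MAX_BUFFERS_PER_Q SBALs.  Consuming n SBALs for a request
 * amounts, roughly, to
 *
 *	qdio->req_q_idx = (qdio->req_q_idx + n) % QDIO_MAX_BUFFERS_PER_Q;
 *	atomic_sub(n, &qdio->req_q_free);
 *
 * with both updates done while @req_q_lock is held; waiters on
 * @req_q_wq are woken once completed SBALs are returned to the free
 * pool by the qdio completion handling.
 */
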
/**
 * struct zfcp_qdio_req - qdio queue related values for a request
 * @sbtype: sbal type flags for sbale 0
 * @sbal_number: number of sbals used by this request
 * @sbal_first: first sbal for this request
 * @sbal_last: last sbal for this request
 * @sbal_limit: last possible sbal for this request
 * @sbale_curr: index of the current sbale within the last sbal
 * @sbal_response: sbal used in interrupt
 * @qdio_outb_usage: usage of outbound queue
 */
struct zfcp_qdio_req {
	u32	sbtype;
	u8	sbal_number;
	u8	sbal_first;
	u8	sbal_last;
	u8	sbal_limit;
	u8	sbale_curr;
	u8	sbal_response;
	u16	qdio_outb_usage;
};

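/*
 * Life cycle of the cursor fields, as implied by the inline helpers
 * below (illustrative only):
 *
 *	zfcp_qdio_req_init():	sbal_first = sbal_last = req_q_idx,
 *				sbal_number = 1, sbale_curr = 1,
 *				sbal_limit = last SBAL this request may use
 *	zfcp_qdio_fill_next():	sbale_curr++ within the current SBAL
 *	zfcp_qdio_skip_to_last_sbale():
 *				sbale_curr = ZFCP_QDIO_LAST_SBALE_PER_SBAL
 *
 * Crossing into a new SBAL (updating sbal_last and sbal_number) is
 * left to the scatter-gather mapping code in zfcp_qdio.c, not to
 * these helpers.
 */
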
/**
 * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: pointer to qdio_buffer_element (sbale) structure
 */
static inline struct qdio_buffer_element *
zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	return &qdio->req_q[q_req->sbal_last]->element[0];
}

/**
 * zfcp_qdio_sbale_curr - return current sbale on req_q for a request
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: pointer to qdio_buffer_element (sbale) structure
 */
static inline struct qdio_buffer_element *
zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	return &qdio->req_q[q_req->sbal_last]->element[q_req->sbale_curr];
}

/**
 * zfcp_qdio_req_init - initialize qdio request
 * @qdio: pointer to the struct zfcp_qdio holding the request queue
 * @q_req: the qdio request to start
 * @req_id: the request id
 * @sbtype: type flags to set for all sbals
 * @data: first data block
 * @len: length of first data block
 *
 * This is the first step of putting the request into the queue; the
 * last step is passing the request to zfcp_qdio_send. The request
 * queue lock must be held during the whole process from init to send.
 */
static inline
void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			unsigned long req_id, u32 sbtype, void *data, u32 len)
{
	struct qdio_buffer_element *sbale;
	int count = min(atomic_read(&qdio->req_q_free),
			ZFCP_QDIO_MAX_SBALS_PER_REQ);

	q_req->sbal_first = q_req->sbal_last = qdio->req_q_idx;
	q_req->sbal_number = 1;
	q_req->sbtype = sbtype;
	q_req->sbale_curr = 1;
	q_req->sbal_limit = (q_req->sbal_first + count - 1)
					% QDIO_MAX_BUFFERS_PER_Q;

	/* SBALE 0 carries the request ID and the command type flags ... */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->addr = (void *) req_id;
	sbale->flags = SBAL_FLAGS0_COMMAND | sbtype;

	if (unlikely(!data))
		return;
	/* ... SBALE 1 carries the first data block, if one was given. */
	sbale++;
	sbale->addr = data;
	sbale->length = len;
}

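/*
 * Usage sketch (illustrative only): building and sending a request
 * with a single data block.  The variables req, some_buf and some_len
 * are hypothetical caller-side names; zfcp_qdio_send() is the send
 * routine referred to in the comment above and is declared elsewhere
 * in the driver.  The request queue lock is assumed to be held for
 * the whole sequence, as required above.
 *
 *	zfcp_qdio_req_init(qdio, &req->qdio_req, req->req_id,
 *			   SBAL_FLAGS0_TYPE_WRITE, some_buf, some_len);
 *	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
 *	retval = zfcp_qdio_send(qdio, &req->qdio_req);
 *
 * Additional data blocks within the same SBAL can be attached with
 * zfcp_qdio_fill_next() before setting the last-entry flag; see below.
 */
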
/**
 * zfcp_qdio_fill_next - Fill next sbale, only for single sbal requests
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @data: pointer to the data block
 * @len: length of the data block
 *
 * This is only required for single sbal requests; calling it when
 * wrapping around to the next sbal is a bug.
 */
static inline
void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			 void *data, u32 len)
{
	struct qdio_buffer_element *sbale;

	BUG_ON(q_req->sbale_curr == ZFCP_QDIO_LAST_SBALE_PER_SBAL);
	q_req->sbale_curr++;
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->addr = data;
	sbale->length = len;
}

/**
 * zfcp_qdio_set_sbale_last - set last entry flag in current sbale
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 */
static inline
void zfcp_qdio_set_sbale_last(struct zfcp_qdio *qdio,
			      struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
}

/**
 * zfcp_qdio_sg_one_sbale - check if one sbale is enough for sg data
 * @sg: the scatterlist whose data size is checked
 *
 * Returns: 1 when one sbale is enough for the data in the scatterlist,
 *	    0 if not.
 */
static inline
int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)
{
	return sg_is_last(sg) && sg->length <= ZFCP_QDIO_SBALE_LEN;
}

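/*
 * Example (illustrative): a scatterlist consisting of exactly one
 * entry of at most ZFCP_QDIO_SBALE_LEN (i.e. PAGE_SIZE) bytes fits
 * into a single SBALE, so a caller can attach it with a single
 * zfcp_qdio_fill_next() instead of building a longer SBAL chain.
 */
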
/**
 * zfcp_qdio_skip_to_last_sbale - skip to last sbale in sbal
 * @q_req: The current zfcp_qdio_req
 */
static inline
void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio_req *q_req)
{
	q_req->sbale_curr = ZFCP_QDIO_LAST_SBALE_PER_SBAL;
}

/**
 * zfcp_qdio_sbal_limit - set the sbal limit for a request in q_req
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: The current zfcp_qdio_req
 * @max_sbals: maximum number of SBALs allowed
 */
static inline
void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
			  struct zfcp_qdio_req *q_req, int max_sbals)
{
	int count = min(atomic_read(&qdio->req_q_free), max_sbals);

	q_req->sbal_limit = (q_req->sbal_first + count - 1) %
				QDIO_MAX_BUFFERS_PER_Q;
}

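/*
 * Worked example of the wrap-around above (a sketch, assuming the
 * usual QDIO_MAX_BUFFERS_PER_Q of 128 from <asm/qdio.h>): with
 * sbal_first == 126 and count == 5,
 *
 *	sbal_limit = (126 + 5 - 1) % 128 = 2
 *
 * i.e. the limit correctly wraps around to the start of the circular
 * request queue.
 */
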
/**
 * zfcp_qdio_set_data_div - set data division count
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: The current zfcp_qdio_req
 * @count: The data division count
 */
static inline
void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio,
			    struct zfcp_qdio_req *q_req, u32 count)
{
	struct qdio_buffer_element *sbale;

	sbale = &qdio->req_q[q_req->sbal_first]->element[0];
	sbale->length = count;
}

#endif /* ZFCP_QDIO_H */