Lines Matching defs:xprt

12 #include <linux/sunrpc/xprt.h>
22 unsigned int xprt_bc_max_slots(struct rpc_xprt *xprt)
31 static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
33 return xprt->bc_alloc_count < xprt->bc_alloc_max;
75 static struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt)
85 req->rq_xprt = xprt;
123 int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
125 if (!xprt->ops->bc_setup)
127 return xprt->ops->bc_setup(xprt, min_reqs);
131 int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs)
153 req = xprt_alloc_bc_req(xprt);
167 spin_lock(&xprt->bc_pa_lock);
168 list_splice(&tmp_list, &xprt->bc_pa_list);
169 xprt->bc_alloc_count += min_reqs;
170 xprt->bc_alloc_max += min_reqs;
171 atomic_add(min_reqs, &xprt->bc_slot_count);
172 spin_unlock(&xprt->bc_pa_lock);
195 * @xprt: the transport holding the preallocated structures
202 void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
204 if (xprt->ops->bc_destroy)
205 xprt->ops->bc_destroy(xprt, max_reqs);
209 void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
218 spin_lock_bh(&xprt->bc_pa_lock);
219 xprt->bc_alloc_max -= min(max_reqs, xprt->bc_alloc_max);
220 list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
224 xprt->bc_alloc_count--;
225 atomic_dec(&xprt->bc_slot_count);
229 spin_unlock_bh(&xprt->bc_pa_lock);
233 list_empty(&xprt->bc_pa_list) ? "true" : "false");
236 static struct rpc_rqst *xprt_get_bc_request(struct rpc_xprt *xprt, __be32 xid,
242 if (list_empty(&xprt->bc_pa_list)) {
245 if (atomic_read(&xprt->bc_slot_count) >= BC_MAX_SLOTS)
247 list_add_tail(&new->rq_bc_pa_list, &xprt->bc_pa_list);
248 xprt->bc_alloc_count++;
249 atomic_inc(&xprt->bc_slot_count);
251 req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
257 req->rq_connect_cookie = xprt->connect_cookie;
269 struct rpc_xprt *xprt = req->rq_xprt;
271 xprt->ops->bc_free_rqst(req);
276 struct rpc_xprt *xprt = req->rq_xprt;
280 req->rq_connect_cookie = xprt->connect_cookie - 1;
289 spin_lock_bh(&xprt->bc_pa_lock);
290 if (xprt_need_to_requeue(xprt)) {
294 list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
295 xprt->bc_alloc_count++;
296 atomic_inc(&xprt->bc_slot_count);
299 spin_unlock_bh(&xprt->bc_pa_lock);
310 xprt_put(xprt);
324 struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
329 spin_lock(&xprt->bc_pa_lock);
330 list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
331 if (req->rq_connect_cookie != xprt->connect_cookie)
336 req = xprt_get_bc_request(xprt, xid, new);
338 spin_unlock(&xprt->bc_pa_lock);
345 new = xprt_alloc_bc_req(xprt);
356 struct rpc_xprt *xprt = req->rq_xprt;
357 struct svc_serv *bc_serv = xprt->bc_serv;
359 spin_lock(&xprt->bc_pa_lock);
361 xprt->bc_alloc_count--;
362 spin_unlock(&xprt->bc_pa_lock);
368 xprt_get(xprt);