Lines matching defs:xprt (symbol cross-reference; the fragments below are from the NFS/RDMA client transport, net/sunrpc/xprtrdma/transport.c)

148 xprt_rdma_format_addresses4(struct rpc_xprt *xprt, struct sockaddr *sap)
154 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
156 xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA;
160 xprt_rdma_format_addresses6(struct rpc_xprt *xprt, struct sockaddr *sap)
166 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
168 xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA6;
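Context: lines 148-168 belong to the per-family address formatters. A minimal sketch of the IPv4 variant, assuming mainline transport.c; the lines that did not match "xprt" are reconstructed, not verbatim. The IPv6 variant at line 160 mirrors this with struct sockaddr_in6 and RPCBIND_NETID_RDMA6.

static void
xprt_rdma_format_addresses4(struct rpc_xprt *xprt, struct sockaddr *sap)
{
        struct sockaddr_in *sin = (struct sockaddr_in *)sap;
        char buf[20];

        /* Raw address rendered as hex for /proc consumers */
        snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
        xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

        /* Static string; deliberately not kstrdup'd */
        xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA;
}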
172 xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap)
178 xprt_rdma_format_addresses4(xprt, sap);
181 xprt_rdma_format_addresses6(xprt, sap);
189 xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);
192 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
195 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
197 xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";
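Context: lines 172-197 are the generic formatter that dispatches on address family and then fills in the family-independent strings. A sketch, again assuming mainline transport.c:

static void
xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap)
{
        char buf[128];

        switch (sap->sa_family) {
        case AF_INET:
                xprt_rdma_format_addresses4(xprt, sap);
                break;
        case AF_INET6:
                xprt_rdma_format_addresses6(xprt, sap);
                break;
        default:
                pr_err("rpcrdma: Unrecognized address family\n");
                return;
        }

        /* Presentation address and port, decimal and hex */
        (void)rpc_ntop(sap, buf, sizeof(buf));
        xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);

        snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
        xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

        snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
        xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);

        /* Static string; deliberately not kstrdup'd */
        xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";
}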
201 xprt_rdma_free_addresses(struct rpc_xprt *xprt)
211 kfree(xprt->address_strings[i]);
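Context: the kfree at line 211 sits inside a loop that must skip the two entries that are static strings (PROTO and NETID, set above at 197 and 156/168). A sketch consistent with those lines:

void
xprt_rdma_free_addresses(struct rpc_xprt *xprt)
{
        unsigned int i;

        for (i = 0; i < RPC_DISPLAY_MAX; i++)
                switch (i) {
                case RPC_DISPLAY_PROTO:
                case RPC_DISPLAY_NETID:
                        continue;       /* static strings, never allocated */
                default:
                        kfree(xprt->address_strings[i]);
                }
}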
219 * Requester holds the xprt's send lock to prevent activity on this
221 * sleep on the xprt's pending queue waiting for connect to complete.
228 struct rpc_xprt *xprt = &r_xprt->rx_xprt;
232 if (atomic_read(&xprt->swapper))
235 xprt_clear_connecting(xprt);
237 xprt->connect_cookie++;
238 xprt->stat.connect_count++;
239 xprt->stat.connect_time += (long)jiffies -
240 xprt->stat.connect_start;
241 xprt_set_connected(xprt);
245 xprt_unlock_connect(xprt, r_xprt);
246 xprt_wake_pending_tasks(xprt, rc);
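Context: lines 219-246 are xprt_rdma_connect_worker(). A sketch of how the matched fragments fit together, assuming a recent mainline tree; the PF_MEMALLOC handling around the swapper check is an assumption based on the NFS swap-over-RDMA support and may not match older kernels:

static void
xprt_rdma_connect_worker(struct work_struct *work)
{
        struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
                                                   rx_connect_worker.work);
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        unsigned int pflags = current->flags;
        int rc;

        /* A connect on a swap-backed transport must not recurse
         * into memory reclaim. */
        if (atomic_read(&xprt->swapper))
                current->flags |= PF_MEMALLOC;
        rc = rpcrdma_xprt_connect(r_xprt);
        xprt_clear_connecting(xprt);
        if (!rc) {
                xprt->connect_cookie++;
                xprt->stat.connect_count++;
                xprt->stat.connect_time += (long)jiffies -
                                           xprt->stat.connect_start;
                xprt_set_connected(xprt);
                /* Wake waiters with -EAGAIN so they re-drive the RPC */
                rc = -EAGAIN;
        } else
                rpcrdma_xprt_disconnect(r_xprt);
        xprt_unlock_connect(xprt, r_xprt);
        xprt_wake_pending_tasks(xprt, rc);
        current_restore_flags(pflags, PF_MEMALLOC);
}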
252 * @xprt: transport context
254 * If @xprt is connected, disconnect it to simulate spurious
255 * connection loss. Caller must hold @xprt's send lock to
260 xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
262 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
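Context: the kdoc at 250-255 documents xprt_rdma_inject_disconnect(), used by fault injection. A sketch of the body, assuming a mainline tree where the RDMA CM id lives at rx_ep->re_id (older trees used rx_ia.ri_id):

void xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

        trace_xprtrdma_op_inject_dsc(r_xprt);
        /* Tear down the RDMA connection to simulate spurious loss */
        rdma_disconnect(r_xprt->rx_ep->re_id);
}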
270 * @xprt: doomed transport context
273 * this @xprt.
276 xprt_rdma_destroy(struct rpc_xprt *xprt)
278 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
285 xprt_rdma_free_addresses(xprt);
286 xprt_free(xprt);
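Context: lines 270-286 are xprt_rdma_destroy(). A sketch of the teardown ordering around the matched lines, assuming mainline helper names (rpcrdma_xprt_disconnect, rpcrdma_buffer_destroy):

static void
xprt_rdma_destroy(struct rpc_xprt *xprt)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

        /* No new connect attempts once destruction starts */
        cancel_delayed_work_sync(&r_xprt->rx_connect_worker);

        rpcrdma_xprt_disconnect(r_xprt);
        rpcrdma_buffer_destroy(&r_xprt->rx_buf);

        xprt_rdma_free_addresses(xprt);
        xprt_free(xprt);

        module_put(THIS_MODULE);
}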
305 struct rpc_xprt *xprt;
310 if (args->addrlen > sizeof(xprt->addr))
316 xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), 0,
318 if (!xprt) {
323 xprt->timeout = &xprt_rdma_default_timeout;
324 xprt->connect_timeout = xprt->timeout->to_initval;
325 xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
326 xprt->bind_timeout = RPCRDMA_BIND_TO;
327 xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
328 xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;
330 xprt->resvport = 0; /* privileged port not needed */
331 xprt->ops = &xprt_rdma_procs;
338 /* Ensure xprt->addr holds valid server TCP (not RDMA)
340 xprt->prot = IPPROTO_TCP;
341 xprt->xprt_class = &xprt_rdma;
342 xprt->addrlen = args->addrlen;
343 memcpy(&xprt->addr, sap, xprt->addrlen);
346 xprt_set_bound(xprt);
347 xprt_rdma_format_addresses(xprt, sap);
349 new_xprt = rpcx_to_rdmax(xprt);
352 xprt_rdma_free_addresses(xprt);
353 xprt_free(xprt);
361 xprt->max_payload = RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT;
363 return xprt;
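Context: lines 305-363 are xprt_setup_rdma(), the xprt_class setup method. A condensed reconstruction showing how the matched lines relate; the module refcounting, the rpcrdma_buffer_create() call, and the error-path ordering are taken from mainline and are assumptions for any other tree:

static struct rpc_xprt *
xprt_setup_rdma(struct xprt_create *args)
{
        struct rpcrdma_xprt *new_xprt;
        struct rpc_xprt *xprt;
        struct sockaddr *sap;
        int rc;

        if (args->addrlen > sizeof(xprt->addr))
                return ERR_PTR(-EBADF);

        if (!try_module_get(THIS_MODULE))
                return ERR_PTR(-EIO);

        xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), 0,
                          xprt_rdma_slot_table_entries);
        if (!xprt) {
                module_put(THIS_MODULE);
                return ERR_PTR(-ENOMEM);
        }

        xprt->timeout = &xprt_rdma_default_timeout;
        xprt->connect_timeout = xprt->timeout->to_initval;
        xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
        xprt->bind_timeout = RPCRDMA_BIND_TO;
        xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
        xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;

        xprt->resvport = 0;             /* privileged port not needed */
        xprt->ops = &xprt_rdma_procs;

        sap = args->dstaddr;

        /* Ensure xprt->addr holds valid server TCP (not RDMA)
         * address, for any side protocols which peek at it */
        xprt->prot = IPPROTO_TCP;
        xprt->xprt_class = &xprt_rdma;
        xprt->addrlen = args->addrlen;
        memcpy(&xprt->addr, sap, xprt->addrlen);

        if (rpc_get_port(sap))
                xprt_set_bound(xprt);
        xprt_rdma_format_addresses(xprt, sap);

        new_xprt = rpcx_to_rdmax(xprt);
        rc = rpcrdma_buffer_create(new_xprt);
        if (rc) {
                xprt_rdma_free_addresses(xprt);
                xprt_free(xprt);
                module_put(THIS_MODULE);
                return ERR_PTR(rc);
        }

        INIT_DELAYED_WORK(&new_xprt->rx_connect_worker,
                          xprt_rdma_connect_worker);

        xprt->max_payload = RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT;

        return xprt;
}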
368 * @xprt: transport context
372 * Caller holds @xprt's send lock to prevent activity on this
375 void xprt_rdma_close(struct rpc_xprt *xprt)
377 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
381 xprt->reestablish_timeout = 0;
382 ++xprt->connect_cookie;
383 xprt_disconnect_done(xprt);
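Context: lines 368-383 are xprt_rdma_close(), called during autoclose or device removal. A sketch assuming the mainline rpcrdma_xprt_disconnect() helper:

void xprt_rdma_close(struct rpc_xprt *xprt)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

        rpcrdma_xprt_disconnect(r_xprt);

        /* Reconnect immediately, and invalidate in-flight cookies */
        xprt->reestablish_timeout = 0;
        ++xprt->connect_cookie;
        xprt_disconnect_done(xprt);
}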
388 * @xprt: controlling RPC transport
394 xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
396 struct sockaddr *sap = (struct sockaddr *)&xprt->addr;
401 kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
403 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
405 kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
407 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
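Context: lines 388-407 are xprt_rdma_set_port(), invoked after rpcbind resolves the server port. It must replace the two port presentation strings built earlier at 192/195. A sketch assuming mainline:

static void
xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
{
        struct sockaddr *sap = (struct sockaddr *)&xprt->addr;
        char buf[8];

        rpc_set_port(sap, port);

        kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
        snprintf(buf, sizeof(buf), "%u", port);
        xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

        kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
        snprintf(buf, sizeof(buf), "%4hx", port);
        xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
}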
412 * @xprt: controlling RPC transport
424 xprt_rdma_timer(struct rpc_xprt *xprt, struct rpc_task *task)
426 xprt_force_disconnect(xprt);
431 * @xprt: controlling transport instance
436 static void xprt_rdma_set_connect_timeout(struct rpc_xprt *xprt,
440 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
444 spin_lock(&xprt->transport_lock);
446 if (connect_timeout < xprt->connect_timeout) {
450 to = *xprt->timeout;
457 xprt->timeout = &r_xprt->rx_timeout;
458 xprt->connect_timeout = connect_timeout;
461 if (reconnect_timeout < xprt->max_reconnect_timeout)
462 xprt->max_reconnect_timeout = reconnect_timeout;
464 spin_unlock(&xprt->transport_lock);
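Context: lines 431-464 are xprt_rdma_set_connect_timeout(). When the new connect timeout is shorter, the transport clones the rpc_timeout into per-transport storage (r_xprt->rx_timeout, seen at line 457) so it can be adjusted without touching the shared default. A sketch, assuming mainline; the RPCRDMA_INIT_REEST_TO clamp is mainline behavior:

static void xprt_rdma_set_connect_timeout(struct rpc_xprt *xprt,
                                          unsigned long connect_timeout,
                                          unsigned long reconnect_timeout)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

        spin_lock(&xprt->transport_lock);

        if (connect_timeout < xprt->connect_timeout) {
                struct rpc_timeout to;
                unsigned long initval;

                to = *xprt->timeout;
                initval = connect_timeout;
                /* Never retry faster than twice the initial
                 * reestablish timeout */
                if (initval < RPCRDMA_INIT_REEST_TO << 1)
                        initval = RPCRDMA_INIT_REEST_TO << 1;
                to.to_initval = initval;
                to.to_maxval = initval;
                r_xprt->rx_timeout = to;
                xprt->timeout = &r_xprt->rx_timeout;
                xprt->connect_timeout = connect_timeout;
        }

        if (reconnect_timeout < xprt->max_reconnect_timeout)
                xprt->max_reconnect_timeout = reconnect_timeout;

        spin_unlock(&xprt->transport_lock);
}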
469 * @xprt: transport state
474 xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
476 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
480 WARN_ON_ONCE(!xprt_lock_connect(xprt, task, r_xprt));
484 delay = xprt_reconnect_delay(xprt);
485 xprt_reconnect_backoff(xprt, RPCRDMA_INIT_REEST_TO);
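Context: lines 469-485 are xprt_rdma_connect(), which only schedules the connect worker; the real work happens in xprt_rdma_connect_worker() above. A sketch assuming mainline (the rx_ep/re_connect_status test and system_long_wq are mainline details):

static void
xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        unsigned long delay;

        WARN_ON_ONCE(!xprt_lock_connect(xprt, task, r_xprt));

        /* Back off only when re-connecting after a failure */
        delay = 0;
        if (ep && ep->re_connect_status != 0) {
                delay = xprt_reconnect_delay(xprt);
                xprt_reconnect_backoff(xprt, RPCRDMA_INIT_REEST_TO);
        }
        queue_delayed_work(system_long_wq, &r_xprt->rx_connect_worker,
                           delay);
}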
493 * @xprt: controlling RPC transport
501 xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
503 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
515 xprt_add_backlog(xprt, task);
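Context: lines 493-515 are xprt_rdma_alloc_slot(). Unlike socket transports, the rpc_rqst is embedded in a pre-allocated rpcrdma_req; when none is available the task parks on the backlog queue (line 515). A sketch assuming mainline, where the embedded slot is req->rl_slot:

static void
xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        struct rpcrdma_req *req;

        req = rpcrdma_buffer_get(&r_xprt->rx_buf);
        if (!req)
                goto out_sleep;
        task->tk_rqstp = &req->rl_slot;
        task->tk_status = 0;
        return;

out_sleep:
        task->tk_status = -ENOMEM;
        xprt_add_backlog(xprt, task);
}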
520 * @xprt: controlling RPC transport
525 xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
528 container_of(xprt, struct rpcrdma_xprt, rx_xprt);
531 if (!xprt_wake_up_backlog(xprt, rqst)) {
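Context: lines 520-531 are xprt_rdma_free_slot(). If no backlogged task can take over the slot, it is scrubbed and returned to the buffer pool. A sketch assuming mainline helper names (rpcrdma_reply_put, rpcrdma_buffer_put); older trees differ here:

static void
xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
{
        struct rpcrdma_xprt *r_xprt =
                container_of(xprt, struct rpcrdma_xprt, rx_xprt);

        rpcrdma_reply_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
        if (!xprt_wake_up_backlog(xprt, rqst)) {
                memset(rqst, 0, sizeof(*rqst));
                rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
        }
}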
624 struct rpc_xprt *xprt = rqst->rq_xprt;
626 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
634 if (!xprt_connected(xprt))
637 if (!xprt_request_get_cong(xprt, rqst))
645 if (rqst->rq_connect_cookie == xprt->connect_cookie)
665 xprt_rdma_close(xprt);
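Context: lines 624-665 are from xprt_rdma_send_request(). The connect-cookie check at line 645 suppresses retransmission on the same connection, because RPC/RDMA credits would be double-spent; a failed send instead drops the connection (the close at line 665). A condensed sketch, assuming mainline names (rpcrdma_marshal_req, rpcrdma_post_sends), which vary across versions:

static int
xprt_rdma_send_request(struct rpc_rqst *rqst)
{
        struct rpc_xprt *xprt = rqst->rq_xprt;
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        int rc = 0;

        if (!xprt_connected(xprt))
                return -ENOTCONN;
        if (!xprt_request_get_cong(xprt, rqst))
                return -EBADSLT;        /* wait for a send credit */

        rc = rpcrdma_marshal_req(r_xprt, rqst);
        if (rc < 0)
                goto failed_marshal;

        /* Must suppress retransmit to maintain credits */
        if (rqst->rq_connect_cookie == xprt->connect_cookie)
                goto drop_connection;
        rqst->rq_xtime = ktime_get();

        if (rpcrdma_post_sends(r_xprt, req))
                goto drop_connection;

        rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;
        return 0;

failed_marshal:
        if (rc != -ENOTCONN)
                return rc;
drop_connection:
        xprt_rdma_close(xprt);          /* the match at line 665 */
        return -ENOTCONN;
}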
669 void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
671 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
674 if (xprt_connected(xprt))
675 idle_time = (long)(jiffies - xprt->last_used) / HZ;
680 xprt->stat.bind_count,
681 xprt->stat.connect_count,
682 xprt->stat.connect_time / HZ,
684 xprt->stat.sends,
685 xprt->stat.recvs,
686 xprt->stat.bad_xids,
687 xprt->stat.req_u,
688 xprt->stat.bklog_u);
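Context: lines 669-688 are the head of xprt_rdma_print_stats(), which emits the generic transport counters in the same "xprt:" format nfsstat and mountstats parse. A sketch of that leading seq_printf, assuming mainline (the zero placeholder for a local port is mainline's); the rpcrdma-specific counters that follow are omitted here:

void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        long idle_time = 0;

        if (xprt_connected(xprt))
                idle_time = (long)(jiffies - xprt->last_used) / HZ;

        seq_puts(seq, "\txprt:\trdma ");
        seq_printf(seq, "%u %lu %lu %lu %ld %lu %lu %lu %llu %llu ",
                   0,   /* need a local port? */
                   xprt->stat.bind_count,
                   xprt->stat.connect_count,
                   xprt->stat.connect_time / HZ,
                   idle_time,
                   xprt->stat.sends,
                   xprt->stat.recvs,
                   xprt->stat.bad_xids,
                   xprt->stat.req_u,
                   xprt->stat.bklog_u);
        /* ... followed by counters from r_xprt->rx_stats ... */
}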
711 xprt_rdma_enable_swap(struct rpc_xprt *xprt)
717 xprt_rdma_disable_swap(struct rpc_xprt *xprt)
727 .release_xprt = xprt_release_xprt_cong, /* sunrpc/xprt.c */
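Context: line 727 is from the xprt_rdma_procs ops table, which is what ties every function matched above into the generic sunrpc transport switch (it is installed at line 331). A partial sketch assuming mainline; entry names and the full set (buf_alloc/buf_free, timer, backchannel ops) vary by kernel version, and enable_swap/disable_swap (lines 711/717) are near-trivial stubs that declare swap support:

static const struct rpc_xprt_ops xprt_rdma_procs = {
        .reserve_xprt           = xprt_reserve_xprt_cong,
        .release_xprt           = xprt_release_xprt_cong, /* sunrpc/xprt.c */
        .alloc_slot             = xprt_rdma_alloc_slot,
        .free_slot              = xprt_rdma_free_slot,
        .release_request        = xprt_release_rqst_cong,
        .set_port               = xprt_rdma_set_port,
        .connect                = xprt_rdma_connect,
        .send_request           = xprt_rdma_send_request,
        .close                  = xprt_rdma_close,
        .destroy                = xprt_rdma_destroy,
        .set_connect_timeout    = xprt_rdma_set_connect_timeout,
        .print_stats            = xprt_rdma_print_stats,
        .enable_swap            = xprt_rdma_enable_swap,
        .disable_swap           = xprt_rdma_disable_swap,
        .inject_disconnect      = xprt_rdma_inject_disconnect,
        /* timer, buf_alloc/buf_free, and backchannel entries omitted */
};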