// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015-2020, Oracle and/or its affiliates.
 *
 * Support for reverse-direction RPCs on RPC/RDMA.
 */

#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#undef RPCRDMA_BACKCHANNEL_DEBUG

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

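	/* The backchannel is provisioned with a fixed credit limit:
	 * half of the Work Requests reserved for the backward
	 * direction. @reqs is recorded by the trace point but does
	 * not change the allocation.
	 */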
	r_xprt->rx_buf.rb_bc_srv_max_requests = RPCRDMA_BACKWARD_WRS >> 1;
	trace_xprtrdma_cb_setup(r_xprt, reqs);
	return 0;
}

/**
 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
 * @xprt: transport
 *
 * Returns maximum size, in bytes, of a backchannel message
 */
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	size_t maxmsg;

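	/* Backchannel messages carry no chunks; they must fit
	 * entirely inline. The payload is therefore bounded by the
	 * smaller of the connection's inline send and receive
	 * thresholds, capped at one page, less the fixed-size
	 * transport header.
	 */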
	maxmsg = min_t(unsigned int, ep->re_inline_send, ep->re_inline_recv);
	maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
	return maxmsg - RPCRDMA_HDRLEN_MIN;
}

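/**
 * xprt_rdma_bc_max_slots - Return maximum backchannel slot count
 * @xprt: transport
 *
 * Returns the fixed number of backchannel request slots this
 * transport supports
 */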
unsigned int xprt_rdma_bc_max_slots(struct rpc_xprt *xprt)
{
	return RPCRDMA_BACKWARD_WRS >> 1;
}

static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	__be32 *p;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
			rdmab_data(req->rl_rdmabuf), rqst);

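	/* Reserve all 28 bytes (RPCRDMA_HDRLEN_MIN) of the transport
	 * header at once. A backchannel reply header is a fixed
	 * seven XDR words: XID, version, credits, and procedure
	 * (rdma_msg), followed by three empty chunk lists.
	 */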
	p = xdr_reserve_space(&req->rl_stream, 28);
	if (unlikely(!p))
		return -EIO;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
	*p++ = rdma_msg;
	*p++ = xdr_zero;	/* empty Read list */
	*p++ = xdr_zero;	/* empty Write list */
	*p = xdr_zero;		/* no Reply chunk */

	if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
				      &rqst->rq_snd_buf, rpcrdma_noch_pullup))
		return -EIO;

	trace_xprtrdma_cb_reply(r_xprt, rqst);
	return 0;
}

/**
 * xprt_rdma_bc_send_reply - marshal and send a backchannel reply
 * @rqst: RPC rqst with a backchannel RPC reply in rq_snd_buf
 *
 * Caller holds the transport's write lock.
 *
 * Returns:
 *	%0 if the RPC message has been sent
 *	%-ENOTCONN if the caller should reconnect and call again
 *	%-EBADSLT if no congestion control slot is available
 *	%-EIO if a permanent error occurred and the request was not
 *		sent. Do not try to send this message again.
 */
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	int rc;

	if (!xprt_connected(xprt))
		return -ENOTCONN;

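	/* A backchannel reply consumes a congestion control credit,
	 * just like a forward request. Bail out if none is available.
	 */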
	if (!xprt_request_get_cong(xprt, rqst))
		return -EBADSLT;

	rc = rpcrdma_bc_marshal_reply(rqst);
	if (rc < 0)
		goto failed_marshal;

	if (frwr_send(r_xprt, req))
		goto drop_connection;
	return 0;

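/* Connection loss lets the caller reconnect and send again; any
 * other marshaling error is permanent, so hand it back unchanged.
 */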
failed_marshal:
	if (rc != -ENOTCONN)
		return rc;
drop_connection:
	xprt_rdma_close(xprt);
	return -ENOTCONN;
}

/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpc_rqst *rqst, *tmp;

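	/* Each rqst is unlinked while the list lock is held, then the
	 * lock is dropped around rpcrdma_req_destroy(), which may
	 * sleep.
	 */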
	spin_lock(&xprt->bc_pa_lock);
	list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		list_del(&rqst->rq_bc_pa_list);
		spin_unlock(&xprt->bc_pa_lock);

		rpcrdma_req_destroy(rpcr_to_rdmar(rqst));

		spin_lock(&xprt->bc_pa_lock);
	}
	spin_unlock(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_rep *rep = req->rl_reply;
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	rpcrdma_rep_put(&r_xprt->rx_buf, rep);
	req->rl_reply = NULL;

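	/* Return the rqst to the free list so it can service the
	 * next reverse-direction Call.
	 */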
	spin_lock(&xprt->bc_pa_lock);
	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
	xprt_put(xprt);
}

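/* Get a free backchannel rqst from the transport's pre-allocated
 * list, or lazily create a new one, up to a fixed ceiling that
 * keeps a misbehaving remote from exhausting our resources.
 */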
static struct rpc_rqst *rpcrdma_bc_rqst_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	size_t size;

	spin_lock(&xprt->bc_pa_lock);
	rqst = list_first_entry_or_null(&xprt->bc_pa_list, struct rpc_rqst,
					rq_bc_pa_list);
	if (!rqst)
		goto create_req;
	list_del(&rqst->rq_bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
	return rqst;

create_req:
	spin_unlock(&xprt->bc_pa_lock);

	/* Set a limit to prevent a remote from overrunning our resources.
	 */
	if (xprt->bc_alloc_count >= RPCRDMA_BACKWARD_WRS)
		return NULL;

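	/* Size the send buffer to the inline threshold, capped at
	 * one page, matching the backchannel maximum payload.
	 */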
	size = min_t(size_t, r_xprt->rx_ep->re_inline_recv, PAGE_SIZE);
	req = rpcrdma_req_create(r_xprt, size);
	if (!req)
		return NULL;
	if (rpcrdma_req_setup(r_xprt, req)) {
		rpcrdma_req_destroy(req);
		return NULL;
	}

	xprt->bc_alloc_count++;
	rqst = &req->rl_slot;
	rqst->rq_xprt = xprt;
	__set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
	xdr_buf_init(&rqst->rq_snd_buf, rdmab_data(req->rl_sendbuf), size);
	return rqst;
}

/**
 * rpcrdma_bc_receive_call - Handle a reverse-direction Call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server
 *      forechannel currently does
 *    o The ULP manages a replay cache (e.g., NFSv4.1 sessions).
 *      No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
			     struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct svc_serv *bc_serv;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct xdr_buf *buf;
	size_t size;
	__be32 *p;

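	/* Decoding zero bytes yields a pointer to the start of the
	 * received Call without consuming any of the stream.
	 */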
	p = xdr_inline_decode(&rep->rr_stream, 0);
	size = xdr_stream_remaining(&rep->rr_stream);

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC:       %s: callback XID %08x, length=%zu\n",
		__func__, be32_to_cpup(p), size);
	pr_info("RPC:       %s: %*ph\n", __func__, (int)size, p);
#endif

	rqst = rpcrdma_bc_rqst_get(r_xprt);
	if (!rqst)
		goto out_overflow;

	rqst->rq_reply_bytes_recvd = 0;
	rqst->rq_xid = *p;

	rqst->rq_private_buf.len = size;

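	/* Point rq_rcv_buf directly at the RPC message in the
	 * receive buffer so the Call can be decoded in place,
	 * without copying.
	 */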
	buf = &rqst->rq_rcv_buf;
	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = p;
	buf->head[0].iov_len = size;
	buf->len = size;

	/* The receive buffer has to be hooked to the rpcrdma_req
	 * so that it is not released while the req is pointing
	 * to its buffer, and so that it can be reposted after
	 * the Upper Layer is done decoding it.
	 */
	req = rpcr_to_rdmar(rqst);
	req->rl_reply = rep;
	trace_xprtrdma_cb_call(r_xprt, rqst);

	/* Queue rqst for the ULP's callback service. A reference on
	 * the transport is held while the rqst is outstanding; it is
	 * released by xprt_rdma_bc_free_rqst().
	 */
	bc_serv = xprt->bc_serv;
	xprt_get(xprt);
	lwq_enqueue(&rqst->rq_bc_list, &bc_serv->sv_cb_list);

	svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);

	r_xprt->rx_stats.bcall_count++;
	return;

out_overflow:
	pr_warn("RPC/RDMA backchannel overflow\n");
	xprt_force_disconnect(xprt);
	/* This receive buffer gets reposted automatically
	 * when the connection is re-established.
	 */
	return;
}