/* Maintain an RxRPC server socket to do AFS communications through
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <rxrpc/packet.h>
#include "internal.h"
#include "afs_cm.h"

static struct socket *afs_socket; /* my RxRPC socket */
static struct workqueue_struct *afs_async_calls;
static atomic_t afs_outstanding_calls;
static atomic_t afs_outstanding_skbs;

static void afs_wake_up_call_waiter(struct afs_call *);
static int afs_wait_for_call_to_complete(struct afs_call *);
static void afs_wake_up_async_call(struct afs_call *);
static int afs_dont_wait_for_call_to_complete(struct afs_call *);
static void afs_process_async_call(struct work_struct *);
static void afs_rx_interceptor(struct sock *, unsigned long, struct sk_buff *);
static int afs_deliver_cm_op_id(struct afs_call *, struct sk_buff *, bool);

/* synchronous call management */
const struct afs_wait_mode afs_sync_call = {
	.rx_wakeup	= afs_wake_up_call_waiter,
	.wait		= afs_wait_for_call_to_complete,
};

/* asynchronous call management */
const struct afs_wait_mode afs_async_call = {
	.rx_wakeup	= afs_wake_up_async_call,
	.wait		= afs_dont_wait_for_call_to_complete,
};

/* asynchronous incoming call management */
static const struct afs_wait_mode afs_async_incoming_call = {
	.rx_wakeup	= afs_wake_up_async_call,
};

/* asynchronous incoming call initial processing */
static const struct afs_call_type afs_RXCMxxxx = {
	.name		= "CB.xxxx",
	.deliver	= afs_deliver_cm_op_id,
	.abort_to_error	= afs_abort_to_error,
};

static void afs_collect_incoming_call(struct work_struct *);

static struct sk_buff_head afs_incoming_calls;
static DECLARE_WORK(afs_collect_incoming_call_work, afs_collect_incoming_call);

/*
 * open an RxRPC socket and bind it to be a server for callback notifications
 * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
 */
int afs_open_socket(void)
{
	struct sockaddr_rxrpc srx;
	struct socket *socket;
	int ret;

	_enter("");

	skb_queue_head_init(&afs_incoming_calls);

	afs_async_calls = create_singlethread_workqueue("kafsd");
	if (!afs_async_calls) {
		_leave(" = -ENOMEM [wq]");
		return -ENOMEM;
	}

	ret = sock_create_kern(AF_RXRPC, SOCK_DGRAM, PF_INET, &socket);
	if (ret < 0) {
		destroy_workqueue(afs_async_calls);
		_leave(" = %d [socket]", ret);
		return ret;
	}

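	/* use GFP_NOFS for this socket's allocations so that memory reclaim
	 * can't recurse back into the filesystem */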
	socket->sk->sk_allocation = GFP_NOFS;

	/* bind the callback manager's address to make this a server socket */
	srx.srx_family			= AF_RXRPC;
	srx.srx_service			= CM_SERVICE;
	srx.transport_type		= SOCK_DGRAM;
	srx.transport_len		= sizeof(srx.transport.sin);
	srx.transport.sin.sin_family	= AF_INET;
	srx.transport.sin.sin_port	= htons(AFS_CM_PORT);
	memset(&srx.transport.sin.sin_addr, 0,
	       sizeof(srx.transport.sin.sin_addr));

	ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
	if (ret < 0) {
		sock_release(socket);
		_leave(" = %d [bind]", ret);
		return ret;
	}

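	/* divert incoming packets to afs_rx_interceptor() as they arrive,
	 * rather than leaving them on the Rx queue for recvmsg() */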
	rxrpc_kernel_intercept_rx_messages(socket, afs_rx_interceptor);

	afs_socket = socket;
	_leave(" = 0");
	return 0;
}

/*
 * close the RxRPC socket AFS was using
 */
void afs_close_socket(void)
{
	_enter("");

	sock_release(afs_socket);

	_debug("dework");
	destroy_workqueue(afs_async_calls);

	ASSERTCMP(atomic_read(&afs_outstanding_skbs), ==, 0);
	ASSERTCMP(atomic_read(&afs_outstanding_calls), ==, 0);
	_leave("");
}

/*
 * note that the data in a socket buffer is now delivered and that the buffer
 * should be freed
 */
static void afs_data_delivered(struct sk_buff *skb)
{
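	/* afs_outstanding_skbs counts packets we've been given but not yet
	 * disposed of; afs_close_socket() asserts that it returns to zero */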
	if (!skb) {
		_debug("DLVR NULL [%d]", atomic_read(&afs_outstanding_skbs));
		dump_stack();
	} else {
		_debug("DLVR %p{%u} [%d]",
		       skb, skb->mark, atomic_read(&afs_outstanding_skbs));
		if (atomic_dec_return(&afs_outstanding_skbs) == -1)
			BUG();
		rxrpc_kernel_data_delivered(skb);
	}
}

/*
 * free a socket buffer
 */
static void afs_free_skb(struct sk_buff *skb)
{
	if (!skb) {
		_debug("FREE NULL [%d]", atomic_read(&afs_outstanding_skbs));
		dump_stack();
	} else {
		_debug("FREE %p{%u} [%d]",
		       skb, skb->mark, atomic_read(&afs_outstanding_skbs));
		if (atomic_dec_return(&afs_outstanding_skbs) == -1)
			BUG();
		rxrpc_kernel_free_skb(skb);
	}
}

/*
 * free a call
 */
static void afs_free_call(struct afs_call *call)
{
	_debug("DONE %p{%s} [%d]",
	       call, call->type->name, atomic_read(&afs_outstanding_calls));
	if (atomic_dec_return(&afs_outstanding_calls) == -1)
		BUG();

	ASSERTCMP(call->rxcall, ==, NULL);
	ASSERT(!work_pending(&call->async_work));
	ASSERT(skb_queue_empty(&call->rx_queue));
	ASSERT(call->type->name != NULL);

	kfree(call->request);
	kfree(call);
}

/*
 * allocate a call with flat request and reply buffers
 */
struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type,
				     size_t request_size, size_t reply_size)
{
	struct afs_call *call;

	call = kzalloc(sizeof(*call), GFP_NOFS);
	if (!call)
		goto nomem_call;

	_debug("CALL %p{%s} [%d]",
	       call, type->name, atomic_read(&afs_outstanding_calls));
	atomic_inc(&afs_outstanding_calls);

	call->type = type;
	call->request_size = request_size;
	call->reply_max = reply_size;

	if (request_size) {
		call->request = kmalloc(request_size, GFP_NOFS);
		if (!call->request)
			goto nomem_free;
	}

	if (reply_size) {
		call->buffer = kmalloc(reply_size, GFP_NOFS);
		if (!call->buffer)
			goto nomem_free;
	}

	init_waitqueue_head(&call->waitq);
	skb_queue_head_init(&call->rx_queue);
	return call;

nomem_free:
	afs_free_call(call);
nomem_call:
	return NULL;
}

/*
 * clean up a call with flat buffer
 */
void afs_flat_call_destructor(struct afs_call *call)
{
	_enter("");

	kfree(call->request);
	call->request = NULL;
	kfree(call->buffer);
	call->buffer = NULL;
}

/*
 * attach the data from a bunch of pages on an inode to a call
 */
int afs_send_pages(struct afs_call *call, struct msghdr *msg, struct kvec *iov)
{
	struct page *pages[8];
	unsigned count, n, loop, offset, to;
	pgoff_t first = call->first, last = call->last;
	int ret;

	_enter("");

	offset = call->first_offset;
	call->first_offset = 0;

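	/* the pages are looked up and attached in batches of up to eight */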
	do {
		_debug("attach %lx-%lx", first, last);

		count = last - first + 1;
		if (count > ARRAY_SIZE(pages))
			count = ARRAY_SIZE(pages);
		n = find_get_pages_contig(call->mapping, first, count, pages);
		ASSERTCMP(n, ==, count);

		loop = 0;
		do {
			msg->msg_flags = 0;
			to = PAGE_SIZE;
			if (first + loop >= last)
				to = call->last_to;
			else
				msg->msg_flags = MSG_MORE;
			iov->iov_base = kmap(pages[loop]) + offset;
			iov->iov_len = to - offset;

			_debug("- range %u-%u%s",
			       offset, to, msg->msg_flags ? " [more]" : "");
			msg->msg_iov = (struct iovec *) iov;
			msg->msg_iovlen = 1;

			/* have to change the state *before* sending the last
			 * packet as RxRPC might give us the reply before it
			 * returns from sending the request */
			if (first + loop >= last)
				call->state = AFS_CALL_AWAIT_REPLY;
			ret = rxrpc_kernel_send_data(call->rxcall, msg,
						     to - offset);
			kunmap(pages[loop]);

			/* only the first page is sent from a non-zero offset */
			offset = 0;
			if (ret < 0)
				break;
		} while (++loop < count);
		first += count;

		for (loop = 0; loop < count; loop++)
			put_page(pages[loop]);
		if (ret < 0)
			break;
	} while (first <= last);

	_leave(" = %d", ret);
	return ret;
}

/*
 * initiate a call
 */
int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
		  const struct afs_wait_mode *wait_mode)
{
	struct sockaddr_rxrpc srx;
	struct rxrpc_call *rxcall;
	struct msghdr msg;
	struct kvec iov[1];
	int ret;

	_enter("%x,{%d},", addr->s_addr, ntohs(call->port));

	ASSERT(call->type != NULL);
	ASSERT(call->type->name != NULL);

	_debug("____MAKE %p{%s,%x} [%d]____",
	       call, call->type->name, key_serial(call->key),
	       atomic_read(&afs_outstanding_calls));

	call->wait_mode = wait_mode;
	INIT_WORK(&call->async_work, afs_process_async_call);

	memset(&srx, 0, sizeof(srx));
	srx.srx_family = AF_RXRPC;
	srx.srx_service = call->service_id;
	srx.transport_type = SOCK_DGRAM;
	srx.transport_len = sizeof(srx.transport.sin);
	srx.transport.sin.sin_family = AF_INET;
	srx.transport.sin.sin_port = call->port;
	memcpy(&srx.transport.sin.sin_addr, addr, 4);

	/* create a call */
	rxcall = rxrpc_kernel_begin_call(afs_socket, &srx, call->key,
					 (unsigned long) call, gfp);
	call->key = NULL;
	if (IS_ERR(rxcall)) {
		ret = PTR_ERR(rxcall);
		goto error_kill_call;
	}

	call->rxcall = rxcall;

	/* send the request */
	iov[0].iov_base	= call->request;
	iov[0].iov_len	= call->request_size;

	msg.msg_name		= NULL;
	msg.msg_namelen		= 0;
	msg.msg_iov		= (struct iovec *) iov;
	msg.msg_iovlen		= 1;
	msg.msg_control		= NULL;
	msg.msg_controllen	= 0;
	msg.msg_flags		= (call->send_pages ? MSG_MORE : 0);

	/* have to change the state *before* sending the last packet as RxRPC
	 * might give us the reply before it returns from sending the
	 * request */
	if (!call->send_pages)
		call->state = AFS_CALL_AWAIT_REPLY;
	ret = rxrpc_kernel_send_data(rxcall, &msg, call->request_size);
	if (ret < 0)
		goto error_do_abort;

	if (call->send_pages) {
		ret = afs_send_pages(call, &msg, iov);
		if (ret < 0)
			goto error_do_abort;
	}

	/* at this point, an async call may no longer exist as it may have
	 * already completed */
	return wait_mode->wait(call);

error_do_abort:
	rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT);
	rxrpc_kernel_end_call(rxcall);
	call->rxcall = NULL;
error_kill_call:
	call->type->destructor(call);
	afs_free_call(call);
	_leave(" = %d", ret);
	return ret;
}

/*
 * handle intercepted messages arriving on the socket's Rx queue
 * - called with the socket receive queue lock held to ensure message ordering
 * - called with softirqs disabled
 */
static void afs_rx_interceptor(struct sock *sk, unsigned long user_call_ID,
			       struct sk_buff *skb)
{
	struct afs_call *call = (struct afs_call *) user_call_ID;

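	/* the user call ID is the address of our afs_call record for calls we
	 * initiated or accepted, and zero for incoming calls that haven't
	 * been accepted yet */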
	_enter("%p,,%u", call, skb->mark);

	_debug("ICPT %p{%u} [%d]",
	       skb, skb->mark, atomic_read(&afs_outstanding_skbs));

	ASSERTCMP(sk, ==, afs_socket->sk);
	atomic_inc(&afs_outstanding_skbs);

	if (!call) {
		/* it's an incoming call for our callback service */
		skb_queue_tail(&afs_incoming_calls, skb);
		schedule_work(&afs_collect_incoming_call_work);
	} else {
		/* route the messages directly to the appropriate call */
		skb_queue_tail(&call->rx_queue, skb);
		call->wait_mode->rx_wakeup(call);
	}

	_leave("");
}

/*
 * deliver messages to a call
 */
static void afs_deliver_to_call(struct afs_call *call)
{
	struct sk_buff *skb;
	bool last;
	u32 abort_code;
	int ret;

	_enter("");

	while ((call->state == AFS_CALL_AWAIT_REPLY ||
		call->state == AFS_CALL_AWAIT_OP_ID ||
		call->state == AFS_CALL_AWAIT_REQUEST ||
		call->state == AFS_CALL_AWAIT_ACK) &&
	       (skb = skb_dequeue(&call->rx_queue))) {
		switch (skb->mark) {
		case RXRPC_SKB_MARK_DATA:
			_debug("Rcv DATA");
			last = rxrpc_kernel_is_data_last(skb);
			ret = call->type->deliver(call, skb, last);
			switch (ret) {
			case 0:
				if (last &&
				    call->state == AFS_CALL_AWAIT_REPLY)
					call->state = AFS_CALL_COMPLETE;
				break;
			case -ENOTCONN:
				abort_code = RX_CALL_DEAD;
				goto do_abort;
			case -ENOTSUPP:
				abort_code = RX_INVALID_OPERATION;
				goto do_abort;
			default:
				abort_code = RXGEN_CC_UNMARSHAL;
				if (call->state != AFS_CALL_AWAIT_REPLY)
					abort_code = RXGEN_SS_UNMARSHAL;
			do_abort:
				rxrpc_kernel_abort_call(call->rxcall,
							abort_code);
				call->error = ret;
				call->state = AFS_CALL_ERROR;
				break;
			}
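			/* DATA packets are disposed of by afs_data_delivered()
			 * rather than by the afs_free_skb() at the bottom of
			 * the loop */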
			afs_data_delivered(skb);
			skb = NULL;
			continue;
		case RXRPC_SKB_MARK_FINAL_ACK:
			_debug("Rcv ACK");
			call->state = AFS_CALL_COMPLETE;
			break;
		case RXRPC_SKB_MARK_BUSY:
			_debug("Rcv BUSY");
			call->error = -EBUSY;
			call->state = AFS_CALL_BUSY;
			break;
		case RXRPC_SKB_MARK_REMOTE_ABORT:
			abort_code = rxrpc_kernel_get_abort_code(skb);
			call->error = call->type->abort_to_error(abort_code);
			call->state = AFS_CALL_ABORTED;
			_debug("Rcv ABORT %u -> %d", abort_code, call->error);
			break;
		case RXRPC_SKB_MARK_NET_ERROR:
			call->error = -rxrpc_kernel_get_error_number(skb);
			call->state = AFS_CALL_ERROR;
			_debug("Rcv NET ERROR %d", call->error);
			break;
		case RXRPC_SKB_MARK_LOCAL_ERROR:
			call->error = -rxrpc_kernel_get_error_number(skb);
			call->state = AFS_CALL_ERROR;
			_debug("Rcv LOCAL ERROR %d", call->error);
			break;
		default:
			BUG();
			break;
		}

		afs_free_skb(skb);
	}

	/* make sure the queue is empty if the call is done with (we might have
	 * aborted the call early because of an unmarshalling error) */
	if (call->state >= AFS_CALL_COMPLETE) {
		while ((skb = skb_dequeue(&call->rx_queue)))
			afs_free_skb(skb);
		if (call->incoming) {
			rxrpc_kernel_end_call(call->rxcall);
			call->rxcall = NULL;
			call->type->destructor(call);
			afs_free_call(call);
		}
	}

	_leave("");
}

/*
 * wait synchronously for a call to complete
 */
static int afs_wait_for_call_to_complete(struct afs_call *call)
{
	struct sk_buff *skb;
	int ret;

	DECLARE_WAITQUEUE(myself, current);

	_enter("");

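	/* for a synchronous call, the waiting task itself drains the call's
	 * Rx queue and does the delivery work */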
	add_wait_queue(&call->waitq, &myself);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* deliver any messages that are in the queue */
		if (!skb_queue_empty(&call->rx_queue)) {
			__set_current_state(TASK_RUNNING);
			afs_deliver_to_call(call);
			continue;
		}

		ret = call->error;
		if (call->state >= AFS_CALL_COMPLETE)
			break;
		ret = -EINTR;
		if (signal_pending(current))
			break;
		schedule();
	}

	remove_wait_queue(&call->waitq, &myself);
	__set_current_state(TASK_RUNNING);

	/* kill the call */
	if (call->state < AFS_CALL_COMPLETE) {
		_debug("call incomplete");
		rxrpc_kernel_abort_call(call->rxcall, RX_CALL_DEAD);
		while ((skb = skb_dequeue(&call->rx_queue)))
			afs_free_skb(skb);
	}

	_debug("call complete");
	rxrpc_kernel_end_call(call->rxcall);
	call->rxcall = NULL;
	call->type->destructor(call);
	afs_free_call(call);
	_leave(" = %d", ret);
	return ret;
}

/*
 * wake up a waiting call
 */
static void afs_wake_up_call_waiter(struct afs_call *call)
{
	wake_up(&call->waitq);
}

/*
 * wake up an asynchronous call
 */
static void afs_wake_up_async_call(struct afs_call *call)
{
	_enter("");
	queue_work(afs_async_calls, &call->async_work);
}

/*
 * put a call into asynchronous mode
 * - mustn't touch the call descriptor as the call may have completed by the
 *   time we get here
 */
static int afs_dont_wait_for_call_to_complete(struct afs_call *call)
{
	_enter("");
	return -EINPROGRESS;
}

/*
 * delete an asynchronous call
 */
static void afs_delete_async_call(struct work_struct *work)
{
	struct afs_call *call =
		container_of(work, struct afs_call, async_work);

	_enter("");

	afs_free_call(call);

	_leave("");
}

/*
 * perform processing on an asynchronous call
 * - on a multiple-thread workqueue this work item may try to run on several
 *   CPUs at the same time
 */
static void afs_process_async_call(struct work_struct *work)
{
	struct afs_call *call =
		container_of(work, struct afs_call, async_work);

	_enter("");

	if (!skb_queue_empty(&call->rx_queue))
		afs_deliver_to_call(call);

	if (call->state >= AFS_CALL_COMPLETE && call->wait_mode) {
		if (call->wait_mode->async_complete)
			call->wait_mode->async_complete(call->reply,
							call->error);
		call->reply = NULL;

		/* kill the call */
		rxrpc_kernel_end_call(call->rxcall);
		call->rxcall = NULL;
		if (call->type->destructor)
			call->type->destructor(call);

		/* we can't just delete the call because the work item may be
		 * queued */
		PREPARE_WORK(&call->async_work, afs_delete_async_call);
		queue_work(afs_async_calls, &call->async_work);
	}

	_leave("");
}

/*
 * empty a socket buffer into a flat reply buffer
 */
void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb)
{
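	/* call->reply_size is the amount of the flat reply received so far;
	 * each DATA packet is appended at that offset */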
	size_t len = skb->len;

	if (skb_copy_bits(skb, 0, call->buffer + call->reply_size, len) < 0)
		BUG();
	call->reply_size += len;
}

/*
 * accept the backlog of incoming calls
 */
static void afs_collect_incoming_call(struct work_struct *work)
{
	struct rxrpc_call *rxcall;
	struct afs_call *call = NULL;
	struct sk_buff *skb;

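	/* each acceptance needs an afs_call record ready to hand to rxrpc; if
	 * the accept fails, the record is kept for the next attempt and freed
	 * at the end if still unused */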
	while ((skb = skb_dequeue(&afs_incoming_calls))) {
		_debug("new call");

		/* don't need the notification */
		afs_free_skb(skb);

		if (!call) {
			call = kzalloc(sizeof(struct afs_call), GFP_KERNEL);
			if (!call) {
				rxrpc_kernel_reject_call(afs_socket);
				return;
			}

			INIT_WORK(&call->async_work, afs_process_async_call);
			call->wait_mode = &afs_async_incoming_call;
			call->type = &afs_RXCMxxxx;
			init_waitqueue_head(&call->waitq);
			skb_queue_head_init(&call->rx_queue);
			call->state = AFS_CALL_AWAIT_OP_ID;

			_debug("CALL %p{%s} [%d]",
			       call, call->type->name,
			       atomic_read(&afs_outstanding_calls));
			atomic_inc(&afs_outstanding_calls);
		}

		rxcall = rxrpc_kernel_accept_call(afs_socket,
						  (unsigned long) call);
		if (!IS_ERR(rxcall)) {
			call->rxcall = rxcall;
			call = NULL;
		}
	}

	if (call)
		afs_free_call(call);
}

/*
 * grab the operation ID from an incoming cache manager call
 */
static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb,
				bool last)
{
	size_t len = skb->len;
	void *oibuf = (void *) &call->operation_ID;

	_enter("{%u},{%zu},%d", call->offset, len, last);

	ASSERTCMP(call->offset, <, 4);

	/* the operation ID forms the first four bytes of the request data */
	len = min_t(size_t, len, 4 - call->offset);
	if (skb_copy_bits(skb, 0, oibuf + call->offset, len) < 0)
		BUG();
	if (!pskb_pull(skb, len))
		BUG();
	call->offset += len;

	if (call->offset < 4) {
		if (last) {
			_leave(" = -EBADMSG [op ID short]");
			return -EBADMSG;
		}
		_leave(" = 0 [incomplete]");
		return 0;
	}

	call->state = AFS_CALL_AWAIT_REQUEST;

	/* ask the cache manager to route the call (it'll change the call type
	 * if successful) */
	if (!afs_cm_incoming_call(call))
		return -ENOTSUPP;

	/* pass responsibility for the remainder of this message off to the
	 * cache manager op */
	return call->type->deliver(call, skb, last);
}

/*
 * send an empty reply
 */
void afs_send_empty_reply(struct afs_call *call)
{
	struct msghdr msg;
	struct iovec iov[1];

	_enter("");

	iov[0].iov_base		= NULL;
	iov[0].iov_len		= 0;
	msg.msg_name		= NULL;
	msg.msg_namelen		= 0;
	msg.msg_iov		= iov;
	msg.msg_iovlen		= 0;
	msg.msg_control		= NULL;
	msg.msg_controllen	= 0;
	msg.msg_flags		= 0;

	call->state = AFS_CALL_AWAIT_ACK;
	switch (rxrpc_kernel_send_data(call->rxcall, &msg, 0)) {
	case 0:
		_leave(" [replied]");
		return;

	case -ENOMEM:
		_debug("oom");
		rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
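		/* fall through - the call must still be ended and freed */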
	default:
		rxrpc_kernel_end_call(call->rxcall);
		call->rxcall = NULL;
		call->type->destructor(call);
		afs_free_call(call);
		_leave(" [error]");
		return;
	}
}

/*
 * send a simple reply
 */
void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
{
	struct msghdr msg;
	struct iovec iov[1];

	_enter("");

	iov[0].iov_base		= (void *) buf;
	iov[0].iov_len		= len;
	msg.msg_name		= NULL;
	msg.msg_namelen		= 0;
	msg.msg_iov		= iov;
	msg.msg_iovlen		= 1;
	msg.msg_control		= NULL;
	msg.msg_controllen	= 0;
	msg.msg_flags		= 0;

	call->state = AFS_CALL_AWAIT_ACK;
	switch (rxrpc_kernel_send_data(call->rxcall, &msg, len)) {
	case 0:
		_leave(" [replied]");
		return;

	case -ENOMEM:
		_debug("oom");
		rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
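		/* fall through - the call must still be ended and freed */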
	default:
		rxrpc_kernel_end_call(call->rxcall);
		call->rxcall = NULL;
		call->type->destructor(call);
		afs_free_call(call);
		_leave(" [error]");
		return;
	}
}

/*
 * extract a piece of data from the received data socket buffers
 */
int afs_extract_data(struct afs_call *call, struct sk_buff *skb,
		     bool last, void *buf, size_t count)
{
	size_t len = skb->len;

	_enter("{%u},{%zu},%d,,%zu", call->offset, len, last, count);

	ASSERTCMP(call->offset, <, count);

	len = min_t(size_t, len, count - call->offset);
	if (skb_copy_bits(skb, 0, buf + call->offset, len) < 0 ||
	    !pskb_pull(skb, len))
		BUG();
	call->offset += len;

	if (call->offset < count) {
		if (last) {
			_leave(" = -EBADMSG [%d < %zu]", call->offset, count);
			return -EBADMSG;
		}
		_leave(" = -EAGAIN");
		return -EAGAIN;
	}
	return 0;
}