// SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2001 Clemson University and The University of Chicago
 * (C) 2011 Omnibond Systems
 *
 * Changes by Acxiom Corporation to implement generic service_operation()
 * function, Copyright Acxiom Corporation, 2005.
 *
 * See COPYING in top-level directory.
 */

/*
 *  In-kernel waitqueue operations.
 */

#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"

static int wait_for_matching_downcall(struct orangefs_kernel_op_s *op,
		long timeout,
		int flags)
			__acquires(op->lock);
static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op)
	__releases(op->lock);

/*
 * Walk the list of operations in the request queue and mark each one
 * as purged.
 * NOTE: This is called from device close, after client-core has
 * guaranteed that no new operations can appear on the list, since
 * client-core is about to exit.
 */
void purge_waiting_ops(void)
{
	struct orangefs_kernel_op_s *op, *tmp;

	spin_lock(&orangefs_request_list_lock);
	list_for_each_entry_safe(op, tmp, &orangefs_request_list, list) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "pvfs2-client-core: purging op tag %llu %s\n",
			     llu(op->tag),
			     get_opname_string(op));
		set_op_state_purged(op);
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: op:%s: op_state:%d: process:%s:\n",
			     __func__,
			     get_opname_string(op),
			     op->op_state,
			     current->comm);
	}
	spin_unlock(&orangefs_request_list_lock);
}

/*
 * Submits an ORANGEFS operation and waits for it to complete.
 *
 * Note that op->downcall.status will contain the status of the operation (in
 * errno format), whether provided by pvfs2-client or as a result of a failure
 * to service the operation.  If the caller wishes to distinguish, then
 * op->op_state can be checked to see whether it was serviced or not.
 *
 * Returns the contents of op->downcall.status for convenience.
 */
int service_operation(struct orangefs_kernel_op_s *op,
		      const char *op_name,
		      int flags)
{
	long timeout = MAX_SCHEDULE_TIMEOUT;
	int ret = 0;

	DEFINE_WAIT(wait_entry);

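	/* stamp the upcall with the identity of the submitting process */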
	op->upcall.tgid = current->tgid;
	op->upcall.pid = current->pid;

retry_servicing:
	op->downcall.status = 0;
	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "%s: %s op:%p: process:%s: pid:%d:\n",
		     __func__,
		     op_name,
		     op,
		     current->comm,
		     current->pid);

	/*
	 * If ORANGEFS_OP_NO_MUTEX was set in flags, we need to avoid
	 * acquiring the request_mutex because we're servicing a
	 * high priority remount operation and the request_mutex is
	 * already taken.
	 */
	if (!(flags & ORANGEFS_OP_NO_MUTEX)) {
		if (flags & ORANGEFS_OP_INTERRUPTIBLE)
			ret = mutex_lock_interruptible(&orangefs_request_mutex);
		else
			ret = mutex_lock_killable(&orangefs_request_mutex);
		/*
		 * check whether we were interrupted while waiting for
		 * the mutex
		 */
		if (ret < 0) {
			op->downcall.status = ret;
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "%s: service_operation interrupted.\n",
				     __func__);
			return ret;
		}
	}

	/* queue up the operation */
	spin_lock(&orangefs_request_list_lock);
	spin_lock(&op->lock);
	set_op_state_waiting(op);
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "%s: op:%s: op_state:%d: process:%s:\n",
		     __func__,
		     get_opname_string(op),
		     op->op_state,
		     current->comm);
	/* add high priority remount op to the front of the line. */
	if (flags & ORANGEFS_OP_PRIORITY)
		list_add(&op->list, &orangefs_request_list);
	else
		list_add_tail(&op->list, &orangefs_request_list);
	spin_unlock(&op->lock);
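	/* let the client-core reader know there is a new request to pick up */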
	wake_up_interruptible(&orangefs_request_list_waitq);
	if (!__is_daemon_in_service()) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:client core is NOT in service.\n",
			     __func__);
		/*
		 * Don't wait for the userspace component to return if
		 * the filesystem is being umounted anyway.
		 */
		if (op->upcall.type == ORANGEFS_VFS_OP_FS_UMOUNT)
			timeout = 0;
		else
			timeout = op_timeout_secs * HZ;
	}
	spin_unlock(&orangefs_request_list_lock);

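	/* now that the op is queued, drop the request mutex before waiting */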
	if (!(flags & ORANGEFS_OP_NO_MUTEX))
		mutex_unlock(&orangefs_request_mutex);

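	/*
	 * Sleep until the op is serviced, purged, interrupted or timed
	 * out.  wait_for_matching_downcall() returns with op->lock held.
	 */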
	ret = wait_for_matching_downcall(op, timeout, flags);
	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "%s: wait_for_matching_downcall returned %d for %p\n",
		     __func__,
		     ret,
		     op);

	/* got matching downcall; make sure status is in errno format */
	if (!ret) {
		spin_unlock(&op->lock);
		op->downcall.status =
		    orangefs_normalize_to_errno(op->downcall.status);
		ret = op->downcall.status;
		goto out;
	}

	/* failed to get matching downcall */
	if (ret == -ETIMEDOUT) {
		gossip_err("%s: %s -- wait timed out; aborting attempt.\n",
			   __func__,
			   op_name);
	}

	/*
	 * remove a waiting op from the request list or
	 * remove an in-progress op from the in-progress list.
	 */
	orangefs_clean_up_interrupted_operation(op);

	op->downcall.status = ret;
	/* retry if operation has not been serviced and if requested */
	if (ret == -EAGAIN) {
		op->attempts++;
		timeout = op_timeout_secs * HZ;
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "orangefs: tag %llu (%s) -- operation to be retried (attempt %d)\n",
			     llu(op->tag),
			     op_name,
			     op->attempts);

		/*
		 * io ops (ops that use the shared memory buffer) have
		 * to be returned to their caller for a retry. Other ops
		 * can just be recycled here.
		 */
		if (!op->uses_shared_memory)
			goto retry_servicing;
	}

out:
	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "%s: %s returning: %d for %p.\n",
		     __func__,
		     op_name,
		     ret,
		     op);
	return ret;
}

/* This can get called on an I/O op if it had a bad service_operation. */
bool orangefs_cancel_op_in_progress(struct orangefs_kernel_op_s *op)
{
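	/* save the tag of the op being cancelled before the upcall is wiped */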
	u64 tag = op->tag;

	if (!op_state_in_progress(op))
		return false;

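	/*
	 * Reuse this op as a cancel upcall: remember the shared memory
	 * buffer slot that will need to be freed, then reset both the
	 * upcall and the downcall.
	 */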
	op->slot_to_free = op->upcall.req.io.buf_index;
	memset(&op->upcall, 0, sizeof(op->upcall));
	memset(&op->downcall, 0, sizeof(op->downcall));
	op->upcall.type = ORANGEFS_VFS_OP_CANCEL;
	op->upcall.req.cancel.op_tag = tag;
	op->downcall.type = ORANGEFS_VFS_OP_INVALID;
	op->downcall.status = -1;
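	/* the cancel is a new operation, so it gets a tag of its own */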
	orangefs_new_tag(op);

	spin_lock(&orangefs_request_list_lock);
	/* orangefs_request_list_lock is enough of a barrier here */
	if (!__is_daemon_in_service()) {
		spin_unlock(&orangefs_request_list_lock);
		return false;
	}
	spin_lock(&op->lock);
	set_op_state_waiting(op);
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "%s: op:%s: op_state:%d: process:%s:\n",
		     __func__,
		     get_opname_string(op),
		     op->op_state,
		     current->comm);
	list_add(&op->list, &orangefs_request_list);
	spin_unlock(&op->lock);
	spin_unlock(&orangefs_request_list_lock);

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "Attempting ORANGEFS operation cancellation of tag %llu\n",
		     llu(tag));
	return true;
}

/*
 * Change an op to the "given up" state and remove it from its list.
 */
static void
	orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op)
		__releases(op->lock)
{
	/*
	 * handle interrupted cases depending on what state we were in when
	 * the interruption is detected.
	 *
	 * Called with op->lock held.
	 */

	/*
	 * List manipulation code elsewhere will ignore ops that
	 * have been given up upon.
	 */
	op->op_state |= OP_VFS_STATE_GIVEN_UP;

	if (list_empty(&op->list)) {
		/*
		 * The op is on no list: it was caught while being copied
		 * to/from the daemon, so wait for that transfer to finish.
		 */
		BUG_ON(op_state_serviced(op));
		spin_unlock(&op->lock);
		wait_for_completion(&op->waitq);
	} else if (op_state_waiting(op)) {
		/*
		 * upcall hasn't been read; remove op from upcall request
		 * list.
		 */
		spin_unlock(&op->lock);
		spin_lock(&orangefs_request_list_lock);
		list_del_init(&op->list);
		spin_unlock(&orangefs_request_list_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p from request_list\n",
			     op);
	} else if (op_state_in_progress(op)) {
		/* op must be removed from the in progress htable */
		spin_unlock(&op->lock);
		spin_lock(&orangefs_htable_ops_in_progress_lock);
		list_del_init(&op->list);
		spin_unlock(&orangefs_htable_ops_in_progress_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p from htable_ops_in_progress\n",
			     op);
	} else {
		spin_unlock(&op->lock);
		gossip_err("interrupted operation is in a weird state 0x%x\n",
			   op->op_state);
	}
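	/* re-arm the completion so the op can be waited on again if retried */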
	reinit_completion(&op->waitq);
}

/*
 * Sleeps on a waitqueue waiting for the matching downcall.
 * If client-core finishes servicing the op, we are good to go;
 * otherwise, if client-core exits, we get woken up here and retry
 * with a timeout.
 *
 * When this call returns to the caller, the specified op will no
 * longer be in either the in_progress hash table or on the request list.
 *
 * Returns 0 on success and -errno on failure.
 * Errors are:
 * EAGAIN if the caller should requeue the op and try again.
 * EINTR/EIO/ETIMEDOUT if we are done trying to service this operation,
 * either because client-core seems to be exiting too often or because
 * we were interrupted.
 *
 * Returns with op->lock taken.
 */
static int wait_for_matching_downcall(struct orangefs_kernel_op_s *op,
		long timeout,
		int flags)
			__acquires(op->lock)
{
	long n;
	int writeback = flags & ORANGEFS_OP_WRITEBACK,
	    interruptible = flags & ORANGEFS_OP_INTERRUPTIBLE;

	/*
	 * There's a "schedule_timeout" inside of these wait
	 * primitives, during which the op is out of the hands of the
	 * user process that needs something done and is being
	 * manipulated by the client-core process.
	 */
	if (writeback)
		n = wait_for_completion_io_timeout(&op->waitq, timeout);
	else if (!writeback && interruptible)
		n = wait_for_completion_interruptible_timeout(&op->waitq,
							      timeout);
	else /* !writeback && !interruptible but compiler complains */
		n = wait_for_completion_killable_timeout(&op->waitq, timeout);

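	/* return to the caller with op->lock held, per __acquires() above */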
	spin_lock(&op->lock);

	if (op_state_serviced(op))
		return 0;

	if (unlikely(n < 0)) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s: operation interrupted, tag %llu, %p\n",
			     __func__,
			     llu(op->tag),
			     op);
		return -EINTR;
	}
	if (op_state_purged(op)) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s: operation purged, tag %llu, %p, %d\n",
			     __func__,
			     llu(op->tag),
			     op,
			     op->attempts);
		return (op->attempts < ORANGEFS_PURGE_RETRY_COUNT) ?
			 -EAGAIN :
			 -EIO;
	}
	/* must have timed out, then... */
	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "%s: operation timed out, tag %llu, %p, %d\n",
		     __func__,
		     llu(op->tag),
		     op,
		     op->attempts);
	return -ETIMEDOUT;
}