// SPDX-License-Identifier: GPL-2.0-or-later
/* netfs cookie management
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * See Documentation/filesystems/caching/netfs-api.rst for more information on
 * the netfs API.
 */

#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
#include <linux/slab.h>
#include "internal.h"

struct kmem_cache *fscache_cookie_jar;

static void fscache_cookie_lru_timed_out(struct timer_list *timer);
static void fscache_cookie_lru_worker(struct work_struct *work);
static void fscache_cookie_worker(struct work_struct *work);
static void fscache_unhash_cookie(struct fscache_cookie *cookie);
static void fscache_perform_invalidation(struct fscache_cookie *cookie);

#define fscache_cookie_hash_shift 15
static struct hlist_bl_head fscache_cookie_hash[1 << fscache_cookie_hash_shift];
static LIST_HEAD(fscache_cookies);
static DEFINE_RWLOCK(fscache_cookies_lock);
static LIST_HEAD(fscache_cookie_lru);
static DEFINE_SPINLOCK(fscache_cookie_lru_lock);
DEFINE_TIMER(fscache_cookie_lru_timer, fscache_cookie_lru_timed_out);
static DECLARE_WORK(fscache_cookie_lru_work, fscache_cookie_lru_worker);
static const char fscache_cookie_states[FSCACHE_COOKIE_STATE__NR] = "-LCAIFUWRD";
static unsigned int fscache_lru_cookie_timeout = 10 * HZ;

void fscache_print_cookie(struct fscache_cookie *cookie, char prefix)
{
	const u8 *k;

	pr_err("%c-cookie c=%08x [fl=%lx na=%u nA=%u s=%c]\n",
	       prefix,
	       cookie->debug_id,
	       cookie->flags,
	       atomic_read(&cookie->n_active),
	       atomic_read(&cookie->n_accesses),
	       fscache_cookie_states[cookie->state]);
	pr_err("%c-cookie V=%08x [%s]\n",
	       prefix,
	       cookie->volume->debug_id,
	       cookie->volume->key);

	k = (cookie->key_len <= sizeof(cookie->inline_key)) ?
		cookie->inline_key : cookie->key;
	pr_err("%c-key=[%u] '%*phN'\n", prefix, cookie->key_len, cookie->key_len, k);
}

static void fscache_free_cookie(struct fscache_cookie *cookie)
{
	if (WARN_ON_ONCE(!list_empty(&cookie->commit_link))) {
		spin_lock(&fscache_cookie_lru_lock);
		list_del_init(&cookie->commit_link);
		spin_unlock(&fscache_cookie_lru_lock);
		fscache_stat_d(&fscache_n_cookies_lru);
		fscache_stat(&fscache_n_cookies_lru_removed);
	}

	if (WARN_ON_ONCE(test_bit(FSCACHE_COOKIE_IS_HASHED, &cookie->flags))) {
		fscache_print_cookie(cookie, 'F');
		return;
	}

	write_lock(&fscache_cookies_lock);
	list_del(&cookie->proc_link);
	write_unlock(&fscache_cookies_lock);
	if (cookie->aux_len > sizeof(cookie->inline_aux))
		kfree(cookie->aux);
	if (cookie->key_len > sizeof(cookie->inline_key))
		kfree(cookie->key);
	fscache_stat_d(&fscache_n_cookies);
	kmem_cache_free(fscache_cookie_jar, cookie);
}

static void __fscache_queue_cookie(struct fscache_cookie *cookie)
{
	if (!queue_work(fscache_wq, &cookie->work))
		fscache_put_cookie(cookie, fscache_cookie_put_over_queued);
}

static void fscache_queue_cookie(struct fscache_cookie *cookie,
				 enum fscache_cookie_trace where)
{
	fscache_get_cookie(cookie, where);
	__fscache_queue_cookie(cookie);
}

/*
 * Initialise the access gate on a cookie by setting a flag to prevent the
 * state machine from being queued when the access counter transitions to 0.
 * We're only interested in this when we withdraw caching services from the
 * cookie.
 */
static void fscache_init_access_gate(struct fscache_cookie *cookie)
{
	int n_accesses;

	n_accesses = atomic_read(&cookie->n_accesses);
	trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
			     n_accesses, fscache_access_cache_pin);
	set_bit(FSCACHE_COOKIE_NO_ACCESS_WAKE, &cookie->flags);
}

/**
 * fscache_end_cookie_access - Unpin a cache at the end of an access.
 * @cookie: A data file cookie
 * @why: An indication of the circumstances of the access for tracing
 *
 * Unpin a cache cookie after we've accessed it and bring a deferred
 * relinquishment or withdrawal state into effect.
 *
 * The @why indicator is provided for tracing purposes.
 */
void fscache_end_cookie_access(struct fscache_cookie *cookie,
			       enum fscache_access_trace why)
{
	int n_accesses;

	smp_mb__before_atomic();
	n_accesses = atomic_dec_return(&cookie->n_accesses);
	trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
			     n_accesses, why);
	if (n_accesses == 0 &&
	    !test_bit(FSCACHE_COOKIE_NO_ACCESS_WAKE, &cookie->flags))
		fscache_queue_cookie(cookie, fscache_cookie_get_end_access);
}
EXPORT_SYMBOL(fscache_end_cookie_access);

/*
 * Pin the cache behind a cookie so that we can access it.
 */
static void __fscache_begin_cookie_access(struct fscache_cookie *cookie,
					  enum fscache_access_trace why)
{
	int n_accesses;

	n_accesses = atomic_inc_return(&cookie->n_accesses);
	smp_mb__after_atomic(); /* (Future) read state after is-caching.
				 * Reread n_accesses after is-caching
				 */
	trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
			     n_accesses, why);
}
/**
 * fscache_begin_cookie_access - Pin a cache so data can be accessed
 * @cookie: A data file cookie
 * @why: An indication of the circumstances of the access for tracing
 *
 * Attempt to pin the cache to prevent it from going away whilst we're
 * accessing data, returning true if successful.  This works as follows:
 *
 *  (1) If the cookie is not being cached (ie. FSCACHE_COOKIE_IS_CACHING is not
 *      set), we return false to indicate access was not permitted.
 *
 *  (2) If the cookie is being cached, we increment its n_accesses count and
 *      then recheck the IS_CACHING flag, ending the access if it got cleared.
 *
 *  (3) When we end the access, we decrement the cookie's n_accesses and wake
 *      up any waiters if it reaches 0.
 *
 *  (4) Whilst the cookie is actively being cached, its n_accesses is kept
 *      artificially incremented to prevent wakeups from happening.
 *
 *  (5) When the cache is taken offline or if the cookie is culled, the flag is
 *      cleared to prevent new accesses, the cookie's n_accesses is decremented
 *      and we wait for it to become 0.
 *
 * The @why indicator is merely provided for tracing purposes.
 */
bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
				 enum fscache_access_trace why)
{
	if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags))
		return false;
	__fscache_begin_cookie_access(cookie, why);
	if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags) ||
	    !fscache_cache_is_live(cookie->volume->cache)) {
		fscache_end_cookie_access(cookie, fscache_access_unlive);
		return false;
	}
	return true;
}
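
/*
 * Illustration only (not part of the original file): a minimal sketch of how
 * an I/O path might bracket its work with the access gate above.  The helper
 * my_read_from_cache() is hypothetical, and the fscache_access_io_* trace
 * labels are assumed to be the ones used by fscache's own I/O paths.
 *
 *	static int my_cached_read(struct fscache_cookie *cookie, ...)
 *	{
 *		int ret;
 *
 *		if (!fscache_begin_cookie_access(cookie, fscache_access_io_read))
 *			return -ENOBUFS;	// cache not live; read from the server instead
 *		ret = my_read_from_cache(cookie, ...);
 *		fscache_end_cookie_access(cookie, fscache_access_io_end);
 *		return ret;
 *	}
 */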

static inline void wake_up_cookie_state(struct fscache_cookie *cookie)
{
	/* Use a barrier to ensure that waiters see the state variable
	 * change, as spin_unlock doesn't guarantee a barrier.
	 *
	 * See comments over wake_up_bit() and waitqueue_active().
	 */
	smp_mb();
	wake_up_var(&cookie->state);
}

/*
 * Change the state a cookie is at and wake up anyone waiting for that.  Impose
 * an ordering between the stuff stored in the cookie and the state member.
 * Paired with fscache_cookie_state().
 */
static void __fscache_set_cookie_state(struct fscache_cookie *cookie,
				       enum fscache_cookie_state state)
{
	smp_store_release(&cookie->state, state);
}

static void fscache_set_cookie_state(struct fscache_cookie *cookie,
				     enum fscache_cookie_state state)
{
	spin_lock(&cookie->lock);
	__fscache_set_cookie_state(cookie, state);
	spin_unlock(&cookie->lock);
	wake_up_cookie_state(cookie);
}
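
/*
 * Illustration only (not part of the original file): the waiter side pairs
 * with the setter above by sleeping on the state variable, as the LRU path
 * later in this file does:
 *
 *	wait_var_event(&cookie->state,
 *		       fscache_cookie_state(cookie) !=
 *		       FSCACHE_COOKIE_STATE_LRU_DISCARDING);
 *
 * fscache_cookie_state() reads the state with smp_load_acquire(), pairing
 * with the smp_store_release() in __fscache_set_cookie_state(), and the
 * smp_mb() in wake_up_cookie_state() ensures the wakeup isn't missed.
 */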

/**
 * fscache_cookie_lookup_negative - Note negative lookup
 * @cookie: The cookie that was being looked up
 *
 * Note that some part of the metadata path in the cache doesn't exist and so
 * we can release any waiting readers in the certain knowledge that there's
 * nothing for them to actually read.
 *
 * This function uses no locking and must only be called from the state machine.
 */
void fscache_cookie_lookup_negative(struct fscache_cookie *cookie)
{
	set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
	fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_CREATING);
}
EXPORT_SYMBOL(fscache_cookie_lookup_negative);
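
/*
 * Illustration only (not part of the original file): a cache backend's
 * ->lookup_cookie() implementation might call this when it finds no backing
 * object on disk and goes on to create one.  The helpers below are
 * hypothetical:
 *
 *	if (!my_backing_file_exists(cookie)) {
 *		fscache_cookie_lookup_negative(cookie);
 *		return my_create_backing_file(cookie);
 *	}
 *
 * Readers waiting on the lookup can then be released immediately, since there
 * is nothing cached for them to read.
 */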

/**
 * fscache_resume_after_invalidation - Allow I/O to resume after invalidation
 * @cookie: The cookie that was invalidated
 *
 * Tell fscache that invalidation is sufficiently complete that I/O can be
 * allowed again.
 */
void fscache_resume_after_invalidation(struct fscache_cookie *cookie)
{
	fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_ACTIVE);
}
EXPORT_SYMBOL(fscache_resume_after_invalidation);

/**
 * fscache_caching_failed - Report that a failure stopped caching on a cookie
 * @cookie: The cookie that was affected
 *
 * Tell fscache that caching on a cookie needs to be stopped due to some sort
 * of failure.
 *
 * This function uses no locking and must only be called from the state machine.
 */
void fscache_caching_failed(struct fscache_cookie *cookie)
{
	clear_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags);
	fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_FAILED);
	trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
				fscache_cookie_failed);
}
EXPORT_SYMBOL(fscache_caching_failed);
/*
 * Set the index key in a cookie.  The cookie struct has space for a 16-byte
 * key plus length and hash, but if that's not big enough, the key is instead
 * copied into an allocated buffer, padded up to a multiple of 4 bytes, and
 * the cookie keeps a pointer to it.
 */
static int fscache_set_key(struct fscache_cookie *cookie,
			   const void *index_key, size_t index_key_len)
{
	void *buf;
	size_t buf_size;

	buf_size = round_up(index_key_len, sizeof(__le32));

	if (index_key_len > sizeof(cookie->inline_key)) {
		buf = kzalloc(buf_size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		cookie->key = buf;
	} else {
		buf = cookie->inline_key;
	}

	memcpy(buf, index_key, index_key_len);
	cookie->key_hash = fscache_hash(cookie->volume->key_hash,
					buf, buf_size);
	return 0;
}
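
/*
 * Illustration only (not part of the original file): a worked example of the
 * sizing above.  For an 18-byte index key, buf_size = round_up(18, 4) = 20;
 * since 18 exceeds the 16-byte inline_key space, a 20-byte zeroed buffer is
 * allocated, the key is copied into its first 18 bytes and the two zeroed pad
 * bytes are included in the hash, so the hash is stable for a given key.
 */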

static bool fscache_cookie_same(const struct fscache_cookie *a,
				const struct fscache_cookie *b)
{
	const void *ka, *kb;

	if (a->key_hash	!= b->key_hash ||
	    a->volume	!= b->volume ||
	    a->key_len	!= b->key_len)
		return false;

	if (a->key_len <= sizeof(a->inline_key)) {
		ka = &a->inline_key;
		kb = &b->inline_key;
	} else {
		ka = a->key;
		kb = b->key;
	}
	return memcmp(ka, kb, a->key_len) == 0;
}

static atomic_t fscache_cookie_debug_id = ATOMIC_INIT(1);

/*
 * Allocate a cookie.
 */
static struct fscache_cookie *fscache_alloc_cookie(
	struct fscache_volume *volume,
	u8 advice,
	const void *index_key, size_t index_key_len,
	const void *aux_data, size_t aux_data_len,
	loff_t object_size)
{
	struct fscache_cookie *cookie;

	/* allocate and initialise a cookie */
	cookie = kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL);
	if (!cookie)
		return NULL;
	fscache_stat(&fscache_n_cookies);

	cookie->volume		= volume;
	cookie->advice		= advice;
	cookie->key_len		= index_key_len;
	cookie->aux_len		= aux_data_len;
	cookie->object_size	= object_size;
	if (object_size == 0)
		__set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);

	if (fscache_set_key(cookie, index_key, index_key_len) < 0)
		goto nomem;

	if (cookie->aux_len <= sizeof(cookie->inline_aux)) {
		memcpy(cookie->inline_aux, aux_data, cookie->aux_len);
	} else {
		cookie->aux = kmemdup(aux_data, cookie->aux_len, GFP_KERNEL);
		if (!cookie->aux)
			goto nomem;
	}

	refcount_set(&cookie->ref, 1);
	cookie->debug_id = atomic_inc_return(&fscache_cookie_debug_id);
	spin_lock_init(&cookie->lock);
	INIT_LIST_HEAD(&cookie->commit_link);
	INIT_WORK(&cookie->work, fscache_cookie_worker);
	__fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);

	write_lock(&fscache_cookies_lock);
	list_add_tail(&cookie->proc_link, &fscache_cookies);
	write_unlock(&fscache_cookies_lock);
	fscache_see_cookie(cookie, fscache_cookie_new_acquire);
	return cookie;

nomem:
	fscache_free_cookie(cookie);
	return NULL;
}

static inline bool fscache_cookie_is_dropped(struct fscache_cookie *cookie)
{
	return READ_ONCE(cookie->state) == FSCACHE_COOKIE_STATE_DROPPED;
}

static void fscache_wait_on_collision(struct fscache_cookie *candidate,
				      struct fscache_cookie *wait_for)
{
	enum fscache_cookie_state *statep = &wait_for->state;

	wait_var_event_timeout(statep, fscache_cookie_is_dropped(wait_for),
			       20 * HZ);
	if (!fscache_cookie_is_dropped(wait_for)) {
		pr_notice("Potential collision c=%08x old: c=%08x\n",
			  candidate->debug_id, wait_for->debug_id);
		wait_var_event(statep, fscache_cookie_is_dropped(wait_for));
	}
}

/*
 * Attempt to insert the new cookie into the hash.  If there's a collision, we
 * wait for the old cookie to complete if it's being relinquished, and fail
 * the insertion otherwise.
 */
static bool fscache_hash_cookie(struct fscache_cookie *candidate)
{
	struct fscache_cookie *cursor, *wait_for = NULL;
	struct hlist_bl_head *h;
	struct hlist_bl_node *p;
	unsigned int bucket;

	bucket = candidate->key_hash & (ARRAY_SIZE(fscache_cookie_hash) - 1);
	h = &fscache_cookie_hash[bucket];

	hlist_bl_lock(h);
	hlist_bl_for_each_entry(cursor, p, h, hash_link) {
		if (fscache_cookie_same(candidate, cursor)) {
			if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cursor->flags))
				goto collision;
			wait_for = fscache_get_cookie(cursor,
						      fscache_cookie_get_hash_collision);
			break;
		}
	}

	fscache_get_volume(candidate->volume, fscache_volume_get_cookie);
	atomic_inc(&candidate->volume->n_cookies);
	hlist_bl_add_head(&candidate->hash_link, h);
	set_bit(FSCACHE_COOKIE_IS_HASHED, &candidate->flags);
	hlist_bl_unlock(h);

	if (wait_for) {
		fscache_wait_on_collision(candidate, wait_for);
		fscache_put_cookie(wait_for, fscache_cookie_put_hash_collision);
	}
	return true;

collision:
	trace_fscache_cookie(cursor->debug_id, refcount_read(&cursor->ref),
			     fscache_cookie_collision);
	pr_err("Duplicate cookie detected\n");
	fscache_print_cookie(cursor, 'O');
	fscache_print_cookie(candidate, 'N');
	hlist_bl_unlock(h);
	return false;
}

/*
 * Request a cookie to represent a data storage object within a volume.
 *
 * We never let on to the netfs about errors.  We may set a negative cookie
 * pointer, but that's okay.
 */
struct fscache_cookie *__fscache_acquire_cookie(
	struct fscache_volume *volume,
	u8 advice,
	const void *index_key, size_t index_key_len,
	const void *aux_data, size_t aux_data_len,
	loff_t object_size)
{
	struct fscache_cookie *cookie;

	_enter("V=%x", volume->debug_id);

	if (!index_key || !index_key_len || index_key_len > 255 || aux_data_len > 255)
		return NULL;
	if (!aux_data || !aux_data_len) {
		aux_data = NULL;
		aux_data_len = 0;
	}

	fscache_stat(&fscache_n_acquires);

	cookie = fscache_alloc_cookie(volume, advice,
				      index_key, index_key_len,
				      aux_data, aux_data_len,
				      object_size);
	if (!cookie) {
		fscache_stat(&fscache_n_acquires_oom);
		return NULL;
	}

	if (!fscache_hash_cookie(cookie)) {
		fscache_see_cookie(cookie, fscache_cookie_discard);
		fscache_free_cookie(cookie);
		return NULL;
	}

	trace_fscache_acquire(cookie);
	fscache_stat(&fscache_n_acquires_ok);
	_leave(" = c=%08x", cookie->debug_id);
	return cookie;
}
EXPORT_SYMBOL(__fscache_acquire_cookie);
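
/*
 * Illustration only (not part of the original file): a network filesystem
 * normally reaches this through the fscache_acquire_cookie() wrapper when it
 * sets up an inode.  A minimal sketch, assuming a hypothetical myfs_inode
 * carrying a file identifier and a change attribute for coherency:
 *
 *	struct myfs_inode *mi = ...;
 *
 *	mi->fscache = fscache_acquire_cookie(myfs_volume, 0,
 *					     &mi->fid, sizeof(mi->fid),
 *					     &mi->change_attr,
 *					     sizeof(mi->change_attr),
 *					     i_size_read(&mi->vfs_inode));
 *
 * A NULL return simply means the file won't be cached; it is not treated as
 * an error by the netfs.
 */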

/*
 * Prepare a cache object to be written to.
 */
static void fscache_prepare_to_write(struct fscache_cookie *cookie)
{
	cookie->volume->cache->ops->prepare_to_write(cookie);
}

/*
 * Look up a cookie in the cache.
 */
static void fscache_perform_lookup(struct fscache_cookie *cookie)
{
	enum fscache_access_trace trace = fscache_access_lookup_cookie_end_failed;
	bool need_withdraw = false;

	_enter("");

	if (!cookie->volume->cache_priv) {
		fscache_create_volume(cookie->volume, true);
		if (!cookie->volume->cache_priv) {
			fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
			goto out;
		}
	}

	if (!cookie->volume->cache->ops->lookup_cookie(cookie)) {
		if (cookie->state != FSCACHE_COOKIE_STATE_FAILED)
			fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
		need_withdraw = true;
		_leave(" [fail]");
		goto out;
	}

	fscache_see_cookie(cookie, fscache_cookie_see_active);
	spin_lock(&cookie->lock);
	if (test_and_clear_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
		__fscache_set_cookie_state(cookie,
					   FSCACHE_COOKIE_STATE_INVALIDATING);
	else
		__fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_ACTIVE);
	spin_unlock(&cookie->lock);
	wake_up_cookie_state(cookie);
	trace = fscache_access_lookup_cookie_end;

out:
	fscache_end_cookie_access(cookie, trace);
	if (need_withdraw)
		fscache_withdraw_cookie(cookie);
	fscache_end_volume_access(cookie->volume, cookie, trace);
}

/*
 * Begin the process of looking up a cookie.  We offload the actual process to
 * a worker thread.
 */
static bool fscache_begin_lookup(struct fscache_cookie *cookie, bool will_modify)
{
	if (will_modify) {
		set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags);
		set_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags);
	}
	if (!fscache_begin_volume_access(cookie->volume, cookie,
					 fscache_access_lookup_cookie))
		return false;

	__fscache_begin_cookie_access(cookie, fscache_access_lookup_cookie);
	__fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_LOOKING_UP);
	set_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags);
	set_bit(FSCACHE_COOKIE_HAS_BEEN_CACHED, &cookie->flags);
	return true;
}

/*
 * Start using the cookie for I/O.  This prevents the backing object from being
 * reaped by VM pressure.
 */
void __fscache_use_cookie(struct fscache_cookie *cookie, bool will_modify)
{
	enum fscache_cookie_state state;
	bool queue = false;
	int n_active;

	_enter("c=%08x", cookie->debug_id);

	if (WARN(test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
		 "Trying to use relinquished cookie\n"))
		return;

	spin_lock(&cookie->lock);

	n_active = atomic_inc_return(&cookie->n_active);
	trace_fscache_active(cookie->debug_id, refcount_read(&cookie->ref),
			     n_active, atomic_read(&cookie->n_accesses),
			     will_modify ?
			     fscache_active_use_modify : fscache_active_use);

again:
	state = fscache_cookie_state(cookie);
	switch (state) {
	case FSCACHE_COOKIE_STATE_QUIESCENT:
		queue = fscache_begin_lookup(cookie, will_modify);
		break;

	case FSCACHE_COOKIE_STATE_LOOKING_UP:
	case FSCACHE_COOKIE_STATE_CREATING:
		if (will_modify)
			set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags);
		break;

	case FSCACHE_COOKIE_STATE_ACTIVE:
	case FSCACHE_COOKIE_STATE_INVALIDATING:
		if (will_modify &&
		    !test_and_set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags)) {
			set_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags);
			queue = true;
		}
		/*
		 * We could race with cookie_lru which may set LRU_DISCARD bit
		 * but has yet to run the cookie state machine.  If this happens
		 * and another thread tries to use the cookie, clear LRU_DISCARD
		 * so we don't end up withdrawing the cookie while in use.
		 */
		if (test_and_clear_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags))
			fscache_see_cookie(cookie, fscache_cookie_see_lru_discard_clear);
		break;

	case FSCACHE_COOKIE_STATE_FAILED:
	case FSCACHE_COOKIE_STATE_WITHDRAWING:
		break;

	case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
		spin_unlock(&cookie->lock);
		wait_var_event(&cookie->state,
			       fscache_cookie_state(cookie) !=
			       FSCACHE_COOKIE_STATE_LRU_DISCARDING);
		spin_lock(&cookie->lock);
		goto again;

	case FSCACHE_COOKIE_STATE_DROPPED:
	case FSCACHE_COOKIE_STATE_RELINQUISHING:
		WARN(1, "Can't use cookie in state %u\n", state);
		break;
	}

	spin_unlock(&cookie->lock);
	if (queue)
		fscache_queue_cookie(cookie, fscache_cookie_get_use_work);
	_leave("");
}
EXPORT_SYMBOL(__fscache_use_cookie);
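
/*
 * Illustration only (not part of the original file): a netfs typically pins
 * the cookie with the fscache_use_cookie() wrapper when a file is opened and
 * drops the pin on release, handing back the latest coherency data.  The
 * myfs_inode fields are hypothetical:
 *
 *	// on open
 *	fscache_use_cookie(mi->fscache, file->f_mode & FMODE_WRITE);
 *
 *	// on release
 *	loff_t i_size = i_size_read(&mi->vfs_inode);
 *	fscache_unuse_cookie(mi->fscache, &mi->change_attr, &i_size);
 *
 * Once n_active falls to zero, the cookie is put on the LRU handled below.
 */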

static void fscache_unuse_cookie_locked(struct fscache_cookie *cookie)
{
	clear_bit(FSCACHE_COOKIE_DISABLED, &cookie->flags);
	if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags))
		return;

	cookie->unused_at = jiffies;
	spin_lock(&fscache_cookie_lru_lock);
	if (list_empty(&cookie->commit_link)) {
		fscache_get_cookie(cookie, fscache_cookie_get_lru);
		fscache_stat(&fscache_n_cookies_lru);
	}
	list_move_tail(&cookie->commit_link, &fscache_cookie_lru);

	spin_unlock(&fscache_cookie_lru_lock);
	timer_reduce(&fscache_cookie_lru_timer,
		     jiffies + fscache_lru_cookie_timeout);
}

/*
 * Stop using the cookie for I/O.
 */
void __fscache_unuse_cookie(struct fscache_cookie *cookie,
			    const void *aux_data, const loff_t *object_size)
{
	unsigned int debug_id = cookie->debug_id;
	unsigned int r = refcount_read(&cookie->ref);
	unsigned int a = atomic_read(&cookie->n_accesses);
	unsigned int c;

	if (aux_data || object_size)
		__fscache_update_cookie(cookie, aux_data, object_size);

	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
	c = atomic_fetch_add_unless(&cookie->n_active, -1, 1);
	if (c != 1) {
		trace_fscache_active(debug_id, r, c - 1, a, fscache_active_unuse);
		return;
	}

	spin_lock(&cookie->lock);
	r = refcount_read(&cookie->ref);
	a = atomic_read(&cookie->n_accesses);
	c = atomic_dec_return(&cookie->n_active);
	trace_fscache_active(debug_id, r, c, a, fscache_active_unuse);
	if (c == 0)
		fscache_unuse_cookie_locked(cookie);
	spin_unlock(&cookie->lock);
}
EXPORT_SYMBOL(__fscache_unuse_cookie);

/*
 * Perform work upon the cookie, such as committing its cache state,
 * relinquishing it or withdrawing the backing cache.  We're protected from the
 * cache going away under us as object withdrawal must come through this
 * non-reentrant work item.
 */
static void fscache_cookie_state_machine(struct fscache_cookie *cookie)
{
	enum fscache_cookie_state state;
	bool wake = false;

	_enter("c=%x", cookie->debug_id);

again:
	spin_lock(&cookie->lock);
again_locked:
	state = cookie->state;
	switch (state) {
	case FSCACHE_COOKIE_STATE_QUIESCENT:
		/* The cookie is moved from the QUIESCENT state to the
		 * LOOKING_UP state by fscache_use_cookie().
		 */

		if (atomic_read(&cookie->n_accesses) == 0 &&
		    test_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags)) {
			__fscache_set_cookie_state(cookie,
						   FSCACHE_COOKIE_STATE_RELINQUISHING);
			wake = true;
			goto again_locked;
		}
		break;

	case FSCACHE_COOKIE_STATE_LOOKING_UP:
		spin_unlock(&cookie->lock);
		fscache_init_access_gate(cookie);
		fscache_perform_lookup(cookie);
		goto again;

	case FSCACHE_COOKIE_STATE_INVALIDATING:
		spin_unlock(&cookie->lock);
		fscache_perform_invalidation(cookie);
		goto again;

	case FSCACHE_COOKIE_STATE_ACTIVE:
		if (test_and_clear_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags)) {
			spin_unlock(&cookie->lock);
			fscache_prepare_to_write(cookie);
			spin_lock(&cookie->lock);
		}
		if (test_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags)) {
			__fscache_set_cookie_state(cookie,
						   FSCACHE_COOKIE_STATE_LRU_DISCARDING);
			wake = true;
			goto again_locked;
		}
		fallthrough;

	case FSCACHE_COOKIE_STATE_FAILED:
		if (test_and_clear_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
			fscache_end_cookie_access(cookie, fscache_access_invalidate_cookie_end);

		if (atomic_read(&cookie->n_accesses) != 0)
			break;
		if (test_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags)) {
			__fscache_set_cookie_state(cookie,
						   FSCACHE_COOKIE_STATE_RELINQUISHING);
			wake = true;
			goto again_locked;
		}
		if (test_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags)) {
			__fscache_set_cookie_state(cookie,
						   FSCACHE_COOKIE_STATE_WITHDRAWING);
			wake = true;
			goto again_locked;
		}
		break;

	case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
	case FSCACHE_COOKIE_STATE_RELINQUISHING:
	case FSCACHE_COOKIE_STATE_WITHDRAWING:
		if (cookie->cache_priv) {
			spin_unlock(&cookie->lock);
			cookie->volume->cache->ops->withdraw_cookie(cookie);
			spin_lock(&cookie->lock);
		}

		if (test_and_clear_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
			fscache_end_cookie_access(cookie, fscache_access_invalidate_cookie_end);

		switch (state) {
		case FSCACHE_COOKIE_STATE_RELINQUISHING:
			fscache_see_cookie(cookie, fscache_cookie_see_relinquish);
			fscache_unhash_cookie(cookie);
			__fscache_set_cookie_state(cookie,
						   FSCACHE_COOKIE_STATE_DROPPED);
			wake = true;
			goto out;
		case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
			fscache_see_cookie(cookie, fscache_cookie_see_lru_discard);
			break;
		case FSCACHE_COOKIE_STATE_WITHDRAWING:
			fscache_see_cookie(cookie, fscache_cookie_see_withdraw);
			break;
		default:
			BUG();
		}

		clear_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags);
		clear_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags);
		clear_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags);
		clear_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags);
		set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
		__fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
		wake = true;
		goto again_locked;

	case FSCACHE_COOKIE_STATE_DROPPED:
		break;

	default:
		WARN_ONCE(1, "Cookie %x in unexpected state %u\n",
			  cookie->debug_id, state);
		break;
	}

out:
	spin_unlock(&cookie->lock);
	if (wake)
		wake_up_cookie_state(cookie);
	_leave("");
}

static void fscache_cookie_worker(struct work_struct *work)
{
	struct fscache_cookie *cookie = container_of(work, struct fscache_cookie, work);

	fscache_see_cookie(cookie, fscache_cookie_see_work);
	fscache_cookie_state_machine(cookie);
	fscache_put_cookie(cookie, fscache_cookie_put_work);
}

/*
 * Wait for the object to become inactive.  The cookie's work item will be
 * scheduled when someone transitions n_accesses to 0 - but if someone's
 * already done that, schedule it anyway.
 */
static void __fscache_withdraw_cookie(struct fscache_cookie *cookie)
{
	int n_accesses;
	bool unpinned;

	unpinned = test_and_clear_bit(FSCACHE_COOKIE_NO_ACCESS_WAKE, &cookie->flags);

	/* Need to read the access count after unpinning */
	n_accesses = atomic_read(&cookie->n_accesses);
	if (unpinned)
		trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
				     n_accesses, fscache_access_cache_unpin);
	if (n_accesses == 0)
		fscache_queue_cookie(cookie, fscache_cookie_get_end_access);
}

static void fscache_cookie_lru_do_one(struct fscache_cookie *cookie)
{
	fscache_see_cookie(cookie, fscache_cookie_see_lru_do_one);

	spin_lock(&cookie->lock);
	if (cookie->state != FSCACHE_COOKIE_STATE_ACTIVE ||
	    time_before(jiffies, cookie->unused_at + fscache_lru_cookie_timeout) ||
	    atomic_read(&cookie->n_active) > 0) {
		spin_unlock(&cookie->lock);
		fscache_stat(&fscache_n_cookies_lru_removed);
	} else {
		set_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags);
		spin_unlock(&cookie->lock);
		fscache_stat(&fscache_n_cookies_lru_expired);
		_debug("lru c=%x", cookie->debug_id);
		__fscache_withdraw_cookie(cookie);
	}

	fscache_put_cookie(cookie, fscache_cookie_put_lru);
}

static void fscache_cookie_lru_worker(struct work_struct *work)
{
	struct fscache_cookie *cookie;
	unsigned long unused_at;

	spin_lock(&fscache_cookie_lru_lock);

	while (!list_empty(&fscache_cookie_lru)) {
		cookie = list_first_entry(&fscache_cookie_lru,
					  struct fscache_cookie, commit_link);
		unused_at = cookie->unused_at + fscache_lru_cookie_timeout;
		if (time_before(jiffies, unused_at)) {
			timer_reduce(&fscache_cookie_lru_timer, unused_at);
			break;
		}

		list_del_init(&cookie->commit_link);
		fscache_stat_d(&fscache_n_cookies_lru);
		spin_unlock(&fscache_cookie_lru_lock);
		fscache_cookie_lru_do_one(cookie);
		spin_lock(&fscache_cookie_lru_lock);
	}

	spin_unlock(&fscache_cookie_lru_lock);
}

static void fscache_cookie_lru_timed_out(struct timer_list *timer)
{
	queue_work(fscache_wq, &fscache_cookie_lru_work);
}

static void fscache_cookie_drop_from_lru(struct fscache_cookie *cookie)
{
	bool need_put = false;

	if (!list_empty(&cookie->commit_link)) {
		spin_lock(&fscache_cookie_lru_lock);
		if (!list_empty(&cookie->commit_link)) {
			list_del_init(&cookie->commit_link);
			fscache_stat_d(&fscache_n_cookies_lru);
			fscache_stat(&fscache_n_cookies_lru_dropped);
			need_put = true;
		}
		spin_unlock(&fscache_cookie_lru_lock);
		if (need_put)
			fscache_put_cookie(cookie, fscache_cookie_put_lru);
	}
}

/*
 * Remove a cookie from the hash table.
 */
static void fscache_unhash_cookie(struct fscache_cookie *cookie)
{
	struct hlist_bl_head *h;
	unsigned int bucket;

	bucket = cookie->key_hash & (ARRAY_SIZE(fscache_cookie_hash) - 1);
	h = &fscache_cookie_hash[bucket];

	hlist_bl_lock(h);
	hlist_bl_del(&cookie->hash_link);
	clear_bit(FSCACHE_COOKIE_IS_HASHED, &cookie->flags);
	hlist_bl_unlock(h);
	fscache_stat(&fscache_n_relinquishes_dropped);
}

static void fscache_drop_withdraw_cookie(struct fscache_cookie *cookie)
{
	fscache_cookie_drop_from_lru(cookie);
	__fscache_withdraw_cookie(cookie);
}

/**
 * fscache_withdraw_cookie - Mark a cookie for withdrawal
 * @cookie: The cookie to be withdrawn.
 *
 * Allow the cache backend to withdraw the backing for a cookie for its own
 * reasons, even if that cookie is in active use.
 */
void fscache_withdraw_cookie(struct fscache_cookie *cookie)
{
	set_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags);
	fscache_drop_withdraw_cookie(cookie);
}
EXPORT_SYMBOL(fscache_withdraw_cookie);

/*
 * Allow the netfs to release a cookie back to the cache.
 * - the object will be marked as recyclable on disk if retire is true
 */
void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
{
	fscache_stat(&fscache_n_relinquishes);
	if (retire)
		fscache_stat(&fscache_n_relinquishes_retire);

	_enter("c=%08x{%d},%d",
	       cookie->debug_id, atomic_read(&cookie->n_active), retire);

	if (WARN(test_and_set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
		 "Cookie c=%x already relinquished\n", cookie->debug_id))
		return;

	if (retire)
		set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags);
	trace_fscache_relinquish(cookie, retire);

	ASSERTCMP(atomic_read(&cookie->n_active), ==, 0);
	ASSERTCMP(atomic_read(&cookie->volume->n_cookies), >, 0);
	atomic_dec(&cookie->volume->n_cookies);

	if (test_bit(FSCACHE_COOKIE_HAS_BEEN_CACHED, &cookie->flags)) {
		set_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags);
		fscache_drop_withdraw_cookie(cookie);
	} else {
		fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_DROPPED);
		fscache_unhash_cookie(cookie);
	}
	fscache_put_cookie(cookie, fscache_cookie_put_relinquish);
}
EXPORT_SYMBOL(__fscache_relinquish_cookie);
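
/*
 * Illustration only (not part of the original file): a netfs normally calls
 * the fscache_relinquish_cookie() wrapper when it evicts an inode, retiring
 * the object if the file has been deleted.  The myfs_inode field is
 * hypothetical:
 *
 *	// in ->evict_inode()
 *	fscache_relinquish_cookie(mi->fscache, inode->i_nlink == 0);
 *	mi->fscache = NULL;
 *
 * With retire set, the backing object is marked for recycling rather than
 * kept for future reads.
 */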

/*
 * Drop a reference to a cookie.
 */
void fscache_put_cookie(struct fscache_cookie *cookie,
			enum fscache_cookie_trace where)
{
	struct fscache_volume *volume = cookie->volume;
	unsigned int cookie_debug_id = cookie->debug_id;
	bool zero;
	int ref;

	zero = __refcount_dec_and_test(&cookie->ref, &ref);
	trace_fscache_cookie(cookie_debug_id, ref - 1, where);
	if (zero) {
		fscache_free_cookie(cookie);
		fscache_put_volume(volume, fscache_volume_put_cookie);
	}
}
EXPORT_SYMBOL(fscache_put_cookie);

/*
 * Get a reference to a cookie.
 */
struct fscache_cookie *fscache_get_cookie(struct fscache_cookie *cookie,
					  enum fscache_cookie_trace where)
{
	int ref;

	__refcount_inc(&cookie->ref, &ref);
	trace_fscache_cookie(cookie->debug_id, ref + 1, where);
	return cookie;
}
EXPORT_SYMBOL(fscache_get_cookie);

/*
 * Ask the cache to effect invalidation of a cookie.
 */
static void fscache_perform_invalidation(struct fscache_cookie *cookie)
{
	if (!cookie->volume->cache->ops->invalidate_cookie(cookie))
		fscache_caching_failed(cookie);
	fscache_end_cookie_access(cookie, fscache_access_invalidate_cookie_end);
}

/*
 * Invalidate an object.
 */
void __fscache_invalidate(struct fscache_cookie *cookie,
			  const void *aux_data, loff_t new_size,
			  unsigned int flags)
{
	bool is_caching;

	_enter("c=%x", cookie->debug_id);

	fscache_stat(&fscache_n_invalidates);

	if (WARN(test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
		 "Trying to invalidate relinquished cookie\n"))
		return;

	if ((flags & FSCACHE_INVAL_DIO_WRITE) &&
	    test_and_set_bit(FSCACHE_COOKIE_DISABLED, &cookie->flags))
		return;

	spin_lock(&cookie->lock);
	set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
	fscache_update_aux(cookie, aux_data, &new_size);
	cookie->inval_counter++;
	trace_fscache_invalidate(cookie, new_size);

	switch (cookie->state) {
	case FSCACHE_COOKIE_STATE_INVALIDATING: /* is_still_valid will catch it */
	default:
		spin_unlock(&cookie->lock);
		_leave(" [no %u]", cookie->state);
		return;

	case FSCACHE_COOKIE_STATE_LOOKING_UP:
		if (!test_and_set_bit(FSCACHE_COOKIE_DO_INVALIDATE, &cookie->flags))
			__fscache_begin_cookie_access(cookie, fscache_access_invalidate_cookie);
		fallthrough;
	case FSCACHE_COOKIE_STATE_CREATING:
		spin_unlock(&cookie->lock);
		_leave(" [look %x]", cookie->inval_counter);
		return;

	case FSCACHE_COOKIE_STATE_ACTIVE:
		is_caching = fscache_begin_cookie_access(
			cookie, fscache_access_invalidate_cookie);
		if (is_caching)
			__fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_INVALIDATING);
		spin_unlock(&cookie->lock);
		wake_up_cookie_state(cookie);

		if (is_caching)
			fscache_queue_cookie(cookie, fscache_cookie_get_inval_work);
		_leave(" [inv]");
		return;
	}
}
EXPORT_SYMBOL(__fscache_invalidate);
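
/*
 * Illustration only (not part of the original file): a netfs calls the
 * fscache_invalidate() wrapper when it learns that the server's copy changed
 * behind its back, or before a direct-I/O write.  The myfs_inode fields and
 * the dio_write flag are hypothetical:
 *
 *	fscache_invalidate(mi->fscache, &mi->change_attr,
 *			   i_size_read(&mi->vfs_inode),
 *			   dio_write ? FSCACHE_INVAL_DIO_WRITE : 0);
 *
 * The cached data is discarded and, for FSCACHE_INVAL_DIO_WRITE, caching is
 * disabled on the cookie until it is next unused and reused.
 */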

#ifdef CONFIG_PROC_FS
/*
 * Generate a list of extant cookies in /proc/fs/fscache/cookies
 */
static int fscache_cookies_seq_show(struct seq_file *m, void *v)
{
	struct fscache_cookie *cookie;
	unsigned int keylen = 0, auxlen = 0;
	u8 *p;

	if (v == &fscache_cookies) {
		seq_puts(m,
			 "COOKIE   VOLUME   REF ACT ACC S FL DEF             \n"
			 "======== ======== === === === = == ================\n"
			 );
		return 0;
	}

	cookie = list_entry(v, struct fscache_cookie, proc_link);

	seq_printf(m,
		   "%08x %08x %3d %3d %3d %c %02lx",
		   cookie->debug_id,
		   cookie->volume->debug_id,
		   refcount_read(&cookie->ref),
		   atomic_read(&cookie->n_active),
		   atomic_read(&cookie->n_accesses),
		   fscache_cookie_states[cookie->state],
		   cookie->flags);

	keylen = cookie->key_len;
	auxlen = cookie->aux_len;

	if (keylen > 0 || auxlen > 0) {
		seq_puts(m, " ");
		p = keylen <= sizeof(cookie->inline_key) ?
			cookie->inline_key : cookie->key;
		for (; keylen > 0; keylen--)
			seq_printf(m, "%02x", *p++);
		if (auxlen > 0) {
			seq_puts(m, ", ");
			p = auxlen <= sizeof(cookie->inline_aux) ?
				cookie->inline_aux : cookie->aux;
			for (; auxlen > 0; auxlen--)
				seq_printf(m, "%02x", *p++);
		}
	}

	seq_puts(m, "\n");
	return 0;
}

static void *fscache_cookies_seq_start(struct seq_file *m, loff_t *_pos)
	__acquires(fscache_cookies_lock)
{
	read_lock(&fscache_cookies_lock);
	return seq_list_start_head(&fscache_cookies, *_pos);
}

static void *fscache_cookies_seq_next(struct seq_file *m, void *v, loff_t *_pos)
{
	return seq_list_next(v, &fscache_cookies, _pos);
}

static void fscache_cookies_seq_stop(struct seq_file *m, void *v)
	__releases(fscache_cookies_lock)
{
	read_unlock(&fscache_cookies_lock);
}

const struct seq_operations fscache_cookies_seq_ops = {
	.start  = fscache_cookies_seq_start,
	.next   = fscache_cookies_seq_next,
	.stop   = fscache_cookies_seq_stop,
	.show   = fscache_cookies_seq_show,
};
#endif