// SPDX-License-Identifier: GPL-2.0-only
/* Object lifetime handling and tracing.
 *
 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include "internal.h"

1133965Sjdp/*
1233965Sjdp * Allocate an I/O request and initialise it.
1333965Sjdp */
1433965Sjdpstruct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
1533965Sjdp					     struct file *file,
1633965Sjdp					     loff_t start, size_t len,
1733965Sjdp					     enum netfs_io_origin origin)
1833965Sjdp{
1933965Sjdp	static atomic_t debug_ids;
2033965Sjdp	struct inode *inode = file ? file_inode(file) : mapping->host;
2133965Sjdp	struct netfs_inode *ctx = netfs_inode(inode);
2233965Sjdp	struct netfs_io_request *rreq;
2333965Sjdp	bool is_unbuffered = (origin == NETFS_UNBUFFERED_WRITE ||
2433965Sjdp			      origin == NETFS_DIO_READ ||
2533965Sjdp			      origin == NETFS_DIO_WRITE);
2633965Sjdp	bool cached = !is_unbuffered && netfs_is_cache_enabled(ctx);
2733965Sjdp	int ret;
2833965Sjdp
2933965Sjdp	rreq = kzalloc(ctx->ops->io_request_size ?: sizeof(struct netfs_io_request),
3033965Sjdp		       GFP_KERNEL);
3133965Sjdp	if (!rreq)
3233965Sjdp		return ERR_PTR(-ENOMEM);
3333965Sjdp
3433965Sjdp	rreq->start	= start;
3533965Sjdp	rreq->len	= len;
3633965Sjdp	rreq->upper_len	= len;
3733965Sjdp	rreq->origin	= origin;
3833965Sjdp	rreq->netfs_ops	= ctx->ops;
3933965Sjdp	rreq->mapping	= mapping;
4033965Sjdp	rreq->inode	= inode;
4133965Sjdp	rreq->i_size	= i_size_read(inode);
4233965Sjdp	rreq->debug_id	= atomic_inc_return(&debug_ids);
4333965Sjdp	INIT_LIST_HEAD(&rreq->subrequests);
4433965Sjdp	INIT_WORK(&rreq->work, NULL);
4533965Sjdp	refcount_set(&rreq->ref, 1);
4633965Sjdp
4733965Sjdp	__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
4833965Sjdp	if (cached)
4933965Sjdp		__set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
5033965Sjdp	if (file && file->f_flags & O_NONBLOCK)
5133965Sjdp		__set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags);
5233965Sjdp	if (rreq->netfs_ops->init_request) {
5333965Sjdp		ret = rreq->netfs_ops->init_request(rreq, file);
5433965Sjdp		if (ret < 0) {
5533965Sjdp			kfree(rreq);
5633965Sjdp			return ERR_PTR(ret);
5733965Sjdp		}
5833965Sjdp	}
5933965Sjdp
6033965Sjdp	trace_netfs_rreq_ref(rreq->debug_id, 1, netfs_rreq_trace_new);
6133965Sjdp	netfs_proc_add_rreq(rreq);
6233965Sjdp	netfs_stat(&netfs_n_rh_rreq);
6333965Sjdp	return rreq;
6433965Sjdp}
6533965Sjdp
6633965Sjdpvoid netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
6733965Sjdp{
6833965Sjdp	int r;
6933965Sjdp
7033965Sjdp	__refcount_inc(&rreq->ref, &r);
7133965Sjdp	trace_netfs_rreq_ref(rreq->debug_id, r + 1, what);
7233965Sjdp}
7333965Sjdp
7433965Sjdpvoid netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
7533965Sjdp{
7633965Sjdp	struct netfs_io_subrequest *subreq;
7733965Sjdp
7833965Sjdp	while (!list_empty(&rreq->subrequests)) {
7933965Sjdp		subreq = list_first_entry(&rreq->subrequests,
8033965Sjdp					  struct netfs_io_subrequest, rreq_link);
8133965Sjdp		list_del(&subreq->rreq_link);
8233965Sjdp		netfs_put_subrequest(subreq, was_async,
8333965Sjdp				     netfs_sreq_trace_put_clear);
8433965Sjdp	}
8533965Sjdp}
8633965Sjdp
8733965Sjdpstatic void netfs_free_request(struct work_struct *work)
8833965Sjdp{
8933965Sjdp	struct netfs_io_request *rreq =
9033965Sjdp		container_of(work, struct netfs_io_request, work);
9133965Sjdp	unsigned int i;
9233965Sjdp
9333965Sjdp	trace_netfs_rreq(rreq, netfs_rreq_trace_free);
9433965Sjdp	netfs_proc_del_rreq(rreq);
9533965Sjdp	netfs_clear_subrequests(rreq, false);
9633965Sjdp	if (rreq->netfs_ops->free_request)
9733965Sjdp		rreq->netfs_ops->free_request(rreq);
9833965Sjdp	if (rreq->cache_resources.ops)
9933965Sjdp		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
10033965Sjdp	if (rreq->direct_bv) {
10133965Sjdp		for (i = 0; i < rreq->direct_bv_count; i++) {
10233965Sjdp			if (rreq->direct_bv[i].bv_page) {
10333965Sjdp				if (rreq->direct_bv_unpin)
10433965Sjdp					unpin_user_page(rreq->direct_bv[i].bv_page);
10533965Sjdp			}
10633965Sjdp		}
10733965Sjdp		kvfree(rreq->direct_bv);
10833965Sjdp	}
10933965Sjdp	kfree_rcu(rreq, rcu);
11033965Sjdp	netfs_stat_d(&netfs_n_rh_rreq);
11133965Sjdp}
11233965Sjdp
11333965Sjdpvoid netfs_put_request(struct netfs_io_request *rreq, bool was_async,
11433965Sjdp		       enum netfs_rreq_ref_trace what)
11533965Sjdp{
11633965Sjdp	unsigned int debug_id;
11733965Sjdp	bool dead;
11833965Sjdp	int r;
11933965Sjdp
12033965Sjdp	if (rreq) {
12133965Sjdp		debug_id = rreq->debug_id;
12233965Sjdp		dead = __refcount_dec_and_test(&rreq->ref, &r);
12333965Sjdp		trace_netfs_rreq_ref(debug_id, r - 1, what);
12433965Sjdp		if (dead) {
12533965Sjdp			if (was_async) {
12633965Sjdp				rreq->work.func = netfs_free_request;
12733965Sjdp				if (!queue_work(system_unbound_wq, &rreq->work))
12833965Sjdp					BUG();
12933965Sjdp			} else {
13033965Sjdp				netfs_free_request(&rreq->work);
13133965Sjdp			}
13233965Sjdp		}
13333965Sjdp	}
13433965Sjdp}
13533965Sjdp
13633965Sjdp/*
13733965Sjdp * Allocate and partially initialise an I/O request structure.
13833965Sjdp */
13933965Sjdpstruct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq)
14033965Sjdp{
14133965Sjdp	struct netfs_io_subrequest *subreq;
14233965Sjdp
14333965Sjdp	subreq = kzalloc(rreq->netfs_ops->io_subrequest_size ?:
14433965Sjdp			 sizeof(struct netfs_io_subrequest),
14533965Sjdp			 GFP_KERNEL);
14633965Sjdp	if (subreq) {
14733965Sjdp		INIT_WORK(&subreq->work, NULL);
14833965Sjdp		INIT_LIST_HEAD(&subreq->rreq_link);
14933965Sjdp		refcount_set(&subreq->ref, 2);
15033965Sjdp		subreq->rreq = rreq;
15133965Sjdp		netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
15233965Sjdp		netfs_stat(&netfs_n_rh_sreq);
15333965Sjdp	}
15433965Sjdp
15533965Sjdp	return subreq;
15633965Sjdp}
15733965Sjdp
15833965Sjdpvoid netfs_get_subrequest(struct netfs_io_subrequest *subreq,
15933965Sjdp			  enum netfs_sreq_ref_trace what)
16033965Sjdp{
16133965Sjdp	int r;
16233965Sjdp
16333965Sjdp	__refcount_inc(&subreq->ref, &r);
16433965Sjdp	trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index, r + 1,
16533965Sjdp			     what);
16633965Sjdp}
16733965Sjdp
16833965Sjdpstatic void netfs_free_subrequest(struct netfs_io_subrequest *subreq,
16933965Sjdp				  bool was_async)
17033965Sjdp{
17133965Sjdp	struct netfs_io_request *rreq = subreq->rreq;
17233965Sjdp
17333965Sjdp	trace_netfs_sreq(subreq, netfs_sreq_trace_free);
17433965Sjdp	if (rreq->netfs_ops->free_subrequest)
17533965Sjdp		rreq->netfs_ops->free_subrequest(subreq);
17633965Sjdp	kfree(subreq);
17733965Sjdp	netfs_stat_d(&netfs_n_rh_sreq);
17833965Sjdp	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq);
17933965Sjdp}
18033965Sjdp
18133965Sjdpvoid netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async,
18233965Sjdp			  enum netfs_sreq_ref_trace what)
18333965Sjdp{
18433965Sjdp	unsigned int debug_index = subreq->debug_index;
18533965Sjdp	unsigned int debug_id = subreq->rreq->debug_id;
18633965Sjdp	bool dead;
18733965Sjdp	int r;
18833965Sjdp
18933965Sjdp	dead = __refcount_dec_and_test(&subreq->ref, &r);
19033965Sjdp	trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what);
19133965Sjdp	if (dead)
19233965Sjdp		netfs_free_subrequest(subreq, was_async);
19333965Sjdp}
19433965Sjdp