/*
 * Generic block layer helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

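/*
 * Completion handler for discard bios: record an unsupported-operation or
 * I/O error in the bio's flags, wake a waiter if one was attached via
 * ->bi_private, and drop this handler's reference to the bio.
 */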
static void blkdev_discard_end_io(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}

	if (bio->bi_private)
		complete(bio->bi_private);

	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = flags & BLKDEV_IFL_BARRIER ?
		DISCARD_BARRIER : DISCARD_NOBARRIER;
	unsigned int max_discard_sectors;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/*
	 * Ensure that max_discard_sectors is of the proper granularity.
	 * Note that the mask below rounds down correctly only when the
	 * reported discard granularity is a power of two.
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	if (q->limits.discard_granularity) {
		unsigned int disc_sects = q->limits.discard_granularity >> 9;

		max_discard_sectors &= ~(disc_sects - 1);
	}

	if (flags & BLKDEV_IFL_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= DISCARD_SECURE;
	}

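	/*
	 * Issue the range as a chain of bios, each covering at most
	 * max_discard_sectors; stop on the first error.
	 */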
	while (nr_sects && !ret) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = blkdev_discard_end_io;
		bio->bi_bdev = bdev;
		if (flags & BLKDEV_IFL_WAIT)
			bio->bi_private = &wait;

		if (nr_sects > max_discard_sectors) {
			bio->bi_size = max_discard_sectors << 9;
			nr_sects -= max_discard_sectors;
			sector += max_discard_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

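		/*
		 * Take an extra reference so the bio stays valid after its
		 * completion handler runs; its flags are inspected below.
		 */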
		bio_get(bio);
		submit_bio(type, bio);

		if (flags & BLKDEV_IFL_WAIT)
			wait_for_completion(&wait);

		if (bio_flagged(bio, BIO_EOPNOTSUPP))
			ret = -EOPNOTSUPP;
		else if (!bio_flagged(bio, BIO_UPTODATE))
			ret = -EIO;
		bio_put(bio);
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
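
/*
 * Example (a sketch, not part of this file): a filesystem that has just
 * freed an extent might discard it synchronously like this, where "sb"
 * stands for its super_block and the range is in 512-byte sectors:
 *
 *	err = blkdev_issue_discard(sb->s_bdev, start, nr_sects, GFP_NOFS,
 *				   BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
 *	if (err == -EOPNOTSUPP)
 *		... the device lacks discard support; stop issuing discards ...
 */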
109
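/*
 * A bio_batch ties a group of bios to a single completion: shared status
 * flags, a count of finished bios, the completion to signal, and an
 * optional per-bio end_io callback.
 */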
struct bio_batch {
	atomic_t		done;
	unsigned long		flags;
	struct completion	*wait;
	bio_end_io_t		*end_io;
};

static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	if (bb) {
		if (err) {
			if (err == -EOPNOTSUPP)
				set_bit(BIO_EOPNOTSUPP, &bb->flags);
			else
				clear_bit(BIO_UPTODATE, &bb->flags);
		}
		if (bb->end_io)
			bb->end_io(bio, err);
		atomic_inc(&bb->done);
		complete(bb->wait);
	}
	bio_put(bio);
}

/**
 * blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue the writes against
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 *  Send a barrier at the beginning and at the end if requested; this
 *  guarantees correct request ordering. An empty barrier allows us to
 *  avoid a post-queue flush.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz, issued = 0;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 0);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;
	bb.end_io = NULL;

	if (flags & BLKDEV_IFL_BARRIER) {
		/* issue async barrier before the data */
		ret = blkdev_issue_flush(bdev, gfp_mask, NULL, 0);
		if (ret)
			return ret;
	}
submit:
	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_bdev   = bdev;
		bio->bi_end_io = bio_batch_end_io;
		if (flags & BLKDEV_IFL_WAIT)
			bio->bi_private = &bb;

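		/*
		 * Pack the bio with zero pages; bio_add_page() returns the
		 * number of bytes actually added, which falls short of the
		 * request once the bio (or a queue limit) is full.
		 */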
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			if (sz == 0)
				/* bio has maximum size possible */
				break;
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		ret = 0;
		issued++;
		submit_bio(WRITE, bio);
	}
	/*
	 * When all data bios are in flight, send the final barrier if
	 * requested.
	 */
	if (nr_sects == 0 && flags & BLKDEV_IFL_BARRIER)
		ret = blkdev_issue_flush(bdev, gfp_mask, NULL,
					flags & BLKDEV_IFL_WAIT);

	if (flags & BLKDEV_IFL_WAIT)
		/* Wait for in-flight bios */
		while (issued != atomic_read(&bb.done))
			wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
		ret = -EIO;

	if (ret)
		goto out;

	if (test_bit(BIO_EOPNOTSUPP, &bb.flags)) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (nr_sects != 0)
		goto submit;
out:
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
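
/*
 * Example (a sketch, not part of this file): zeroing a range synchronously,
 * e.g. to clear stale on-disk metadata on "bdev", could look like this:
 *
 *	err = blkdev_issue_zeroout(bdev, start, nr_sects, GFP_KERNEL,
 *				   BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
 *	if (err)
 *		... the range may be only partially zeroed ...
 */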