// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include "null_blk.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#undef pr_fmt
#define pr_fmt(fmt)	"null_blk: " fmt

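/* Convert a size given in MB units into a number of 512B sectors. */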
static inline sector_t mb_to_sects(unsigned long mb)
{
	return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
}

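/* Return the index of the zone containing the given sector. */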
static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
	return sect >> ilog2(dev->zone_size_sects);
}

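/*
 * Zone resource accounting (open and active zone counts) is protected by
 * zone_res_lock, which is only needed if an open or active zone limit is
 * configured.
 */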
static inline void null_lock_zone_res(struct nullb_device *dev)
{
	if (dev->need_zone_res_mgmt)
		spin_lock_irq(&dev->zone_res_lock);
}

static inline void null_unlock_zone_res(struct nullb_device *dev)
{
	if (dev->need_zone_res_mgmt)
		spin_unlock_irq(&dev->zone_res_lock);
}

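/*
 * Per-zone locking: with memory backing, processing a command may sleep
 * (e.g. to allocate backing pages), so a mutex is used. Otherwise, a
 * spinlock is sufficient and cheaper.
 */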
static inline void null_init_zone_lock(struct nullb_device *dev,
				       struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_init(&zone->spinlock);
	else
		mutex_init(&zone->mutex);
}

static inline void null_lock_zone(struct nullb_device *dev,
				  struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_irq(&zone->spinlock);
	else
		mutex_lock(&zone->mutex);
}

static inline void null_unlock_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_unlock_irq(&zone->spinlock);
	else
		mutex_unlock(&zone->mutex);
}

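/*
 * Initialize the zone configuration of the device: conventional zones
 * first (if any), followed by sequential write required zones, with the
 * last zone possibly being smaller than the zone size. For example,
 * assuming the usual null_blk module parameters, a setup such as:
 *
 *   modprobe null_blk zoned=1 zone_size=256 zone_nr_conv=4 zone_max_open=8
 *
 * would be handled here with dev->zone_size = 256 (MB), and so on.
 */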
int null_init_zoned_dev(struct nullb_device *dev,
			struct queue_limits *lim)
{
	sector_t dev_capacity_sects, zone_capacity_sects;
	struct nullb_zone *zone;
	sector_t sector = 0;
	unsigned int i;

	if (!is_power_of_2(dev->zone_size)) {
		pr_err("zone_size must be power-of-two\n");
		return -EINVAL;
	}
	if (dev->zone_size > dev->size) {
		pr_err("Zone size larger than device capacity\n");
		return -EINVAL;
	}

	if (!dev->zone_capacity)
		dev->zone_capacity = dev->zone_size;

	if (dev->zone_capacity > dev->zone_size) {
		pr_err("zone capacity (%lu MB) larger than zone size (%lu MB)\n",
		       dev->zone_capacity, dev->zone_size);
		return -EINVAL;
	}

	zone_capacity_sects = mb_to_sects(dev->zone_capacity);
	dev_capacity_sects = mb_to_sects(dev->size);
	dev->zone_size_sects = mb_to_sects(dev->zone_size);
	dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
		>> ilog2(dev->zone_size_sects);

	dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
				    GFP_KERNEL | __GFP_ZERO);
	if (!dev->zones)
		return -ENOMEM;

	spin_lock_init(&dev->zone_res_lock);

	if (dev->zone_nr_conv >= dev->nr_zones) {
		dev->zone_nr_conv = dev->nr_zones - 1;
		pr_info("changed the number of conventional zones to %u\n",
			dev->zone_nr_conv);
	}

	/* Max active zones has to be < number of seq zones in order to be enforceable */
	if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_active = 0;
		pr_info("zone_max_active limit disabled, limit >= zone count\n");
	}

	/* Max open zones has to be <= max active zones */
	if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
		dev->zone_max_open = dev->zone_max_active;
		pr_info("changed the maximum number of open zones to %u\n",
			dev->zone_max_open);
	} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_open = 0;
		pr_info("zone_max_open limit disabled, limit >= zone count\n");
	}
	dev->need_zone_res_mgmt = dev->zone_max_active || dev->zone_max_open;
	dev->imp_close_zone_no = dev->zone_nr_conv;

	for (i = 0; i < dev->zone_nr_conv; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = sector;
		zone->len = dev->zone_size_sects;
		zone->capacity = zone->len;
		zone->wp = zone->start + zone->len;
		zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
		zone->cond = BLK_ZONE_COND_NOT_WP;

		sector += dev->zone_size_sects;
	}

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = zone->wp = sector;
		if (zone->start + dev->zone_size_sects > dev_capacity_sects)
			zone->len = dev_capacity_sects - zone->start;
		else
			zone->len = dev->zone_size_sects;
		zone->capacity =
			min_t(sector_t, zone->len, zone_capacity_sects);
		zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		zone->cond = BLK_ZONE_COND_EMPTY;

		sector += dev->zone_size_sects;
	}

	lim->zoned = true;
	lim->chunk_sectors = dev->zone_size_sects;
	lim->max_zone_append_sectors = dev->zone_size_sects;
	lim->max_open_zones = dev->zone_max_open;
	lim->max_active_zones = dev->zone_max_active;
	return 0;
}

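/* Register the zoned characteristics of the device with the block layer. */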
int null_register_zoned_dev(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
	nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
	return blk_revalidate_disk_zones(nullb->disk, NULL);
}

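/* Free the zone array allocated by null_init_zoned_dev(). */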
void null_free_zoned_dev(struct nullb_device *dev)
{
	kvfree(dev->zones);
	dev->zones = NULL;
}

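/* The block_device_operations ->report_zones() method. */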
int null_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nullb *nullb = disk->private_data;
	struct nullb_device *dev = nullb->dev;
	unsigned int first_zone, i;
	struct nullb_zone *zone;
	struct blk_zone blkz;
	int error;

	first_zone = null_zone_no(dev, sector);
	if (first_zone >= dev->nr_zones)
		return 0;

	nr_zones = min(nr_zones, dev->nr_zones - first_zone);
	trace_nullb_report_zones(nullb, nr_zones);

	memset(&blkz, 0, sizeof(struct blk_zone));
	zone = &dev->zones[first_zone];
	for (i = 0; i < nr_zones; i++, zone++) {
		/*
		 * Stacked DM target drivers will remap the zone information by
		 * modifying the zone information passed to the report callback.
		 * So use a local copy to avoid corruption of the device zone
		 * array.
		 */
		null_lock_zone(dev, zone);
		blkz.start = zone->start;
		blkz.len = zone->len;
		blkz.wp = zone->wp;
		blkz.type = zone->type;
		blkz.cond = zone->cond;
		blkz.capacity = zone->capacity;
		null_unlock_zone(dev, zone);

		error = cb(&blkz, i, data);
		if (error)
			return error;
	}

	return nr_zones;
}

/*
 * For memory-backed devices, this is called from null_process_cmd()
 * with the target zone already locked.
 */
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len)
{
	struct nullb_device *dev = nullb->dev;
	struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
	unsigned int nr_sectors = len >> SECTOR_SHIFT;

	/* Read must be below the write pointer position */
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
	    sector + nr_sectors <= zone->wp)
		return len;

	if (sector > zone->wp)
		return 0;

	return (zone->wp - sector) << SECTOR_SHIFT;
}

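/*
 * Close a zone and update the open/active zone resource counters. Called
 * with zone_res_lock held when zone resource management is enabled.
 */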
static blk_status_t __null_close_zone(struct nullb_device *dev,
				      struct nullb_zone *zone)
{
	switch (zone->cond) {
	case BLK_ZONE_COND_CLOSED:
		/* close operation on closed is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	if (zone->wp == zone->start) {
		zone->cond = BLK_ZONE_COND_EMPTY;
	} else {
		zone->cond = BLK_ZONE_COND_CLOSED;
		dev->nr_zones_closed++;
	}

	return BLK_STS_OK;
}

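/*
 * Close the first implicitly open zone found, scanning the sequential
 * zones round-robin, starting from the zone following the last one that
 * was implicitly closed.
 */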
static void null_close_imp_open_zone(struct nullb_device *dev)
{
	struct nullb_zone *zone;
	unsigned int zno, i;

	zno = dev->imp_close_zone_no;
	if (zno >= dev->nr_zones)
		zno = dev->zone_nr_conv;

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[zno];
		zno++;
		if (zno >= dev->nr_zones)
			zno = dev->zone_nr_conv;

		if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {
			__null_close_zone(dev, zone);
			dev->imp_close_zone_no = zno;
			return;
		}
	}
}

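/* Check if activating one more zone would exceed the max active zone limit. */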
static blk_status_t null_check_active(struct nullb_device *dev)
{
	if (!dev->zone_max_active)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
			dev->nr_zones_closed < dev->zone_max_active)
		return BLK_STS_OK;

	return BLK_STS_ZONE_ACTIVE_RESOURCE;
}

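/*
 * Check if opening one more zone would exceed the max open zone limit.
 * If so, try to make room by implicitly closing an open zone, which only
 * helps if doing so does not leave us at the active zone limit.
 */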
static blk_status_t null_check_open(struct nullb_device *dev)
{
	if (!dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_imp_open) {
		if (null_check_active(dev) == BLK_STS_OK) {
			null_close_imp_open_zone(dev);
			return BLK_STS_OK;
		}
	}

	return BLK_STS_ZONE_OPEN_RESOURCE;
}

/*
 * This function matches the manage open zone resources function in the ZBC
 * standard, with the addition of max active zones support (added in the ZNS
 * standard).
 *
 * The function determines if a zone can transition to implicit open or
 * explicit open, while maintaining the max open zone (and max active zone)
 * limit(s). It may close an implicit open zone in order to make additional
 * zone resources available.
 *
 * ZBC states that an implicit open zone shall be closed only if there is not
 * room within the open limit. However, with the addition of an active limit,
 * it is not certain that closing an implicit open zone will allow a new zone
 * to be opened, since we might already be at the active limit capacity.
 */
static blk_status_t null_check_zone_resources(struct nullb_device *dev,
					      struct nullb_zone *zone)
{
	blk_status_t ret;

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_active(dev);
		if (ret != BLK_STS_OK)
			return ret;
		fallthrough;
	case BLK_ZONE_COND_CLOSED:
		return null_check_open(dev);
	default:
		/* Should never be called for other states */
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
}

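/*
 * Process a regular write or a zone append: check and advance the target
 * zone write pointer, updating the zone condition and the open/active
 * zone resource counters as needed.
 */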
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
				    unsigned int nr_sectors, bool append)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct nullb_zone *zone = &dev->zones[zno];
	blk_status_t ret;

	trace_nullb_zone_op(cmd, zno, zone->cond);

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
		if (append)
			return BLK_STS_IOERR;
		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	}

	null_lock_zone(dev, zone);

	if (zone->cond == BLK_ZONE_COND_FULL ||
	    zone->cond == BLK_ZONE_COND_READONLY ||
	    zone->cond == BLK_ZONE_COND_OFFLINE) {
		/* Cannot write to the zone */
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	/*
	 * Regular writes must be at the write pointer position.
	 * Zone append writes are automatically issued at the write
	 * pointer and the position returned using the request or BIO
	 * sector.
	 */
	if (append) {
		sector = zone->wp;
		blk_mq_rq_from_pdu(cmd)->__sector = sector;
	} else if (sector != zone->wp) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	if (zone->wp + nr_sectors > zone->start + zone->capacity) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	if (zone->cond == BLK_ZONE_COND_CLOSED ||
	    zone->cond == BLK_ZONE_COND_EMPTY) {
		null_lock_zone_res(dev);

		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK) {
			null_unlock_zone_res(dev);
			goto unlock;
		}
		if (zone->cond == BLK_ZONE_COND_CLOSED) {
			dev->nr_zones_closed--;
			dev->nr_zones_imp_open++;
		} else if (zone->cond == BLK_ZONE_COND_EMPTY) {
			dev->nr_zones_imp_open++;
		}

		if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;

		null_unlock_zone_res(dev);
	}

	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	if (ret != BLK_STS_OK)
		goto unlock;

	zone->wp += nr_sectors;
	if (zone->wp == zone->start + zone->capacity) {
		null_lock_zone_res(dev);
		if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
			dev->nr_zones_exp_open--;
		else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
			dev->nr_zones_imp_open--;
		zone->cond = BLK_ZONE_COND_FULL;
		null_unlock_zone_res(dev);
	}

	ret = BLK_STS_OK;

unlock:
	null_unlock_zone(dev, zone);

	return ret;
}

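/* Explicitly open a zone (REQ_OP_ZONE_OPEN). */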
static blk_status_t null_open_zone(struct nullb_device *dev,
				   struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_EXP_OPEN:
		/* open operation on exp open is not an error */
		goto unlock;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
	default:
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	zone->cond = BLK_ZONE_COND_EXP_OPEN;
	dev->nr_zones_exp_open++;

unlock:
	null_unlock_zone_res(dev);

	return ret;
}

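/* Explicitly close a zone (REQ_OP_ZONE_CLOSE). */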
static blk_status_t null_close_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	blk_status_t ret;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);
	ret = __null_close_zone(dev, zone);
	null_unlock_zone_res(dev);

	return ret;
}

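/* Transition a zone to the full condition (REQ_OP_ZONE_FINISH). */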
static blk_status_t null_finish_zone(struct nullb_device *dev,
				     struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_FULL:
		/* finish operation on full is not an error */
		goto unlock;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		dev->nr_zones_closed--;
		break;
	default:
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	zone->cond = BLK_ZONE_COND_FULL;
	zone->wp = zone->start + zone->len;

unlock:
	null_unlock_zone_res(dev);

	return ret;
}

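/*
 * Reset a zone write pointer (REQ_OP_ZONE_RESET). For memory-backed
 * devices, also discard the zone data.
 */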
static blk_status_t null_reset_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		/* reset operation on empty is not an error */
		null_unlock_zone_res(dev);
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
		break;
	default:
		null_unlock_zone_res(dev);
		return BLK_STS_IOERR;
	}

	zone->cond = BLK_ZONE_COND_EMPTY;
	zone->wp = zone->start;

	null_unlock_zone_res(dev);

	if (dev->memory_backed)
		return null_handle_discard(dev, zone->start, zone->len);

	return BLK_STS_OK;
}

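/*
 * Dispatch a zone management operation to the per-zone handler for the
 * zone containing the target sector, or to all sequential zones for
 * REQ_OP_ZONE_RESET_ALL.
 */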
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_op op,
				   sector_t sector)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zone_no;
	struct nullb_zone *zone;
	blk_status_t ret;
	size_t i;

	if (op == REQ_OP_ZONE_RESET_ALL) {
		for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
			zone = &dev->zones[i];
			null_lock_zone(dev, zone);
			if (zone->cond != BLK_ZONE_COND_EMPTY &&
			    zone->cond != BLK_ZONE_COND_READONLY &&
			    zone->cond != BLK_ZONE_COND_OFFLINE) {
				null_reset_zone(dev, zone);
				trace_nullb_zone_op(cmd, i, zone->cond);
			}
			null_unlock_zone(dev, zone);
		}
		return BLK_STS_OK;
	}

	zone_no = null_zone_no(dev, sector);
	zone = &dev->zones[zone_no];

	null_lock_zone(dev, zone);

	if (zone->cond == BLK_ZONE_COND_READONLY ||
	    zone->cond == BLK_ZONE_COND_OFFLINE) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	switch (op) {
	case REQ_OP_ZONE_RESET:
		ret = null_reset_zone(dev, zone);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = null_open_zone(dev, zone);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = null_close_zone(dev, zone);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = null_finish_zone(dev, zone);
		break;
	default:
		ret = BLK_STS_NOTSUPP;
		break;
	}

	if (ret == BLK_STS_OK)
		trace_nullb_zone_op(cmd, zone_no, zone->cond);

unlock:
	null_unlock_zone(dev, zone);

	return ret;
}

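/*
 * Main entry point for commands targeting a zoned device: writes and zone
 * management operations get dedicated handling, while reads and other
 * operations are processed under the zone lock so that they see a
 * consistent zone state.
 */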
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
				    sector_t sector, sector_t nr_sectors)
{
	struct nullb_device *dev;
	struct nullb_zone *zone;
	blk_status_t sts;

	switch (op) {
	case REQ_OP_WRITE:
		return null_zone_write(cmd, sector, nr_sectors, false);
	case REQ_OP_ZONE_APPEND:
		return null_zone_write(cmd, sector, nr_sectors, true);
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return null_zone_mgmt(cmd, op, sector);
	default:
		dev = cmd->nq->dev;
		zone = &dev->zones[null_zone_no(dev, sector)];
		if (zone->cond == BLK_ZONE_COND_OFFLINE)
			return BLK_STS_IOERR;

		null_lock_zone(dev, zone);
		sts = null_process_cmd(cmd, op, sector, nr_sectors);
		null_unlock_zone(dev, zone);
		return sts;
	}
}

/*
 * Set a zone to the read-only or offline condition.
 */
static void null_set_zone_cond(struct nullb_device *dev,
			       struct nullb_zone *zone, enum blk_zone_cond cond)
{
	if (WARN_ON_ONCE(cond != BLK_ZONE_COND_READONLY &&
			 cond != BLK_ZONE_COND_OFFLINE))
		return;

	null_lock_zone(dev, zone);

	/*
	 * If the read-only condition is requested for a zone that is already
	 * read-only, restore the zone to the normal empty condition. Do the
	 * same if the offline condition is requested for an offline zone.
	 * Otherwise, set the specified condition on the zone, finishing it
	 * beforehand to free up zone resources.
	 */
	if (zone->cond == cond) {
		zone->cond = BLK_ZONE_COND_EMPTY;
		zone->wp = zone->start;
		if (dev->memory_backed)
			null_handle_discard(dev, zone->start, zone->len);
	} else {
		if (zone->cond != BLK_ZONE_COND_READONLY &&
		    zone->cond != BLK_ZONE_COND_OFFLINE)
			null_finish_zone(dev, zone);
		zone->cond = cond;
		zone->wp = (sector_t)-1;
	}

	null_unlock_zone(dev, zone);
}

/*
 * Identify the zone containing the sector written to the configfs file,
 * then set that zone to the given condition.
 */
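/*
 * For example, assuming a configfs-created device named nullb0, writing a
 * sector number to /sys/kernel/config/nullb/nullb0/zone_readonly would set
 * the zone containing that sector to the read-only condition, and writing
 * the same sector again would restore the zone to the empty condition.
 */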
ssize_t zone_cond_store(struct nullb_device *dev, const char *page,
			size_t count, enum blk_zone_cond cond)
{
	unsigned long long sector;
	unsigned int zone_no;
	int ret;

	if (!dev->zoned) {
		pr_err("null_blk device is not zoned\n");
		return -EINVAL;
	}

	if (!dev->zones) {
		pr_err("null_blk device is not yet powered\n");
		return -EINVAL;
	}

	ret = kstrtoull(page, 0, &sector);
	if (ret < 0)
		return ret;

	zone_no = null_zone_no(dev, sector);
	if (zone_no >= dev->nr_zones) {
		pr_err("Sector out of range\n");
		return -EINVAL;
	}

	if (dev->zones[zone_no].type == BLK_ZONE_TYPE_CONVENTIONAL) {
		pr_err("Can not change condition of conventional zones\n");
		return -EINVAL;
	}

	null_set_zone_cond(dev, &dev->zones[zone_no], cond);

	return count;
}