/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks and erase counters and knows nothing about logical
 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
 * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only %0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL sub-system by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in context of the per-UBI device background thread,
 * which is also managed by the WL sub-system.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counter to free physical eraseblocks
 * with high erase counter.
 *
 * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
 * an "optimal" physical eraseblock. For example, when it is known that the
 * physical eraseblock will be "put" soon because it contains short-term data,
 * the WL sub-system may pick a free physical eraseblock with low erase
 * counter, and so forth.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
 * in a physical eraseblock, it has to be moved. Technically this is the same
 * as moving it for wear-leveling reasons.
 *
 * As it was said, for the UBI sub-system all physical eraseblocks are either
 * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
 * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
 * RB-trees, as well as (temporarily) in the @wl->pq queue.
 *
 * When the WL sub-system returns a physical eraseblock, the physical
 * eraseblock is protected from being moved for some "time". For this reason,
 * the physical eraseblock is not directly moved from the @wl->free tree to the
 * @wl->used tree. There is a protection queue in between where this
 * physical eraseblock is temporarily stored (@wl->pq).
 *
 * All this protection stuff is needed because:
 *  o we don't want to move physical eraseblocks just after we have given them
 *    to the user; instead, we first want to let users fill them up with data;
 *
 *  o there is a chance that the user will put the physical eraseblock very
 *    soon, so it makes sense not to move it for some time, but wait; this is
 *    especially important in case of "short term" physical eraseblocks.
 *
 * Physical eraseblocks stay protected only for a limited time. But the "time"
 * is measured in erase cycles in this case. This is implemented with help of
 * the protection queue. Eraseblocks are put to the tail of this queue when
 * they are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed
 * from the head of the queue on each erase operation (for any eraseblock). So
 * the length of the queue defines how many (global) erase cycles PEBs are
 * protected.
 *
 * To put it differently, each physical eraseblock has 2 main states: free and
 * used. The former state corresponds to the @wl->free tree. The latter state
 * is split up into several sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is disallowed (@wl->erroneous) because the PEB is
 *   erroneous - e.g., there was a read error;
 * o the WL movement is temporarily prohibited (@wl->pq queue);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those structures.
 *
 * Note, in this implementation, we keep a small in-RAM object for each physical
 * eraseblock. This is surely not a scalable solution. But it appears to be good
 * enough for moderately large flashes and it is simple. In future, one may
 * re-work this sub-system and make it more scalable.
 *
 * At the moment this sub-system does not utilize the sequence number, which
 * was introduced relatively recently. But it would be wise to do this because
 * the sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with low erase counter, and we need to pick the
 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
 * pick a target PEB with an average EC if our PEB is not very "old". This is
 * room for future re-works of the WL sub-system.
 */
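
/*
 * Illustrative life-cycle sketch (hypothetical caller, not part of UBI): a
 * PEB obtained from the WL sub-system is filled with data and eventually
 * handed back, at which point it is scheduled for asynchronous erasure:
 *
 *	int pnum, err;
 *
 *	pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
 *	if (pnum < 0)
 *		return pnum;	// e.g. %-ENOSPC - no free PEBs left
 *	// ... the caller writes data to @pnum via the I/O sub-system ...
 *	err = ubi_wl_put_peb(ubi, pnum, 0);	// schedule for erasure
 */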

#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counter to free physical eraseblocks with high
 * erase counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL sub-system has to pick the target
 * physical eraseblock to move to. The simplest way would be just to pick the
 * one with the highest erase counter. But in certain workloads this could lead
 * to an unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
 * situation when the picked physical eraseblock is constantly erased after the
 * data is written to it. So, we have a constant which limits the highest erase
 * counter of the free physical eraseblock to pick. Namely, the WL sub-system
 * does not pick eraseblocks with erase counter greater than the lowest erase
 * counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)

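/*
 * Worked example (assuming the common Kconfig default of 4096 for
 * %CONFIG_MTD_UBI_WL_THRESHOLD): %WL_FREE_MAX_DIFF is 2 * 4096 = 8192, so if
 * the least worn free PEB has EC 100, only free PEBs with EC below
 * 100 + 8192 = 8292 may be picked as wear-leveling targets.
 */
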
/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

/**
 * struct ubi_work - UBI work description data structure.
 * @list: a link in the list of pending works
 * @func: worker function
 * @e: physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * The @func pointer points to the worker function. If the @cancel argument is
 * not zero, the worker has to free the resources and exit immediately. The
 * worker has to return zero in case of success and a negative error code in
 * case of failure.
 */
struct ubi_work {
	struct list_head list;
	int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
	/* The below fields are only relevant to erasure works */
	struct ubi_wl_entry *e;
	int torture;
};
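
/*
 * A minimal sketch (hypothetical, not part of UBI) of a worker function
 * conforming to the @func contract above - it owns and frees the work
 * object, honours @cancel, and returns zero or a negative error code:
 *
 *	static int example_worker(struct ubi_device *ubi,
 *				  struct ubi_work *wrk, int cancel)
 *	{
 *		struct ubi_wl_entry *e = wrk->e;
 *
 *		kfree(wrk);		// the worker owns the work object
 *		if (cancel)
 *			return 0;	// free resources and exit
 *		// ... operate on @e, e.g. erase PEB e->pnum ...
 *		return 0;
 *	}
 */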

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root);
static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e);
#else
#define paranoid_check_ec(ubi, pnum, ec) 0
#define paranoid_check_in_wl_tree(e, root)
#define paranoid_check_in_pq(ubi, e) 0
#endif

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->u.rb, parent, p);
	rb_insert_color(&e->u.rb, root);
}
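
/*
 * For example (hypothetical values), because the key is the (EC, PEB number)
 * pair, a tree holding the entries (EC 10, PEB 7), (EC 10, PEB 2) and
 * (EC 12, PEB 0) is visited by an in-order walk as:
 *
 *	(10, 2) -> (10, 7) -> (12, 0)
 *
 * so 'rb_first()' always yields the least worn entry.
 */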

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	cond_resched();

	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing works at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes the semaphore in write mode.
	 */
	down_read(&ubi->work_sem);
	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure
	 * after this call as it will have been freed or reused by that
	 * time by the worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err("work failed with error code %d", err);
	up_read(&ubi->work_sem);

	return err;
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	spin_lock(&ubi->wl_lock);
	while (!ubi->free.rb_node) {
		spin_unlock(&ubi->wl_lock);

		dbg_wl("do one work synchronously");
		err = do_work(ubi);
		if (err)
			return err;

		spin_lock(&ubi->wl_lock);
	}
	spin_unlock(&ubi->wl_lock);

	return 0;
}

/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}

/**
 * prot_queue_add - add physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * This function adds @e to the tail of the protection queue @ubi->pq, where
 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 * temporarily protected from the wear-leveling worker. Note, @wl->lock has to
 * be locked.
 */
static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	int pq_tail = ubi->pq_head - 1;

	if (pq_tail < 0)
		pq_tail = UBI_PROT_QUEUE_LEN - 1;
	ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}
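
/*
 * Worked example (hypothetical numbers): with %UBI_PROT_QUEUE_LEN == 10 and
 * @ubi->pq_head == 0, the tail index wraps around:
 *
 *	pq_tail = 0 - 1;	// -1
 *	pq_tail = 10 - 1;	// 9
 *
 * The entry lands in @ubi->pq[9], the slot which 'serve_prot_queue()' will
 * reach last, i.e. only after ten more erase operations.
 */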

/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @root: the RB-tree where to look for
 * @max: highest possible erase counter difference
 *
 * This function looks for the wear-leveling entry with the highest erase
 * counter which is still lower than the lowest erase counter in the tree
 * plus @max.
 */
static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	max += e->ec;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			e = e1;
		}
	}

	return e;
}
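
/*
 * Worked example (hypothetical values): for a tree with erase counters
 * {5, 9, 14, 9000} and @max == 8192, the cut-off becomes 5 + 8192 = 8197,
 * and the function returns the entry with EC 14 - the most worn eraseblock
 * that still respects %WL_FREE_MAX_DIFF.
 */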

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 * @dtype: type of data which will be stored in this physical eraseblock
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure. Might sleep.
 */
int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
{
	int err, medium_ec;
	struct ubi_wl_entry *e, *first, *last;

	ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
		   dtype == UBI_UNKNOWN);

retry:
	spin_lock(&ubi->wl_lock);
	if (!ubi->free.rb_node) {
		if (ubi->works_count == 0) {
			ubi_assert(list_empty(&ubi->works));
			ubi_err("no free eraseblocks");
			spin_unlock(&ubi->wl_lock);
			return -ENOSPC;
		}
		spin_unlock(&ubi->wl_lock);

		err = produce_free_peb(ubi);
		if (err < 0)
			return err;
		goto retry;
	}

	switch (dtype) {
	case UBI_LONGTERM:
		/*
		 * For long term data we pick a physical eraseblock with high
		 * erase counter. But the highest erase counter we can pick is
		 * bounded by the lowest erase counter plus
		 * %WL_FREE_MAX_DIFF.
		 */
		e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		break;
	case UBI_UNKNOWN:
		/*
		 * For unknown data we pick a physical eraseblock with medium
		 * erase counter. But in no case can we pick a physical
		 * eraseblock with erase counter greater than or equal to the
		 * lowest erase counter plus %WL_FREE_MAX_DIFF.
		 */
		first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
					u.rb);
		last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);

		if (last->ec - first->ec < WL_FREE_MAX_DIFF)
			e = rb_entry(ubi->free.rb_node,
					struct ubi_wl_entry, u.rb);
		else {
			medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
			e = find_wl_entry(&ubi->free, medium_ec);
		}
		break;
	case UBI_SHORTTERM:
		/*
		 * For short term data we pick a physical eraseblock with the
		 * lowest erase counter as we expect it will be erased soon.
		 */
		e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
		break;
	default:
		BUG();
	}

	paranoid_check_in_wl_tree(e, &ubi->free);

	/*
	 * Move the physical eraseblock to the protection queue where it will
	 * be protected from being moved for some time.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	dbg_wl("PEB %d EC %d", e->pnum, e->ec);
	prot_queue_add(ubi, e);
	spin_unlock(&ubi->wl_lock);

	err = ubi_dbg_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
				   ubi->peb_size - ubi->vid_hdr_aloffset);
	if (err) {
		ubi_err("new PEB %d does not contain all 0xFF bytes", e->pnum);
		return err;
	}

	return e->pnum;
}
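
/*
 * Worked example of the @dtype hints (hypothetical erase counters): if the
 * free tree holds PEBs with ECs {100, 5000, 9000} and %WL_FREE_MAX_DIFF is
 * 8192, then:
 *
 *	ubi_wl_get_peb(ubi, UBI_SHORTTERM);	// EC 100 - least worn
 *	ubi_wl_get_peb(ubi, UBI_LONGTERM);	// EC 5000 - most worn < 8292
 *	ubi_wl_get_peb(ubi, UBI_UNKNOWN);	// medium pick via find_wl_entry()
 */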

/**
 * prot_queue_del - remove a physical eraseblock from the protection queue.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function deletes PEB @pnum from the protection queue and returns zero
 * in case of success and %-ENODEV if the PEB was not found.
 */
static int prot_queue_del(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	e = ubi->lookuptbl[pnum];
	if (!e)
		return -ENODEV;

	if (paranoid_check_in_pq(ubi, e))
		return -ENODEV;

	list_del(&e->u.list);
	dbg_wl("deleted PEB %d from the protection queue", e->pnum);
	return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
		      int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = paranoid_check_ec(ubi, e->pnum, e->ec);
	if (err)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err("erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * serve_prot_queue - check if it is time to stop protecting PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from the
 * tail of the protection queue. These PEBs have been protected for long enough
 * and should be moved to the used tree.
 */
static void serve_prot_queue(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e, *tmp;
	int count;

	/*
	 * There may be several protected physical eraseblocks to remove,
	 * process them all.
	 */
repeat:
	count = 0;
	spin_lock(&ubi->wl_lock);
	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
		dbg_wl("PEB %d EC %d protection over, move to used tree",
			e->pnum, e->ec);

		list_del(&e->u.list);
		wl_tree_add(e, &ubi->used);
		if (count++ > 32) {
			/*
			 * Let's be nice and avoid holding the spinlock for
			 * too long.
			 */
			spin_unlock(&ubi->wl_lock);
			cond_resched();
			goto repeat;
		}
	}

	ubi->pq_head += 1;
	if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
		ubi->pq_head = 0;
	ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
	spin_unlock(&ubi->wl_lock);
}
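
/*
 * For example (hypothetical state): with %UBI_PROT_QUEUE_LEN == 10 and
 * @ubi->pq_head == 9, one call drains @ubi->pq[9] into the used tree and
 * wraps @ubi->pq_head back to 0. Since one list is served per erase, an
 * entry added at the tail by 'prot_queue_add()' survives exactly
 * %UBI_PROT_QUEUE_LEN erase operations before it becomes movable.
 */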

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;
	if (ubi->thread_enabled)
		wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int torture)
{
	struct ubi_work *wl_wrk;

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->torture = torture;

	schedule_ubi_work(ubi, wl_wrk);
	return 0;
}

/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function copies a more worn out physical eraseblock to a less worn out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int cancel)
{
	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
	int vol_id = -1, uninitialized_var(lnum);
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_hdr *vid_hdr;

	kfree(wrk);
	if (cancel)
		return 0;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put);

	if (!ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the queue to be erased. Cancel movement - it will be
		 * triggered again when a free physical eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
		goto out_cancel;
	}

	if (!ubi->scrub.rb_node) {
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ enough, start wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);
			goto out_cancel;
		}
		paranoid_check_in_wl_tree(e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		/* Perform scrubbing */
		scrubbing = 1;
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		paranoid_check_in_wl_tree(e1, &ubi->scrub);
		rb_erase(&e1->u.rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}

	paranoid_check_in_wl_tree(e2, &ubi->free);
	rb_erase(&e2->u.rb, &ubi->free);
	ubi->move_from = e1;
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased. The
	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
	 * which is being moved was unmapped.
	 */

	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err == UBI_IO_PEB_FREE) {
			/*
			 * We are trying to move a PEB without a VID header.
			 * UBI always writes VID headers shortly after the PEB
			 * was given, so we have a situation when it has not
			 * yet had a chance to write it, because it was
			 * preempted. So add this PEB to the protection queue
			 * for now, because presumably more data will be
			 * written there (including the missing VID header),
			 * and then we'll move it.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);
			protect = 1;
			goto out_not_moved;
		}

		ubi_err("error %d while reading VID header from PEB %d",
			err, e1->pnum);
		goto out_error;
	}

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
	if (err) {
		if (err == MOVE_CANCEL_RACE) {
			/*
			 * The LEB has not been moved because the volume is
			 * being deleted or the PEB has been put meanwhile. We
			 * should prevent this PEB from being selected for
			 * wear-leveling movement again, so put it to the
			 * protection queue.
			 */
			protect = 1;
			goto out_not_moved;
		}

		if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
		    err == MOVE_TARGET_RD_ERR) {
			/*
			 * Target PEB had bit-flips or write error - torture it.
			 */
			torture = 1;
			goto out_not_moved;
		}

		if (err == MOVE_SOURCE_RD_ERR) {
			/*
			 * An error happened while reading the source PEB. Do
			 * not switch to R/O mode in this case, and give the
			 * upper layers a possibility to recover from this,
			 * e.g. by unmapping corresponding LEB. Instead, just
			 * put this PEB to the @ubi->erroneous list to prevent
			 * UBI from trying to move it over and over again.
			 */
			if (ubi->erroneous_peb_count > ubi->max_erroneous) {
				ubi_err("too many erroneous eraseblocks (%d)",
					ubi->erroneous_peb_count);
				goto out_error;
			}
			erroneous = 1;
			goto out_not_moved;
		}

		if (err < 0)
			goto out_error;

		ubi_assert(0);
	}

	/* The PEB has been successfully moved */
	if (scrubbing)
		ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
			e1->pnum, vol_id, lnum, e2->pnum);
	ubi_free_vid_hdr(ubi, vid_hdr);

	spin_lock(&ubi->wl_lock);
	if (!ubi->move_to_put) {
		wl_tree_add(e2, &ubi->used);
		e2 = NULL;
	}
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e1, 0);
	if (err) {
		kmem_cache_free(ubi_wl_entry_slab, e1);
		if (e2)
			kmem_cache_free(ubi_wl_entry_slab, e2);
		goto out_ro;
	}

	if (e2) {
		/*
		 * Well, the target PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
		       e2->pnum, vol_id, lnum);
		err = schedule_erase(ubi, e2, 0);
		if (err) {
			kmem_cache_free(ubi_wl_entry_slab, e2);
			goto out_ro;
		}
	}

	dbg_wl("done");
	mutex_unlock(&ubi->move_mutex);
	return 0;

	/*
	 * For some reason the LEB was not moved - it might be an error, it
	 * might be something else. @e1 was not changed, so return it back.
	 * @e2 might have been changed, schedule it for erasure.
	 */
out_not_moved:
	if (vol_id != -1)
		dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
		       e1->pnum, vol_id, lnum, e2->pnum, err);
	else
		dbg_wl("cancel moving PEB %d to PEB %d (%d)",
		       e1->pnum, e2->pnum, err);
	spin_lock(&ubi->wl_lock);
	if (protect)
		prot_queue_add(ubi, e1);
	else if (erroneous) {
		wl_tree_add(e1, &ubi->erroneous);
		ubi->erroneous_peb_count += 1;
	} else if (scrubbing)
		wl_tree_add(e1, &ubi->scrub);
	else
		wl_tree_add(e1, &ubi->used);
	ubi_assert(!ubi->move_to_put);
	ubi->move_from = ubi->move_to = NULL;
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_hdr(ubi, vid_hdr);
	err = schedule_erase(ubi, e2, torture);
	if (err) {
		kmem_cache_free(ubi_wl_entry_slab, e2);
		goto out_ro;
	}
	mutex_unlock(&ubi->move_mutex);
	return 0;

out_error:
	/* Print the LEB information only if the VID header has been read */
	if (vol_id != -1)
		ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d",
			err, e1->pnum, vol_id, lnum, e2->pnum);
	else
		ubi_err("error %d while moving PEB %d to PEB %d",
			err, e1->pnum, e2->pnum);
	spin_lock(&ubi->wl_lock);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_hdr(ubi, vid_hdr);
	kmem_cache_free(ubi_wl_entry_slab, e1);
	kmem_cache_free(ubi_wl_entry_slab, e2);

out_ro:
	ubi_ro_mode(ubi);
	mutex_unlock(&ubi->move_mutex);
	ubi_assert(err != 0);
	return err < 0 ? err : -EIO;

out_cancel:
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
	mutex_unlock(&ubi->move_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;
}

/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi)
{
	int err = 0;
	struct ubi_wl_entry *e1;
	struct ubi_wl_entry *e2;
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled)
		/* Wear-leveling is already in the work queue */
		goto out_unlock;

	/*
	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
	 */
	if (!ubi->scrub.rb_node) {
		if (!ubi->used.rb_node || !ubi->free.rb_node)
			/* No physical eraseblocks - no deal */
			goto out_unlock;

		/*
		 * We schedule wear-leveling only if the difference between the
		 * lowest erase counter of used physical eraseblocks and a high
		 * erase counter of free physical eraseblocks is greater than
		 * %UBI_WL_THRESHOLD.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
			goto out_unlock;
		dbg_wl("schedule wear-leveling");
	} else
		dbg_wl("schedule scrubbing");

	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		err = -ENOMEM;
		goto out_cancel;
	}

	wrk->func = &wear_leveling_worker;
	schedule_ubi_work(ubi, wrk);
	return err;

out_cancel:
	spin_lock(&ubi->wl_lock);
	ubi->wl_scheduled = 0;
out_unlock:
	spin_unlock(&ubi->wl_lock);
	return err;
}

/**
 * erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel)
{
	struct ubi_wl_entry *e = wl_wrk->e;
	int pnum = e->pnum, err, need;

	if (cancel) {
		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
		kfree(wl_wrk);
		kmem_cache_free(ubi_wl_entry_slab, e);
		return 0;
	}

	dbg_wl("erase PEB %d EC %d", pnum, e->ec);

	err = sync_erase(ubi, e, wl_wrk->torture);
	if (!err) {
		/* Fine, we've erased it successfully */
		kfree(wl_wrk);

		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->free);
		spin_unlock(&ubi->wl_lock);

		/*
		 * One more erase operation has happened, take care about
		 * protected physical eraseblocks.
		 */
		serve_prot_queue(ubi);

		/* And take care about wear-leveling */
		err = ensure_wear_leveling(ubi);
		return err;
	}

	ubi_err("failed to erase PEB %d, error %d", pnum, err);
	kfree(wl_wrk);

	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
	    err == -EBUSY) {
		int err1;

		/* Re-schedule the PEB for erasure */
		err1 = schedule_erase(ubi, e, 0);
		if (err1) {
			err = err1;
			goto out_ro;
		}
		return err;
	}

	/* Free @e only now - the re-scheduling branch above still needs it */
	kmem_cache_free(ubi_wl_entry_slab, e);

	if (err != -EIO)
		/*
		 * If this is not %-EIO, we have no idea what to do. Scheduling
		 * this physical eraseblock for erasure again would cause
		 * errors again and again. Well, let's switch to R/O mode.
		 */
		goto out_ro;

	/* It is %-EIO, the PEB went bad */

	if (!ubi->bad_allowed) {
		ubi_err("bad physical eraseblock %d detected", pnum);
		goto out_ro;
	}

	spin_lock(&ubi->volumes_lock);
	need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
	if (need > 0) {
		need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
		ubi->avail_pebs -= need;
		ubi->rsvd_pebs += need;
		ubi->beb_rsvd_pebs += need;
		if (need > 0)
			ubi_msg("reserve more %d PEBs", need);
	}

	if (ubi->beb_rsvd_pebs == 0) {
		spin_unlock(&ubi->volumes_lock);
		ubi_err("no reserved physical eraseblocks");
		goto out_ro;
	}
	spin_unlock(&ubi->volumes_lock);

	ubi_msg("mark PEB %d as bad", pnum);
	err = ubi_io_mark_bad(ubi, pnum);
	if (err)
		goto out_ro;

	spin_lock(&ubi->volumes_lock);
	ubi->beb_rsvd_pebs -= 1;
	ubi->bad_peb_count += 1;
	ubi->good_peb_count -= 1;
	ubi_calculate_reserved(ubi);
	if (ubi->beb_rsvd_pebs)
		ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
	else
		ubi_warn("last PEB from the reserved pool was used");
	spin_unlock(&ubi->volumes_lock);

	return err;

out_ro:
	ubi_ro_mode(ubi);
	return err;
}

/**
 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * occurred to this @pnum and it has to be tested. This function returns zero
 * in case of success, and a negative error code in case of failure.
 */
int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
{
	int err;
	struct ubi_wl_entry *e;

	dbg_wl("PEB %d", pnum);
	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from) {
		/*
		 * User is putting the physical eraseblock which was selected to
		 * be moved. It will be scheduled for erasure in the
		 * wear-leveling worker.
		 */
		dbg_wl("PEB %d is being moved, wait", pnum);
		spin_unlock(&ubi->wl_lock);

		/* Wait for the WL worker by taking the @ubi->move_mutex */
		mutex_lock(&ubi->move_mutex);
		mutex_unlock(&ubi->move_mutex);
		goto retry;
	} else if (e == ubi->move_to) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * as the target the data is moved to. It may happen if the EBA
		 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
		 * but the WL sub-system has not put the PEB to the "used" tree
		 * yet, but it is about to do this. So we just set a flag which
		 * will tell the WL worker that the PEB is not needed anymore
		 * and should be scheduled for erasure.
		 */
		dbg_wl("PEB %d is the target of data moving", pnum);
		ubi_assert(!ubi->move_to_put);
		ubi->move_to_put = 1;
		spin_unlock(&ubi->wl_lock);
		return 0;
	} else {
		if (in_wl_tree(e, &ubi->used)) {
			paranoid_check_in_wl_tree(e, &ubi->used);
			rb_erase(&e->u.rb, &ubi->used);
		} else if (in_wl_tree(e, &ubi->scrub)) {
			paranoid_check_in_wl_tree(e, &ubi->scrub);
			rb_erase(&e->u.rb, &ubi->scrub);
		} else if (in_wl_tree(e, &ubi->erroneous)) {
			paranoid_check_in_wl_tree(e, &ubi->erroneous);
			rb_erase(&e->u.rb, &ubi->erroneous);
			ubi->erroneous_peb_count -= 1;
			ubi_assert(ubi->erroneous_peb_count >= 0);
			/* Erroneous PEBs should be tortured */
			torture = 1;
		} else {
			err = prot_queue_del(ubi, e->pnum);
			if (err) {
				ubi_err("PEB %d not found", pnum);
				ubi_ro_mode(ubi);
				spin_unlock(&ubi->wl_lock);
				return err;
			}
		}
	}
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e, torture);
	if (err) {
		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->used);
		spin_unlock(&ubi->wl_lock);
	}

	return err;
}

/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
 * needs scrubbing. This function schedules a physical eraseblock for
 * scrubbing which is done in the background. This function returns zero in
 * case of success and a negative error code in case of failure.
 */
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	dbg_msg("schedule PEB %d for scrubbing", pnum);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
				   in_wl_tree(e, &ubi->erroneous)) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	if (e == ubi->move_to) {
		/*
		 * This physical eraseblock was used to move data to. The data
		 * was moved but the PEB was not yet inserted to the proper
		 * tree. We should just wait a little and let the WL worker
		 * proceed.
		 */
		spin_unlock(&ubi->wl_lock);
		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
		yield();
		goto retry;
	}

	if (in_wl_tree(e, &ubi->used)) {
		paranoid_check_in_wl_tree(e, &ubi->used);
		rb_erase(&e->u.rb, &ubi->used);
	} else {
		int err;

		err = prot_queue_del(ubi, e->pnum);
		if (err) {
			ubi_err("PEB %d not found", pnum);
			ubi_ro_mode(ubi);
			spin_unlock(&ubi->wl_lock);
			return err;
		}
	}

	wl_tree_add(e, &ubi->scrub);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Technically scrubbing is the same as wear-leveling, so it is done
	 * by the WL worker.
	 */
	return ensure_wear_leveling(ubi);
}

/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_wl_flush(struct ubi_device *ubi)
{
	int err;

	/*
	 * Erase while the pending works queue is not empty, but not more than
	 * the number of currently pending works.
	 */
	dbg_wl("flush (%d pending works)", ubi->works_count);
	while (ubi->works_count) {
		err = do_work(ubi);
		if (err)
			return err;
	}

	/*
	 * Make sure all the works which have been done in parallel are
	 * finished.
	 */
	down_write(&ubi->work_sem);
	up_write(&ubi->work_sem);

	/*
	 * And in case the last one was the WL worker and it canceled the LEB
	 * movement, flush again.
	 */
	while (ubi->works_count) {
		dbg_wl("flush more (%d pending works)", ubi->works_count);
		err = do_work(ubi);
		if (err)
			return err;
	}

	return 0;
}
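
/*
 * Why the second loop above is needed, on a hypothetical timeline: while the
 * first loop drains the queue, a wear-leveling worker may still be running
 * under the read side of @ubi->work_sem; if it cancels the LEB movement it
 * schedules the involved PEBs for erasure, so @ubi->works_count may be
 * non-zero again once 'down_write()'/'up_write()' have synchronized with it.
 */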

/**
 * tree_destroy - destroy an RB-tree.
 * @root: the root of the tree to destroy
 */
static void tree_destroy(struct rb_root *root)
{
	struct rb_node *rb;
	struct ubi_wl_entry *e;

	rb = root->rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			e = rb_entry(rb, struct ubi_wl_entry, u.rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &e->u.rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			kmem_cache_free(ubi_wl_entry_slab, e);
		}
	}
}

/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */
int ubi_thread(void *u)
{
	int failures = 0;
	struct ubi_device *ubi = u;

	ubi_msg("background thread \"%s\" started, PID %d",
		ubi->bgt_name, task_pid_nr(current));

	set_freezable();
	for (;;) {
		int err;

		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

		spin_lock(&ubi->wl_lock);
		if (list_empty(&ubi->works) || ubi->ro_mode ||
			       !ubi->thread_enabled) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock(&ubi->wl_lock);
			schedule();
			continue;
		}
		spin_unlock(&ubi->wl_lock);

		err = do_work(ubi);
		if (err) {
			ubi_err("%s: work failed with error code %d",
				ubi->bgt_name, err);
			if (failures++ > WL_MAX_FAILURES) {
				/*
				 * Too many failures, disable the thread and
				 * switch to read-only mode.
				 */
				ubi_msg("%s: %d consecutive failures",
					ubi->bgt_name, WL_MAX_FAILURES);
				ubi_ro_mode(ubi);
				ubi->thread_enabled = 0;
				continue;
			}
		} else
			failures = 0;

		cond_resched();
	}

	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
	return 0;
}

/**
 * cancel_pending - cancel all pending works.
 * @ubi: UBI device description object
 */
static void cancel_pending(struct ubi_device *ubi)
{
	while (!list_empty(&ubi->works)) {
		struct ubi_work *wrk;

		wrk = list_entry(ubi->works.next, struct ubi_work, list);
		list_del(&wrk->list);
		wrk->func(ubi, wrk, 1);
		ubi->works_count -= 1;
		ubi_assert(ubi->works_count >= 0);
	}
}

/**
 * ubi_wl_init_scan - initialize the WL sub-system using scanning information.
 * @ubi: UBI device description object
 * @si: scanning information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */
int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
{
	int err, i;
	struct rb_node *rb1, *rb2;
	struct ubi_scan_volume *sv;
	struct ubi_scan_leb *seb, *tmp;
	struct ubi_wl_entry *e;

	ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
	spin_lock_init(&ubi->wl_lock);
	mutex_init(&ubi->move_mutex);
	init_rwsem(&ubi->work_sem);
	ubi->max_ec = si->max_ec;
	INIT_LIST_HEAD(&ubi->works);

	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

	err = -ENOMEM;
	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
	if (!ubi->lookuptbl)
		return err;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
		INIT_LIST_HEAD(&ubi->pq[i]);
	ubi->pq_head = 0;

	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(ubi_wl_entry_slab, e);
			goto out_free;
		}
	}

	list_for_each_entry(seb, &si->free, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi_assert(e->ec >= 0);
		wl_tree_add(e, &ubi->free);
		ubi->lookuptbl[e->pnum] = e;
	}

	list_for_each_entry(seb, &si->corr, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(ubi_wl_entry_slab, e);
			goto out_free;
		}
	}

	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
			cond_resched();

			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
			if (!e)
				goto out_free;

			e->pnum = seb->pnum;
			e->ec = seb->ec;
			ubi->lookuptbl[e->pnum] = e;
			if (!seb->scrub) {
				dbg_wl("add PEB %d EC %d to the used tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->used);
			} else {
				dbg_wl("add PEB %d EC %d to the scrub tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->scrub);
			}
		}
	}

	if (ubi->avail_pebs < WL_RESERVED_PEBS) {
		ubi_err("not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, WL_RESERVED_PEBS);
		goto out_free;
	}
	ubi->avail_pebs -= WL_RESERVED_PEBS;
	ubi->rsvd_pebs += WL_RESERVED_PEBS;

	/* Schedule wear-leveling if needed */
	err = ensure_wear_leveling(ubi);
	if (err)
		goto out_free;

	return 0;

out_free:
	cancel_pending(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
	return err;
}

/**
 * protection_queue_destroy - destroy the protection queue.
 * @ubi: UBI device description object
 */
static void protection_queue_destroy(struct ubi_device *ubi)
{
	int i;
	struct ubi_wl_entry *e, *tmp;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
			list_del(&e->u.list);
			kmem_cache_free(ubi_wl_entry_slab, e);
		}
	}
}

/**
 * ubi_wl_close - close the wear-leveling sub-system.
 * @ubi: UBI device description object
 */
void ubi_wl_close(struct ubi_device *ubi)
{
	dbg_wl("close the WL sub-system");
	cancel_pending(ubi);
	protection_queue_destroy(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->erroneous);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
}

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID

/**
 * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock
 * @pnum matches @ec, %1 if it does not, and a negative error code if an
 * error occurred.
 */
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
	int err;
	long long read_ec;
	struct ubi_ec_hdr *ec_hdr;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */
		err = 0;
		goto out_free;
	}

	read_ec = be64_to_cpu(ec_hdr->ec);
	if (ec != read_ec) {
		ubi_err("paranoid check failed for PEB %d", pnum);
		ubi_err("read EC is %lld, should be %d", read_ec, ec);
		ubi_dbg_dump_stack();
		err = 1;
	} else
		err = 0;

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
 * is not.
 */
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root)
{
	if (in_wl_tree(e, root))
		return 0;

	ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p",
		e->pnum, e->ec, root);
	ubi_dbg_dump_stack();
	return -EINVAL;
}

/**
 * paranoid_check_in_pq - check if wear-leveling entry is in the protection
 *                        queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
 */
static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	struct ubi_wl_entry *p;
	int i;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
		list_for_each_entry(p, &ubi->pq[i], u.list)
			if (p == e)
				return 0;

	ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue",
		e->pnum, e->ec);
	ubi_dbg_dump_stack();
	return -EINVAL;
}
#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */