/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"

/**
 *	jffs2_reserve_space - request physical space to write nodes to flash
 *	@c: superblock info
 *	@minsize: Minimum acceptable size of allocation
 *	@len: Returned value of allocation length
 *	@prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *	@sumsize: Summary size requested, or JFFS2_SUMMARY_NOSUM_SIZE for no summary
 *
 *	Requests a block of physical space on the flash. Returns zero for success
 *	and puts 'len' into the appropriate place, or returns -ENOSPC or other
 *	error if appropriate.
 *
 *	If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 *	allocation semaphore, to prevent more than one allocation from being
 *	active at any time. The semaphore is later released by jffs2_complete_reservation().
 *
 *	jffs2_reserve_space() may trigger garbage collection in order to make room
 *	for the requested allocation.
 */

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize);

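/*
 * Illustrative caller pattern (a sketch of how the write paths in write.c
 * use this; error handling elided):
 *
 *	ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &alloclen,
 *				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
 *	if (ret)
 *		return ret;
 *	... write the node (at most 'alloclen' bytes), e.g. via jffs2_write_dnode() ...
 *	jffs2_complete_reservation(c);
 */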
int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
	mutex_lock(&c->alloc_sem);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			uint32_t dirty, avail;

			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If one block is actually erased, it is no longer counted as dirty_space
			 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
			 * with c->nr_erasing_blocks * c->sector_size again.
			 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
			 * This helps us to force gc and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This affects the sum only until gc has finished checking
			 * the nodes.
			 */
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
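			/* Worked example (hypothetical numbers): with sector_size
			 * 0x10000, one block on erase_pending_list (counted in
			 * dirty_size and nr_erasing_blocks), one block mid-erase
			 * (counted in erasing_size and nr_erasing_blocks) and one
			 * block on erasable_list (counted in dirty_size only) give
			 * dirty = 0x20000 + 0x10000 - 2 * 0x10000 + unchecked_size,
			 * i.e. only the erasable block still counts as reclaimable.
			 */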
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
					break;
				}
				D1(printk(KERN_DEBUG "dirty size 0x%08x (including unchecked_size 0x%08x) < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size, c->nospc_dirty_size));

				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calc possibly available space. Possibly available means that we
			 * don't know whether the unchecked size contains obsoleted nodes, which could give us some
			 * more usable space. This affects the sum only until gc has finished checking
			 * the nodes.
			 * Return -ENOSPC if the maximum possibly available space is less than or equal to
			 * blocksneeded * sector_size.
			 * This prevents endless gc looping on a filesystem which is nearly full, even if
			 * the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ((avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
					break;
				}

				D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size));
				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			mutex_unlock(&c->alloc_sem);

			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);

			if (ret == -EAGAIN) {
				spin_lock(&c->erase_completion_lock);
				if (c->nr_erasing_blocks &&
				    list_empty(&c->erase_pending_list) &&
				    list_empty(&c->erase_complete_list)) {
					DECLARE_WAITQUEUE(wait, current);
					set_current_state(TASK_UNINTERRUPTIBLE);
					add_wait_queue(&c->erase_wait, &wait);
					D1(printk(KERN_DEBUG "%s waiting for erase to complete\n", __func__));
					spin_unlock(&c->erase_completion_lock);

					schedule();
					remove_wait_queue(&c->erase_wait, &wait);
				} else
					spin_unlock(&c->erase_completion_lock);
			} else if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			mutex_lock(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
	if (ret)
		mutex_unlock(&c->alloc_sem);
	return ret;
}

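/*
 * Variant of jffs2_reserve_space() used on the garbage-collection path.
 * Note that it does not take c->alloc_sem (the GC code is expected to hold
 * it already) and skips the GC/wait logic above: it just loops on
 * jffs2_do_reserve_space(), which may erase pending blocks synchronously.
 */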
int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
			   uint32_t *len, uint32_t sumsize)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

	return ret;
}


/* Classify nextblock (clean, dirty or verydirty) and force selection of another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	if (c->nextblock == NULL) {
		D1(printk(KERN_DEBUG "jffs2_close_nextblock: Erase block at 0x%08x has already been placed in a list\n",
		  jeb->offset));
		return;
	}
	/* Check if we have a dirty block now, or if it was dirty already */
	if (ISDIRTY(jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
		  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;
}

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_move_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_garbage_collect_trigger(c);
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
				  ejeb->offset));
		}

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_pending_wbuf_list)) {
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				   c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
				   list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	/* adjust write buffer offset, else we get a non-contiguous write bug */
	if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
		c->wbuf_ofs = 0xffffffff;
#endif

	D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));

	return 0;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;				/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
							/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d , jeb->free=%d ,"
						"summary->size=%d , sumsize=%d\n",
						minsize, jeb->free_size,
						c->summary->sum_size, sumsize);
		}

		/* Is there enough space for writing out the current node, or do we
		   have to write out the summary information now, close this jeb and
		   select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Writing out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the summary
				   information; disable summary for this jeb and free the
				   collected information
				 */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* always keep a valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
			if (ret)
				return ret;
			/* Just lock it again and continue. Nothing much can change because
			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
			   we hold c->erase_completion_lock in the majority of this function...
			   but that's a question for another (more caffeine-rich) day. */
			spin_lock(&c->erase_completion_lock);

			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero, but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n",
		  *len, jeb->offset + (c->sector_size - jeb->free_size)));
	return 0;
}

/**
 *	jffs2_add_physical_node_ref - add a physical node reference to the list
 *	@c: superblock info
 *	@ofs: offset of the node on flash, with the REF_* flag bits in the low two bits
 *	@len: length of this physical node
 *	@ic: inode cache (or other owner) to link the new node reference into
 *
 *	Should only be used to report nodes for which space has been allocated
 *	by jffs2_reserve_space.
 *
 *	Must be called with the alloc_sem held.
 */

struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	jeb = &c->blocks[ofs / c->sector_size];

	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n",
		  ofs & ~3, ofs & 3, len));
	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
	   even after refiling c->nextblock */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		printk(KERN_WARNING "argh. node added in wrong place at 0x%08x(%d)\n", ofs & ~3, ofs & 3);
		if (c->nextblock)
			printk(KERN_WARNING "nextblock 0x%08x", c->nextblock->offset);
		else
			printk(KERN_WARNING "No nextblock");
		printk(", expected at %08x\n", jeb->offset + (c->sector_size - jeb->free_size));
		return ERR_PTR(-EINVAL);
	}
	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}

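/*
 * Called once the node(s) for which space was reserved have been written.
 * Drops c->alloc_sem (taken in jffs2_reserve_space()) and pokes the GC
 * thread, since the free/dirty accounting may have changed.
 */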
void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
	spin_lock(&c->erase_completion_lock);
	jffs2_garbage_collect_trigger(c);
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->alloc_sem);
}

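/* Linear scan; used below only to test membership of the short bad_used_list */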
static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			D1(printk("%p is on list at %p\n", obj, head));
			return 1;
		}
	}
	return 0;
}

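/*
 * Mark a node obsolete: update the per-block and filesystem-wide space
 * accounting, refile the eraseblock on the appropriate list if necessary,
 * and -- where the flash allows rewriting in place (jffs2_can_mark_obsolete())
 * -- clear the ACCURATE bit on the medium so that later scans ignore the node.
 */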
void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if (unlikely(!ref)) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		mutex_lock(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len));
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}

	// Make sure that wasted size is taken into account
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		D1(printk("Dirtying\n"));
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset));
				addedsize = 0; /* To fool the refiling code later */
			} else {
				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset));
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		D1(printk("Wasting\n"));
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
			c->gcblock = NULL;
		} else {
			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_garbage_collect_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		D1(printk(KERN_DEBUG "Done OK\n"));
	} else if (jeb == c->gcblock) {
		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
		(c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */

	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
		goto out_erase_sem;
	}
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
			case RAWNODE_CLASS_XATTR_DATUM:
				jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
				break;
			case RAWNODE_CLASS_XATTR_REF:
				jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
				break;
#endif
			default:
				if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
					jffs2_del_ino_cache(c, ic);
				break;
		}
		spin_unlock(&c->erase_completion_lock);
	}

 out_erase_sem:
	mutex_unlock(&c->erase_free_sem);
}

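/*
 * Decide whether the background GC thread has useful work to do: erases
 * pending or complete, unchecked space still to be scanned, or enough dirty
 * space (or very dirty blocks) to make a GC pass worthwhile.
 */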
int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;
	int nr_very_dirty = 0;
	struct jffs2_eraseblock *jeb;

	if (!list_empty(&c->erase_complete_list) ||
	    !list_empty(&c->erase_pending_list))
		return 1;

	if (c->unchecked_size) {
		D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino));
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If one block is actually erased, it is no longer counted as dirty_space
	 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
	 * with c->nr_erasing_blocks * c->sector_size again.
	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
	 * This helps us to force gc and eventually pick a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
			(dirty > c->nospc_dirty_size))
		ret = 1;

	list_for_each_entry(jeb, &c->very_dirty_list, list) {
		nr_very_dirty++;
		if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
			ret = 1;
			/* In debug mode, actually go through and count them all */
			D1(continue);
			break;
		}
	}

	D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, nr_very_dirty, ret?"yes":"no"));

	return ret;
}