Lines Matching defs:wbuf

76 struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
80 wbuf->lnum, wbuf->offs + wbuf->used, gc_lnum,
81 c->leb_size - wbuf->offs - wbuf->used);
83 err = ubifs_wbuf_sync_nolock(wbuf);
95 err = ubifs_wbuf_sync_nolock(wbuf);
104 err = ubifs_wbuf_seek_nolock(wbuf, gc_lnum, 0);
307 * @wbuf: write-buffer to move node to
309 * This function moves node @snod to @wbuf, changes TNC correspondingly, and
314 struct ubifs_scan_node *snod, struct ubifs_wbuf *wbuf)
316 int err, new_lnum = wbuf->lnum, new_offs = wbuf->offs + wbuf->used;
319 err = ubifs_wbuf_write_nolock(wbuf, snod->node, snod->len);
345 struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
347 if (wbuf->lnum == -1) {
368 avail = c->leb_size - wbuf->offs - wbuf->used;
376 err = move_node(c, sleb, snod, wbuf);
383 avail = c->leb_size - wbuf->offs - wbuf->used;
401 err = move_node(c, sleb, snod, wbuf);
445 err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
465 struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
468 ubifs_assert(c->gc_lnum != -1 || wbuf->offs + wbuf->used == 0 ||
471 ubifs_assert(wbuf->lnum != lnum);
579 err = ubifs_wbuf_sync_nolock(wbuf);
644 struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
652 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
660 ubifs_assert(!wbuf->used);
696 * the wbuf lock, or while we have been running GC. In that
712 space_before = c->leb_size - wbuf->offs - wbuf->used;
713 if (wbuf->lnum == -1)
752 space_after = c->leb_size - wbuf->offs - wbuf->used;
799 err = ubifs_wbuf_sync_nolock(wbuf);
807 mutex_unlock(&wbuf->io_mutex);
813 ubifs_wbuf_sync_nolock(wbuf);
815 mutex_unlock(&wbuf->io_mutex);
913 struct ubifs_wbuf *wbuf;
916 wbuf = &c->jheads[GCHD].wbuf;
917 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
932 mutex_unlock(&wbuf->io_mutex);
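
The fragments at source lines 307-319 are the kernel-doc comment and body of move_node(), which the loop at lines 376 and 401 calls for each scanned node (this file is apparently fs/ubifs/gc.c). Only the signature tail, the new_lnum/new_offs computation and the ubifs_wbuf_write_nolock() call are visible in the matches above; the TNC update and the scan-list clean-up in the sketch below are assumptions about how "changes TNC correspondingly" is carried out, not lines shown in this listing.

/* Minimal sketch reconstructed from the matched lines; relies on the
 * UBIFS internal header for ubifs_info, ubifs_wbuf, ubifs_scan_node, etc. */
#include <linux/slab.h>		/* kfree() */
#include "ubifs.h"

static int move_node(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
		     struct ubifs_scan_node *snod, struct ubifs_wbuf *wbuf)
{
	/* The node lands where the GC head write-buffer currently points
	 * (source line 316) */
	int err, new_lnum = wbuf->lnum, new_offs = wbuf->offs + wbuf->used;

	/* Append the scanned node to the write-buffer (source line 319) */
	err = ubifs_wbuf_write_nolock(wbuf, snod->node, snod->len);
	if (err)
		return err;

	/* Assumed: re-point the TNC entry from the old LEB/offset to the
	 * node's new position behind the write-buffer */
	err = ubifs_tnc_replace(c, &snod->key, sleb->lnum, snod->offs,
				new_lnum, new_offs, snod->len);

	/* Assumed: drop the node from the scan list once it has been moved */
	list_del(&snod->list);
	kfree(snod);
	return err;
}

The caller keeps invoking this helper only while the node still fits in the GC head LEB, re-checking avail = c->leb_size - wbuf->offs - wbuf->used (source lines 368 and 383) between moves.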