Lines matching refs:offs in fs/ubifs/io.c

38  * have to make sure that the write-buffer offset (@wbuf->offs) becomes aligned
87 int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs,
92 err = ubi_read(c->ubi, lnum, buf, offs, len);
99 len, lnum, offs, err);
105 int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
114 err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
116 err = dbg_leb_write(c, lnum, buf, offs, len);
119 len, lnum, offs, err);
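
The matches at 87-119 are the ubifs_leb_read()/ubifs_leb_write() wrappers, which forward to ubi_read()/ubi_leb_write() and, on failure, report the length, LEB number and offset that were attempted. Below is a minimal standalone sketch of that wrap-and-report shape; backing_write() is a hypothetical stand-in for the UBI call, and the 128KiB eraseblock size is an assumption:

    #include <stdio.h>

    /* Hypothetical stand-in for ubi_leb_write(); it only fails when the
     * write would run past an assumed 128KiB eraseblock. */
    static int backing_write(int lnum, const void *buf, int offs, int len)
    {
        (void)lnum;
        (void)buf;
        return (offs + len > 128 * 1024) ? -22 /* -EINVAL */ : 0;
    }

    /* Wrap the raw call and report length/LEB/offset on failure, the
     * same shape as the ubifs_leb_write() match above. */
    static int leb_write(int lnum, const void *buf, int offs, int len)
    {
        int err = backing_write(lnum, buf, offs, len);

        if (err)
            fprintf(stderr, "writing %d bytes to LEB %d:%d failed, error %d\n",
                    len, lnum, offs, err);
        return err;
    }

    int main(void)
    {
        char data[64] = { 0 };

        return leb_write(3, data, 0, sizeof(data)) ? 1 : 0;
    }
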
221 * @offs: offset within the logical eraseblock
245 int lnum, int offs, int quiet, int must_chk_crc)
251 ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
252 ubifs_assert(c, !(offs & 7) && offs < c->leb_size);
273 if (node_len + offs > c->leb_size)
305 ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
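
ubifs_check_node() (matches 221-305) guards @offs with the assertions shown: non-negative, 8-byte aligned, inside the LEB, and the node length may not run past the end of the eraseblock. The same bounds checks as a standalone sketch, with leb_size passed in as an assumed parameter:

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirror the checks in the ubifs_check_node() matches: offs must be
     * non-negative, 8-byte aligned, and the node must fit in the LEB. */
    static bool node_offs_ok(int offs, int node_len, int leb_size)
    {
        if (offs < 0 || offs >= leb_size)
            return false;
        if (offs & 7)                   /* !(offs & 7) in the original */
            return false;
        if (node_len + offs > leb_size) /* the "bad node" case */
            return false;
        return true;
    }

    int main(void)
    {
        printf("%d\n", node_offs_ok(4096, 160, 128 * 1024)); /* 1 */
        printf("%d\n", node_offs_ok(4093, 160, 128 * 1024)); /* 0: unaligned */
        return 0;
    }
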
569 wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead));
571 ubifs_assert(c, wbuf->offs + wbuf->size <= c->leb_size);
576 if (c->leb_size - wbuf->offs >= c->max_write_size)
577 ubifs_assert(c, !((wbuf->offs + wbuf->size) % c->max_write_size));
590 err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len);
595 wbuf->offs += sync_len;
597 * Now @wbuf->offs is not necessarily aligned to @c->max_write_size.
600 * Thus, if @wbuf->offs is not aligned to @c->max_write_size now, make
601 * sure that @wbuf->offs + @wbuf->size is aligned to
606 if (c->leb_size - wbuf->offs < c->max_write_size)
607 wbuf->size = c->leb_size - wbuf->offs;
608 else if (wbuf->offs & (c->max_write_size - 1))
609 wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
619 c->leb_size - wbuf->offs, dirt);
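
In ubifs_wbuf_sync_nolock() (matches 569-619), once the buffer is flushed, @wbuf->offs advances by sync_len and the buffer size is recomputed so that the next flush ends on a @c->max_write_size boundary. The two branches at 606-609 are matched above; the remaining else branch, which mentions no offset and so does not appear here, sets a full max_write_size. The same arithmetic as a standalone helper, with the kernel's ALIGN() replaced by a plain-C macro and the flash geometry assumed:

    #include <stdio.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    /* Recompute the write-buffer size as the matches above do: shrink
     * to the LEB tail, or up to the next max_write_size boundary,
     * otherwise use a full max_write_size. */
    static int next_wbuf_size(int offs, int leb_size, int max_write_size)
    {
        if (leb_size - offs < max_write_size)
            return leb_size - offs;
        if (offs & (max_write_size - 1))
            return ALIGN_UP(offs, max_write_size) - offs;
        return max_write_size;
    }

    int main(void)
    {
        /* Assumed geometry: 128KiB LEB, 2KiB max write unit. */
        printf("%d\n", next_wbuf_size(3072, 131072, 2048));   /* 1024 */
        printf("%d\n", next_wbuf_size(4096, 131072, 2048));   /* 2048 */
        printf("%d\n", next_wbuf_size(130048, 131072, 2048)); /* 1024 */
        return 0;
    }
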
627 * @offs: logical eraseblock offset to seek to
629 * This function targets the write-buffer to logical eraseblock @lnum:@offs.
633 int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs)
637 dbg_io("LEB %d:%d, jhead %s", lnum, offs, dbg_jhead(wbuf->jhead));
639 ubifs_assert(c, offs >= 0 && offs <= c->leb_size);
640 ubifs_assert(c, offs % c->min_io_size == 0 && !(offs & 7));
646 wbuf->offs = offs;
647 if (c->leb_size - wbuf->offs < c->max_write_size)
648 wbuf->size = c->leb_size - wbuf->offs;
649 else if (wbuf->offs & (c->max_write_size - 1))
650 wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
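
ubifs_wbuf_seek_nolock() (matches 627-650) retargets the buffer to @lnum:@offs and then recomputes @wbuf->size with the same three-way branch as the sync path above. Its entry assertions, as a small standalone check with the geometry values again assumed:

    #include <stdbool.h>
    #include <stdio.h>

    /* Entry checks from the ubifs_wbuf_seek_nolock() matches: the
     * target offset must lie inside the LEB and be aligned to both the
     * minimal I/O unit and 8 bytes. */
    static bool seek_offs_ok(int offs, int leb_size, int min_io_size)
    {
        return offs >= 0 && offs <= leb_size &&
               offs % min_io_size == 0 && !(offs & 7);
    }

    int main(void)
    {
        printf("%d\n", seek_offs_ok(2048, 131072, 512)); /* 1 */
        printf("%d\n", seek_offs_ok(100, 131072, 512));  /* 0: unaligned */
        return 0;
    }
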
747 dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used);
749 ubifs_assert(c, wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
750 ubifs_assert(c, !(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
758 if (c->leb_size - wbuf->offs >= c->max_write_size)
759 ubifs_assert(c, !((wbuf->offs + wbuf->size) % c->max_write_size));
761 if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
784 dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
786 wbuf->offs, wbuf->size);
791 wbuf->offs += wbuf->size;
792 if (c->leb_size - wbuf->offs >= c->max_write_size)
795 wbuf->size = c->leb_size - wbuf->offs;
817 dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
819 err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs,
824 wbuf->offs += wbuf->size;
828 } else if (wbuf->offs & (c->max_write_size - 1)) {
837 wbuf->size, wbuf->lnum, wbuf->offs);
838 err = ubifs_leb_write(c, wbuf->lnum, buf, wbuf->offs,
843 wbuf->offs += wbuf->size;
860 wbuf->offs);
866 wbuf->offs, m);
869 wbuf->offs += m;
887 err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, n);
890 wbuf->offs += n;
910 if (c->leb_size - wbuf->offs >= c->max_write_size)
913 wbuf->size = c->leb_size - wbuf->offs;
921 int free = c->leb_size - wbuf->offs - wbuf->used;
935 len, wbuf->lnum, wbuf->offs, err);
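
The matches from 747 to 935 are ubifs_wbuf_write_nolock(), which copies data into the write-buffer while it fits (the free-space test at 761) and flushes to flash when it does not, also writing max_write_size-sized chunks (the m and n arithmetic at 866-890) straight from the caller's buffer. A deliberately reduced sketch of the buffer-or-flush core; it assumes each write fits in one buffer and omits the direct chunked writes:

    #include <stdio.h>
    #include <string.h>

    /* Minimal sketch of the buffered-write idea: data that fits is only
     * memcpy()ed; the buffer is written out once it fills up. */
    struct wbuf {
        char buf[2048]; /* assumed max_write_size */
        int offs;       /* flush position inside the LEB */
        int used;       /* bytes buffered so far */
    };

    static void wbuf_flush(struct wbuf *w)
    {
        printf("write %d bytes at offs %d\n", w->used, w->offs);
        w->offs += w->used;
        w->used = 0;
    }

    /* Assumes len <= sizeof(w->buf); the real function handles larger
     * writes by streaming aligned chunks past the buffer. */
    static void wbuf_write(struct wbuf *w, const void *data, int len)
    {
        if (w->used + len > (int)sizeof(w->buf))
            wbuf_flush(w);          /* no room: push buffer to flash */
        memcpy(w->buf + w->used, data, len);
        w->used += len;
    }

    int main(void)
    {
        struct wbuf w = { .offs = 0, .used = 0 };
        char node[1536] = { 0 };

        wbuf_write(&w, node, sizeof(node)); /* buffered */
        wbuf_write(&w, node, sizeof(node)); /* triggers a flush */
        wbuf_flush(&w);                     /* drain the tail */
        return 0;
    }
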
948 * @offs: offset within the logical eraseblock
958 int offs, int hmac_offs)
963 lnum, offs, dbg_ntype(((struct ubifs_ch *)buf)->node_type), len,
965 ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
966 ubifs_assert(c, offs % c->min_io_size == 0 && offs < c->leb_size);
977 err = ubifs_leb_write(c, lnum, buf, offs, buf_len);
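
ubifs_write_node_hmac() (matches 948-977) writes @buf_len bytes, where buf_len is the node length rounded up to @c->min_io_size (computed earlier in the function, outside these matches), since the flash cannot be written in smaller units. A quick standalone check of that rounding, with an assumed 512-byte minimal I/O unit:

    #include <stdio.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        int min_io_size = 512; /* assumed minimal I/O unit */

        printf("%d\n", ALIGN_UP(161, min_io_size)); /* 512  */
        printf("%d\n", ALIGN_UP(512, min_io_size)); /* 512  */
        printf("%d\n", ALIGN_UP(513, min_io_size)); /* 1024 */
        return 0;
    }
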
990 * @offs: offset within the logical eraseblock
999 int offs)
1001 return ubifs_write_node_hmac(c, buf, len, lnum, offs, -1);
1011 * @offs: offset within the logical eraseblock
1020 int lnum, int offs)
1026 dbg_io("LEB %d:%d, %s, length %d, jhead %s", lnum, offs,
1028 ubifs_assert(c, wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
1029 ubifs_assert(c, !(offs & 7) && offs < c->leb_size);
1033 overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
1037 return ubifs_read_node(c, buf, type, len, lnum, offs);
1041 rlen = wbuf->offs - offs;
1046 memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
1051 err = ubifs_leb_read(c, lnum, buf, offs, rlen, 0);
1062 err = ubifs_check_node(c, buf, len, lnum, offs, 0, 0);
1077 ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
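
ubifs_read_node_wbuf() (matches 1011-1077) satisfies reads that overlap the write-buffer from two sources: the first rlen = wbuf->offs - offs bytes come from the medium (1051), and the rest is memcpy()ed out of the buffer (1046). A standalone sketch of that split, with the medium reduced to a byte array:

    #include <stdio.h>
    #include <string.h>

    /* When [offs, offs+len) crosses wbuf_offs, read the leading rlen
     * bytes from the medium and copy the remainder from the in-memory
     * write-buffer, mirroring the memcpy() at 1046 above. */
    static void read_with_wbuf(char *dst, int offs, int len,
                               const char *medium,
                               const char *wbuf, int wbuf_offs)
    {
        int rlen = wbuf_offs - offs;

        if (rlen < 0)
            rlen = 0;               /* the whole range is buffered */
        memcpy(dst + rlen, wbuf + offs + rlen - wbuf_offs, len - rlen);
        if (rlen > 0)
            memcpy(dst, medium + offs, rlen); /* part before the buffer */
    }

    int main(void)
    {
        char medium[16] = "AAAAAAAA--------"; /* bytes 0..7 on flash */
        char wbuf[8]    = "BBBBBBBB";         /* buffered at offset 8 */
        char out[12];

        read_with_wbuf(out, 4, 12, medium, wbuf, 8);
        fwrite(out, 1, sizeof(out), stdout);  /* prints AAAABBBBBBBB */
        putchar('\n');
        return 0;
    }
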
1090 * @offs: offset within the logical eraseblock
1097 int lnum, int offs)
1102 dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
1103 ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
1104 ubifs_assert(c, len >= UBIFS_CH_SZ && offs + len <= c->leb_size);
1105 ubifs_assert(c, !(offs & 7) && offs < c->leb_size);
1108 err = ubifs_leb_read(c, lnum, buf, offs, len, 0);
1118 err = ubifs_check_node(c, buf, len, lnum, offs, 0, 0);
1134 offs, ubi_is_mapped(c->ubi, lnum));
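
ubifs_read_node() (matches 1090-1134) is the plain path: read the bytes, then run ubifs_check_node() before trusting them, and report the LEB, offset and mapping state when validation fails. A toy sketch of the read-then-validate shape; the struct and its fields are illustrative stand-ins, not the real common header:

    #include <stdio.h>
    #include <string.h>

    /* Toy record with a magic field standing in for the common header
     * that ubifs_check_node() verifies. */
    struct node { unsigned magic; int type; };

    #define NODE_MAGIC 0x06101831 /* UBIFS_NODE_MAGIC, for flavour */

    /* Fetch raw bytes, then verify before handing them to the caller,
     * as the read-then-check flow in the matches above does. */
    static int read_node(const void *medium, int offs, struct node *out,
                         int want_type)
    {
        memcpy(out, (const char *)medium + offs, sizeof(*out));
        if (out->magic != NODE_MAGIC || out->type != want_type) {
            fprintf(stderr, "bad node at offs %d\n", offs);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct node on_flash = { NODE_MAGIC, 2 }, got;

        return read_node(&on_flash, 0, &got, 2);
    }
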
1167 wbuf->lnum = wbuf->offs = -1;