/*
 * Copyright 2016, NICTA
 *
 * This software may be distributed and modified according to the terms of
 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
 * See "LICENSE_GPLv2.txt" for details.
 *
 * @TAG(NICTA_GPL)
 */

#include <bilbyfs.h>

/**
 * wbuf_init - allocate the shared read-buffer (bi->rbuf).
 * @bi: global fs info
 *
 * Allocates one LEB worth of memory for bi->rbuf and records its size.
 * On allocation failure the rbuf state is reset via wbuf_clean()
 * (vfree(NULL) is a no-op, so this is safe when buf is NULL).
 * Returns 0 on success, -ENOMEM otherwise.
 */
int wbuf_init(struct bilbyfs_info *bi)
{
        bi->rbuf.buf = vmalloc(bi->super.leb_size);
        if (bi->rbuf.buf) {
                bi->rbuf.size = bi->super.leb_size;
                return 0;
        }
        wbuf_clean(bi);
        return -ENOMEM;
}

/**
 * wbuf_clean - free the read-buffer and zero all rbuf bookkeeping.
 * @bi: global fs info
 */
void wbuf_clean(struct bilbyfs_info *bi)
{
        vfree(bi->rbuf.buf);
        memset(&bi->rbuf, 0, sizeof(bi->rbuf));
}

/**
 * wbuf_start - reset a write-buffer to describe one empty LEB.
 * @bi: global fs info
 * @wbuf: write-buffer to (re)initialise
 *
 * After this call the buffer is empty (used == 0), fully available
 * (avail == size == leb_size) and nothing has been synced yet
 * (sync_offs == 0).
 */
void wbuf_start(struct bilbyfs_info *bi, struct bilbyfs_wbuf *wbuf)
{
        bilbyfs_assert(bi->super.leb_size == bi->vi.usable_leb_size);
        wbuf->used = 0;
        wbuf->sync_offs = 0;
        wbuf->avail = bi->super.leb_size;
        wbuf->size = bi->super.leb_size;
}

/**
 * wbuf_write_obj - append a serialised object to a write-buffer.
 * @bi: global fs info
 * @obj: serialised object (starts with a struct obj_ch header)
 * @objlen: object length in bytes, must be a multiple of
 *          BILBYFS_OBJ_PADDING
 * @wbuf: write-buffer to append to
 *
 * Copies the object at the current end of the buffer and updates the
 * used/avail accounting.
 * Returns 0 on success, -EINVAL if the object does not fit in the
 * space remaining in the buffer.
 */
int wbuf_write_obj(struct bilbyfs_info *bi, void *obj, int objlen, struct bilbyfs_wbuf *wbuf)
{
        bilbyfs_assert(objlen % BILBYFS_OBJ_PADDING == 0);
        /* Objects of type >= BILBYFS_SUP_OBJ carry no object id, hence 0 is
         * logged for them. The ternary replicates the capacity check below so
         * the debug line shows the return value of this call. */
        bilbyfs_debug("wbuf_write_obj({lnum=%u,offs=%u,len=%u,oid=%llx}) = %d\n",
                      fsm_get_lnum(bi), wbuf->used, objlen,
                      ((struct obj_ch *)obj)->type >= BILBYFS_SUP_OBJ ? 0 : get_obj_id(obj),
                      (wbuf->avail < objlen ? -EINVAL : 0));
        if (wbuf->avail < objlen)
                return -EINVAL;

        memcpy(wbuf->buf + wbuf->used, obj, objlen);
        wbuf->avail -= objlen;
        wbuf->used += objlen;
        return 0;
}

/**
 * wbuf_prepare_commit - pad the write-buffer to the next flash page.
 * @bi: global fs info
 * @padding_sz: optional out-parameter (may be NULL), receives the number
 *              of padding bytes that were appended
 * @wbuf: write-buffer to pad
 *
 * If the gap to the next min_io_size boundary is too small to hold an
 * object header it is filled with raw BILBYFS_PAD_BYTE bytes; otherwise
 * a proper padding object is packed so that a scanner can skip it.
 * Returns the resulting (page-aligned) number of bytes to commit.
 */
int wbuf_prepare_commit(struct bilbyfs_info *bi, u32 *padding_sz, struct bilbyfs_wbuf *wbuf)
{
        struct obj_ch *ch;
        int pad_sz;

        /* Ensure non-empty transactions */
        bilbyfs_assert(wbuf->used > 0);
        /* Buffer completely full: already page-aligned, nothing to pad. */
        if (wbuf->avail == 0)
                return wbuf->used;
        /* padding to the next flash page */
        pad_sz = ALIGN(wbuf->used, bi->super.min_io_size) - wbuf->used;
        if (pad_sz < BILBYFS_CH_SZ) {
                /* Gap too small for an object header: raw pad bytes. */
                memset(wbuf->buf + wbuf->used, BILBYFS_PAD_BYTE, pad_sz);
        } else {
                /* Enough room: write a padding object with its own header. */
                ch = wbuf->buf + wbuf->used;
                pack_obj_pad(ch, pad_sz);
                pack_obj_header(ch, next_sqnum(bi), BILBYFS_TRANS_ATOM);
        }
        if (padding_sz)
                *padding_sz = pad_sz;

        wbuf->avail -= pad_sz;
        wbuf->used += pad_sz;
        return wbuf->used;
}

/**
 * wbuf_atom_leb_commit - atomically replace a LEB with the buffer contents.
 * @bi: global fs info
 * @lnum: LEB number to change
 * @wbuf: write-buffer holding the data (must be page-aligned, i.e.
 *        wbuf_prepare_commit() must have been called)
 *
 * Uses ubi_leb_change() so the whole LEB update is atomic.
 * Returns the ubi_leb_change() error code (0 on success).
 */
int wbuf_atom_leb_commit(struct bilbyfs_info *bi, int lnum, struct bilbyfs_wbuf *wbuf)
{
        /* Ensure that a LEB is big enough to store the data */
        int err;

        bilbyfs_assert(bi->super.leb_size >= wbuf->used);
        /* Ensure that we do not commit an empty transaction */
        bilbyfs_assert(wbuf->used > 0);
        /* Ensure that we called wbuf_prepare_commit() */
        bilbyfs_assert(wbuf->used == ALIGN(wbuf->used, bi->super.min_io_size));

        err = ubi_leb_change(bi->ubi, lnum, wbuf->buf, wbuf->used);
        bilbyfs_debug("ubi_leb_change(ubi, %d, %p, %d) = %d\n",
                      lnum, wbuf->buf, wbuf->used, err);
        return err;
}

/**
 * wbuf_commit - write the not-yet-synced tail of the buffer to a LEB.
 * @bi: global fs info
 * @lnum: LEB number to write to
 * @wbuf: write-buffer; the range [sync_offs, used) is written
 *
 * Only the delta since the last successful commit is written; on success
 * sync_offs is advanced to used, so repeated commits are incremental.
 * Returns the ubi_leb_write() error code (0 on success).
 */
int wbuf_commit(struct bilbyfs_info *bi, u32 lnum, struct bilbyfs_wbuf *wbuf)
{
        int err;

        /* Ensure that sync is within used range */
        bilbyfs_assert(wbuf->sync_offs < wbuf->used);
        /* Ensure that sync is aligned to min-io-size */
        bilbyfs_assert(wbuf->sync_offs == ALIGN(wbuf->sync_offs, bi->super.min_io_size));
        /* Ensure that buffer contains enough room for synchronizing the
         * remaining data */
        bilbyfs_assert(wbuf->size >= wbuf->used);
        /* Ensure that we called wbuf_prepare_commit() */
        bilbyfs_assert(wbuf->used == ALIGN(wbuf->used, bi->super.min_io_size));

        err = ubi_leb_write(bi->ubi, lnum, wbuf->buf + wbuf->sync_offs, wbuf->sync_offs, wbuf->used - wbuf->sync_offs);
        bilbyfs_debug("ubi_leb_write(ubi, %d, %p, %d, %d) = %d\n",
                      lnum, wbuf->buf + wbuf->sync_offs, wbuf->sync_offs, wbuf->used - wbuf->sync_offs, err);
        if (!err)
                wbuf->sync_offs = wbuf->used;
        return err;
}

/**
 * wbuf_read_obj_pages - read the min-io pages covering an object.
 * @bi: global fs info
 * @addr: object address (lnum/offs/len) to read
 * @rbuf: read-buffer receiving the page data
 *
 * Rounds [offs, offs + len) out to min_io_size boundaries, then reads the
 * covered range: first in max_io_size chunks while possible, then in
 * min_io_size chunks, so every ubi_read() stays page-aligned.
 * rbuf->offs is set to the aligned LEB offset the data was read from.
 * Returns the object's byte offset within the read data (>= 0), or a
 * negative ubi_read() error code.
 */
static int wbuf_read_obj_pages(struct bilbyfs_info *bi, struct obj_addr *addr,
                               struct bilbyfs_rbuf *rbuf)
{
        int min_io_size = bi->super.min_io_size;
        int max_io_size = bi->super.max_io_size;
        int offs_in_page = addr->offs % min_io_size;
        int aligned_offs = addr->offs - offs_in_page;
        int aligned_end = ALIGN(addr->offs + addr->len, min_io_size);
        int toread;
        int offs;
        int err;

        offs = 0;
        toread = aligned_end - aligned_offs;
        bilbyfs_assert(aligned_offs == ALIGN(aligned_offs, min_io_size));
        bilbyfs_assert(toread == ALIGN(toread, min_io_size));

        while (toread > max_io_size) {
                err = ubi_read(bi->ubi, addr->lnum, rbuf->buf + offs,
                               aligned_offs + offs, max_io_size);
                if (err)
                        return err;
                offs += max_io_size;
                toread -= max_io_size;
        }
        while (toread >= min_io_size) {
                err = ubi_read(bi->ubi, addr->lnum, rbuf->buf + offs,
                               aligned_offs + offs, min_io_size);
                if (err)
                        return err;
                offs += min_io_size;
                toread -= min_io_size;
        }
        /* Both loops subtract page multiples from a page multiple, so
         * everything must have been consumed at this point. */
        bilbyfs_assert(toread <= 0);
        /* offset from where we started to read */
        rbuf->offs = aligned_offs;
        return offs_in_page;
}

/**
 * wbuf_read_obj - read one object from flash into a caller buffer.
 * @bi: global fs info
 * @buf: destination buffer, must hold at least addr->len bytes
 * @addr: address (lnum/offs/len) of the object to read
 *
 * Reads the covering pages into bi->rbuf, then copies the addr->len
 * object bytes into @buf.
 * Returns 0 on success or a negative error from wbuf_read_obj_pages().
 */
int wbuf_read_obj(struct bilbyfs_info *bi, void *buf, struct obj_addr *addr)
{
        int err;
        int offs_in_page;

        err = wbuf_read_obj_pages(bi, addr, &bi->rbuf);
        if (err < 0)
                return err;

        /* Non-negative return is the object's offset within the first page. */
        offs_in_page = err;
        memcpy(buf, bi->rbuf.buf + offs_in_page, addr->len);
        return 0;
}

/**
 * wbuf_read_sum - locate and read the summary object of a LEB.
 * @bi: global fs info
 * @lnum: LEB number to read from
 * @rbuf: read-buffer; must span a whole LEB (asserted)
 * @sum_offs_ret: out-parameter receiving the summary's offset in the LEB
 *
 * The last BILBYFS_OBJ_SUM_OFFS_SZ bytes of a LEB store the on-flash
 * offset of the summary object. Read the last page first, decode that
 * offset, then read the remaining pages that cover the summary (the
 * final page is already in the buffer, hence nb_read - 1 extra reads).
 * Updates the nb_pages/nb_extra_pages counters for instrumentation.
 * Returns 0 on success, -ENOENT if the stored offset is out of range
 * (i.e. no summary present), or a negative ubi_read() error.
 */
int wbuf_read_sum(struct bilbyfs_info *bi, int lnum, struct bilbyfs_rbuf *rbuf, u32 *sum_offs_ret)
{
        int io_sz = bi->super.min_io_size;
        int sum_offs;
        int nb_read;
        int offs = bi->super.leb_size - io_sz;
        int err;
        int i;
        struct obj_sum *sum;
        /* struct timeval st, stp; */

        bilbyfs_assert(rbuf->size == bi->super.leb_size);
        /* bilbyfs_err("reading 1 page from erase-block %x\n", lnum); */
        bi->nb_pages += 1;
        /*
        do_gettimeofday(&st);
        */
        err = ubi_read(bi->ubi, lnum, rbuf->buf + offs, offs, io_sz);
        if (err)
                return err;
        /*
        do_gettimeofday(&stp);
        pr_err("timed ubi_read() : %ld.%ld\n", stp.tv_sec - st.tv_sec, stp. tv_usec - st.tv_usec);
        */
        /* Decode the summary offset stored in the LEB's trailing bytes. */
        sum_offs = le32_to_cpu(*(u32*)(rbuf->buf + bi->super.leb_size - BILBYFS_OBJ_SUM_OFFS_SZ));
        if (sum_offs >= (bi->super.leb_size - BILBYFS_OBJ_SUM_OFFS_SZ))
                return -ENOENT;
        /* Round down to the page containing the summary's first byte. */
        offs = sum_offs - sum_offs % io_sz;
        nb_read = (bi->super.leb_size - offs) / io_sz;
        /* bilbyfs_err("reading more pages (%u) from erase-block %x\n", nb_read - 1, lnum); */
        bi->nb_extra_pages += nb_read - 1;
        for (i = 0; i < nb_read - 1; i++) {
                /*
                do_gettimeofday(&st);
                */
                err = ubi_read(bi->ubi, lnum, rbuf->buf + offs, offs, io_sz);
                if (err)
                        return err;
                /*
                do_gettimeofday(&stp);
                pr_err("timed ubi_read() e : %ld.%ld\n", stp.tv_sec - st.tv_sec, stp. tv_usec - st.tv_usec);
                */
                offs += io_sz;
        }
        sum = rbuf->buf + sum_offs;
        bilbyfs_debug("summary (.lnum=%d, .nb_read=%d, .sum_sz=%u, io_sz=%d, sum.size=%u, sum.nb_entry=%u)\n", lnum, nb_read, bi->super.leb_size - sum_offs, io_sz, le32_to_cpu(sum->ch.len), le32_to_cpu(sum->nb_sum_entry));
        rbuf->offs = 0;
        *sum_offs_ret = sum_offs;
        return 0;
}

/* Read an entire LEB but stop when encountering a page that starts with 0xffff
 * and assume that the remaining pages are full of 0xff too.
 */
int wbuf_read_leb_fast(struct bilbyfs_info *bi, int lnum, struct bilbyfs_rbuf *rbuf)
{
        int io_sz = bi->super.min_io_size;
        int nb_read = bi->super.leb_size / io_sz;
        int offs = 0;
        int err;
        int i;

        bilbyfs_assert(rbuf->size == bi->super.leb_size);
        for (i = 0; i < nb_read; i++) {
                err = ubi_read(bi->ubi, lnum, rbuf->buf + offs, offs, io_sz);
                if (err)
                        return err;
                /* A page beginning with 8 bytes of 0xff is treated as erased;
                 * stop reading and leave the tail of rbuf untouched. */
                if (*(u64*)(rbuf->buf + offs) == 0xffffffffffffffff) {
                        break;
                }
                offs += io_sz;
        }
        rbuf->offs = 0;
        return 0;
}

/**
 * wbuf_read_leb - read a whole LEB, page by page, into a read-buffer.
 * @bi: global fs info
 * @lnum: LEB number to read
 * @rbuf: read-buffer; must span a whole LEB (asserted)
 *
 * Returns 0 on success or the first negative ubi_read() error.
 */
int wbuf_read_leb(struct bilbyfs_info *bi, int lnum, struct bilbyfs_rbuf *rbuf)
{
        int io_sz = bi->super.min_io_size;
        int nb_read = bi->super.leb_size / io_sz;
        int offs = 0;
        int err;
        int i;

        bilbyfs_assert(rbuf->size == bi->super.leb_size);
        for (i = 0; i < nb_read; i++) {
                err = ubi_read(bi->ubi, lnum, rbuf->buf + offs, offs, io_sz);
                if (err)
                        return err;
                offs += io_sz;
        }
        rbuf->offs = 0;
        return 0;
}

/**
 * wbuf_erase - erase one LEB.
 * @bi: global fs info
 * @lnum: LEB number to erase
 *
 * Thin wrapper around ubi_leb_erase() with debug logging.
 * Returns the ubi_leb_erase() error code (0 on success).
 */
int wbuf_erase(struct bilbyfs_info *bi, int lnum)
{
        int err = ubi_leb_erase(bi->ubi, lnum);
        bilbyfs_debug("ubi_leb_erase(lnum=%d) = %d\n", lnum, err);
        return err;
}

/**
 * wbuf_next_obj_addr - advance @addr to the next object in a read-buffer.
 * @bi: global fs info
 * @addr: in/out: address of the current object; on success it is updated
 *        to the next object's offs/len/sqnum
 * @rbuf: read-buffer covering @addr (asserted)
 *
 * Looks at offset addr->offs + addr->len for a valid object header.
 * Runs of BILBYFS_PAD_BYTE are skipped 8 bytes at a time by recursing
 * with addr->len = 8.
 * Returns a pointer to the object in rbuf on success, ERR_PTR(-ENOENT)
 * when the end of the buffer is reached, or an ERR_PTR for any other
 * check_obj_header() failure.
 */
void *wbuf_next_obj_addr(struct bilbyfs_info *bi, struct obj_addr *addr,
                         struct bilbyfs_rbuf *rbuf)
{
        struct obj_ch *obj;
        u32 leb_offs;
        u32 rbuf_offs;
        int err;

        /* Ensures that rbuf covers addr */
        bilbyfs_assert(rbuf->offs <= addr->offs);

        /* Note: This function also works if addr->len = 0, it will
         * just check that there exists a valid object at addr->offs
         * and store its length in addr->len.
         */

        leb_offs = addr->offs + addr->len;
        if (leb_offs >= (rbuf->offs + rbuf->size))
                return ERR_PTR(-ENOENT);

        /* Make offs an offset in the rbuf */
        rbuf_offs = leb_offs - rbuf->offs;
        /* Get a pointer to obj in rbuf */
        obj = rbuf->buf + rbuf_offs;
        err = check_obj_header(obj, rbuf->size - rbuf_offs);
        if (!err) {
                addr->offs = leb_offs;
                addr->len = le32_to_cpu(obj->len);
                addr->sqnum = le64_to_cpu(obj->sqnum);
                return obj;
        }

        /* Scan for pad byte */
        if (err == -ENOENT) {
                if (*((u8 *) obj) == BILBYFS_PAD_BYTE) {
                        /* Skip 8 padding bytes and retry at the next slot. */
                        addr->offs = leb_offs;
                        addr->len = 8;
                        return wbuf_next_obj_addr(bi, addr, rbuf);
                }
        }
        return ERR_PTR(err);
}
