Lines Matching refs:ubi in /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/mtd/ubi/

47 #include "ubi.h"
59 * object is inserted to the lock tree (@ubi->ltree).
74 * @ubi: UBI device description object
80 static unsigned long long next_sqnum(struct ubi_device *ubi)
84 spin_lock(&ubi->ltree_lock);
85 sqnum = ubi->global_sqnum++;
86 spin_unlock(&ubi->ltree_lock);
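
The lines from next_sqnum() above show the pattern used for global sequence numbers: a 64-bit counter read and incremented under ubi->ltree_lock. A minimal user-space sketch of the same pattern, using a pthread mutex in place of the kernel spinlock (the names below are illustrative, not UBI's):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static pthread_mutex_t sqnum_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint64_t global_sqnum;

    /* Hand out the next sequence number; the lock makes read-and-increment atomic. */
    static uint64_t next_sqnum(void)
    {
        uint64_t sqnum;

        pthread_mutex_lock(&sqnum_lock);
        sqnum = global_sqnum++;
        pthread_mutex_unlock(&sqnum_lock);
        return sqnum;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)next_sqnum());
        printf("%llu\n", (unsigned long long)next_sqnum());
        return 0;
    }
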
93 * @ubi: UBI device description object
99 static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
108 * @ubi: UBI device description object
114 * @ubi->ltree_lock has to be locked.
116 static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
121 p = ubi->ltree.rb_node;
146 * @ubi: UBI device description object
155 static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
167 spin_lock(&ubi->ltree_lock);
168 le1 = ltree_lookup(ubi, vol_id, lnum);
182 * @ubi->ltree RB-tree.
186 p = &ubi->ltree.rb_node;
205 rb_insert_color(&le->rb, &ubi->ltree);
208 spin_unlock(&ubi->ltree_lock);
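
ltree_lookup() and ltree_add_entry() keep at most one entry per (vol_id, lnum) pair in the @ubi->ltree red-black tree, ordering nodes by vol_id first and lnum second. A simplified sketch of that ordering using a plain binary search tree instead of the kernel rb-tree (struct lt_node and the lt_* helpers are made up for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-in for struct ltree_entry; not the kernel layout. */
    struct lt_node {
        int vol_id, lnum;
        struct lt_node *left, *right;
    };

    static int lt_cmp(int vol_id, int lnum, const struct lt_node *n)
    {
        if (vol_id != n->vol_id)
            return vol_id < n->vol_id ? -1 : 1;
        if (lnum != n->lnum)
            return lnum < n->lnum ? -1 : 1;
        return 0;
    }

    /* Descend the tree the same way ltree_lookup() walks ubi->ltree. */
    static struct lt_node *lt_lookup(struct lt_node *root, int vol_id, int lnum)
    {
        while (root) {
            int c = lt_cmp(vol_id, lnum, root);

            if (c == 0)
                return root;
            root = c < 0 ? root->left : root->right;
        }
        return NULL;
    }

    /* Insert if missing, like ltree_add_entry(), minus refcounting and locking. */
    static struct lt_node *lt_add(struct lt_node **root, int vol_id, int lnum)
    {
        while (*root) {
            int c = lt_cmp(vol_id, lnum, *root);

            if (c == 0)
                return *root;
            root = c < 0 ? &(*root)->left : &(*root)->right;
        }
        *root = calloc(1, sizeof(**root));
        if (*root) {
            (*root)->vol_id = vol_id;
            (*root)->lnum = lnum;
        }
        return *root;
    }

    int main(void)
    {
        struct lt_node *root = NULL;

        lt_add(&root, 0, 7);
        lt_add(&root, 1, 3);
        printf("found (1,3): %s\n", lt_lookup(root, 1, 3) ? "yes" : "no");
        return 0;
    }
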
218 * @ubi: UBI device description object
225 static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
229 le = ltree_add_entry(ubi, vol_id, lnum);
238 * @ubi: UBI device description object
242 static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
247 spin_lock(&ubi->ltree_lock);
248 le = ltree_lookup(ubi, vol_id, lnum);
252 rb_erase(&le->rb, &ubi->ltree);
255 spin_unlock(&ubi->ltree_lock);
264 * @ubi: UBI device description object
271 static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
275 le = ltree_add_entry(ubi, vol_id, lnum);
284 * @ubi: UBI device description object
288 static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
293 spin_lock(&ubi->ltree_lock);
294 le = ltree_lookup(ubi, vol_id, lnum);
298 rb_erase(&le->rb, &ubi->ltree);
302 spin_unlock(&ubi->ltree_lock);
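
leb_read_lock()/leb_write_lock() serialize I/O on one logical eraseblock: concurrent readers of the same (vol_id, lnum) are allowed, writers are exclusive, and the ltree entry is dropped when its last user unlocks. A rough user-space analogue of the reader/writer part only, with one pthread_rwlock_t per LEB in a fixed demo table (the kernel version instead refcounts dynamically allocated ltree entries and takes a per-entry read/write semaphore):

    #include <pthread.h>
    #include <stdio.h>

    #define DEMO_LEBS 4

    /* One lock per logical eraseblock of a single demo volume. */
    static pthread_rwlock_t leb_lock[DEMO_LEBS] = {
        PTHREAD_RWLOCK_INITIALIZER, PTHREAD_RWLOCK_INITIALIZER,
        PTHREAD_RWLOCK_INITIALIZER, PTHREAD_RWLOCK_INITIALIZER,
    };

    static void leb_read_lock(int lnum)    { pthread_rwlock_rdlock(&leb_lock[lnum]); }
    static void leb_read_unlock(int lnum)  { pthread_rwlock_unlock(&leb_lock[lnum]); }
    static void leb_write_lock(int lnum)   { pthread_rwlock_wrlock(&leb_lock[lnum]); }
    static void leb_write_unlock(int lnum) { pthread_rwlock_unlock(&leb_lock[lnum]); }

    int main(void)
    {
        leb_read_lock(2);
        printf("reading LEB 2 under its read lock\n");
        leb_read_unlock(2);

        leb_write_lock(2);
        printf("writing LEB 2 under its write lock\n");
        leb_write_unlock(2);
        return 0;
    }
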
311 * @ubi: UBI device description object
319 int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum)
321 int idx = vol_id2idx(ubi, vol_id), err, pnum;
322 struct ubi_volume *vol = ubi->volumes[idx];
324 if (ubi->ro_mode)
327 err = leb_write_lock(ubi, vol_id, lnum);
339 err = ubi_wl_put_peb(ubi, pnum, 0);
342 leb_write_unlock(ubi, vol_id, lnum);
348 * @ubi: UBI device description object
365 int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
368 int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id);
370 struct ubi_volume *vol = ubi->volumes[idx];
373 err = leb_read_lock(ubi, vol_id, lnum);
386 leb_read_unlock(ubi, vol_id, lnum);
400 vid_hdr = ubi_zalloc_vid_hdr(ubi);
406 err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
414 ubi_ro_mode(ubi);
424 ubi_free_vid_hdr(ubi, vid_hdr);
427 err = ubi_io_read_data(ubi, buf, pnum, offset, len);
456 err = ubi_wl_scrub_peb(ubi, pnum);
458 leb_read_unlock(ubi, vol_id, lnum);
462 ubi_free_vid_hdr(ubi, vid_hdr);
464 leb_read_unlock(ubi, vol_id, lnum);
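
In the read path above, ubi_eba_read_leb() takes the LEB read lock, reads the data from the mapped PEB and, if the lower layers report that ECC had to correct bit-flips, passes the PEB to ubi_wl_scrub_peb() so its contents get moved to a healthier block. A stubbed sketch of that decision; the demo_* functions and the DEMO_BITFLIPS code are stand-ins, not the real UBI I/O API:

    #include <stdio.h>
    #include <string.h>

    #define DEMO_BITFLIPS 1    /* stand-in for a "bit-flips were corrected" status */

    /* Pretend to read a PEB and report that ECC had to correct some bits. */
    static int demo_read_data(int pnum, char *buf, size_t len)
    {
        (void)pnum;
        memset(buf, 0xA5, len);
        return DEMO_BITFLIPS;
    }

    static void demo_scrub_peb(int pnum)
    {
        printf("PEB %d scheduled for scrubbing\n", pnum);
    }

    int main(void)
    {
        char buf[16];
        int scrub = 0;
        int err = demo_read_data(7, buf, sizeof(buf));

        if (err == DEMO_BITFLIPS) {
            scrub = 1;    /* data is good, but the block is ageing */
            err = 0;
        }
        if (scrub)
            demo_scrub_peb(7);
        return err;
    }
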
470 * @ubi: UBI device description object
484 static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
487 int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
488 struct ubi_volume *vol = ubi->volumes[idx];
492 vid_hdr = ubi_zalloc_vid_hdr(ubi);
498 new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
500 ubi_free_vid_hdr(ubi, vid_hdr);
506 err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
513 vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
514 err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
528 err = ubi_io_read_data(ubi, new_buf, pnum, 0, offset);
537 err = ubi_io_write_data(ubi, new_buf, new_pnum, 0, data_size);
544 ubi_free_vid_hdr(ubi, vid_hdr);
547 ubi_wl_put_peb(ubi, pnum, 1);
553 ubi_wl_put_peb(ubi, new_pnum, 1);
554 ubi_free_vid_hdr(ubi, vid_hdr);
563 ubi_wl_put_peb(ubi, new_pnum, 1);
565 ubi_free_vid_hdr(ubi, vid_hdr);
574 * @ubi: UBI device description object
587 int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
590 int idx = vol_id2idx(ubi, vol_id), err, pnum, tries = 0;
591 struct ubi_volume *vol = ubi->volumes[idx];
594 if (ubi->ro_mode)
597 err = leb_write_lock(ubi, vol_id, lnum);
606 err = ubi_io_write_data(ubi, buf, pnum, offset, len);
609 if (err == -EIO && ubi->bad_allowed)
610 err = recover_peb(ubi, pnum, vol_id, lnum, buf, offset, len);
612 ubi_ro_mode(ubi);
614 leb_write_unlock(ubi, vol_id, lnum);
622 vid_hdr = ubi_zalloc_vid_hdr(ubi);
624 leb_write_unlock(ubi, vol_id, lnum);
629 vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
632 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
636 pnum = ubi_wl_get_peb(ubi, dtype);
638 ubi_free_vid_hdr(ubi, vid_hdr);
639 leb_write_unlock(ubi, vol_id, lnum);
646 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
653 err = ubi_io_write_data(ubi, buf, pnum, offset, len);
662 leb_write_unlock(ubi, vol_id, lnum);
663 ubi_free_vid_hdr(ubi, vid_hdr);
667 if (err != -EIO || !ubi->bad_allowed) {
668 ubi_ro_mode(ubi);
669 leb_write_unlock(ubi, vol_id, lnum);
670 ubi_free_vid_hdr(ubi, vid_hdr);
679 err = ubi_wl_put_peb(ubi, pnum, 1);
681 ubi_ro_mode(ubi);
682 leb_write_unlock(ubi, vol_id, lnum);
683 ubi_free_vid_hdr(ubi, vid_hdr);
687 vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
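
ubi_eba_write_leb() does not go read-only on the first write failure: when bad-PEB handling is allowed, it returns the failing PEB to wear-leveling, takes a fresh one, bumps the sequence number and retries a bounded number of times. A self-contained sketch of that retry loop with stubbed I/O that fails on the first PEB (the demo_* names and the retry limit are illustrative only):

    #include <errno.h>
    #include <stdio.h>

    #define DEMO_RETRIES 3

    static int demo_get_peb(void)      { static int next = 100; return next++; }
    static void demo_put_peb(int pnum) { printf("returned PEB %d to wear-leveling\n", pnum); }
    static unsigned long long demo_next_sqnum(void) { static unsigned long long s; return s++; }

    /* Fail the first physical eraseblock we try, succeed on the next one. */
    static int demo_write(int pnum, const char *buf)
    {
        (void)buf;
        return pnum == 100 ? -EIO : 0;
    }

    static int demo_write_leb(const char *buf)
    {
        int tries = 0;

        while (tries++ < DEMO_RETRIES) {
            int pnum = demo_get_peb();
            unsigned long long sqnum = demo_next_sqnum();
            int err = demo_write(pnum, buf);

            if (!err) {
                printf("written to PEB %d, sqnum %llu\n", pnum, sqnum);
                return 0;
            }
            if (err != -EIO)
                return err;    /* only flash write failures are retried */
            demo_put_peb(pnum);
        }
        return -EIO;
    }

    int main(void)
    {
        return demo_write_leb("hello") ? 1 : 0;
    }
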
694 * @ubi: UBI device description object
715 int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum,
719 int idx = vol_id2idx(ubi, vol_id);
720 struct ubi_volume *vol = ubi->volumes[idx];
724 if (ubi->ro_mode)
729 len = ALIGN(data_size, ubi->min_io_size);
731 ubi_assert(len % ubi->min_io_size == 0);
733 vid_hdr = ubi_zalloc_vid_hdr(ubi);
737 err = leb_write_lock(ubi, vol_id, lnum);
739 ubi_free_vid_hdr(ubi, vid_hdr);
743 vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
746 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
756 pnum = ubi_wl_get_peb(ubi, dtype);
758 ubi_free_vid_hdr(ubi, vid_hdr);
759 leb_write_unlock(ubi, vol_id, lnum);
766 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
773 err = ubi_io_write_data(ubi, buf, pnum, 0, len);
783 leb_write_unlock(ubi, vol_id, lnum);
784 ubi_free_vid_hdr(ubi, vid_hdr);
788 if (err != -EIO || !ubi->bad_allowed) {
794 ubi_ro_mode(ubi);
795 leb_write_unlock(ubi, vol_id, lnum);
796 ubi_free_vid_hdr(ubi, vid_hdr);
800 err = ubi_wl_put_peb(ubi, pnum, 1);
802 ubi_ro_mode(ubi);
803 leb_write_unlock(ubi, vol_id, lnum);
804 ubi_free_vid_hdr(ubi, vid_hdr);
808 vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
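
For static volumes, ubi_eba_write_leb_st() pads the data length up to the flash's minimal I/O unit before writing, which is what the ALIGN() call above does, and then asserts the result is an exact multiple. The arithmetic is a plain round-up, as in this small sketch (the 2048-byte min_io_size is just an example value):

    #include <assert.h>
    #include <stdio.h>

    /* Round x up to the next multiple of a (a must be a power of two). */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        int min_io_size = 2048;    /* example NAND page size */
        int data_size = 3000;
        int len = ALIGN_UP(data_size, min_io_size);

        assert(len % min_io_size == 0);
        printf("data_size %d -> len %d\n", data_size, len);    /* 3000 -> 4096 */
        return 0;
    }
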
815 * @ubi: UBI device description object
828 int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
831 int err, pnum, tries = 0, idx = vol_id2idx(ubi, vol_id);
832 struct ubi_volume *vol = ubi->volumes[idx];
836 if (ubi->ro_mode)
839 vid_hdr = ubi_zalloc_vid_hdr(ubi);
843 err = leb_write_lock(ubi, vol_id, lnum);
845 ubi_free_vid_hdr(ubi, vid_hdr);
849 vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
852 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
862 pnum = ubi_wl_get_peb(ubi, dtype);
864 ubi_free_vid_hdr(ubi, vid_hdr);
865 leb_write_unlock(ubi, vol_id, lnum);
872 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
879 err = ubi_io_write_data(ubi, buf, pnum, 0, len);
886 err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
888 ubi_free_vid_hdr(ubi, vid_hdr);
889 leb_write_unlock(ubi, vol_id, lnum);
894 leb_write_unlock(ubi, vol_id, lnum);
895 ubi_free_vid_hdr(ubi, vid_hdr);
899 if (err != -EIO || !ubi->bad_allowed) {
905 ubi_ro_mode(ubi);
906 leb_write_unlock(ubi, vol_id, lnum);
907 ubi_free_vid_hdr(ubi, vid_hdr);
911 err = ubi_wl_put_peb(ubi, pnum, 1);
913 ubi_ro_mode(ubi);
914 leb_write_unlock(ubi, vol_id, lnum);
915 ubi_free_vid_hdr(ubi, vid_hdr);
919 vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
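
ubi_eba_atomic_leb_change() gets its atomicity from never touching the old copy in place: the new contents go to a freshly taken PEB under a new sequence number, and only after that write succeeds is the old PEB from vol->eba_tbl handed back and the mapping switched. A stubbed sketch of that ordering (the demo_* helpers are placeholders, not UBI functions):

    #include <stdio.h>

    static int eba_tbl[8] = { [3] = 40 };    /* demo mapping: LEB 3 -> PEB 40 */

    static int  demo_get_peb(void) { return 41; }
    static void demo_put_peb(int pnum) { printf("released old PEB %d\n", pnum); }

    static int demo_write(int pnum, const char *buf)
    {
        (void)pnum; (void)buf;
        return 0;
    }

    static int demo_atomic_change(int lnum, const char *buf)
    {
        int new_pnum = demo_get_peb();
        int err = demo_write(new_pnum, buf);

        if (err)
            return err;                 /* old copy in eba_tbl[lnum] is still intact */

        demo_put_peb(eba_tbl[lnum]);    /* old data released only after success */
        eba_tbl[lnum] = new_pnum;       /* switch the LEB -> PEB mapping */
        return 0;
    }

    int main(void)
    {
        demo_atomic_change(3, "new contents");
        printf("LEB 3 now mapped to PEB %d\n", eba_tbl[3]);
        return 0;
    }
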
941 * @ubi: UBI device description object
952 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
967 aldata_size = ALIGN(data_size, ubi->min_io_size);
970 ubi->leb_size - ubi32_to_cpu(vid_hdr->data_pad);
980 err = leb_write_lock(ubi, vol_id, lnum);
990 idx = vol_id2idx(ubi, vol_id);
994 * @ubi->volumes_lock.
996 spin_lock(&ubi->volumes_lock);
997 vol = ubi->volumes[idx];
1000 spin_unlock(&ubi->volumes_lock);
1008 spin_unlock(&ubi->volumes_lock);
1011 spin_unlock(&ubi->volumes_lock);
1016 err = ubi_io_read_data(ubi, buf, from, 0, aldata_size);
1035 ubi_calc_data_len(ubi, buf, data_size);
1052 vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
1054 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
1061 err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
1069 err = ubi_io_write_data(ubi, buf, to, 0, aldata_size);
1085 err = ubi_io_read_data(ubi, buf1, to, 0, aldata_size);
1105 leb_write_unlock(ubi, vol_id, lnum);
1112 leb_write_unlock(ubi, vol_id, lnum);
1120 * @ubi: UBI device description object
1126 int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1136 spin_lock_init(&ubi->ltree_lock);
1137 ubi->ltree = RB_ROOT;
1147 ubi->global_sqnum = si->max_sqnum + 1;
1148 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1151 vol = ubi->volumes[i];
1167 sv = ubi_scan_find_sv(si, idx2vol_id(ubi, i));
1182 if (ubi->bad_allowed) {
1183 ubi_calculate_reserved(ubi);
1185 if (ubi->avail_pebs < ubi->beb_rsvd_level) {
1187 ubi->beb_rsvd_pebs = ubi->avail_pebs;
1190 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
1192 ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
1194 ubi->avail_pebs -= ubi->beb_rsvd_pebs;
1195 ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
1203 if (!ubi->volumes[i])
1205 kfree(ubi->volumes[i]->eba_tbl);
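
The scan-time initialization above also sets aside spare PEBs for future bad-block handling: it reserves the computed level when enough PEBs are free, otherwise whatever is available (with a warning), and moves that count from avail_pebs to rsvd_pebs. The bookkeeping reduces to a few lines, sketched here with a stand-in struct rather than struct ubi_device:

    #include <stdio.h>

    /* Stand-in for the handful of struct ubi_device fields used here. */
    struct demo_ubi {
        int avail_pebs, rsvd_pebs;
        int beb_rsvd_level, beb_rsvd_pebs;
    };

    static void reserve_for_bad_pebs(struct demo_ubi *u)
    {
        if (u->avail_pebs < u->beb_rsvd_level) {
            /* Not enough free PEBs: reserve what we can and warn. */
            u->beb_rsvd_pebs = u->avail_pebs;
            printf("warning: only %d of %d PEBs reserved for bad PEB handling\n",
                   u->beb_rsvd_pebs, u->beb_rsvd_level);
        } else {
            u->beb_rsvd_pebs = u->beb_rsvd_level;
        }
        u->avail_pebs -= u->beb_rsvd_pebs;
        u->rsvd_pebs += u->beb_rsvd_pebs;
    }

    int main(void)
    {
        struct demo_ubi u = { .avail_pebs = 10, .rsvd_pebs = 50,
                              .beb_rsvd_level = 20, .beb_rsvd_pebs = 0 };

        reserve_for_bad_pebs(&u);
        printf("avail %d rsvd %d beb_rsvd %d\n",
               u.avail_pebs, u.rsvd_pebs, u.beb_rsvd_pebs);
        return 0;
    }
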
1214 * @ubi: UBI device description object
1216 void ubi_eba_close(const struct ubi_device *ubi)
1218 int i, num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1223 if (!ubi->volumes[i])
1225 kfree(ubi->volumes[i]->eba_tbl);