
Lines matching refs:ubi in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/mtd/ubi/eba.c, the UBI eraseblock association (EBA) unit. The leading numbers are source line numbers in that file.

47 #include "ubi.h"
54 * @ubi: UBI device description object
60 static unsigned long long next_sqnum(struct ubi_device *ubi)
64 spin_lock(&ubi->ltree_lock);
65 sqnum = ubi->global_sqnum++;
66 spin_unlock(&ubi->ltree_lock);
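
Lines 60-66 are UBI's global sequence number generator: every VID header written to flash gets a unique, monotonically increasing 64-bit sqnum, and the counter shares ubi->ltree_lock with the LEB lock tree. A minimal user-space model of the pattern, with a pthread mutex standing in for the kernel spinlock (all names below are illustrative, not from eba.c):

    #include <pthread.h>
    #include <stdint.h>

    /* Stand-ins for ubi->ltree_lock and ubi->global_sqnum. */
    static pthread_mutex_t seq_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint64_t global_sqnum;

    /* Model of next_sqnum(): hand out the current value and bump the
     * counter under the lock.  A 64-bit counter cannot realistically
     * overflow within the lifetime of a flash device. */
    static uint64_t next_sqnum_model(void)
    {
            uint64_t sqnum;

            pthread_mutex_lock(&seq_lock);
            sqnum = global_sqnum++;
            pthread_mutex_unlock(&seq_lock);
            return sqnum;
    }
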
73 * @ubi: UBI device description object
79 static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
88 * @ubi: UBI device description object
94 * @ubi->ltree_lock has to be locked.
96 static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
101 p = ubi->ltree.rb_node;
126 * @ubi: UBI device description object
135 static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
149 spin_lock(&ubi->ltree_lock);
150 le1 = ltree_lookup(ubi, vol_id, lnum);
164 * @ubi->ltree RB-tree.
168 p = &ubi->ltree.rb_node;
187 rb_insert_color(&le->rb, &ubi->ltree);
190 spin_unlock(&ubi->ltree_lock);
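
The ltree referenced at lines 96-190 is a red-black tree of per-LEB lock entries keyed by (vol_id, lnum), each carrying a user count; ltree_add_entry() allocates a candidate node before taking the lock, then either bumps the count on an existing entry or links the new one in. A hedged sketch of the same lookup and insert logic over a plain binary search tree (the kernel uses rb_node plus rb_insert_color() for balancing; every name here is illustrative):

    #include <stdlib.h>

    /* Simplified stand-in for struct ubi_ltree_entry. */
    struct ltree_entry {
            struct ltree_entry *left, *right;
            int vol_id;
            int lnum;
            int users;      /* tasks holding or waiting for this LEB */
    };

    /* Same ordering eba.c uses when walking ubi->ltree.rb_node:
     * compare vol_id first, then lnum. */
    static int ltree_cmp(const struct ltree_entry *le, int vol_id, int lnum)
    {
            if (vol_id != le->vol_id)
                    return vol_id < le->vol_id ? -1 : 1;
            if (lnum != le->lnum)
                    return lnum < le->lnum ? -1 : 1;
            return 0;
    }

    /* Model of ltree_lookup(); the caller must hold the lock that
     * protects the tree (ubi->ltree_lock in the kernel). */
    static struct ltree_entry *ltree_lookup_model(struct ltree_entry *root,
                                                  int vol_id, int lnum)
    {
            while (root) {
                    int cmp = ltree_cmp(root, vol_id, lnum);

                    if (cmp == 0)
                            return root;
                    root = cmp < 0 ? root->left : root->right;
            }
            return NULL;
    }

    /* Model of ltree_add_entry(): reuse an existing entry and bump its
     * user count, or link the pre-allocated (malloc'd) node in. */
    static struct ltree_entry *ltree_add_model(struct ltree_entry **root,
                                               struct ltree_entry *new_le)
    {
            struct ltree_entry **p = root;

            while (*p) {
                    int cmp = ltree_cmp(*p, new_le->vol_id, new_le->lnum);

                    if (cmp == 0) {
                            (*p)->users += 1;
                            free(new_le);
                            return *p;
                    }
                    p = cmp < 0 ? &(*p)->left : &(*p)->right;
            }
            new_le->users = 1;
            *p = new_le;
            return new_le;
    }
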
198 * @ubi: UBI device description object
205 static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
209 le = ltree_add_entry(ubi, vol_id, lnum);
218 * @ubi: UBI device description object
222 static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
226 spin_lock(&ubi->ltree_lock);
227 le = ltree_lookup(ubi, vol_id, lnum);
232 rb_erase(&le->rb, &ubi->ltree);
235 spin_unlock(&ubi->ltree_lock);
240 * @ubi: UBI device description object
247 static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
251 le = ltree_add_entry(ubi, vol_id, lnum);
260 * @ubi: UBI device description object
269 static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
273 le = ltree_add_entry(ubi, vol_id, lnum);
280 spin_lock(&ubi->ltree_lock);
284 rb_erase(&le->rb, &ubi->ltree);
287 spin_unlock(&ubi->ltree_lock);
294 * @ubi: UBI device description object
298 static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
302 spin_lock(&ubi->ltree_lock);
303 le = ltree_lookup(ubi, vol_id, lnum);
308 rb_erase(&le->rb, &ubi->ltree);
311 spin_unlock(&ubi->ltree_lock);
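
Lines 205-311 build per-LEB reader/writer locking on top of the ltree: leb_read_lock()/leb_write_lock() pin an entry and take its embedded rw_semaphore for reading or writing, leb_write_trylock() fails instead of sleeping, and the unlock paths drop the reference under ubi->ltree_lock and free the entry once the last user is gone. A minimal behavioral sketch, collapsing the per-(vol_id, lnum) semaphores down to one pthread rwlock for a single LEB:

    #include <pthread.h>

    static pthread_rwlock_t leb_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* leb_read_lock()/leb_read_unlock(): any number of readers may
     * access the LEB concurrently. */
    static void read_leb_model(void)
    {
            pthread_rwlock_rdlock(&leb_lock);
            /* ... read the PEB this LEB maps to ... */
            pthread_rwlock_unlock(&leb_lock);
    }

    /* leb_write_trylock(): fail rather than sleep.  This is what lets
     * ubi_eba_copy_leb() back off instead of deadlocking against a
     * writer that holds the LEB lock while waiting on ubi->move_mutex. */
    static int try_write_leb_model(void)
    {
            if (pthread_rwlock_trywrlock(&leb_lock) != 0)
                    return -1;      /* contended; caller retries later */
            /* ... write, unmap or move the LEB ... */
            pthread_rwlock_unlock(&leb_lock);
            return 0;
    }
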
316 * @ubi: UBI device description object
324 int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
329 if (ubi->ro_mode)
332 err = leb_write_lock(ubi, vol_id, lnum);
344 err = ubi_wl_put_peb(ubi, pnum, 0);
347 leb_write_unlock(ubi, vol_id, lnum);
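
ubi_eba_unmap_leb() (lines 324-347) refuses to run in read-only mode, write-locks the LEB, forgets its LEB-to-PEB mapping and hands the PEB back to wear-leveling for erasure. A rough sketch of that shape, assuming a plain int array in place of vol->eba_tbl and a stub in place of ubi_wl_put_peb():

    #include <stdio.h>

    #define LEB_UNMAPPED_MODEL (-1)

    static void schedule_erase_model(int pnum)
    {
            printf("PEB %d scheduled for erasure\n", pnum);
    }

    /* Model of ubi_eba_unmap_leb(); locking elided (see the rwlock
     * sketch above). */
    static int unmap_leb_model(int *eba_tbl, int lnum, int ro_mode)
    {
            int pnum;

            if (ro_mode)
                    return -1;                      /* -EROFS in the kernel */
            pnum = eba_tbl[lnum];
            if (pnum != LEB_UNMAPPED_MODEL) {
                    eba_tbl[lnum] = LEB_UNMAPPED_MODEL;
                    schedule_erase_model(pnum);     /* ubi_wl_put_peb(ubi, pnum, 0) */
            }
            return 0;
    }
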
353 * @ubi: UBI device description object
370 int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
377 err = leb_read_lock(ubi, vol_id, lnum);
390 leb_read_unlock(ubi, vol_id, lnum);
404 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
410 err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
420 ubi_ro_mode(ubi);
430 ubi_free_vid_hdr(ubi, vid_hdr);
433 err = ubi_io_read_data(ubi, buf, pnum, offset, len);
462 err = ubi_wl_scrub_peb(ubi, pnum);
464 leb_read_unlock(ubi, vol_id, lnum);
468 ubi_free_vid_hdr(ubi, vid_hdr);
470 leb_read_unlock(ubi, vol_id, lnum);
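
The read path (lines 370-470) takes the read lock, verifies the VID header when checking is requested, reads the data and, when the flash reports correctable bitflips, still returns good data but queues the PEB for scrubbing via ubi_wl_scrub_peb(). A hedged sketch of that error-handling shape, with stubs standing in for the I/O and wear-leveling calls:

    #include <stdio.h>
    #include <string.h>

    enum { IO_OK = 0, IO_BITFLIPS = 1 };

    /* Stubs standing in for ubi_io_read_data() and ubi_wl_scrub_peb(). */
    static int io_read_data_model(void *buf, int pnum, int offset, int len)
    {
            (void)pnum; (void)offset;
            memset(buf, 0xFF, len);         /* pretend the range was erased */
            return IO_OK;
    }

    static void scrub_peb_model(int pnum)
    {
            printf("PEB %d queued for scrubbing\n", pnum);
    }

    /* Correctable bitflips are not an error for the caller, but they do
     * mean the eraseblock is degrading and should be scrubbed (copied
     * to a fresh PEB by wear-leveling). */
    static int read_leb_data_model(int pnum, void *buf, int offset, int len)
    {
            int err = io_read_data_model(buf, pnum, offset, len);

            if (err == IO_BITFLIPS) {
                    scrub_peb_model(pnum);
                    err = IO_OK;
            }
            return err;
    }
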
476 * @ubi: UBI device description object
490 static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
493 int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
494 struct ubi_volume *vol = ubi->volumes[idx];
497 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
502 new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
504 ubi_free_vid_hdr(ubi, vid_hdr);
510 err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
517 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
518 err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
523 mutex_lock(&ubi->buf_mutex);
524 memset(ubi->peb_buf1 + offset, 0xFF, len);
528 err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset);
533 memcpy(ubi->peb_buf1 + offset, buf, len);
535 err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size);
537 mutex_unlock(&ubi->buf_mutex);
541 mutex_unlock(&ubi->buf_mutex);
542 ubi_free_vid_hdr(ubi, vid_hdr);
545 ubi_wl_put_peb(ubi, pnum, 1);
551 mutex_unlock(&ubi->buf_mutex);
553 ubi_wl_put_peb(ubi, new_pnum, 1);
554 ubi_free_vid_hdr(ubi, vid_hdr);
563 ubi_wl_put_peb(ubi, new_pnum, 1);
565 ubi_free_vid_hdr(ubi, vid_hdr);
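
recover_peb() (lines 490-565) salvages a LEB whose write just failed: it grabs a fresh PEB, restamps the VID header with a new sequence number, rebuilds the full data image in ubi->peb_buf1 under ubi->buf_mutex (the buffer is shared device-wide), writes it out, and tortures the old PEB. The buffer assembly follows the memset/read/memcpy sequence visible above; a sketch with caller-supplied buffers and illustrative names:

    #include <stdint.h>
    #include <string.h>

    /* @old_contents holds the first @offset bytes read back from the
     * failing PEB (ubi_io_read_data() in eba.c); @new_data is the write
     * that failed.  Returns the data_size to write to the new PEB. */
    static int assemble_recovery_image(uint8_t *peb_buf,
                                       const uint8_t *old_contents, int offset,
                                       const uint8_t *new_data, int len)
    {
            memset(peb_buf + offset, 0xFF, len);    /* erased-flash fill */
            memcpy(peb_buf, old_contents, offset);
            memcpy(peb_buf + offset, new_data, len);
            return offset + len;
    }
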
574 * @ubi: UBI device description object
587 int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
593 if (ubi->ro_mode)
596 err = leb_write_lock(ubi, vol_id, lnum);
605 err = ubi_io_write_data(ubi, buf, pnum, offset, len);
608 if (err == -EIO && ubi->bad_allowed)
609 err = recover_peb(ubi, pnum, vol_id, lnum, buf,
612 ubi_ro_mode(ubi);
614 leb_write_unlock(ubi, vol_id, lnum);
622 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
624 leb_write_unlock(ubi, vol_id, lnum);
629 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
632 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
636 pnum = ubi_wl_get_peb(ubi, dtype);
638 ubi_free_vid_hdr(ubi, vid_hdr);
639 leb_write_unlock(ubi, vol_id, lnum);
646 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
654 err = ubi_io_write_data(ubi, buf, pnum, offset, len);
665 leb_write_unlock(ubi, vol_id, lnum);
666 ubi_free_vid_hdr(ubi, vid_hdr);
670 if (err != -EIO || !ubi->bad_allowed) {
671 ubi_ro_mode(ubi);
672 leb_write_unlock(ubi, vol_id, lnum);
673 ubi_free_vid_hdr(ubi, vid_hdr);
682 err = ubi_wl_put_peb(ubi, pnum, 1);
684 ubi_ro_mode(ubi);
685 leb_write_unlock(ubi, vol_id, lnum);
686 ubi_free_vid_hdr(ubi, vid_hdr);
690 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
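
ubi_eba_write_leb() (lines 587-690) maps the LEB on first write: it stamps a VID header, picks a PEB from wear-leveling, writes header then data, and on an I/O error on a device that tolerates bad blocks (ubi->bad_allowed) gives the failed PEB back for torture testing and retries with a fresh PEB and a fresh sequence number. The retry loop looks roughly like this (stubbed helpers; the retry constant is an illustrative stand-in for UBI_IO_RETRIES):

    #define IO_RETRIES_MODEL 3

    static int next_free_peb = 100;

    static int get_peb_model(void) { return next_free_peb++; }

    static int write_data_model(const void *buf, int pnum, int off, int len)
    {
            (void)buf; (void)pnum; (void)off; (void)len;
            return 0;                       /* pretend the write succeeded */
    }

    static void torture_peb_model(int pnum) { (void)pnum; }

    static int write_leb_model(const void *buf, int offset, int len)
    {
            int tries = 0;

            for (;;) {
                    int pnum = get_peb_model();     /* ubi_wl_get_peb() */
                    int err;

                    if (pnum < 0)
                            return pnum;
                    err = write_data_model(buf, pnum, offset, len);
                    if (err == 0)
                            return 0;
                    torture_peb_model(pnum);        /* ubi_wl_put_peb(ubi, pnum, 1) */
                    if (++tries > IO_RETRIES_MODEL)
                            return err;
                    /* retry with a fresh PEB and a fresh sqnum, as at
                     * line 690 above */
            }
    }
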
697 * @ubi: UBI device description object
718 int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
726 if (ubi->ro_mode)
731 len = ALIGN(data_size, ubi->min_io_size);
733 ubi_assert(!(len & (ubi->min_io_size - 1)));
735 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
739 err = leb_write_lock(ubi, vol_id, lnum);
741 ubi_free_vid_hdr(ubi, vid_hdr);
745 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
748 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
758 pnum = ubi_wl_get_peb(ubi, dtype);
760 ubi_free_vid_hdr(ubi, vid_hdr);
761 leb_write_unlock(ubi, vol_id, lnum);
768 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
775 err = ubi_io_write_data(ubi, buf, pnum, 0, len);
785 leb_write_unlock(ubi, vol_id, lnum);
786 ubi_free_vid_hdr(ubi, vid_hdr);
790 if (err != -EIO || !ubi->bad_allowed) {
796 ubi_ro_mode(ubi);
797 leb_write_unlock(ubi, vol_id, lnum);
798 ubi_free_vid_hdr(ubi, vid_hdr);
802 err = ubi_wl_put_peb(ubi, pnum, 1);
804 ubi_ro_mode(ubi);
805 leb_write_unlock(ubi, vol_id, lnum);
806 ubi_free_vid_hdr(ubi, vid_hdr);
810 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
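
ubi_eba_write_leb_st() (lines 718-810) is the static-volume variant: the data length is rounded up to ubi->min_io_size because NAND pages can only be programmed whole, and the assertion at line 733 double-checks the result. The kernel's ALIGN() macro reduces to the usual power-of-two trick:

    #include <assert.h>

    #define ALIGN_MODEL(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            int min_io_size = 2048;         /* a typical NAND page size */

            assert(ALIGN_MODEL(1,    min_io_size) == 2048);
            assert(ALIGN_MODEL(2048, min_io_size) == 2048);
            assert(ALIGN_MODEL(2049, min_io_size) == 4096);
            /* the property ubi_assert() checks at line 733: */
            assert((ALIGN_MODEL(777, min_io_size) & (min_io_size - 1)) == 0);
            return 0;
    }
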
817 * @ubi: UBI device description object
831 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
833 int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
840 if (ubi->ro_mode)
848 err = ubi_eba_unmap_leb(ubi, vol, lnum);
851 return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
854 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
858 mutex_lock(&ubi->alc_mutex);
859 err = leb_write_lock(ubi, vol_id, lnum);
863 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
866 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
876 pnum = ubi_wl_get_peb(ubi, dtype);
885 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
892 err = ubi_io_write_data(ubi, buf, pnum, 0, len);
900 err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 0);
908 leb_write_unlock(ubi, vol_id, lnum);
910 mutex_unlock(&ubi->alc_mutex);
911 ubi_free_vid_hdr(ubi, vid_hdr);
915 if (err != -EIO || !ubi->bad_allowed) {
921 ubi_ro_mode(ubi);
925 err = ubi_wl_put_peb(ubi, pnum, 1);
927 ubi_ro_mode(ubi);
931 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
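
ubi_eba_atomic_leb_change() (lines 833-931) rewrites a LEB so that readers see either the old contents or the new, never a mix: the new payload is checksummed and written to a spare PEB first, and only after that write succeeds is the old PEB released; ubi->alc_mutex serializes the whole operation, and a zero-length change degenerates to unmap plus an empty write (lines 848-851). A sketch of the checksum-then-swap shape; crc32_model() is a plain bitwise CRC32 with the reflected 0xEDB88320 polynomial, seeded like UBI_CRC32_INIT, and the I/O helpers are stubs:

    #include <stdint.h>
    #include <stddef.h>

    #define CRC32_INIT_MODEL 0xFFFFFFFFu    /* UBI_CRC32_INIT */

    static uint32_t crc32_model(uint32_t crc, const uint8_t *buf, size_t len)
    {
            while (len--) {
                    crc ^= *buf++;
                    for (int k = 0; k < 8; k++)
                            crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
            }
            return crc;
    }

    /* Stubs for the wear-leveling and I/O calls. */
    static int get_spare_peb_model(void) { return 42; }

    static int write_with_crc_model(int pnum, const uint8_t *buf, size_t len,
                                    uint32_t crc)
    { (void)pnum; (void)buf; (void)len; (void)crc; return 0; }

    static void release_peb_model(int pnum) { (void)pnum; }

    /* The old PEB is only given back once the new image is safely on
     * flash, so a power cut mid-change leaves the old data intact. */
    static int atomic_change_model(int *mapped_pnum, const uint8_t *buf,
                                   size_t len)
    {
            uint32_t crc = crc32_model(CRC32_INIT_MODEL, buf, len);
            int new_pnum = get_spare_peb_model();

            if (new_pnum < 0)
                    return new_pnum;
            if (write_with_crc_model(new_pnum, buf, len, crc) != 0)
                    return -1;              /* old mapping still valid */
            release_peb_model(*mapped_pnum);/* ubi_wl_put_peb(..., 0) */
            *mapped_pnum = new_pnum;
            return 0;
    }
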
965 * @ubi: UBI device description object
977 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
991 aldata_size = ALIGN(data_size, ubi->min_io_size);
994 ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
996 idx = vol_id2idx(ubi, vol_id);
997 spin_lock(&ubi->volumes_lock);
1004 vol = ubi->volumes[idx];
1005 spin_unlock(&ubi->volumes_lock);
1020 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
1021 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
1025 err = leb_write_trylock(ubi, vol_id, lnum);
1033 * probably waiting on @ubi->move_mutex. No need to continue the work,
1046 * this function utilizes the @ubi->peb_buf1 buffer which is shared
1048 * @ubi->buf_mutex.
1050 mutex_lock(&ubi->buf_mutex);
1052 err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
1072 ubi_calc_data_len(ubi, ubi->peb_buf1, data_size);
1075 crc = crc32(UBI_CRC32_INIT, ubi->peb_buf1, data_size);
1089 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
1091 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
1101 err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
1114 err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
1128 err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size);
1142 if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
1154 mutex_unlock(&ubi->buf_mutex);
1156 leb_write_unlock(ubi, vol_id, lnum);
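
ubi_eba_copy_leb() (lines 977-1156) is wear-leveling's data mover. It trylocks the LEB to avoid the move_mutex deadlock described in the comments above, stages the data in ubi->peb_buf1 under ubi->buf_mutex, writes it to the target PEB and then reads it back into ubi->peb_buf2 for a byte-for-byte comparison, so a silently failed write is caught before the old PEB is dropped. The verify step, sketched over a one-PEB in-memory "flash" (all names illustrative):

    #include <stdint.h>
    #include <string.h>

    #define PEB_SIZE_MODEL 4096
    static uint8_t flash_model[PEB_SIZE_MODEL];

    static int peb_write_model(int pnum, const uint8_t *buf, size_t len)
    {
            (void)pnum;
            memcpy(flash_model, buf, len);
            return 0;
    }

    static int peb_read_model(int pnum, uint8_t *buf, size_t len)
    {
            (void)pnum;
            memcpy(buf, flash_model, len);
            return 0;
    }

    /* Write @src to PEB @to, read it back into @readback (peb_buf2 in
     * eba.c) and compare byte for byte, as the memcmp at line 1142
     * does; any mismatch cancels the move while the source PEB is
     * still intact. */
    static int copy_and_verify_model(int to, const uint8_t *src,
                                     uint8_t *readback, size_t aldata_size)
    {
            if (peb_write_model(to, src, aldata_size) != 0)
                    return -1;
            if (peb_read_model(to, readback, aldata_size) != 0)
                    return -1;
            if (memcmp(src, readback, aldata_size) != 0)
                    return -1;
            return 0;
    }
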
1162 * @ubi: UBI device description object
1178 static void print_rsvd_warning(struct ubi_device *ubi,
1186 int min = ubi->beb_rsvd_level / 10;
1190 if (ubi->beb_rsvd_pebs > min)
1195 " need %d", ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
1200 * @ubi: UBI device description object
1206 int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1216 spin_lock_init(&ubi->ltree_lock);
1217 mutex_init(&ubi->alc_mutex);
1218 ubi->ltree = RB_ROOT;
1220 ubi->global_sqnum = si->max_sqnum + 1;
1221 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1224 vol = ubi->volumes[i];
1240 sv = ubi_scan_find_sv(si, idx2vol_id(ubi, i));
1255 if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
1257 ubi->avail_pebs, EBA_RESERVED_PEBS);
1261 ubi->avail_pebs -= EBA_RESERVED_PEBS;
1262 ubi->rsvd_pebs += EBA_RESERVED_PEBS;
1264 if (ubi->bad_allowed) {
1265 ubi_calculate_reserved(ubi);
1267 if (ubi->avail_pebs < ubi->beb_rsvd_level) {
1269 ubi->beb_rsvd_pebs = ubi->avail_pebs;
1270 print_rsvd_warning(ubi, si);
1272 ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
1274 ubi->avail_pebs -= ubi->beb_rsvd_pebs;
1275 ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
1283 if (!ubi->volumes[i])
1285 kfree(ubi->volumes[i]->eba_tbl);
1286 ubi->volumes[i]->eba_tbl = NULL;
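
ubi_eba_init_scan() (lines 1206 onward) rebuilds the per-volume EBA tables from the scan info, seeds the global sequence counter from si->max_sqnum + 1, and then does the PEB budgeting shown above: EBA_RESERVED_PEBS are always held back, and on bad-block-capable flash a further pool of beb_rsvd_level PEBs is reserved, clamped to what is actually available (with the warning print_rsvd_warning() emits). The arithmetic, sketched with an illustrative reserve size:

    #include <stdio.h>

    #define EBA_RESERVED_PEBS_MODEL 1

    /* Returns the total number of PEBs reserved, or -1 (-ENOSPC in the
     * kernel) when not even the EBA reserve fits. */
    static int reserve_pebs_model(int avail_pebs, int bad_allowed,
                                  int beb_rsvd_level)
    {
            int rsvd = 0;

            if (avail_pebs < EBA_RESERVED_PEBS_MODEL)
                    return -1;
            avail_pebs -= EBA_RESERVED_PEBS_MODEL;
            rsvd += EBA_RESERVED_PEBS_MODEL;

            if (bad_allowed) {
                    int beb_rsvd_pebs = beb_rsvd_level;

                    if (avail_pebs < beb_rsvd_level) {
                            beb_rsvd_pebs = avail_pebs;
                            fprintf(stderr,
                                    "cannot reserve enough PEBs for bad "
                                    "block handling: got %d, need %d\n",
                                    beb_rsvd_pebs, beb_rsvd_level);
                    }
                    avail_pebs -= beb_rsvd_pebs;
                    rsvd += beb_rsvd_pebs;
            }
            return rsvd;
    }
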