Results limited to: /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/mtd/ubi/

Lines matching refs: ubi

26 #include "ubi.h"
150 int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
157 static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec);
161 #define paranoid_check_ec(ubi, pnum, ec) 0
185 * the @ubi->used and @ubi->free RB-trees.
221 static void free_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
223 wl_tree_add(e, &ubi->free);
225 static inline void used_tree_add(struct ubi_device *ubi,
228 wl_tree_add(e, &ubi->used);
230 static inline void scrub_tree_add(struct ubi_device *ubi,
233 wl_tree_add(e, &ubi->scrub);
235 static inline void free_tree_del(struct ubi_device *ubi,
238 paranoid_check_in_wl_tree(e, &ubi->free);
239 rb_erase(&e->rb, &ubi->free);
241 static inline void used_tree_del(struct ubi_device *ubi,
244 paranoid_check_in_wl_tree(e, &ubi->used);
245 rb_erase(&e->rb, &ubi->used);
247 static inline void scrub_tree_del(struct ubi_device *ubi,
250 paranoid_check_in_wl_tree(e, &ubi->scrub);
251 rb_erase(&e->rb, &ubi->scrub);
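
All six helpers above are thin wrappers that funnel into wl_tree_add() and rb_erase(). A minimal sketch of wl_tree_add(), reconstructed on the assumption (consistent with the fragments) that the trees are keyed by erase counter with the physical eraseblock number as tie-breaker, so that rb_first() always yields the least-worn PEB:

    static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
    {
            struct rb_node **p = &root->rb_node, *parent = NULL;

            while (*p) {
                    struct ubi_wl_entry *e1;

                    parent = *p;
                    e1 = rb_entry(parent, struct ubi_wl_entry, rb);

                    if (e->ec < e1->ec)
                            p = &parent->rb_left;
                    else if (e->ec > e1->ec)
                            p = &parent->rb_right;
                    else if (e->pnum < e1->pnum)
                            p = &parent->rb_left;
                    else
                            p = &parent->rb_right;
            }

            rb_link_node(&e->rb, parent, p);
            rb_insert_color(&e->rb, root);
    }
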
256 * @ubi: UBI device description object
261 static int do_work(struct ubi_device *ubi)
266 spin_lock(&ubi->wl_lock);
268 if (list_empty(&ubi->works)) {
269 spin_unlock(&ubi->wl_lock);
273 wrk = list_entry(ubi->works.next, struct ubi_work, list);
275 spin_unlock(&ubi->wl_lock);
282 err = wrk->func(ubi, wrk, 0);
286 spin_lock(&ubi->wl_lock);
287 ubi->works_count -= 1;
288 ubi_assert(ubi->works_count >= 0);
289 spin_unlock(&ubi->wl_lock);
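
do_work() takes the first entry off @ubi->works under wl_lock but runs the handler with the lock dropped, because work functions erase flash and may sleep. The list_del() between lines 273 and 275 contains no "ubi" and is therefore missing from this listing; the full pattern is roughly:

    spin_lock(&ubi->wl_lock);
    if (list_empty(&ubi->works)) {
            spin_unlock(&ubi->wl_lock);
            return 0;
    }
    wrk = list_entry(ubi->works.next, struct ubi_work, list);
    list_del(&wrk->list);
    spin_unlock(&ubi->wl_lock);

    err = wrk->func(ubi, wrk, 0);       /* may sleep; runs unlocked */

    spin_lock(&ubi->wl_lock);
    ubi->works_count -= 1;
    ubi_assert(ubi->works_count >= 0);
    spin_unlock(&ubi->wl_lock);
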
295 * @ubi: UBI device description object
302 static int produce_free_peb(struct ubi_device *ubi)
306 spin_lock(&ubi->wl_lock);
307 while (tree_empty(&ubi->free)) {
308 spin_unlock(&ubi->wl_lock);
311 err = do_work(ubi);
315 spin_lock(&ubi->wl_lock);
317 spin_unlock(&ubi->wl_lock);
363 * @ubi: UBI device description object
371 static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e,
378 pe->abs_ec = ubi->abs_ec + abs_ec;
380 p = &ubi->prot.pnum.rb_node;
391 rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);
393 p = &ubi->prot.aec.rb_node;
405 rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
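
prot_tree_add() links a single protection entry into two RB-trees at once: @ubi->prot.pnum, keyed by physical eraseblock number so the entry can be found when the PEB is put back, and @ubi->prot.aec, keyed by the absolute erase counter at which the protection expires. A sketch of the entry, with field names taken from the fragments above (exact layout assumed):

    struct ubi_wl_prot_entry {
            struct rb_node rb_pnum;     /* link in ubi->prot.pnum */
            struct rb_node rb_aec;      /* link in ubi->prot.aec */
            unsigned long long abs_ec;  /* expiry; set from ubi->abs_ec plus
                                           the requested protection length */
            struct ubi_wl_entry *e;     /* the protected eraseblock */
    };
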
442 * @ubi: UBI device description object
448 int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
462 spin_lock(&ubi->wl_lock);
463 if (tree_empty(&ubi->free)) {
464 if (ubi->works_count == 0) {
465 ubi_assert(list_empty(&ubi->works));
467 spin_unlock(&ubi->wl_lock);
471 spin_unlock(&ubi->wl_lock);
473 err = produce_free_peb(ubi);
489 e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
500 first = rb_entry(rb_first(&ubi->free),
502 last = rb_entry(rb_last(&ubi->free),
506 e = rb_entry(ubi->free.rb_node,
510 e = find_wl_entry(&ubi->free, medium_ec);
520 e = rb_entry(rb_first(&ubi->free),
534 free_tree_del(ubi, e);
535 prot_tree_add(ubi, e, pe, protect);
538 spin_unlock(&ubi->wl_lock);
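
Only scattered lines of the dtype switch (source lines 489-520) survive the filter. Reconstructed as a sketch: long-term data is steered to a more-worn free PEB (bounded by WL_FREE_MAX_DIFF above the minimum), short-term data to the least-worn one, and unknown data to a medium-worn one; the chosen entry then leaves the free tree for the protection trees so it is not immediately re-picked. The LT/U/ST_PROTECTION constants are assumed to be the per-type protection periods:

    switch (dtype) {
    case UBI_LONGTERM:
            /* High erase counter, but no more than the lowest counter
             * in the tree plus WL_FREE_MAX_DIFF. */
            e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
            protect = LT_PROTECTION;
            break;
    case UBI_UNKNOWN:
            first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb);
            last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, rb);
            if (last->ec - first->ec < WL_FREE_MAX_DIFF)
                    e = rb_entry(ubi->free.rb_node,
                                 struct ubi_wl_entry, rb);
            else {
                    medium_ec = (first->ec + last->ec) / 2;
                    e = find_wl_entry(&ubi->free, medium_ec);
            }
            protect = U_PROTECTION;
            break;
    case UBI_SHORTTERM:
            /* Least worn-out PEB: the data is expected to be replaced
             * soon anyway. */
            e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb);
            protect = ST_PROTECTION;
            break;
    }
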
545 * @ubi: UBI device description object
548 static void prot_tree_del(struct ubi_device *ubi, int pnum)
553 p = ubi->prot.pnum.rb_node;
568 rb_erase(&pe->rb_aec, &ubi->prot.aec);
569 rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
575 * @ubi: UBI device description object
582 static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture)
590 err = paranoid_check_ec(ubi, e->pnum, e->ec);
594 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
598 err = ubi_io_sync_erase(ubi, e->pnum, torture);
618 err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
623 spin_lock(&ubi->wl_lock);
624 if (e->ec > ubi->max_ec)
625 ubi->max_ec = e->ec;
626 spin_unlock(&ubi->wl_lock);
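
sync_erase() is the one place an erase counter grows: the PEB is physically erased, a fresh EC header carrying the bumped counter is written back, and @ubi->max_ec is raised if needed. Condensed sketch of the middle of the function (overflow and error paths trimmed; cpu_to_ubi64() is assumed to be this UBI generation's on-flash endianness helper):

    err = ubi_io_sync_erase(ubi, e->pnum, torture);
    if (err < 0)
            goto out_free;

    /* ubi_io_sync_erase() returns how many times the PEB was actually
     * erased, which is more than once when torture-testing. */
    ec = e->ec + err;

    ec_hdr->ec = cpu_to_ubi64(ec);
    err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
    if (err)
            goto out_free;

    e->ec = ec;
    spin_lock(&ubi->wl_lock);
    if (e->ec > ubi->max_ec)
            ubi->max_ec = e->ec;
    spin_unlock(&ubi->wl_lock);
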
636 * @ubi: UBI device description object
643 static void check_protection_over(struct ubi_device *ubi)
652 spin_lock(&ubi->wl_lock);
653 if (tree_empty(&ubi->prot.aec)) {
654 spin_unlock(&ubi->wl_lock);
658 pe = rb_entry(rb_first(&ubi->prot.aec),
661 if (pe->abs_ec > ubi->abs_ec) {
662 spin_unlock(&ubi->wl_lock);
667 pe->e->pnum, ubi->abs_ec, pe->abs_ec);
668 rb_erase(&pe->rb_aec, &ubi->prot.aec);
669 rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
670 used_tree_add(ubi, pe->e);
671 spin_unlock(&ubi->wl_lock);
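
Because @ubi->prot.aec is ordered by expiry, check_protection_over() only ever needs to look at the leftmost node. Sketch of the loop:

    for (;;) {
            spin_lock(&ubi->wl_lock);
            if (tree_empty(&ubi->prot.aec)) {
                    spin_unlock(&ubi->wl_lock);
                    break;
            }

            pe = rb_entry(rb_first(&ubi->prot.aec),
                          struct ubi_wl_prot_entry, rb_aec);
            if (pe->abs_ec > ubi->abs_ec) {
                    spin_unlock(&ubi->wl_lock);
                    break;                  /* not expired yet */
            }

            rb_erase(&pe->rb_aec, &ubi->prot.aec);
            rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
            used_tree_add(ubi, pe->e);      /* eligible for WL again */
            spin_unlock(&ubi->wl_lock);
            kfree(pe);
    }
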
680 * @ubi: UBI device description object
686 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
688 spin_lock(&ubi->wl_lock);
689 list_add_tail(&wrk->list, &ubi->works);
690 ubi_assert(ubi->works_count >= 0);
691 ubi->works_count += 1;
692 if (ubi->thread_enabled)
693 wake_up_process(ubi->bgt_thread);
694 spin_unlock(&ubi->wl_lock);
697 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
702 * @ubi: UBI device description object
709 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
725 schedule_ubi_work(ubi, wl_wrk);
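
schedule_erase() just packages the eraseblock into a work item and hands it to the background thread; a sketch of the whole function:

    static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
                              int torture)
    {
            struct ubi_work *wl_wrk;

            wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_KERNEL);
            if (!wl_wrk)
                    return -ENOMEM;

            wl_wrk->func = &erase_worker;
            wl_wrk->e = e;
            wl_wrk->torture = torture;

            schedule_ubi_work(ubi, wl_wrk);
            return 0;
    }
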
731 * @ubi: UBI device description object
739 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
751 vid_hdr = ubi_zalloc_vid_hdr(ubi);
755 spin_lock(&ubi->wl_lock);
761 if (ubi->move_to || tree_empty(&ubi->free) ||
762 (tree_empty(&ubi->used) && tree_empty(&ubi->scrub))) {
773 * @ubi->used tree later and the wear-leveling will be
777 tree_empty(&ubi->free), tree_empty(&ubi->used));
778 ubi->wl_scheduled = 0;
779 spin_unlock(&ubi->wl_lock);
780 ubi_free_vid_hdr(ubi, vid_hdr);
784 if (tree_empty(&ubi->scrub)) {
790 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
791 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
796 ubi->wl_scheduled = 0;
797 spin_unlock(&ubi->wl_lock);
798 ubi_free_vid_hdr(ubi, vid_hdr);
801 used_tree_del(ubi, e1);
805 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
806 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
807 scrub_tree_del(ubi, e1);
811 free_tree_del(ubi, e2);
812 ubi_assert(!ubi->move_from && !ubi->move_to);
813 ubi_assert(!ubi->move_to_put && !ubi->move_from_put);
814 ubi->move_from = e1;
815 ubi->move_to = e2;
816 spin_unlock(&ubi->wl_lock);
825 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
847 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
854 ubi_free_vid_hdr(ubi, vid_hdr);
855 spin_lock(&ubi->wl_lock);
856 if (!ubi->move_to_put)
857 used_tree_add(ubi, e2);
860 ubi->move_from = ubi->move_to = NULL;
861 ubi->move_from_put = ubi->move_to_put = 0;
862 ubi->wl_scheduled = 0;
863 spin_unlock(&ubi->wl_lock);
871 err = schedule_erase(ubi, e2, 0);
874 ubi_ro_mode(ubi);
878 err = schedule_erase(ubi, e1, 0);
881 ubi_ro_mode(ubi);
896 ubi_free_vid_hdr(ubi, vid_hdr);
897 spin_lock(&ubi->wl_lock);
898 ubi->wl_scheduled = 0;
899 if (ubi->move_from_put)
902 used_tree_add(ubi, e1);
903 ubi->move_from = ubi->move_to = NULL;
904 ubi->move_from_put = ubi->move_to_put = 0;
905 spin_unlock(&ubi->wl_lock);
913 err = schedule_erase(ubi, e1, 0);
916 ubi_ro_mode(ubi);
920 err = schedule_erase(ubi, e2, 0);
923 ubi_ro_mode(ubi);
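
The move_from/move_to pointers and the *_put flags are what make the copy safe against a concurrent ubi_wl_put_peb(): a PEB that is mid-move must not be erased directly, so the put only raises a flag, and the worker honours it once the copy is done. The success path around lines 855-878, condensed (error handling trimmed):

    spin_lock(&ubi->wl_lock);
    if (!ubi->move_to_put)
            used_tree_add(ubi, e2);     /* the data now lives on e2 */
    else
            put = 1;                    /* LEB was put mid-move */
    ubi->move_from = ubi->move_to = NULL;
    ubi->move_from_put = ubi->move_to_put = 0;
    ubi->wl_scheduled = 0;
    spin_unlock(&ubi->wl_lock);

    if (put)
            err = schedule_erase(ubi, e2, 0);
    err = schedule_erase(ubi, e1, 0);   /* the old copy always goes away */
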
932 * @ubi: UBI device description object
938 static int ensure_wear_leveling(struct ubi_device *ubi)
945 spin_lock(&ubi->wl_lock);
946 if (ubi->wl_scheduled)
951 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
954 if (tree_empty(&ubi->scrub)) {
955 if (tree_empty(&ubi->used) || tree_empty(&ubi->free))
965 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
966 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
974 ubi->wl_scheduled = 1;
975 spin_unlock(&ubi->wl_lock);
984 schedule_ubi_work(ubi, wrk);
988 spin_lock(&ubi->wl_lock);
989 ubi->wl_scheduled = 0;
991 spin_unlock(&ubi->wl_lock);
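
The gap between lines 966 and 974 hides the actual trigger test: wear-leveling is scheduled only when the erase-counter gap between the candidate free PEB and the most-worn used PEB reaches UBI_WL_THRESHOLD. Sketched:

    e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
    e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

    if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
            goto out_unlock;            /* wear is even enough, do nothing */
    dbg_wl("schedule wear-leveling");
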
997 * @ubi: UBI device description object
1006 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1022 err = sync_erase(ubi, e, wl_wrk->torture);
1027 spin_lock(&ubi->wl_lock);
1028 ubi->abs_ec += 1;
1029 free_tree_add(ubi, e);
1030 spin_unlock(&ubi->wl_lock);
1036 check_protection_over(ubi);
1039 err = ensure_wear_leveling(ubi);
1052 ubi_ro_mode(ubi);
1058 if (!ubi->bad_allowed) {
1060 ubi_ro_mode(ubi);
1065 spin_lock(&ubi->volumes_lock);
1066 need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
1068 need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
1069 ubi->avail_pebs -= need;
1070 ubi->rsvd_pebs += need;
1071 ubi->beb_rsvd_pebs += need;
1076 if (ubi->beb_rsvd_pebs == 0) {
1077 spin_unlock(&ubi->volumes_lock);
1079 ubi_ro_mode(ubi);
1083 spin_unlock(&ubi->volumes_lock);
1086 err = ubi_io_mark_bad(ubi, pnum);
1088 ubi_ro_mode(ubi);
1092 spin_lock(&ubi->volumes_lock);
1093 ubi->beb_rsvd_pebs -= 1;
1094 ubi->bad_peb_count += 1;
1095 ubi->good_peb_count -= 1;
1096 ubi_calculate_reserved(ubi);
1097 if (ubi->beb_rsvd_pebs == 0)
1099 spin_unlock(&ubi->volumes_lock);
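
The tail of erase_worker() (lines 1058-1099) deals with an eraseblock that turned out to be bad: the bad-block reserve is topped up from the available-PEB pool whenever it has fallen below beb_rsvd_level, the PEB is marked bad, and the good/bad counters are rebalanced. Condensed sketch of the top-up:

    spin_lock(&ubi->volumes_lock);
    need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
    if (need > 0) {
            need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
            ubi->avail_pebs -= need;
            ubi->rsvd_pebs += need;
            ubi->beb_rsvd_pebs += need;
    }
    spin_unlock(&ubi->volumes_lock);
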
1108 * @ubi: UBI device description object
1117 int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
1124 ubi_assert(pnum < ubi->peb_count);
1126 spin_lock(&ubi->wl_lock);
1128 e = ubi->lookuptbl[pnum];
1129 if (e == ubi->move_from) {
1136 ubi_assert(!ubi->move_from_put);
1137 ubi->move_from_put = 1;
1138 spin_unlock(&ubi->wl_lock);
1140 } else if (e == ubi->move_to) {
1148 ubi_assert(!ubi->move_to_put);
1149 ubi->move_to_put = 1;
1150 spin_unlock(&ubi->wl_lock);
1153 if (in_wl_tree(e, &ubi->used))
1154 used_tree_del(ubi, e);
1155 else if (in_wl_tree(e, &ubi->scrub))
1156 scrub_tree_del(ubi, e);
1158 prot_tree_del(ubi, e->pnum);
1160 spin_unlock(&ubi->wl_lock);
1162 err = schedule_erase(ubi, e, torture);
1164 spin_lock(&ubi->wl_lock);
1165 used_tree_add(ubi, e);
1166 spin_unlock(&ubi->wl_lock);
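
Outside the two special cases (the PEB is the source or the target of a move in progress), ubi_wl_put_peb() must unlink the entry from whichever structure currently owns it before queueing the erase. The intervening else at line 1157 contains no "ubi" and is filtered from the listing; the full chain is:

    if (in_wl_tree(e, &ubi->used))
            used_tree_del(ubi, e);
    else if (in_wl_tree(e, &ubi->scrub))
            scrub_tree_del(ubi, e);
    else
            prot_tree_del(ubi, e->pnum);
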
1174 * @ubi: UBI device description object
1182 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1189 spin_lock(&ubi->wl_lock);
1190 e = ubi->lookuptbl[pnum];
1191 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
1192 spin_unlock(&ubi->wl_lock);
1196 if (e == ubi->move_to) {
1203 spin_unlock(&ubi->wl_lock);
1209 if (in_wl_tree(e, &ubi->used))
1210 used_tree_del(ubi, e);
1212 prot_tree_del(ubi, pnum);
1214 scrub_tree_add(ubi, e);
1215 spin_unlock(&ubi->wl_lock);
1221 return ensure_wear_leveling(ubi);
1226 * @ubi: UBI device description object
1231 int ubi_wl_flush(struct ubi_device *ubi)
1235 pending_count = ubi->works_count;
1244 err = do_work(ubi);
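
ubi_wl_flush() snapshots works_count and synchronously runs that many works; anything queued after the snapshot is not waited for. Sketch:

    pending_count = ubi->works_count;
    while (pending_count-- > 0) {
            err = do_work(ubi);
            if (err)
                    return err;
    }
    return 0;
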
1290 struct ubi_device *ubi = u;
1293 ubi->bgt_name, current->pid);
1304 spin_lock(&ubi->wl_lock);
1305 if (list_empty(&ubi->works) || ubi->ro_mode ||
1306 !ubi->thread_enabled) {
1308 spin_unlock(&ubi->wl_lock);
1312 spin_unlock(&ubi->wl_lock);
1314 err = do_work(ubi);
1317 ubi->bgt_name, err);
1324 ubi->bgt_name, WL_MAX_FAILURES);
1325 ubi_ro_mode(ubi);
1335 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
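
The background thread's main loop, condensed as a sketch: it sleeps while there is nothing runnable, executes one work per iteration, and switches the device to read-only mode after WL_MAX_FAILURES consecutive failures:

    for (;;) {
            if (kthread_should_stop())
                    break;

            spin_lock(&ubi->wl_lock);
            if (list_empty(&ubi->works) || ubi->ro_mode ||
                !ubi->thread_enabled) {
                    set_current_state(TASK_INTERRUPTIBLE);
                    spin_unlock(&ubi->wl_lock);
                    schedule();
                    continue;
            }
            spin_unlock(&ubi->wl_lock);

            err = do_work(ubi);
            if (err) {
                    ubi_err("%s: work failed with error code %d",
                            ubi->bgt_name, err);
                    if (failures++ > WL_MAX_FAILURES) {
                            ubi_msg("%s: %d consecutive failures",
                                    ubi->bgt_name, WL_MAX_FAILURES);
                            ubi_ro_mode(ubi);
                            break;
                    }
            } else
                    failures = 0;

            cond_resched();
    }
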
1341 * @ubi: UBI device description object
1343 static void cancel_pending(struct ubi_device *ubi)
1345 while (!list_empty(&ubi->works)) {
1348 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1350 wrk->func(ubi, wrk, 1);
1351 ubi->works_count -= 1;
1352 ubi_assert(ubi->works_count >= 0);
1359 * @ubi: UBI device description object
1365 int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1374 ubi->used = ubi->free = ubi->scrub = RB_ROOT;
1375 ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
1376 spin_lock_init(&ubi->wl_lock);
1377 ubi->max_ec = si->max_ec;
1378 INIT_LIST_HEAD(&ubi->works);
1380 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1382 ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
1383 if (IS_ERR(ubi->bgt_thread)) {
1384 err = PTR_ERR(ubi->bgt_thread);
1385 ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
1399 ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
1400 if (!ubi->lookuptbl)
1412 ubi->lookuptbl[e->pnum] = e;
1413 if (schedule_erase(ubi, e, 0)) {
1429 free_tree_add(ubi, e);
1430 ubi->lookuptbl[e->pnum] = e;
1442 ubi->lookuptbl[e->pnum] = e;
1443 if (schedule_erase(ubi, e, 0)) {
1459 ubi->lookuptbl[e->pnum] = e;
1463 used_tree_add(ubi, e);
1467 scrub_tree_add(ubi, e);
1472 if (WL_RESERVED_PEBS > ubi->avail_pebs) {
1474 ubi->avail_pebs, WL_RESERVED_PEBS);
1477 ubi->avail_pebs -= WL_RESERVED_PEBS;
1478 ubi->rsvd_pebs += WL_RESERVED_PEBS;
1481 err = ensure_wear_leveling(ubi);
1488 cancel_pending(ubi);
1489 tree_destroy(&ubi->used);
1490 tree_destroy(&ubi->free);
1491 tree_destroy(&ubi->scrub);
1492 kfree(ubi->lookuptbl);
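
ubi_wl_init_scan() converts the scanning layer's lists into wear-leveling state: si->erase and si->corr feed schedule_erase(), si->free populates the free tree, and the eraseblocks of scanned volumes land in the used or scrub tree. Sketch of the free-list loop (wl_entries_slab is assumed to be the slab cache this UBI generation allocates entries from, and seb a struct ubi_scan_leb):

    list_for_each_entry(seb, &si->free, u.list) {
            cond_resched();

            e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
            if (!e)
                    goto out_free;

            e->pnum = seb->pnum;
            e->ec = seb->ec;
            free_tree_add(ubi, e);
            ubi->lookuptbl[e->pnum] = e;
    }
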
1500 * @ubi: UBI device description object
1502 static void protection_trees_destroy(struct ubi_device *ubi)
1507 rb = ubi->prot.aec.rb_node;
1532 * @ubi: UBI device description object
1534 void ubi_wl_close(struct ubi_device *ubi)
1536 dbg_wl("disable \"%s\"", ubi->bgt_name);
1537 if (ubi->bgt_thread)
1538 kthread_stop(ubi->bgt_thread);
1542 cancel_pending(ubi);
1543 protection_trees_destroy(ubi);
1544 tree_destroy(&ubi->used);
1545 tree_destroy(&ubi->free);
1546 tree_destroy(&ubi->scrub);
1547 kfree(ubi->lookuptbl);
1557 * @ubi: UBI device description object
1565 static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec)
1571 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1575 err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
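
The listing ends inside paranoid_check_ec(). The remainder, as a sketch, compares the on-flash erase counter with the in-memory one and flags a mismatch (ubi64_to_cpu() is assumed to be the matching endianness helper):

    read_ec = ubi64_to_cpu(ec_hdr->ec);
    if (ec != read_ec) {
            ubi_err("paranoid check failed for PEB %d", pnum);
            err = 1;
    } else
            err = 0;

    kfree(ec_hdr);
    return err;
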