Lines Matching refs:ubi (only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/mtd/ubi/)

112 #include "ubi.h"
158 int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
165 static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
168 static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e);
170 #define paranoid_check_ec(ubi, pnum, ec) 0
172 #define paranoid_check_in_pq(ubi, e) 0
181 * the @ubi->used and @ubi->free RB-trees.
213 * @ubi: UBI device description object
218 static int do_work(struct ubi_device *ubi)
226 * @ubi->work_sem is used to synchronize with the workers. Workers take
231 down_read(&ubi->work_sem);
232 spin_lock(&ubi->wl_lock);
233 if (list_empty(&ubi->works)) {
234 spin_unlock(&ubi->wl_lock);
235 up_read(&ubi->work_sem);
239 wrk = list_entry(ubi->works.next, struct ubi_work, list);
241 ubi->works_count -= 1;
242 ubi_assert(ubi->works_count >= 0);
243 spin_unlock(&ubi->wl_lock);
250 err = wrk->func(ubi, wrk, 0);
253 up_read(&ubi->work_sem);
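
The do_work() fragments above show the pattern used for every deferred job in this file: one work item is taken off ubi->works under the ubi->wl_lock spinlock while ubi->work_sem is held for reading (so that ubi_wl_flush() can later wait for in-flight callbacks by taking it for writing), and the callback itself runs with the spinlock dropped. Below is a minimal user-space sketch of that pattern, with pthread primitives standing in for the kernel spinlock and rw-semaphore; everything except the shape of the locking is illustrative, not taken from the source.

#include <pthread.h>

struct work {
        struct work *next;
        int (*func)(struct work *w, int cancel);
};

static pthread_mutex_t wl_lock = PTHREAD_MUTEX_INITIALIZER;    /* models ubi->wl_lock */
static pthread_rwlock_t work_sem = PTHREAD_RWLOCK_INITIALIZER; /* models ubi->work_sem */
static struct work *works;                                     /* models ubi->works */
static int works_count;                                        /* models ubi->works_count */

static int do_work(void)
{
        struct work *wrk;
        int err;

        /* Hold the semaphore for reading so that a flusher taking it for
         * writing knows all in-flight callbacks have completed. */
        pthread_rwlock_rdlock(&work_sem);
        pthread_mutex_lock(&wl_lock);
        if (!works) {
                pthread_mutex_unlock(&wl_lock);
                pthread_rwlock_unlock(&work_sem);
                return 0;
        }
        wrk = works;
        works = wrk->next;
        works_count -= 1;
        pthread_mutex_unlock(&wl_lock);

        /* Run the callback with the spinlock dropped; it frees the item. */
        err = wrk->func(wrk, 0);
        pthread_rwlock_unlock(&work_sem);
        return err;
}
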
260 * @ubi: UBI device description object
267 static int produce_free_peb(struct ubi_device *ubi)
271 spin_lock(&ubi->wl_lock);
272 while (!ubi->free.rb_node) {
273 spin_unlock(&ubi->wl_lock);
276 err = do_work(ubi);
280 spin_lock(&ubi->wl_lock);
282 spin_unlock(&ubi->wl_lock);
328 * @ubi: UBI device description object
331 * This function adds @e to the tail of the protection queue @ubi->pq, where
336 static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
338 int pq_tail = ubi->pq_head - 1;
343 list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
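
prot_queue_add() (fragments above) feeds the temporary-protection scheme: ubi->pq is a circular array of UBI_PROT_QUEUE_LEN list heads, ubi->pq_head names the slot that will be served next, and a freshly handed-out PEB is queued at pq_head - 1, the slot that will be served last, so it stays protected for roughly one full lap of the queue. A stand-alone sketch of just the index arithmetic, using a singly linked list instead of the kernel list head (the queue depth here is illustrative):

#define UBI_PROT_QUEUE_LEN 10           /* illustrative queue depth */

struct pq_entry {
        struct pq_entry *next;
        int pnum;
};

static struct pq_entry *pq[UBI_PROT_QUEUE_LEN]; /* models ubi->pq[] */
static int pq_head;                             /* models ubi->pq_head */

/* Queue @e at the "tail" slot, i.e. the one that will be served after all
 * the others, giving it a full lap of the queue worth of protection. */
static void prot_queue_add(struct pq_entry *e)
{
        int pq_tail = pq_head - 1;

        if (pq_tail < 0)
                pq_tail = UBI_PROT_QUEUE_LEN - 1;
        e->next = pq[pq_tail];
        pq[pq_tail] = e;
}
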
381 * @ubi: UBI device description object
387 int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
396 spin_lock(&ubi->wl_lock);
397 if (!ubi->free.rb_node) {
398 if (ubi->works_count == 0) {
399 ubi_assert(list_empty(&ubi->works));
401 spin_unlock(&ubi->wl_lock);
404 spin_unlock(&ubi->wl_lock);
406 err = produce_free_peb(ubi);
420 e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
429 first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
431 last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);
434 e = rb_entry(ubi->free.rb_node,
438 e = find_wl_entry(&ubi->free, medium_ec);
446 e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
452 paranoid_check_in_wl_tree(e, &ubi->free);
458 rb_erase(&e->u.rb, &ubi->free);
460 prot_queue_add(ubi, e);
461 spin_unlock(&ubi->wl_lock);
463 err = ubi_dbg_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
464 ubi->peb_size - ubi->vid_hdr_aloffset);
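
The ubi_wl_get_peb() fragments show the allocation policy: the free tree is keyed by erase counter, and the caller's data-type hint picks a low-EC block for short-term data, the highest-EC block still within WL_FREE_MAX_DIFF of the minimum for long-term data (via find_wl_entry()), or a medium-EC block when the lifetime is unknown. Below is a compressed user-space approximation of that policy over a sorted array instead of the RB-tree; the constant, the enum names and the helper are illustrative, and the rounding does not exactly match find_wl_entry().

#define WL_FREE_MAX_DIFF 8192           /* illustrative EC spread limit */

enum { DT_LONGTERM, DT_SHORTTERM, DT_UNKNOWN };

/* @ec[] is sorted ascending and models the free tree keyed by erase
 * counter (n > 0); returns the index of the PEB to hand out. */
static int pick_free_peb(const int *ec, int n, int dtype)
{
        int i, medium;

        switch (dtype) {
        case DT_SHORTTERM:
                return 0;                       /* lowest erase counter */
        case DT_LONGTERM:
                /* highest EC still within WL_FREE_MAX_DIFF of the minimum */
                for (i = n - 1; i > 0; i--)
                        if (ec[i] < ec[0] + WL_FREE_MAX_DIFF)
                                break;
                return i;
        default:                                /* unknown lifetime */
                if (ec[n - 1] - ec[0] < WL_FREE_MAX_DIFF)
                        return n / 2;           /* spread is small, any block */
                medium = ec[0] + WL_FREE_MAX_DIFF / 2;
                for (i = 0; i < n - 1; i++)
                        if (ec[i] >= medium)
                                break;
                return i;
        }
}

The usual rationale is that long-lived data freezes its block's erase counter, so it is parked on an already-worn block to give that block a rest, while short-lived data goes to the least-worn block because it will be erased again soon.
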
475 * @ubi: UBI device description object
481 static int prot_queue_del(struct ubi_device *ubi, int pnum)
485 e = ubi->lookuptbl[pnum];
489 if (paranoid_check_in_pq(ubi, e))
499 * @ubi: UBI device description object
506 static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
515 err = paranoid_check_ec(ubi, e->pnum, e->ec);
519 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
523 err = ubi_io_sync_erase(ubi, e->pnum, torture);
543 err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
548 spin_lock(&ubi->wl_lock);
549 if (e->ec > ubi->max_ec)
550 ubi->max_ec = e->ec;
551 spin_unlock(&ubi->wl_lock);
560 * @ubi: UBI device description object
566 static void serve_prot_queue(struct ubi_device *ubi)
577 spin_lock(&ubi->wl_lock);
578 list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
583 wl_tree_add(e, &ubi->used);
589 spin_unlock(&ubi->wl_lock);
595 ubi->pq_head += 1;
596 if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
597 ubi->pq_head = 0;
598 ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
599 spin_unlock(&ubi->wl_lock);
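
serve_prot_queue() is the other half of the protection queue: every call releases the entries sitting in the slot at ubi->pq_head back into the ubi->used tree and then advances pq_head with wrap-around, so an entry added by prot_queue_add() survives about UBI_PROT_QUEUE_LEN servings. Continuing the user-space model started under prot_queue_add(), with a hypothetical move_to_used() standing in for wl_tree_add(e, &ubi->used):

void move_to_used(struct pq_entry *e);  /* hypothetical: wl_tree_add(e, &ubi->used) */

/* Age the protection queue by one step: release every entry in the slot at
 * pq_head to the used pool, then advance pq_head with wrap-around. */
static void serve_prot_queue(void)
{
        struct pq_entry *e, *next;

        for (e = pq[pq_head]; e; e = next) {
                next = e->next;
                move_to_used(e);
        }
        pq[pq_head] = NULL;

        pq_head += 1;
        if (pq_head == UBI_PROT_QUEUE_LEN)
                pq_head = 0;
}
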
604 * @ubi: UBI device description object
610 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
612 spin_lock(&ubi->wl_lock);
613 list_add_tail(&wrk->list, &ubi->works);
614 ubi_assert(ubi->works_count >= 0);
615 ubi->works_count += 1;
616 if (ubi->thread_enabled)
617 wake_up_process(ubi->bgt_thread);
618 spin_unlock(&ubi->wl_lock);
621 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
626 * @ubi: UBI device description object
633 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
649 schedule_ubi_work(ubi, wl_wrk);
655 * @ubi: UBI device description object
663 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
675 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
679 mutex_lock(&ubi->move_mutex);
680 spin_lock(&ubi->wl_lock);
681 ubi_assert(!ubi->move_from && !ubi->move_to);
682 ubi_assert(!ubi->move_to_put);
684 if (!ubi->free.rb_node ||
685 (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
693 * @ubi->used tree later and the wear-leveling will be
697 !ubi->free.rb_node, !ubi->used.rb_node);
701 if (!ubi->scrub.rb_node) {
707 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
708 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
715 paranoid_check_in_wl_tree(e1, &ubi->used);
716 rb_erase(&e1->u.rb, &ubi->used);
722 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
723 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
724 paranoid_check_in_wl_tree(e1, &ubi->scrub);
725 rb_erase(&e1->u.rb, &ubi->scrub);
729 paranoid_check_in_wl_tree(e2, &ubi->free);
730 rb_erase(&e2->u.rb, &ubi->free);
731 ubi->move_from = e1;
732 ubi->move_to = e2;
733 spin_unlock(&ubi->wl_lock);
746 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
772 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
801 * put this PEB to the @ubi->erroneous list to prevent
804 if (ubi->erroneous_peb_count > ubi->max_erroneous) {
806 ubi->erroneous_peb_count);
823 ubi_free_vid_hdr(ubi, vid_hdr);
825 spin_lock(&ubi->wl_lock);
826 if (!ubi->move_to_put) {
827 wl_tree_add(e2, &ubi->used);
830 ubi->move_from = ubi->move_to = NULL;
831 ubi->move_to_put = ubi->wl_scheduled = 0;
832 spin_unlock(&ubi->wl_lock);
834 err = schedule_erase(ubi, e1, 0);
849 err = schedule_erase(ubi, e2, 0);
857 mutex_unlock(&ubi->move_mutex);
872 spin_lock(&ubi->wl_lock);
874 prot_queue_add(ubi, e1);
876 wl_tree_add(e1, &ubi->erroneous);
877 ubi->erroneous_peb_count += 1;
879 wl_tree_add(e1, &ubi->scrub);
881 wl_tree_add(e1, &ubi->used);
882 ubi_assert(!ubi->move_to_put);
883 ubi->move_from = ubi->move_to = NULL;
884 ubi->wl_scheduled = 0;
885 spin_unlock(&ubi->wl_lock);
887 ubi_free_vid_hdr(ubi, vid_hdr);
888 err = schedule_erase(ubi, e2, torture);
893 mutex_unlock(&ubi->move_mutex);
903 spin_lock(&ubi->wl_lock);
904 ubi->move_from = ubi->move_to = NULL;
905 ubi->move_to_put = ubi->wl_scheduled = 0;
906 spin_unlock(&ubi->wl_lock);
908 ubi_free_vid_hdr(ubi, vid_hdr);
913 ubi_ro_mode(ubi);
914 mutex_unlock(&ubi->move_mutex);
919 ubi->wl_scheduled = 0;
920 spin_unlock(&ubi->wl_lock);
921 mutex_unlock(&ubi->move_mutex);
922 ubi_free_vid_hdr(ubi, vid_hdr);
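
wear_leveling_worker() is the core of the file: under ubi->move_mutex it pairs the most-worn candidate holding data (the lowest-EC entry of ubi->used, or the first entry of ubi->scrub if scrubbing is pending) with a lightly-used free block picked by find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF), copies the logical eraseblock across with ubi_eba_copy_leb(), and schedules the source block for erasure; the many error paths re-file the source into ubi->used, ubi->scrub or ubi->erroneous. A heavily simplified sketch of the pairing-and-move step only, with placeholder hooks for the copy and the erase scheduling and all error handling omitted:

#define UBI_WL_THRESHOLD 4096           /* illustrative wear-leveling threshold */

struct peb {
        int pnum;
        int ec;
};

/* Placeholder hooks standing in for ubi_eba_copy_leb() and schedule_erase(). */
int copy_leb(int from_pnum, int to_pnum);
void schedule_erase_peb(struct peb *e, int torture);

/*
 * One wear-leveling pass: move the data of the most-worn used PEB (@e1,
 * lowest erase counter) to the least-worn free PEB (@e2, highest erase
 * counter), but only if the gap justifies the extra erase, or if @e1 was
 * queued for scrubbing anyway.  Returns 1 if a move was done, 0 otherwise.
 */
static int wear_level_once(struct peb *e1, struct peb *e2, int scrubbing)
{
        if (!scrubbing && e2->ec - e1->ec < UBI_WL_THRESHOLD)
                return 0;               /* counters are close enough */

        if (copy_leb(e1->pnum, e2->pnum))
                return 0;               /* the kernel has rich error paths here */

        /* @e2 now carries the data and joins the used pool; @e1 is erased
         * in the background and will reappear in the free pool. */
        schedule_erase_peb(e1, 0);
        return 1;
}
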
928 * @ubi: UBI device description object
934 static int ensure_wear_leveling(struct ubi_device *ubi)
941 spin_lock(&ubi->wl_lock);
942 if (ubi->wl_scheduled)
947 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
950 if (!ubi->scrub.rb_node) {
951 if (!ubi->used.rb_node || !ubi->free.rb_node)
961 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
962 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
970 ubi->wl_scheduled = 1;
971 spin_unlock(&ubi->wl_lock);
980 schedule_ubi_work(ubi, wrk);
984 spin_lock(&ubi->wl_lock);
985 ubi->wl_scheduled = 0;
987 spin_unlock(&ubi->wl_lock);
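
ensure_wear_leveling() merely decides whether such a pass is worth queueing: always if ubi->scrub is non-empty, otherwise only when both ubi->used and ubi->free have entries and the erase-counter gap between their extremes reaches the wear-leveling threshold, and never if a pass is already scheduled. The decision reduced to a predicate (the struct is a stand-in; the field names mirror the fragments):

struct wl_state {
        int wl_scheduled;       /* a worker is already queued         */
        int scrub_empty;        /* ubi->scrub has no entries          */
        int used_empty;         /* ubi->used has no entries           */
        int free_empty;         /* ubi->free has no entries           */
        int min_used_ec;        /* EC of rb_first(&ubi->used)         */
        int best_free_ec;       /* EC of the find_wl_entry() pick     */
};

static int should_schedule_wl(const struct wl_state *s, int threshold)
{
        if (s->wl_scheduled)
                return 0;                       /* a pass is already pending */
        if (!s->scrub_empty)
                return 1;                       /* scrubbing forces a pass   */
        if (s->used_empty || s->free_empty)
                return 0;                       /* nothing to pair up        */
        return s->best_free_ec - s->min_used_ec >= threshold;
}
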
993 * @ubi: UBI device description object
1002 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1017 err = sync_erase(ubi, e, wl_wrk->torture);
1022 spin_lock(&ubi->wl_lock);
1023 wl_tree_add(e, &ubi->free);
1024 spin_unlock(&ubi->wl_lock);
1030 serve_prot_queue(ubi);
1033 err = ensure_wear_leveling(ubi);
1046 err1 = schedule_erase(ubi, e, 0);
1063 if (!ubi->bad_allowed) {
1068 spin_lock(&ubi->volumes_lock);
1069 need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
1071 need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
1072 ubi->avail_pebs -= need;
1073 ubi->rsvd_pebs += need;
1074 ubi->beb_rsvd_pebs += need;
1079 if (ubi->beb_rsvd_pebs == 0) {
1080 spin_unlock(&ubi->volumes_lock);
1084 spin_unlock(&ubi->volumes_lock);
1087 err = ubi_io_mark_bad(ubi, pnum);
1091 spin_lock(&ubi->volumes_lock);
1092 ubi->beb_rsvd_pebs -= 1;
1093 ubi->bad_peb_count += 1;
1094 ubi->good_peb_count -= 1;
1095 ubi_calculate_reserved(ubi);
1096 if (ubi->beb_rsvd_pebs)
1097 ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
1100 spin_unlock(&ubi->volumes_lock);
1105 ubi_ro_mode(ubi);
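
When erase_worker() finds that a block has gone bad, it first tries to top the bad-eraseblock reserve back up to ubi->beb_rsvd_level from the pool of still-unclaimed PEBs, then consumes one reserve PEB for the newly bad block and updates the good/bad counts; only if both the reserve and the available pool are empty does the device fall back to read-only mode. A compact model of that accounting (the ubi_io_mark_bad() call in between is left out, and the struct and return convention are illustrative):

struct beb_pool {
        int avail_pebs;         /* PEBs not yet handed to any volume   */
        int rsvd_pebs;          /* PEBs reserved for internal use      */
        int beb_rsvd_pebs;      /* current bad-eraseblock reserve      */
        int beb_rsvd_level;     /* reserve size UBI tries to maintain  */
        int bad_peb_count;
        int good_peb_count;
};

/* Returns 0 if the bad block could be absorbed, -1 if both the reserve and
 * the available pool are empty (the kernel goes read-only in that case). */
static int absorb_bad_peb(struct beb_pool *p)
{
        int need = p->beb_rsvd_level - p->beb_rsvd_pebs + 1;

        if (need > 0) {
                /* Top the reserve up from whatever is still unclaimed. */
                if (need > p->avail_pebs)
                        need = p->avail_pebs;
                p->avail_pebs -= need;
                p->rsvd_pebs += need;
                p->beb_rsvd_pebs += need;
        }

        if (p->beb_rsvd_pebs == 0)
                return -1;

        p->beb_rsvd_pebs -= 1;
        p->bad_peb_count += 1;
        p->good_peb_count -= 1;
        return 0;
}
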
1111 * @ubi: UBI device description object
1120 int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
1127 ubi_assert(pnum < ubi->peb_count);
1130 spin_lock(&ubi->wl_lock);
1131 e = ubi->lookuptbl[pnum];
1132 if (e == ubi->move_from) {
1139 spin_unlock(&ubi->wl_lock);
1141 /* Wait for the WL worker by taking the @ubi->move_mutex */
1142 mutex_lock(&ubi->move_mutex);
1143 mutex_unlock(&ubi->move_mutex);
1145 } else if (e == ubi->move_to) {
1156 ubi_assert(!ubi->move_to_put);
1157 ubi->move_to_put = 1;
1158 spin_unlock(&ubi->wl_lock);
1161 if (in_wl_tree(e, &ubi->used)) {
1162 paranoid_check_in_wl_tree(e, &ubi->used);
1163 rb_erase(&e->u.rb, &ubi->used);
1164 } else if (in_wl_tree(e, &ubi->scrub)) {
1165 paranoid_check_in_wl_tree(e, &ubi->scrub);
1166 rb_erase(&e->u.rb, &ubi->scrub);
1167 } else if (in_wl_tree(e, &ubi->erroneous)) {
1168 paranoid_check_in_wl_tree(e, &ubi->erroneous);
1169 rb_erase(&e->u.rb, &ubi->erroneous);
1170 ubi->erroneous_peb_count -= 1;
1171 ubi_assert(ubi->erroneous_peb_count >= 0);
1175 err = prot_queue_del(ubi, e->pnum);
1178 ubi_ro_mode(ubi);
1179 spin_unlock(&ubi->wl_lock);
1184 spin_unlock(&ubi->wl_lock);
1186 err = schedule_erase(ubi, e, torture);
1188 spin_lock(&ubi->wl_lock);
1189 wl_tree_add(e, &ubi->used);
1190 spin_unlock(&ubi->wl_lock);
1198 * @ubi: UBI device description object
1206 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1213 spin_lock(&ubi->wl_lock);
1214 e = ubi->lookuptbl[pnum];
1215 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1216 in_wl_tree(e, &ubi->erroneous)) {
1217 spin_unlock(&ubi->wl_lock);
1221 if (e == ubi->move_to) {
1228 spin_unlock(&ubi->wl_lock);
1234 if (in_wl_tree(e, &ubi->used)) {
1235 paranoid_check_in_wl_tree(e, &ubi->used);
1236 rb_erase(&e->u.rb, &ubi->used);
1240 err = prot_queue_del(ubi, e->pnum);
1243 ubi_ro_mode(ubi);
1244 spin_unlock(&ubi->wl_lock);
1249 wl_tree_add(e, &ubi->scrub);
1250 spin_unlock(&ubi->wl_lock);
1256 return ensure_wear_leveling(ubi);
1261 * @ubi: UBI device description object
1266 int ubi_wl_flush(struct ubi_device *ubi)
1274 dbg_wl("flush (%d pending works)", ubi->works_count);
1275 while (ubi->works_count) {
1276 err = do_work(ubi);
1285 down_write(&ubi->work_sem);
1286 up_write(&ubi->work_sem);
1292 while (ubi->works_count) {
1293 dbg_wl("flush more (%d pending works)", ubi->works_count);
1294 err = do_work(ubi);
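
ubi_wl_flush() drains the queue in two rounds: it calls do_work() until ubi->works_count reaches zero, then takes ubi->work_sem for writing, which blocks until every worker still running its callback under a read hold has finished, and drains once more because those workers may have queued new work. Using the primitives from the do_work() sketch above, the same shape looks like this:

/* Continues the do_work() sketch above. */
static int wl_flush(void)
{
        int err;

        while (works_count) {
                err = do_work();
                if (err)
                        return err;
        }

        /* Taking the rw-lock for writing waits for every worker that is
         * still running its callback under a read hold. */
        pthread_rwlock_wrlock(&work_sem);
        pthread_rwlock_unlock(&work_sem);

        /* Those workers may have queued new work, so drain once more. */
        while (works_count) {
                err = do_work();
                if (err)
                        return err;
        }
        return 0;
}
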
1340 struct ubi_device *ubi = u;
1343 ubi->bgt_name, task_pid_nr(current));
1355 spin_lock(&ubi->wl_lock);
1356 if (list_empty(&ubi->works) || ubi->ro_mode ||
1357 !ubi->thread_enabled) {
1359 spin_unlock(&ubi->wl_lock);
1363 spin_unlock(&ubi->wl_lock);
1365 err = do_work(ubi);
1368 ubi->bgt_name, err);
1375 ubi->bgt_name, WL_MAX_FAILURES);
1376 ubi_ro_mode(ubi);
1377 ubi->thread_enabled = 0;
1386 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1392 * @ubi: UBI device description object
1394 static void cancel_pending(struct ubi_device *ubi)
1396 while (!list_empty(&ubi->works)) {
1399 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1401 wrk->func(ubi, wrk, 1);
1402 ubi->works_count -= 1;
1403 ubi_assert(ubi->works_count >= 0);
1409 * @ubi: UBI device description object
1415 int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1423 ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1424 spin_lock_init(&ubi->wl_lock);
1425 mutex_init(&ubi->move_mutex);
1426 init_rwsem(&ubi->work_sem);
1427 ubi->max_ec = si->max_ec;
1428 INIT_LIST_HEAD(&ubi->works);
1430 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1433 ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
1434 if (!ubi->lookuptbl)
1438 INIT_LIST_HEAD(&ubi->pq[i]);
1439 ubi->pq_head = 0;
1450 ubi->lookuptbl[e->pnum] = e;
1451 if (schedule_erase(ubi, e, 0)) {
1467 wl_tree_add(e, &ubi->free);
1468 ubi->lookuptbl[e->pnum] = e;
1480 ubi->lookuptbl[e->pnum] = e;
1481 if (schedule_erase(ubi, e, 0)) {
1497 ubi->lookuptbl[e->pnum] = e;
1501 wl_tree_add(e, &ubi->used);
1505 wl_tree_add(e, &ubi->scrub);
1510 if (ubi->avail_pebs < WL_RESERVED_PEBS) {
1512 ubi->avail_pebs, WL_RESERVED_PEBS);
1515 ubi->avail_pebs -= WL_RESERVED_PEBS;
1516 ubi->rsvd_pebs += WL_RESERVED_PEBS;
1519 err = ensure_wear_leveling(ubi);
1526 cancel_pending(ubi);
1527 tree_destroy(&ubi->used);
1528 tree_destroy(&ubi->free);
1529 tree_destroy(&ubi->scrub);
1530 kfree(ubi->lookuptbl);
1536 * @ubi: UBI device description object
1538 static void protection_queue_destroy(struct ubi_device *ubi)
1544 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1553 * @ubi: UBI device description object
1555 void ubi_wl_close(struct ubi_device *ubi)
1558 cancel_pending(ubi);
1559 protection_queue_destroy(ubi);
1560 tree_destroy(&ubi->used);
1561 tree_destroy(&ubi->erroneous);
1562 tree_destroy(&ubi->free);
1563 tree_destroy(&ubi->scrub);
1564 kfree(ubi->lookuptbl);
1571 * @ubi: UBI device description object
1578 static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
1584 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1588 err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1632 * @ubi: UBI device description object
1635 * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
1637 static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
1643 list_for_each_entry(p, &ubi->pq[i], u.list)