Lines in block/badblocks.c matching references to bb

457 static int prev_by_hint(struct badblocks *bb, sector_t s, int hint)
460 u64 *p = bb->page;
463 while ((hint < hint_end) && ((hint + 1) <= bb->count) &&
465 if ((hint + 1) == bb->count || BB_OFFSET(p[hint + 1]) > s) {
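The fragments above and below all treat bb->page as a flat array of packed 64-bit records, unpacked through the BB_OFFSET()/BB_LEN() accessors seen in prev_by_hint(). The encoding, from include/linux/badblocks.h, is also what explains the bare "* 8" scaling in the memmove() calls further down (sizeof(u64)):

        /* Each entry: 54-bit start sector, 9-bit (length - 1), 1 ack bit. */
        #define BB_OFFSET(x)    (((x) & BB_OFFSET_MASK) >> 9)
        #define BB_LEN(x)       (((x) & BB_LEN_MASK) + 1)  /* at most BB_MAX_LEN (512) */
        #define BB_ACK(x)       (!!((x) & BB_ACK_MASK))
        #define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63))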
481 static int prev_badblocks(struct badblocks *bb, struct badblocks_context *bad,
489 if (!bb->count)
493 ret = prev_by_hint(bb, s, hint);
499 hi = bb->count;
500 p = bb->page;
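When the hint from prev_by_hint() misses, prev_badblocks() falls back to a binary search over the sorted table for the last entry whose start offset is at or before s. A minimal sketch of that search (the function name is illustrative; only BB_OFFSET() mirrors the kernel macro):

        /* Return the last index with BB_OFFSET(p[idx]) <= s, or -1 if none. */
        static int find_prev(const u64 *p, int count, sector_t s)
        {
                int lo = 0, hi = count, ret = -1;

                while (lo < hi) {
                        int mid = lo + (hi - lo) / 2;

                        if (BB_OFFSET(p[mid]) <= s) {
                                ret = mid;      /* candidate; keep looking right */
                                lo = mid + 1;
                        } else {
                                hi = mid;
                        }
                }
                return ret;
        }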
534 static bool can_merge_behind(struct badblocks *bb,
539 u64 *p = bb->page;
554 static int behind_merge(struct badblocks *bb, struct badblocks_context *bad,
559 u64 *p = bb->page;
579 static bool can_merge_front(struct badblocks *bb, int prev,
583 u64 *p = bb->page;
597 static int front_merge(struct badblocks *bb, int prev, struct badblocks_context *bad)
601 u64 *p = bb->page;
610 if ((prev + 1) < bb->count &&
633 static bool can_combine_front(struct badblocks *bb, int prev,
636 u64 *p = bb->page;
651 * The caller of front_combine() will decrease bb->count, therefore
654 static void front_combine(struct badblocks *bb, int prev)
656 u64 *p = bb->page;
661 if ((prev + 1) < bb->count)
662 memmove(p + prev, p + prev + 1, (bb->count - prev - 1) * 8);
671 static bool overlap_front(struct badblocks *bb, int front,
674 u64 *p = bb->page;
686 static bool overlap_behind(struct badblocks *bb, struct badblocks_context *bad,
689 u64 *p = bb->page;
718 static bool can_front_overwrite(struct badblocks *bb, int prev,
721 u64 *p = bb->page;
724 WARN_ON(!overlap_front(bb, prev, bad));
748 if ((bb->count + (*extra)) >= MAX_BADBLOCKS)
761 static int front_overwrite(struct badblocks *bb, int prev,
764 u64 *p = bb->page;
778 (bb->count - prev - 1) * 8);
792 (bb->count - prev - 1) * 8);
806 (bb->count - prev - 1) * 8);
823 static int insert_at(struct badblocks *bb, int at, struct badblocks_context *bad)
825 u64 *p = bb->page;
828 WARN_ON(badblocks_full(bb));
831 if (at < bb->count)
832 memmove(p + at + 1, p + at, (bb->count - at) * 8);
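insert_at() here and front_combine() above are the two halves of one idiom: the table stays sorted by shifting the tail with memmove(), and the caller fixes up bb->count afterwards. A minimal sketch of both operations (names are illustrative):

        /* Open a slot at 'at' for a new record; caller does bb->count++. */
        static void table_insert(u64 *p, int count, int at, u64 rec)
        {
                if (at < count)
                        memmove(p + at + 1, p + at, (count - at) * sizeof(u64));
                p[at] = rec;
        }

        /* Close the slot at 'at'; caller does bb->count--. */
        static void table_delete(u64 *p, int count, int at)
        {
                if ((at + 1) < count)
                        memmove(p + at, p + at + 1, (count - at - 1) * sizeof(u64));
        }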
838 static void badblocks_update_acked(struct badblocks *bb)
841 u64 *p = bb->page;
844 if (!bb->unacked_exist)
847 for (i = 0; i < bb->count ; i++) {
855 bb->unacked_exist = 0;
859 static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors,
871 if (bb->shift < 0)
879 if (bb->shift) {
883 rounddown(s, bb->shift);
884 roundup(next, bb->shift);
888 write_seqlock_irqsave(&bb->lock, flags);
893 p = bb->page;
900 if (badblocks_empty(bb)) {
901 len = insert_at(bb, 0, &bad);
902 bb->count++;
907 prev = prev_badblocks(bb, &bad, hint);
911 if (!badblocks_full(bb)) {
915 len = insert_at(bb, 0, &bad);
916 bb->count++;
923 if (overlap_behind(bb, &bad, 0)) {
924 if (can_merge_behind(bb, &bad, 0)) {
925 len = behind_merge(bb, &bad, 0);
940 if (can_combine_front(bb, prev, &bad)) {
941 front_combine(bb, prev);
942 bb->count--;
948 if (overlap_front(bb, prev, &bad)) {
949 if (can_merge_front(bb, prev, &bad)) {
950 len = front_merge(bb, prev, &bad);
955 if (!can_front_overwrite(bb, prev, &bad, &extra)) {
962 len = front_overwrite(bb, prev, &bad, extra);
964 bb->count += extra;
966 if (can_combine_front(bb, prev, &bad)) {
967 front_combine(bb, prev);
968 bb->count--;
975 if (can_merge_front(bb, prev, &bad)) {
976 len = front_merge(bb, prev, &bad);
983 if (badblocks_full(bb)) {
985 if (((prev + 1) < bb->count) &&
986 overlap_behind(bb, &bad, prev + 1) &&
1001 if ((prev + 1) < bb->count &&
1002 overlap_behind(bb, &bad, prev + 1))
1006 len = insert_at(bb, prev + 1, &bad);
1007 bb->count++;
1026 (prev + 1) < bb->count &&
1034 if ((prev + 2) < bb->count)
1036 (bb->count - (prev + 2)) * 8);
1037 bb->count--;
1040 if (space_desired && !badblocks_full(bb)) {
1050 set_changed(bb);
1053 bb->unacked_exist = 1;
1055 badblocks_update_acked(bb);
1058 write_sequnlock_irqrestore(&bb->lock, flags);
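Taken together, the _badblocks_set() fragments implement one policy: widen an existing entry where possible (behind_merge()/front_merge()), rewrite entries the new range overlaps (front_overwrite()), insert a fresh slot otherwise, and finally fuse now-adjacent entries (front_combine()). A worked example of the merge path, assuming BB_MAX_LEN is not exceeded:

        /*
         * table before:  { [start 64, len 8] }             covers 64..71
         * badblocks_set(bb, 72, 8, 1);                     adjacent range
         * table after:   { [start 64, len 16] }            front_merge()
         *
         * table before:  { [start 64, len 8], [start 128, len 8] }
         * badblocks_set(bb, 72, 64, 1);                    spans the gap
         * table after:   { [start 64, len 72] }            merge, then
         *                                                  front_combine()
         *                                                  drops bb->count
         */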
1071 * the caller to reduce bb->count.
1073 static int front_clear(struct badblocks *bb, int prev,
1078 u64 *p = bb->page;
1091 if ((prev + 1) < bb->count)
1093 (bb->count - prev - 1) * 8);
1116 static int front_splitting_clear(struct badblocks *bb, int prev,
1119 u64 *p = bb->page;
1128 memmove(p + prev + 2, p + prev + 1, (bb->count - prev - 1) * 8);
1134 static int _badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
1142 if (bb->shift < 0)
1150 if (bb->shift) {
1160 roundup(s, bb->shift);
1161 rounddown(target, bb->shift);
1165 write_seqlock_irq(&bb->lock);
1168 p = bb->page;
1174 if (badblocks_empty(bb)) {
1181 prev = prev_badblocks(bb, &bad, hint);
1185 if (overlap_behind(bb, &bad, 0)) {
1200 if ((prev + 1) >= bb->count && !overlap_front(bb, prev, &bad)) {
1207 if (badblocks_full(bb) && (BB_OFFSET(p[prev]) < bad.start) &&
1213 if (overlap_front(bb, prev, &bad)) {
1217 if ((bb->count + 1) < MAX_BADBLOCKS) {
1218 len = front_splitting_clear(bb, prev, &bad);
1219 bb->count += 1;
1228 len = front_clear(bb, prev, &bad, &deleted);
1229 bb->count -= deleted;
1238 if ((prev + 1) < bb->count && overlap_behind(bb, &bad, prev + 1)) {
1261 badblocks_update_acked(bb);
1262 set_changed(bb);
1265 write_sequnlock_irq(&bb->lock);
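The clear side is asymmetric: clearing the middle of a recorded range cannot shrink the table, it has to split one entry into two, which is why the code above requires (bb->count + 1) < MAX_BADBLOCKS before taking the front_splitting_clear() path. An illustrative before/after:

        /*
         * table before:  { [start 64, len 32] }            covers 64..95
         * badblocks_clear(bb, 72, 8);                      clear 72..79
         * table after:   { [start 64, len 8], [start 80, len 16] }
         *                                                  one extra slot used
         */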
1274 static int _badblocks_check(struct badblocks *bb, sector_t s, int sectors,
1284 WARN_ON(bb->shift < 0 || sectors == 0);
1286 if (bb->shift > 0) {
1291 rounddown(s, bb->shift);
1292 roundup(target, bb->shift);
1297 seq = read_seqbegin(&bb->lock);
1299 p = bb->page;
1307 if (badblocks_empty(bb)) {
1312 prev = prev_badblocks(bb, &bad, hint);
1316 ((prev + 1) >= bb->count) && !overlap_front(bb, prev, &bad)) {
1322 if ((prev >= 0) && overlap_front(bb, prev, &bad)) {
1342 if ((prev + 1) < bb->count && overlap_behind(bb, &bad, prev + 1)) {
1367 if (read_seqretry(&bb->lock, seq))
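The check path never takes the write lock: it samples the sequence counter, walks the table, and starts over if read_seqretry() reports that a writer raced with it. A minimal sketch of that pattern; check_one_range() is a hypothetical stand-in for the actual table walk:

        static int check_one_range(struct badblocks *bb, sector_t s,
                                   int sectors);        /* hypothetical */

        static int read_with_retry(struct badblocks *bb, sector_t s, int sectors)
        {
                unsigned int seq;
                int rv;

                do {
                        seq = read_seqbegin(&bb->lock);
                        rv = check_one_range(bb, s, sectors);
                } while (read_seqretry(&bb->lock, seq));

                return rv;
        }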
1375 * @bb: the badblocks structure that holds all badblock information
1407 int badblocks_check(struct badblocks *bb, sector_t s, int sectors,
1410 return _badblocks_check(bb, s, sectors, first_bad, bad_sectors);
1416 * @bb: the badblocks structure that holds all badblock information
1429 int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
1432 return _badblocks_set(bb, s, sectors, acknowledged);
1438 * @bb: the badblocks structure that holds all badblock information
1450 int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
1452 return _badblocks_clear(bb, s, sectors);
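A minimal usage sketch of the three exported calls, following the return conventions documented in this file (badblocks_set()/badblocks_clear() return 0 on success and 1 on failure; badblocks_check() returns 0 for a clean range, 1 when every overlapping bad block is acknowledged, -1 when some are not):

        #include <linux/badblocks.h>
        #include <linux/printk.h>

        static void example(struct badblocks *bb)
        {
                sector_t first_bad;
                int bad_sectors;

                if (badblocks_set(bb, 4096, 8, 1))      /* 8 acked bad sectors */
                        pr_warn("badblocks: table full\n");

                switch (badblocks_check(bb, 4096, 16, &first_bad, &bad_sectors)) {
                case 0:         /* no known bad blocks in the range */
                        break;
                case 1:         /* overlaps acknowledged bad blocks */
                case -1:        /* overlaps unacknowledged bad blocks */
                        pr_info("bad region at %llu (+%d sectors)\n",
                                (unsigned long long)first_bad, bad_sectors);
                        break;
                }

                badblocks_clear(bb, 4096, 8);           /* sectors rewritten OK */
        }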
1458 * @bb: the badblocks structure that holds all badblock information
1463 void ack_all_badblocks(struct badblocks *bb)
1465 if (bb->page == NULL || bb->changed)
1468 write_seqlock_irq(&bb->lock);
1470 if (bb->changed == 0 && bb->unacked_exist) {
1471 u64 *p = bb->page;
1474 for (i = 0; i < bb->count ; i++) {
1482 bb->unacked_exist = 0;
1484 write_sequnlock_irq(&bb->lock);
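ack_all_badblocks() is intended to run once the on-disk metadata describing the bad blocks is known to be durable; the bb->changed test above skips the walk while metadata is still dirty. An illustrative call site, modeled on how md uses it:

        /* 'metadata_write_done' is a hypothetical completion hook. */
        static void metadata_write_done(struct badblocks *bb)
        {
                /* superblock recording the bad blocks has hit the media */
                ack_all_badblocks(bb);
        }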
1490 * @bb: the badblocks structure that holds all badblock information
1497 ssize_t badblocks_show(struct badblocks *bb, char *page, int unack)
1501 u64 *p = bb->page;
1504 if (bb->shift < 0)
1508 seq = read_seqbegin(&bb->lock);
1513 while (len < PAGE_SIZE && i < bb->count) {
1524 (unsigned long long)s << bb->shift,
1525 length << bb->shift);
1528 bb->unacked_exist = 0;
1530 if (read_seqretry(&bb->lock, seq))
1539 * @bb: the badblocks structure that holds all badblock information
1547 ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len,
1567 if (badblocks_set(bb, sector, length, !unack))
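badblocks_show() and badblocks_store() are shaped for sysfs. A sketch of the usual wiring; the attribute name and the my_dev_to_bb() lookup are hypothetical, but md and nvdimm follow this pattern:

        #include <linux/device.h>

        static struct badblocks *my_dev_to_bb(struct device *dev); /* hypothetical */

        static ssize_t bad_blocks_show(struct device *dev,
                                       struct device_attribute *attr, char *page)
        {
                return badblocks_show(my_dev_to_bb(dev), page, 0);
        }

        static ssize_t bad_blocks_store(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *page, size_t len)
        {
                return badblocks_store(my_dev_to_bb(dev), page, len, 0);
        }
        static DEVICE_ATTR_RW(bad_blocks);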
1574 static int __badblocks_init(struct device *dev, struct badblocks *bb,
1577 bb->dev = dev;
1578 bb->count = 0;
1580 bb->shift = 0;
1582 bb->shift = -1;
1584 bb->page = devm_kzalloc(dev, PAGE_SIZE, GFP_KERNEL);
1586 bb->page = kzalloc(PAGE_SIZE, GFP_KERNEL);
1587 if (!bb->page) {
1588 bb->shift = -1;
1591 seqlock_init(&bb->lock);
1598 * @bb: the badblocks structure that holds all badblock information
1605 int badblocks_init(struct badblocks *bb, int enable)
1607 return __badblocks_init(NULL, bb, enable);
1611 int devm_init_badblocks(struct device *dev, struct badblocks *bb)
1613 if (!bb)
1615 return __badblocks_init(dev, bb, 1);
1621 * @bb: the badblocks structure that holds all badblock information
1623 void badblocks_exit(struct badblocks *bb)
1625 if (!bb)
1627 if (bb->dev)
1628 devm_kfree(bb->dev, bb->page);
1630 kfree(bb->page);
1631 bb->page = NULL;
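Lifecycle, for completeness: devm_init_badblocks() ties the PAGE_SIZE table to the device so it is freed automatically, while the plain badblocks_init() must be paired with badblocks_exit(). A hedged sketch (function names are illustrative):

        #include <linux/badblocks.h>

        static int my_probe(struct device *dev, struct badblocks *bb)
        {
                return devm_init_badblocks(dev, bb);    /* freed with the device */
        }

        static int standalone_setup(struct badblocks *bb)
        {
                return badblocks_init(bb, 1);   /* 1 = enabled, bb->shift = 0 */
        }

        static void standalone_teardown(struct badblocks *bb)
        {
                badblocks_exit(bb);             /* frees bb->page */
        }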