Lines matching refs:part — cross-reference hits for the identifier 'part' in the Linux RFD FTL driver (drivers/mtd/rfd_ftl.c); the number before each fragment is its line in that file.

93 static int build_block_map(struct partition *part, int block_no)
95 struct block *block = &part->blocks[block_no];
98 block->offset = part->block_size * block_no;
100 if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
107 for (i=0; i<part->data_sectors_per_block; i++) {
110 entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);
123 if (entry >= part->sector_count) {
127 part->mbd.mtd->name, block_no, i, entry);
131 if (part->sector_map[entry] != -1) {
134 part->mbd.mtd->name, entry);
135 part->errors = 1;
139 part->sector_map[entry] = block->offset +
140 (i + part->header_sectors_per_block) * SECTOR_SIZE;
145 if (block->free_sectors == part->data_sectors_per_block)
146 part->reserved_block = block_no;
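The matches above fall inside build_block_map(), which replays one erase unit's on-flash header into the in-core sector_map (logical sector number -> flash byte offset), skipping free and deleted entries and noting duplicates. A minimal userspace sketch of that pass, assuming the header is already byte-swapped to CPU order; build_map and hdr are hypothetical names, the free/used counters and error flag are left out, and the constants mirror the driver's defines:

    #include <stdint.h>

    #define SECTOR_SIZE       512
    #define HEADER_MAP_OFFSET 3        /* u16 slots before the per-sector map */
    #define RFD_MAGIC         0x9193
    #define SECTOR_FREE       0xffff
    #define SECTOR_DELETED    0x0000
    #define SECTOR_ZERO       0xfffe   /* logical sector 0 travels as 0xfffe */

    /* Record where each logical sector of one erase unit lives on flash.
     * Returns the number of live sectors, or -1 if the unit carries no
     * RFD magic (i.e. it is unformatted). */
    int build_map(const uint16_t *hdr, long *sector_map, long sector_count,
                  long unit_offset, int data_sectors, int header_sectors)
    {
        int i, used = 0;

        if (hdr[0] != RFD_MAGIC)
            return -1;                          /* unformatted unit */

        for (i = 0; i < data_sectors; i++) {
            uint16_t entry = hdr[HEADER_MAP_OFFSET + i];

            if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
                continue;
            if (entry == SECTOR_ZERO)
                entry = 0;
            if (entry >= sector_count || sector_map[entry] != -1)
                continue;                       /* out of range or duplicate */

            sector_map[entry] = unit_offset +
                (long)(i + header_sectors) * SECTOR_SIZE;
            used++;
        }
        return used;
    }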
151 static int scan_header(struct partition *part)
158 sectors_per_block = part->block_size / SECTOR_SIZE;
159 part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;
161 if (part->total_blocks < 2)
165 part->header_sectors_per_block =
169 part->data_sectors_per_block = sectors_per_block -
170 part->header_sectors_per_block;
172 part->header_size = (HEADER_MAP_OFFSET +
173 part->data_sectors_per_block) * sizeof(u16);
175 part->cylinders = (part->data_sectors_per_block *
176 (part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;
178 part->sector_count = part->cylinders * SECTORS_PER_TRACK;
180 part->current_block = -1;
181 part->reserved_block = -1;
182 part->is_reclaiming = 0;
184 part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
185 if (!part->header_cache)
188 part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
190 if (!part->blocks)
193 part->sector_map = vmalloc(array_size(sizeof(u_long),
194 part->sector_count));
195 if (!part->sector_map)
198 for (i=0; i<part->sector_count; i++)
199 part->sector_map[i] = -1;
201 for (i=0, blocks_found=0; i<part->total_blocks; i++) {
202 rc = mtd_read(part->mbd.mtd, i * part->block_size,
203 part->header_size, &retlen,
204 (u_char *)part->header_cache);
206 if (!rc && retlen != part->header_size)
212 if (!build_block_map(part, i))
218 part->mbd.mtd->name);
223 if (part->reserved_block == -1) {
225 part->mbd.mtd->name);
227 part->errors = 1;
233 vfree(part->sector_map);
234 kfree(part->header_cache);
235 kfree(part->blocks);
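scan_header() derives the exported disk geometry from the erase-unit size: each unit donates a few leading sectors to hold its u16 map, one whole unit is held back as the reclaim target (hence total_blocks - 1), and the result is rounded down to whole tracks. A worked example under assumed numbers (64 KiB erase units, 128 of them, i.e. an 8 MiB partition); the arithmetic follows lines 158-178 above:

    #include <stdio.h>

    #define SECTOR_SIZE       512
    #define SECTORS_PER_TRACK 63
    #define HEADER_MAP_OFFSET 3

    int main(void)
    {
        long block_size   = 64 * 1024;  /* one erase unit (assumed) */
        long total_blocks = 128;        /* e.g. an 8 MiB partition (assumed) */

        long spb  = block_size / SECTOR_SIZE;                        /* 128 */
        long hdr  = ((HEADER_MAP_OFFSET + spb) * 2 + SECTOR_SIZE - 1)
                        / SECTOR_SIZE;  /* header sectors per unit: 1 */
        long data = spb - hdr;          /* data sectors per unit: 127 */

        /* One unit is reserved for reclaim; the extra -1 keeps the
         * exported size strictly below the remaining capacity. */
        long cyl  = (data * (total_blocks - 1) - 1) / SECTORS_PER_TRACK;

        printf("cylinders=%ld, exported sectors=%ld\n",
               cyl, cyl * SECTORS_PER_TRACK);
        return 0;
    }

Compiled and run, this prints cylinders=256, exported sectors=16128 for the assumed geometry.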
242 struct partition *part = container_of(dev, struct partition, mbd);
247 if (sector >= part->sector_count)
250 addr = part->sector_map[sector];
252 rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
259 "0x%lx\n", part->mbd.mtd->name, addr);
268 static int erase_block(struct partition *part, int block)
277 erase->addr = part->blocks[block].offset;
278 erase->len = part->block_size;
280 part->blocks[block].state = BLOCK_ERASING;
281 part->blocks[block].free_sectors = 0;
283 rc = mtd_erase(part->mbd.mtd, erase);
287 (unsigned long long)erase->len, part->mbd.mtd->name);
288 part->blocks[block].state = BLOCK_FAILED;
289 part->blocks[block].free_sectors = 0;
290 part->blocks[block].used_sectors = 0;
295 part->blocks[block].state = BLOCK_ERASED;
296 part->blocks[block].free_sectors = part->data_sectors_per_block;
297 part->blocks[block].used_sectors = 0;
298 part->blocks[block].erases++;
300 rc = mtd_write(part->mbd.mtd, part->blocks[block].offset,
307 part->mbd.mtd->name, part->blocks[block].offset);
308 part->blocks[block].state = BLOCK_FAILED;
310 part->blocks[block].state = BLOCK_OK;
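erase_block() drives a small per-unit state machine: BLOCK_ERASING while the erase runs, BLOCK_ERASED once it completes, BLOCK_OK only after the header is rewritten, BLOCK_FAILED on any error. A sketch of the same transitions; flash_erase()/flash_write_magic() are stand-in stubs for mtd_erase()/mtd_write(), and the assumption that the post-erase write re-burns RFD_MAGIC follows the driver's header format:

    /* Stand-ins for mtd_erase()/mtd_write(); return 0 on success. */
    static int flash_erase(long off, long len) { (void)off; (void)len; return 0; }
    static int flash_write_magic(long off)     { (void)off; return 0; }

    enum unit_state { UNIT_OK, UNIT_ERASING, UNIT_ERASED, UNIT_FAILED };

    struct unit {
        enum unit_state state;
        int free_sectors, used_sectors, erases;
    };

    int erase_unit(struct unit *u, long offset, long len, int data_sectors)
    {
        u->state = UNIT_ERASING;          /* unusable while the erase runs */
        u->free_sectors = 0;

        if (flash_erase(offset, len)) {
            u->state = UNIT_FAILED;       /* bad unit: never allocate again */
            u->used_sectors = 0;
            return -1;
        }

        u->state = UNIT_ERASED;
        u->free_sectors = data_sectors;
        u->used_sectors = 0;
        u->erases++;                      /* feeds the wear-leveling score */

        /* The unit only counts as formatted once the magic is on flash. */
        if (flash_write_magic(offset)) {
            u->state = UNIT_FAILED;
            return -1;
        }
        u->state = UNIT_OK;
        return 0;
    }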
319 static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
326 part->is_reclaiming = 1;
332 map = kmalloc(part->header_size, GFP_KERNEL);
336 rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset,
337 part->header_size, &retlen, (u_char *)map);
339 if (!rc && retlen != part->header_size)
344 "0x%lx\n", part->mbd.mtd->name,
345 part->blocks[block_no].offset);
350 for (i=0; i<part->data_sectors_per_block; i++) {
362 if (entry >= part->sector_count)
365 addr = part->blocks[block_no].offset +
366 (i + part->header_sectors_per_block) * SECTOR_SIZE;
370 if (!part->blocks[block_no].used_sectors--) {
371 rc = erase_block(part, block_no);
376 rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
385 part->mbd.mtd->name);
390 rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
402 part->is_reclaiming = 0;
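move_block_contents() evacuates the live sectors of a unit chosen for reclaim by pushing each one back through the normal write path (note line 390: it literally calls rfd_ftl_writesect on itself, with is_reclaiming set to break recursion). The subtlety at line 370: if the sector being relocated is the very one the caller is about to overwrite, it is dropped rather than copied, and if it was the last used sector the unit is simply erased. A sketch of the loop, with rewrite_sector() as a stand-in:

    #include <stdint.h>

    #define SECTOR_SIZE       512
    #define HEADER_MAP_OFFSET 3
    #define SECTOR_FREE       0xffff
    #define SECTOR_DELETED    0x0000
    #define SECTOR_ZERO       0xfffe

    /* Stand-in for re-queuing one logical sector through the write path. */
    static int rewrite_sector(unsigned entry) { (void)entry; return 0; }

    /* Copy live sectors out of a unit about to be erased. The sector the
     * caller is replacing (*old_addr) is dropped instead of copied. */
    int relocate(const uint16_t *map, int data_sectors, long unit_offset,
                 int header_sectors, long *old_addr)
    {
        int i, copied = 0;

        for (i = 0; i < data_sectors; i++) {
            uint16_t entry = map[HEADER_MAP_OFFSET + i];
            long addr;

            if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
                continue;
            if (entry == SECTOR_ZERO)
                entry = 0;

            addr = unit_offset + (long)(i + header_sectors) * SECTOR_SIZE;
            if (*old_addr == addr) {
                *old_addr = -1;   /* being overwritten anyway: don't move it */
                continue;
            }
            if (rewrite_sector(entry))
                return -1;
            copied++;
        }
        return copied;
    }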
407 static int reclaim_block(struct partition *part, u_long *old_sector)
413 mtd_sync(part->mbd.mtd);
418 old_sector_block = *old_sector / part->block_size;
422 for (block=0; block<part->total_blocks; block++) {
425 if (block == part->reserved_block)
433 if (part->blocks[block].free_sectors)
436 this_score = part->blocks[block].used_sectors;
442 if (part->blocks[block].used_sectors ==
443 part->data_sectors_per_block)
447 this_score += part->blocks[block].erases;
458 part->current_block = -1;
459 part->reserved_block = best_block;
463 part->blocks[best_block].used_sectors,
464 part->blocks[best_block].free_sectors);
466 if (part->blocks[best_block].used_sectors)
467 rc = move_block_contents(part, best_block, old_sector);
469 rc = erase_block(part, best_block);
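reclaim_block() scores every candidate unit: the cost is the number of live sectors that must be copied out, minus one if the unit holds the sector being replaced, plus the unit's erase count as a wear-leveling term; completely full units are skipped since erasing them frees nothing, and the search bails out early if any unit still has free sectors. A standalone sketch of the scoring:

    #include <limits.h>

    struct unit { int used_sectors, free_sectors, erases; };

    /* Exhaustive victim search: cheapest unit to reclaim wins. Returns -1
     * when some unit still has free sectors (reclaiming now would be
     * wasted work), mirroring the driver's early return. */
    int pick_victim(const struct unit *u, int n, int reserved,
                    int old_sector_unit, int data_sectors)
    {
        int best = -1, best_score = INT_MAX, i;

        for (i = 0; i < n; i++) {
            int score;

            if (i == reserved)
                continue;               /* spare unit stays out of play */
            if (u[i].free_sectors)
                return -1;              /* free space left: no reclaim yet */

            score = u[i].used_sectors;  /* cost: sectors to copy out */
            if (i == old_sector_unit)
                score--;                /* one of them is dying anyway */
            else if (u[i].used_sectors == data_sectors)
                continue;               /* full unit: nothing to gain */

            score += u[i].erases;       /* wear-leveling term */
            if (score < best_score) {
                best = i;
                best_score = score;
            }
        }
        return best;
    }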
479 static int find_free_block(struct partition *part)
483 block = part->current_block == -1 ?
484 jiffies % part->total_blocks : part->current_block;
488 if (part->blocks[block].free_sectors &&
489 block != part->reserved_block)
492 if (part->blocks[block].state == BLOCK_UNUSED)
493 erase_block(part, block);
495 if (++block >= part->total_blocks)
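find_free_block() starts its scan at a pseudo-random unit, seeded from jiffies (line 484), so fresh writes are spread rather than always landing on unit 0, then walks the array circularly. A sketch; the driver also formats BLOCK_UNUSED units it walks past (line 493), which is omitted here:

    struct unit { int free_sectors; };

    int find_free_unit(const struct unit *u, int n, int reserved,
                       unsigned long seed)
    {
        int start = (int)(seed % n), i = start;

        do {
            if (u[i].free_sectors && i != reserved)
                return i;
            if (++i >= n)
                i = 0;                  /* wrap around */
        } while (i != start);

        return -1;                      /* nothing free: reclaim next */
    }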
503 static int find_writable_block(struct partition *part, u_long *old_sector)
508 block = find_free_block(part);
511 if (!part->is_reclaiming) {
512 rc = reclaim_block(part, old_sector);
516 block = find_free_block(part);
525 rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
526 part->header_size, &retlen,
527 (u_char *)part->header_cache);
529 if (!rc && retlen != part->header_size)
534 "0x%lx\n", part->mbd.mtd->name,
535 part->blocks[block].offset);
539 part->current_block = block;
545 static int mark_sector_deleted(struct partition *part, u_long old_addr)
552 block = old_addr / part->block_size;
553 offset = (old_addr % part->block_size) / SECTOR_SIZE -
554 part->header_sectors_per_block;
556 addr = part->blocks[block].offset +
558 rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen,
566 "0x%lx\n", part->mbd.mtd->name, addr);
569 if (block == part->current_block)
570 part->header_cache[offset + HEADER_MAP_OFFSET] = del;
572 part->blocks[block].used_sectors--;
574 if (!part->blocks[block].used_sectors &&
575 !part->blocks[block].free_sectors)
576 rc = erase_block(part, block);
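mark_sector_deleted() exploits the fact that flash bits can be cleared without an erase: deleting a sector is one in-place write that burns its map entry down to SECTOR_DELETED (0x0000). When a unit ends up with neither used nor free sectors (lines 574-576), it is erased on the spot. A sketch of the address arithmetic from lines 552-556, reconstructed on the assumption that the entry sits at (HEADER_MAP_OFFSET + slot) u16s into the unit:

    #include <stdint.h>

    #define SECTOR_SIZE       512
    #define HEADER_MAP_OFFSET 3

    /* Given the flash address of a data sector, locate its map entry. */
    long map_entry_addr(long old_addr, long block_size, int header_sectors)
    {
        long unit = old_addr / block_size;
        long slot = (old_addr % block_size) / SECTOR_SIZE - header_sectors;

        return unit * block_size +
               (HEADER_MAP_OFFSET + slot) * (long)sizeof(uint16_t);
    }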
582 static int find_free_sector(const struct partition *part, const struct block *block)
586 i = stop = part->data_sectors_per_block - block->free_sectors;
589 if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
593 if (++i == part->data_sectors_per_block)
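find_free_sector() scans the cached header map for a SECTOR_FREE slot, starting at data_sectors_per_block - free_sectors (slots are handed out roughly in order, so that is usually the first free one) and wrapping once. A sketch; map points at the cached header past HEADER_MAP_OFFSET, and the explicit guard covers what the driver's caller guarantees:

    #include <stdint.h>

    #define SECTOR_FREE 0xffff

    int find_free_slot(const uint16_t *map, int data_sectors, int free_sectors)
    {
        int i, stop;

        if (free_sectors <= 0)
            return -1;          /* caller guarantees free_sectors > 0 */

        i = stop = data_sectors - free_sectors;
        do {
            if (map[i] == SECTOR_FREE)
                return i;
            if (++i == data_sectors)
                i = 0;          /* wrap once around the map */
        } while (i != stop);

        return -1;
    }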
603 struct partition *part = container_of(dev, struct partition, mbd);
611 if (part->current_block == -1 ||
612 !part->blocks[part->current_block].free_sectors) {
614 rc = find_writable_block(part, old_addr);
619 block = &part->blocks[part->current_block];
621 i = find_free_sector(part, block);
628 addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
630 rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
638 part->mbd.mtd->name, addr);
642 part->sector_map[sector] = addr;
646 part->header_cache[i + HEADER_MAP_OFFSET] = entry;
649 rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen,
657 part->mbd.mtd->name, addr);
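These matches appear to be the core write path (do_writesect()): pick a slot in the current unit, write the sector data, then record the map entry, updating both the on-flash header and the cached copy. A RAM-backed sketch, ignoring the endianness conversion the driver does with cpu_to_le16(); sector 0 is encoded as SECTOR_ZERO so it cannot be confused with a deleted entry, and data goes out before the map entry so a power cut in between leaves an orphaned slot rather than a map entry pointing at garbage:

    #include <stdint.h>
    #include <string.h>

    #define SECTOR_SIZE       512
    #define HEADER_MAP_OFFSET 3
    #define SECTOR_ZERO       0xfffe  /* logical sector 0 is stored as 0xfffe */

    struct unit { long offset; int free_sectors, used_sectors; };

    int write_sector(unsigned char *flash, uint16_t *hdr_cache, struct unit *u,
                     int slot, int header_sectors, long *sector_map,
                     unsigned long sector, const char *buf)
    {
        long addr = u->offset + (long)(slot + header_sectors) * SECTOR_SIZE;
        uint16_t entry = (sector == 0) ? SECTOR_ZERO : (uint16_t)sector;

        memcpy(flash + addr, buf, SECTOR_SIZE);          /* 1: the data */
        sector_map[sector] = addr;

        hdr_cache[HEADER_MAP_OFFSET + slot] = entry;     /* keep cache hot */
        memcpy(flash + u->offset + (HEADER_MAP_OFFSET + slot) * sizeof(entry),
               &entry, sizeof(entry));                   /* 2: the map entry */

        u->used_sectors++;
        u->free_sectors--;
        return 0;
    }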
669 struct partition *part = container_of(dev, struct partition, mbd);
676 if (part->reserved_block == -1) {
681 if (sector >= part->sector_count) {
686 old_addr = part->sector_map[sector];
699 part->sector_map[sector] = -1;
702 rc = mark_sector_deleted(part, old_addr);
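The rfd_ftl_writesect() matches show the all-zero shortcut: the driver scans the buffer, and a sector of pure zeroes is never written to flash, only unmapped (line 699), since unmapped sectors already read back as zeroes; any previous copy is then marked deleted (line 702). The predicate, as a sketch:

    #include <string.h>

    #define SECTOR_SIZE 512

    /* True if the sector needs no flash write at all. */
    int is_all_zero(const char *buf)
    {
        static const char zeroes[SECTOR_SIZE];   /* zero-initialized */
        return memcmp(buf, zeroes, SECTOR_SIZE) == 0;
    }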
711 struct partition *part = container_of(dev, struct partition, mbd);
716 if (sector >= part->sector_count)
719 addr = part->sector_map[sector];
722 rc = mark_sector_deleted(part, addr);
726 part->sector_map[sector] = -1;
738 struct partition *part = container_of(dev, struct partition, mbd);
742 geo->cylinders = part->cylinders;
749 struct partition *part;
755 part = kzalloc(sizeof(struct partition), GFP_KERNEL);
756 if (!part)
759 part->mbd.mtd = mtd;
762 part->block_size = block_size;
768 part->block_size = mtd->erasesize;
771 if (scan_header(part) == 0) {
772 part->mbd.size = part->sector_count;
773 part->mbd.tr = tr;
774 part->mbd.devnum = -1;
776 part->mbd.readonly = 1;
777 else if (part->errors) {
780 part->mbd.readonly = 1;
786 if (!add_mtd_blktrans_dev(&part->mbd))
790 kfree(part);
795 struct partition *part = container_of(dev, struct partition, mbd);
798 for (i=0; i<part->total_blocks; i++) {
800 part->mbd.mtd->name, i, part->blocks[i].erases);
803 vfree(part->sector_map);
804 kfree(part->header_cache);
805 kfree(part->blocks);
806 del_mtd_blktrans_dev(&part->mbd);