Lines Matching refs:part, only in /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/mtd/

95 static int build_block_map(struct partition *part, int block_no)
97 struct block *block = &part->blocks[block_no];
100 block->offset = part->block_size * block_no;
102 if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
109 for (i=0; i<part->data_sectors_per_block; i++) {
112 entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);
125 if (entry >= part->sector_count) {
129 part->mbd.mtd->name, block_no, i, entry);
133 if (part->sector_map[entry] != -1) {
136 part->mbd.mtd->name, entry);
137 part->errors = 1;
141 part->sector_map[entry] = block->offset +
142 (i + part->header_sectors_per_block) * SECTOR_SIZE;
147 if (block->free_sectors == part->data_sectors_per_block)
148 part->reserved_block = block_no;
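
Judging by the rfd_ftl_writesect() reference at line 432 below, these matches are from the RFD (Resident Flash Disk) flash translation layer, drivers/mtd/rfd_ftl.c. build_block_map() (lines 95-148) inverts one erase block's on-flash header into the global part->sector_map: the header is an array of little-endian u16 words, word 0 holding RFD_MAGIC and word HEADER_MAP_OFFSET + i naming the logical sector stored in data sector i. A minimal userspace sketch of that inversion, assuming the mainline rfd_ftl.c encodings (RFD_MAGIC 0x9193, HEADER_MAP_OFFSET 3, SECTOR_FREE 0xffff, SECTOR_DELETED 0x0000, SECTOR_ZERO 0xfffe) and a one-sector header; it models the excerpted logic, not the driver itself:

    #include <stdint.h>
    #include <stdio.h>

    #define HEADER_MAP_OFFSET 3
    #define SECTOR_DELETED    0x0000
    #define SECTOR_ZERO       0xfffe
    #define SECTOR_FREE       0xffff
    #define SECTOR_SIZE       512

    int main(void)
    {
        /* fabricated header for one block holding 4 data sectors */
        uint16_t header[HEADER_MAP_OFFSET + 4] = {
            0x9193, 0, 0,                    /* RFD_MAGIC + unused words */
            SECTOR_ZERO, 7, SECTOR_FREE, SECTOR_DELETED
        };
        long sector_map[16];
        unsigned long block_offset = 0;      /* block 0 starts at 0 */
        int i;

        for (i = 0; i < 16; i++)
            sector_map[i] = -1;              /* -1: logical sector not stored */

        for (i = 0; i < 4; i++) {
            uint16_t entry = header[HEADER_MAP_OFFSET + i];

            if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
                continue;                    /* nothing live in this sector */
            if (entry == SECTOR_ZERO)        /* logical 0 is stored as 0xfffe */
                entry = 0;
            /* data sectors follow the header; one header sector assumed */
            sector_map[entry] = block_offset + (i + 1) * SECTOR_SIZE;
            printf("logical %u at offset 0x%lx\n",
                   entry, (unsigned long)sector_map[entry]);
        }
        return 0;
    }

A block whose map entries are all still SECTOR_FREE becomes the reserved block (lines 147-148), kept empty so reclaim always has somewhere to move data.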
153 static int scan_header(struct partition *part)
160 sectors_per_block = part->block_size / SECTOR_SIZE;
161 part->total_blocks = part->mbd.mtd->size / part->block_size;
163 if (part->total_blocks < 2)
167 part->header_sectors_per_block =
171 part->data_sectors_per_block = sectors_per_block -
172 part->header_sectors_per_block;
174 part->header_size = (HEADER_MAP_OFFSET +
175 part->data_sectors_per_block) * sizeof(u16);
177 part->cylinders = (part->data_sectors_per_block *
178 (part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;
180 part->sector_count = part->cylinders * SECTORS_PER_TRACK;
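
scan_header() (lines 153-239) derives the exported disk geometry from the MTD size: one erase block is held back for reclaim (the total_blocks - 1 in lines 177-178), and the remaining data sectors are rounded down to whole 63-sector tracks. A standalone sketch of the arithmetic, with made-up sizes (8 MiB MTD, 64 KiB erase blocks) and the header-sector rounding as I understand it from mainline rfd_ftl.c:

    #include <stdio.h>

    #define SECTOR_SIZE       512
    #define SECTORS_PER_TRACK 63
    #define HEADER_MAP_OFFSET 3

    int main(void)
    {
        unsigned long mtd_size = 8 * 1024 * 1024, block_size = 64 * 1024;
        int sectors_per_block = block_size / SECTOR_SIZE;         /* 128 */
        int total_blocks = mtd_size / block_size;                 /* 128 */
        /* header sectors: the u16 map plus its 3 leading words, rounded
         * up to whole sectors (my reading of the mainline formula) */
        int header_sectors = ((HEADER_MAP_OFFSET + sectors_per_block) * 2
                              + SECTOR_SIZE - 1) / SECTOR_SIZE;   /* 1 */
        int data_sectors = sectors_per_block - header_sectors;    /* 127 */
        /* verbatim formula from lines 177-178: one block in reserve */
        int cylinders = (data_sectors * (total_blocks - 1) - 1)
                        / SECTORS_PER_TRACK;                      /* 256 */

        printf("%d data sectors/block, %d cylinders, %d usable sectors\n",
               data_sectors, cylinders, cylinders * SECTORS_PER_TRACK);
        return 0;
    }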
182 part->current_block = -1;
183 part->reserved_block = -1;
184 part->is_reclaiming = 0;
186 part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
187 if (!part->header_cache)
190 part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
192 if (!part->blocks)
195 part->sector_map = vmalloc(part->sector_count * sizeof(u_long));
196 if (!part->sector_map) {
198 "sector map", part->mbd.mtd->name);
202 for (i=0; i<part->sector_count; i++)
203 part->sector_map[i] = -1;
205 for (i=0, blocks_found=0; i<part->total_blocks; i++) {
206 rc = part->mbd.mtd->read(part->mbd.mtd,
207 i * part->block_size, part->header_size,
208 &retlen, (u_char*)part->header_cache);
210 if (!rc && retlen != part->header_size)
216 if (!build_block_map(part, i))
222 part->mbd.mtd->name);
227 if (part->reserved_block == -1) {
229 part->mbd.mtd->name);
231 part->errors = 1;
237 vfree(part->sector_map);
238 kfree(part->header_cache);
239 kfree(part->blocks);
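
The error path on lines 237-239 unwinds the three allocations above it: vfree() pairs with the vmalloc()ed sector_map (which can be large), kfree() with the kmalloc()/kcalloc()ed header cache and block array, and all three tolerate pointers that were never allocated. The same shape in a userspace sketch, with hypothetical sizes:

    #include <stdint.h>
    #include <stdlib.h>

    int main(void)
    {
        uint16_t *header_cache = NULL;
        void *blocks = NULL;
        long *sector_map = NULL;

        header_cache = malloc(262 * sizeof(uint16_t));  /* header_size */
        if (!header_cache)
            goto err;
        blocks = calloc(128, 64);       /* total_blocks x sizeof(struct block) */
        if (!blocks)
            goto err;
        sector_map = malloc(16128 * sizeof(long));      /* sector_count */
        if (!sector_map)
            goto err;

        /* ... read each block header, build_block_map(), etc. ... */

        return 0;   /* on success the driver keeps all three alive */
    err:
        free(sector_map);    /* vfree() in the driver; NULL is safe */
        free(header_cache);  /* kfree() */
        free(blocks);        /* kfree() */
        return 1;
    }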
246 struct partition *part = (struct partition*)dev;
251 if (sector >= part->sector_count)
254 addr = part->sector_map[sector];
256 rc = part->mbd.mtd->read(part->mbd.mtd, addr, SECTOR_SIZE,
263 "0x%lx\n", part->mbd.mtd->name, addr);
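
rfd_ftl_readsect() (lines 246-263) is a plain table lookup: sector_map[] maps the logical sector to a physical byte address for mtd->read(). In the branch elided here, an unmapped sector (address -1) is answered with a zero-filled buffer instead of a flash read. A sketch, with a hypothetical flash_read() standing in for mtd->read():

    #include <stdio.h>
    #include <string.h>

    #define SECTOR_SIZE 512

    /* hypothetical backend standing in for part->mbd.mtd->read() */
    static int flash_read(unsigned long addr, char *buf)
    {
        (void)addr;
        memset(buf, 0xaa, SECTOR_SIZE);   /* stub: pretend flash holds 0xaa */
        return 0;
    }

    static int readsect(const long *sector_map, unsigned long sector_count,
                        unsigned long sector, char *buf)
    {
        long addr;

        if (sector >= sector_count)
            return -1;                    /* -EIO in the driver */

        addr = sector_map[sector];
        if (addr == -1) {
            /* never written, or discarded: reads back as zeroes */
            memset(buf, 0, SECTOR_SIZE);
            return 0;
        }
        return flash_read((unsigned long)addr, buf);
    }

    int main(void)
    {
        long map[2] = { 0x200, -1 };
        char buf[SECTOR_SIZE];

        readsect(map, 2, 1, buf);
        printf("unmapped sector reads as %d\n", buf[0]);   /* 0 */
        return 0;
    }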
274 struct partition *part;
279 part = (struct partition*)erase->priv;
281 i = erase->addr / part->block_size;
282 if (i >= part->total_blocks || part->blocks[i].offset != erase->addr) {
284 "on '%s'\n", erase->addr, part->mbd.mtd->name);
291 part->mbd.mtd->name, erase->state);
293 part->blocks[i].state = BLOCK_FAILED;
294 part->blocks[i].free_sectors = 0;
295 part->blocks[i].used_sectors = 0;
304 part->blocks[i].state = BLOCK_ERASED;
305 part->blocks[i].free_sectors = part->data_sectors_per_block;
306 part->blocks[i].used_sectors = 0;
307 part->blocks[i].erases++;
309 rc = part->mbd.mtd->write(part->mbd.mtd,
310 part->blocks[i].offset, sizeof(magic), &retlen,
319 part->mbd.mtd->name,
320 part->blocks[i].offset);
321 part->blocks[i].state = BLOCK_FAILED;
324 part->blocks[i].state = BLOCK_OK;
329 static int erase_block(struct partition *part, int block)
338 erase->mtd = part->mbd.mtd;
340 erase->addr = part->blocks[block].offset;
341 erase->len = part->block_size;
342 erase->priv = (u_long)part;
344 part->blocks[block].state = BLOCK_ERASING;
345 part->blocks[block].free_sectors = 0;
347 rc = part->mbd.mtd->erase(part->mbd.mtd, erase);
352 part->mbd.mtd->name);
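
erase_block() (lines 329-352) submits an asynchronous MTD erase: it marks the block BLOCK_ERASING, stashes the partition pointer in erase->priv, and the callback above (lines 274-324) picks it back up when the erase completes. On success the callback resets the block's counters, bumps its erase count (used later for wear leveling), and re-stamps RFD_MAGIC at the block start so a future scan_header() will accept the block; on failure it is marked BLOCK_FAILED with both counters zeroed. A sketch of the completion step, with a hypothetical flash_write() in place of mtd->write():

    #include <stdint.h>
    #include <stdio.h>

    #define RFD_MAGIC 0x9193

    enum state { BLOCK_OK, BLOCK_ERASED, BLOCK_FAILED };

    struct block {
        enum state state;
        unsigned long offset;
        int free_sectors, used_sectors, erases;
    };

    /* hypothetical stand-in for mtd->write(); returns bytes written */
    static int flash_write(unsigned long addr, const void *buf, int len)
    {
        (void)addr; (void)buf;
        return len;
    }

    /* what the callback does once mtd->erase() reports completion */
    static void on_erase_done(struct block *b, int data_sectors_per_block)
    {
        uint16_t magic = RFD_MAGIC;      /* cpu_to_le16() in the driver */

        b->state = BLOCK_ERASED;
        b->free_sectors = data_sectors_per_block;
        b->used_sectors = 0;
        b->erases++;                     /* feeds the wear-leveling score */

        /* re-stamp the magic so the block is recognized after reboot */
        if (flash_write(b->offset, &magic, sizeof(magic)) == sizeof(magic))
            b->state = BLOCK_OK;
        else
            b->state = BLOCK_FAILED;
    }

    int main(void)
    {
        struct block b = { .offset = 0x10000 };
        on_erase_done(&b, 127);
        printf("state=%d erases=%d free=%d\n",
               b.state, b.erases, b.free_sectors);
        return 0;
    }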
360 static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
367 part->is_reclaiming = 1;
373 map = kmalloc(part->header_size, GFP_KERNEL);
377 rc = part->mbd.mtd->read(part->mbd.mtd,
378 part->blocks[block_no].offset, part->header_size,
381 if (!rc && retlen != part->header_size)
386 "0x%lx\n", part->mbd.mtd->name,
387 part->blocks[block_no].offset);
392 for (i=0; i<part->data_sectors_per_block; i++) {
404 if (entry >= part->sector_count)
407 addr = part->blocks[block_no].offset +
408 (i + part->header_sectors_per_block) * SECTOR_SIZE;
412 if (!part->blocks[block_no].used_sectors--) {
413 rc = erase_block(part, block_no);
418 rc = part->mbd.mtd->read(part->mbd.mtd, addr,
427 part->mbd.mtd->name);
432 rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
444 part->is_reclaiming = 0;
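
move_block_contents() (lines 360-444) evacuates the victim block before it is erased: it re-reads the block's on-flash map, skips free and deleted entries, and pushes every live sector back through the normal write path (the rfd_ftl_writesect() call at line 432), which places it in another block; the sector currently being overwritten is not copied, and part->is_reclaiming keeps the write path from re-entering reclaim. A condensed sketch of the loop, reusing the encodings above and hypothetical flash_read()/writesect() helpers:

    #include <stdint.h>
    #include <string.h>

    #define HEADER_MAP_OFFSET 3
    #define SECTOR_DELETED    0x0000
    #define SECTOR_ZERO       0xfffe
    #define SECTOR_FREE       0xffff
    #define SECTOR_SIZE       512

    /* hypothetical stand-ins for mtd->read() and the driver's write path */
    static void flash_read(unsigned long addr, void *buf)
    { (void)addr; memset(buf, 0, SECTOR_SIZE); }
    static void writesect(unsigned int sector, const void *buf)
    { (void)sector; (void)buf; }

    static void move_block(const uint16_t *map, unsigned long block_offset,
                           int data_sectors, int header_sectors,
                           unsigned long sector_count)
    {
        char data[SECTOR_SIZE];
        int i;

        for (i = 0; i < data_sectors; i++) {
            uint16_t entry = map[HEADER_MAP_OFFSET + i]; /* le16_to_cpu() */

            if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
                continue;                /* nothing live in this sector */
            if (entry == SECTOR_ZERO)
                entry = 0;
            if (entry >= sector_count)
                continue;                /* already flagged during scan */

            flash_read(block_offset + (i + header_sectors) * SECTOR_SIZE,
                       data);
            writesect(entry, data);      /* lands in some other block */
        }
    }

    int main(void)
    {
        uint16_t map[HEADER_MAP_OFFSET + 2] = { 0x9193, 0, 0, 5, SECTOR_FREE };
        move_block(map, 0x10000, 2, 1, 16128);
        return 0;
    }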
449 static int reclaim_block(struct partition *part, u_long *old_sector)
455 if (part->mbd.mtd->sync)
456 part->mbd.mtd->sync(part->mbd.mtd);
461 old_sector_block = *old_sector / part->block_size;
465 for (block=0; block<part->total_blocks; block++) {
468 if (block == part->reserved_block)
476 if (part->blocks[block].free_sectors)
479 this_score = part->blocks[block].used_sectors;
485 if (part->blocks[block].used_sectors ==
486 part->data_sectors_per_block)
490 this_score += part->blocks[block].erases;
501 part->current_block = -1;
502 part->reserved_block = best_block;
506 part->blocks[best_block].used_sectors,
507 part->blocks[best_block].free_sectors);
509 if (part->blocks[best_block].used_sectors)
510 rc = move_block_contents(part, best_block, old_sector);
512 rc = erase_block(part, best_block);
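
reclaim_block() (lines 449-512) bails out early while any candidate still has free sectors (reclaim is postponed as long as free space remains), and otherwise picks the cheapest victim by score: used_sectors (the cost of moving live data) plus erases (a wear-leveling penalty), minus one when the block holds the sector being overwritten anyway; a completely full block is otherwise skipped, and the reserved block is never taken. The lowest score wins. The heuristic as a standalone sketch:

    #include <limits.h>
    #include <stdio.h>

    struct blk { int used_sectors, free_sectors, erases; };

    static int pick_victim(const struct blk *b, int nblocks, int reserved,
                           int data_sectors_per_block, int old_sector_block)
    {
        int block, best_block = -1, best_score = INT_MAX;

        for (block = 0; block < nblocks; block++) {
            int score;

            if (block == reserved)
                continue;
            if (b[block].free_sectors)
                return -1;      /* free space left: postpone reclaim */

            score = b[block].used_sectors;
            if (block == old_sector_block)
                score--;        /* its old copy dies anyway */
            else if (b[block].used_sectors == data_sectors_per_block)
                continue;       /* full block: nothing to gain */

            score += b[block].erases;
            if (score < best_score) {
                best_block = block;
                best_score = score;
            }
        }
        return best_block;
    }

    int main(void)
    {
        struct blk b[3] = { {100, 0, 5}, {40, 0, 80}, {60, 0, 2} };
        printf("victim: block %d\n", pick_victim(b, 3, -1, 127, -1));
        return 0;
    }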
522 static int find_free_block(struct partition *part)
526 block = part->current_block == -1 ?
527 jiffies % part->total_blocks : part->current_block;
531 if (part->blocks[block].free_sectors &&
532 block != part->reserved_block)
535 if (part->blocks[block].state == BLOCK_UNUSED)
536 erase_block(part, block);
538 if (++block >= part->total_blocks)
546 static int find_writable_block(struct partition *part, u_long *old_sector)
551 block = find_free_block(part);
554 if (!part->is_reclaiming) {
555 rc = reclaim_block(part, old_sector);
559 block = find_free_block(part);
568 rc = part->mbd.mtd->read(part->mbd.mtd, part->blocks[block].offset,
569 part->header_size, &retlen, (u_char*)part->header_cache);
571 if (!rc && retlen != part->header_size)
576 "0x%lx\n", part->mbd.mtd->name,
577 part->blocks[block].offset);
581 part->current_block = block;
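
find_free_block() (lines 522-538) walks the block array with wrap-around, starting from current_block, or from a jiffies-derived pseudo-random index on a fresh device so wear is spread; it skips the reserved block and opportunistically erases any BLOCK_UNUSED block it passes. find_writable_block() (lines 546-581) calls it, falls back to reclaim_block() when nothing is free, retries once, and finally loads the chosen block's header into header_cache. The walk as a sketch, with a tick counter standing in for jiffies:

    #include <stdio.h>

    struct blk { int free_sectors; };

    static int find_free_block(const struct blk *b, int nblocks, int reserved,
                               int current_block, unsigned long ticks)
    {
        int block, stop;

        block = current_block == -1 ? (int)(ticks % nblocks) : current_block;
        stop = block;

        do {
            /* the reserved block is kept empty for reclaim, so skip it */
            if (b[block].free_sectors && block != reserved)
                return block;
            /* (the driver also erases BLOCK_UNUSED blocks it walks past) */
            if (++block >= nblocks)
                block = 0;
        } while (block != stop);

        return -1;   /* nothing free: caller falls back to reclaim_block() */
    }

    int main(void)
    {
        struct blk b[4] = { {0}, {0}, {3}, {0} };
        printf("free block: %d\n", find_free_block(b, 4, -1, -1, 12345));
        return 0;
    }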
587 static int mark_sector_deleted(struct partition *part, u_long old_addr)
594 block = old_addr / part->block_size;
595 offset = (old_addr % part->block_size) / SECTOR_SIZE -
596 part->header_sectors_per_block;
598 addr = part->blocks[block].offset +
600 rc = part->mbd.mtd->write(part->mbd.mtd, addr,
608 "0x%lx\n", part->mbd.mtd->name, addr);
612 if (block == part->current_block)
613 part->header_cache[offset + HEADER_MAP_OFFSET] = del;
615 part->blocks[block].used_sectors--;
617 if (!part->blocks[block].used_sectors &&
618 !part->blocks[block].free_sectors)
619 rc = erase_block(part, block);
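
mark_sector_deleted() (lines 587-619) relies on flash programming semantics: bits can be cleared without an erase, so overwriting a map entry with SECTOR_DELETED (0x0000) invalidates the old copy in place. The entry's address is recomputed from the sector's physical address, the cached header is patched when the block is the current one (lines 612-613), and a block left with neither used nor free sectors is erased on the spot (lines 617-619). The address arithmetic, with hypothetical sizes:

    #include <stdio.h>

    #define SECTOR_SIZE       512
    #define HEADER_MAP_OFFSET 3

    int main(void)
    {
        /* hypothetical layout: 64 KiB blocks, 1 header sector each */
        unsigned long block_size = 64 * 1024, old_addr = 0x10a00;
        int header_sectors = 1;

        unsigned long block = old_addr / block_size;        /* erase block */
        long off = (long)(old_addr % block_size) / SECTOR_SIZE
                   - header_sectors;                        /* data sector */
        /* byte address of that sector's u16 entry inside the header */
        unsigned long addr = block * block_size +
                             (HEADER_MAP_OFFSET + off) * sizeof(unsigned short);

        printf("block %lu, data sector %ld, map entry at 0x%lx\n",
               block, off, addr);
        /* the driver now writes 0x0000 (SECTOR_DELETED) at addr: clearing
         * bits needs no erase on already-programmed flash */
        return 0;
    }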
625 static int find_free_sector(const struct partition *part, const struct block *block)
629 i = stop = part->data_sectors_per_block - block->free_sectors;
632 if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
636 if (++i == part->data_sectors_per_block)
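
find_free_sector() (lines 625-636) probes the cached map for a SECTOR_FREE entry. The starting index, data_sectors_per_block - free_sectors, is exactly where the next free slot sits when the block has only ever been appended to, so the common case succeeds on the first compare; the wrap-around covers maps with holes. A sketch (the caller guarantees free_sectors > 0):

    #include <stdint.h>
    #include <stdio.h>

    #define HEADER_MAP_OFFSET 3
    #define SECTOR_FREE       0xffff

    static int find_free_sector(const uint16_t *header, int data_sectors,
                                int free_sectors)
    {
        /* first probe lands right after the used entries when the block
         * was filled strictly in order */
        int i, stop;

        i = stop = data_sectors - free_sectors;
        do {
            if (header[HEADER_MAP_OFFSET + i] == SECTOR_FREE)
                return i;
            if (++i == data_sectors)
                i = 0;               /* wrap: the map may have holes */
        } while (i != stop);

        return -1;
    }

    int main(void)
    {
        uint16_t header[HEADER_MAP_OFFSET + 4] =
            { 0x9193, 0, 0, 5, 9, SECTOR_FREE, SECTOR_FREE };
        printf("next free data sector: %d\n",
               find_free_sector(header, 4, 2));   /* prints 2 */
        return 0;
    }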
646 struct partition *part = (struct partition*)dev;
654 if (part->current_block == -1 ||
655 !part->blocks[part->current_block].free_sectors) {
657 rc = find_writable_block(part, old_addr);
662 block = &part->blocks[part->current_block];
664 i = find_free_sector(part, block);
671 addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
673 rc = part->mbd.mtd->write(part->mbd.mtd,
681 part->mbd.mtd->name, addr);
686 part->sector_map[sector] = addr;
690 part->header_cache[i + HEADER_MAP_OFFSET] = entry;
693 rc = part->mbd.mtd->write(part->mbd.mtd, addr,
701 part->mbd.mtd->name, addr);
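
do_writesect() (lines 646-701) is the forward path: make sure current_block has room (reclaiming via find_writable_block() if it must), pick a slot, write the 512 data bytes, record the new physical address in sector_map, then stamp the logical sector number into the map, both in header_cache and on flash, with logical sector 0 encoded as SECTOR_ZERO. Writing the data before the map entry means a torn write leaves the slot still looking free. The sequence as a sketch, again with a hypothetical flash_write():

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define HEADER_MAP_OFFSET 3
    #define SECTOR_ZERO       0xfffe
    #define SECTOR_SIZE       512

    /* hypothetical flash backend standing in for mtd->write() */
    static int flash_write(unsigned long addr, const void *buf, int len)
    { (void)addr; (void)buf; return len; }

    /* data first, then the map entry, so a crash mid-sequence leaves
     * the sector still marked free rather than mapped to garbage */
    static int do_writesect(long *sector_map, uint16_t *header_cache,
                            unsigned long block_offset, int slot,
                            int header_sectors, unsigned long sector,
                            const char *buf)
    {
        unsigned long addr = block_offset +
                             (slot + header_sectors) * SECTOR_SIZE;
        uint16_t entry;

        if (flash_write(addr, buf, SECTOR_SIZE) != SECTOR_SIZE)
            return -1;
        sector_map[sector] = addr;   /* new physical home */

        entry = sector == 0 ? SECTOR_ZERO : (uint16_t)sector;
        header_cache[HEADER_MAP_OFFSET + slot] = entry;  /* cpu_to_le16() */

        addr = block_offset + (HEADER_MAP_OFFSET + slot) * sizeof(entry);
        return flash_write(addr, &entry, sizeof(entry)) == sizeof(entry)
               ? 0 : -1;
    }

    int main(void)
    {
        long map[16];
        uint16_t hdr[HEADER_MAP_OFFSET + 4] = {0};
        char data[SECTOR_SIZE] = "hello";

        memset(map, -1, sizeof(map));
        printf("rc=%d map[7]=0x%lx\n",
               do_writesect(map, hdr, 0x10000, 0, 1, 7, data),
               (unsigned long)map[7]);
        return 0;
    }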
714 struct partition *part = (struct partition*)dev;
721 if (part->reserved_block == -1) {
726 if (sector >= part->sector_count) {
731 old_addr = part->sector_map[sector];
744 part->sector_map[sector] = -1;
747 rc = mark_sector_deleted(part, old_addr);
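
The outer rfd_ftl_writesect() (lines 714-747) adds two guards and one shortcut: writes are refused when no reserved block exists (line 721, the partition is effectively read-only), out-of-range sectors fail (line 726), and a payload of all zero bytes is stored by simply dropping the mapping (line 744) rather than writing it, since reads of unmapped sectors already return zeroes; the old physical copy is then marked deleted either way (line 747). The shortcut's test, sketched:

    #include <stdio.h>
    #include <string.h>

    #define SECTOR_SIZE 512

    /* any live byte in the payload? (the driver open-codes this scan
     * before deciding whether to call the real write path) */
    static int sector_is_zero(const char *buf)
    {
        static const char zeroes[SECTOR_SIZE];
        return memcmp(buf, zeroes, SECTOR_SIZE) == 0;
    }

    int main(void)
    {
        long map_entry = 0x1234;        /* old physical home of the sector */
        char buf[SECTOR_SIZE] = { 0 };  /* incoming payload, all zeroes */

        if (sector_is_zero(buf))
            map_entry = -1; /* unmap instead of writing: reads return zeroes */

        printf("map entry: %ld\n", map_entry);
        return 0;
    }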
755 struct partition *part = (struct partition*)dev;
759 geo->cylinders = part->cylinders;
766 struct partition *part;
771 part = kzalloc(sizeof(struct partition), GFP_KERNEL);
772 if (!part)
775 part->mbd.mtd = mtd;
778 part->block_size = block_size;
785 part->block_size = mtd->erasesize;
788 if (scan_header(part) == 0) {
789 part->mbd.size = part->sector_count;
790 part->mbd.tr = tr;
791 part->mbd.devnum = -1;
793 part->mbd.readonly = 1;
794 else if (part->errors) {
797 part->mbd.readonly = 1;
803 if (!add_mtd_blktrans_dev((void*)part))
807 kfree(part);
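
rfd_ftl_add_mtd() (lines 766-807) is the probe hook: allocate the partition, take the block size from a module parameter when one was given (line 778) and from mtd->erasesize otherwise (line 785), and register a block translation device only if scan_header() accepts the media; a partition whose scan found inconsistencies (part->errors) is still exposed, but read-only (line 797). A condensed sketch of that decision flow, with hypothetical stand-ins for scan_header() and the blktrans registration:

    #include <stdio.h>
    #include <stdlib.h>

    struct partition { unsigned long block_size; int errors, readonly; };

    /* hypothetical stand-ins for scan_header() / add_mtd_blktrans_dev() */
    static int scan_media(struct partition *p) { p->errors = 0; return 0; }
    static int register_dev(struct partition *p) { (void)p; return 0; }

    int main(void)
    {
        unsigned long block_size_param = 0;  /* module parameter, 0 = unset */
        unsigned long erasesize = 64 * 1024; /* from the MTD device */
        struct partition *part = calloc(1, sizeof(*part));

        if (!part)
            return 1;
        part->block_size = block_size_param ? block_size_param : erasesize;

        if (scan_media(part) == 0) {
            if (part->errors)
                part->readonly = 1;   /* inconsistent map: expose read-only */
            if (register_dev(part) == 0) {
                printf("registered, block_size=%lu ro=%d\n",
                       part->block_size, part->readonly);
                return 0;             /* now owned by the blktrans layer */
            }
        }
        free(part);                   /* probe failed, nothing registered */
        return 1;
    }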
812 struct partition *part = (struct partition*)dev;
815 for (i=0; i<part->total_blocks; i++) {
817 part->mbd.mtd->name, i, part->blocks[i].erases);
821 vfree(part->sector_map);
822 kfree(part->header_cache);
823 kfree(part->blocks);
824 kfree(part);