Source: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/mtd/

Lines Matching defs:part
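
This is a definition-match listing, not contiguous source: each line keeps its line number from the original file, and everything that does not mention `part` is elided. Judging by the rfd_ftl_* symbols, the file is the RFD flash translation layer (drivers/mtd/rfd_ftl.c), which presents an MTD device as a small read/write block disk. The two structures the matches revolve around can be reconstructed from the field accesses below; the following is an inference from the listing, not a verbatim copy:

    /* Reconstructed from the field accesses in this listing (kernel context,
     * types from <linux/types.h> and <linux/mtd/blktrans.h>); an
     * approximation, not the driver's actual declaration. */
    struct block {
            enum {
                    BLOCK_OK,
                    BLOCK_ERASING,
                    BLOCK_ERASED,
                    BLOCK_UNUSED,
                    BLOCK_FAILED
            } state;
            int free_sectors;       /* writable slots left in this erase unit */
            int used_sectors;       /* slots holding live data */
            int erases;             /* erase count, feeds wear leveling */
            u_long offset;          /* byte offset of the erase unit on the MTD */
    };

    struct partition {
            struct mtd_blktrans_dev mbd;    /* must be first: dev is cast to part */
            u_int block_size;               /* bytes per erase unit */
            u_int total_blocks;
            u_int header_sectors_per_block; /* sectors holding the map header */
            u_int data_sectors_per_block;
            u_int sector_count;             /* sectors the translated disk exposes */
            u_int header_size;              /* bytes of header actually used */
            int cylinders;                  /* CHS geometry reported via getgeo */
            int reserved_block;             /* spare block kept empty for reclaim */
            int current_block;              /* block currently accepting writes */
            u16 *header_cache;              /* cached header of current_block */
            int is_reclaiming;              /* guards against recursive reclaim */
            int errors;                     /* scan found inconsistencies */
            u_long *sector_map;             /* logical sector -> flash address, -1 if unmapped */
            struct block *blocks;           /* one entry per erase unit */
    };

The mbd member has to come first: several entry points cast the struct mtd_blktrans_dev pointer straight to struct partition (line 242, for instance).
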

91 static int build_block_map(struct partition *part, int block_no)
93 struct block *block = &part->blocks[block_no];
96 block->offset = part->block_size * block_no;
98 if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
105 for (i=0; i<part->data_sectors_per_block; i++) {
108 entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);
121 if (entry >= part->sector_count) {
125 part->mbd.mtd->name, block_no, i, entry);
129 if (part->sector_map[entry] != -1) {
132 part->mbd.mtd->name, entry);
133 part->errors = 1;
137 part->sector_map[entry] = block->offset +
138 (i + part->header_sectors_per_block) * SECTOR_SIZE;
143 if (block->free_sectors == part->data_sectors_per_block)
144 part->reserved_block = block_no;
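
build_block_map() (lines 91-144) decodes one erase unit's header, which the caller has already read into header_cache: entry 0 must hold RFD_MAGIC, and the little-endian u16 entries from HEADER_MAP_OFFSET onward record which logical sector lives in each data slot. Out-of-range entries are logged and skipped (line 121), a logical sector that appears in two slots flags the partition as inconsistent (lines 129-133), and a block whose sectors are all free is remembered as the reclaim reserve (lines 143-144). The address arithmetic of lines 137-138, as a runnable sketch; the SECTOR_SIZE and HEADER_MAP_OFFSET values are assumptions consistent with the listing:

    #include <stdio.h>

    #define SECTOR_SIZE       512   /* assumed */
    #define HEADER_MAP_OFFSET 3     /* assumed: map entries follow a 3-u16 preamble */

    /* Flash address of the data slot behind map entry i of a block. */
    static unsigned long slot_address(unsigned long block_offset,
                                      int header_sectors_per_block, int i)
    {
            return block_offset + (i + header_sectors_per_block) * SECTOR_SIZE;
    }

    int main(void)
    {
            /* e.g. map entry 5 of the block at 0x20000, with 1 header sector */
            printf("data at 0x%lx\n", slot_address(0x20000UL, 1, 5));
            return 0;
    }
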
149 static int scan_header(struct partition *part)
156 sectors_per_block = part->block_size / SECTOR_SIZE;
157 part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;
159 if (part->total_blocks < 2)
163 part->header_sectors_per_block =
167 part->data_sectors_per_block = sectors_per_block -
168 part->header_sectors_per_block;
170 part->header_size = (HEADER_MAP_OFFSET +
171 part->data_sectors_per_block) * sizeof(u16);
173 part->cylinders = (part->data_sectors_per_block *
174 (part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;
176 part->sector_count = part->cylinders * SECTORS_PER_TRACK;
178 part->current_block = -1;
179 part->reserved_block = -1;
180 part->is_reclaiming = 0;
182 part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
183 if (!part->header_cache)
186 part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
188 if (!part->blocks)
191 part->sector_map = vmalloc(part->sector_count * sizeof(u_long));
192 if (!part->sector_map) {
194 "sector map", part->mbd.mtd->name);
198 for (i=0; i<part->sector_count; i++)
199 part->sector_map[i] = -1;
201 for (i=0, blocks_found=0; i<part->total_blocks; i++) {
202 rc = part->mbd.mtd->read(part->mbd.mtd,
203 i * part->block_size, part->header_size,
204 &retlen, (u_char*)part->header_cache);
206 if (!rc && retlen != part->header_size)
212 if (!build_block_map(part, i))
218 part->mbd.mtd->name);
223 if (part->reserved_block == -1) {
225 part->mbd.mtd->name);
227 part->errors = 1;
233 vfree(part->sector_map);
234 kfree(part->header_cache);
235 kfree(part->blocks);
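
scan_header() (lines 149-235) sizes everything from the erase unit: each block is split into header sectors and data sectors, and the CHS geometry deliberately holds one whole block's capacity back for reclaim (the total_blocks - 1 in lines 173-174). It then allocates the header cache, the per-block array, and the sector map, initializes every mapping to -1 (lines 198-199), and feeds each block's header through build_block_map(); a partition with no empty reserve block still mounts, but is flagged as having errors (lines 223-227). The rounding step for header_sectors_per_block is elided from the match, so the formula below is a reconstruction, and SECTOR_SIZE = 512, SECTORS_PER_TRACK = 63 are assumed; for a 4 MiB device with 64 KiB erase units the numbers work out as follows:

    #include <stdio.h>

    #define SECTOR_SIZE       512   /* assumed */
    #define SECTORS_PER_TRACK 63    /* assumed */
    #define HEADER_MAP_OFFSET 3     /* assumed */

    int main(void)
    {
            unsigned block_size = 0x10000, device_size = 0x400000;
            unsigned total_blocks = device_size / block_size;         /* 64  */
            unsigned sectors_per_block = block_size / SECTOR_SIZE;    /* 128 */

            /* header: preamble plus one u16 map entry per sector, rounded up
             * to whole sectors (reconstructed, not shown in the listing) */
            unsigned header_sectors = ((HEADER_MAP_OFFSET + sectors_per_block) * 2
                            + SECTOR_SIZE - 1) / SECTOR_SIZE;                /* 1   */
            unsigned data_sectors = sectors_per_block - header_sectors;      /* 127 */
            unsigned header_size = (HEADER_MAP_OFFSET + data_sectors) * 2;   /* 260 */

            /* one block held back for reclaim, hence total_blocks - 1 */
            unsigned cylinders = (data_sectors * (total_blocks - 1) - 1)
                            / SECTORS_PER_TRACK;                             /* 126 */
            unsigned sector_count = cylinders * SECTORS_PER_TRACK;           /* 7938 */

            printf("%u blocks, %u data sectors each, disk of %u sectors (%u B header)\n",
                            total_blocks, data_sectors, sector_count, header_size);
            return 0;
    }
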
242 struct partition *part = (struct partition*)dev;
247 if (sector >= part->sector_count)
250 addr = part->sector_map[sector];
252 rc = part->mbd.mtd->read(part->mbd.mtd, addr, SECTOR_SIZE,
259 "0x%lx\n", part->mbd.mtd->name, addr);
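
The read path (lines 242-259) is a plain table lookup: bounds-check the logical sector, fetch its flash address from sector_map, read SECTOR_SIZE bytes. Reconstructed in full below; the zero-fill branch for unmapped sectors is an assumption, but it pairs naturally with the write entry point dropping the mapping of blank sectors (line 741):

    /* Sketch, using the struct partition reconstructed above and the
     * driver's SECTOR_SIZE constant. */
    static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
    {
            struct partition *part = (struct partition*)dev;
            size_t retlen;
            u_long addr;
            int rc;

            if (sector >= part->sector_count)
                    return -EIO;

            addr = part->sector_map[sector];
            if (addr != -1) {
                    rc = part->mbd.mtd->read(part->mbd.mtd, addr, SECTOR_SIZE,
                                    &retlen, (u_char*)buf);
                    if (!rc && retlen != SECTOR_SIZE)
                            rc = -EIO;      /* short read: treat as I/O error */
                    if (rc)
                            return rc;
            } else {
                    /* assumed: an unmapped sector reads back as zeroes */
                    memset(buf, 0, SECTOR_SIZE);
            }
            return 0;
    }
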
270 struct partition *part;
275 part = (struct partition*)erase->priv;
277 i = (u32)erase->addr / part->block_size;
278 if (i >= part->total_blocks || part->blocks[i].offset != erase->addr ||
281 "on '%s'\n", (unsigned long long)erase->addr, part->mbd.mtd->name);
288 part->mbd.mtd->name, erase->state);
290 part->blocks[i].state = BLOCK_FAILED;
291 part->blocks[i].free_sectors = 0;
292 part->blocks[i].used_sectors = 0;
301 part->blocks[i].state = BLOCK_ERASED;
302 part->blocks[i].free_sectors = part->data_sectors_per_block;
303 part->blocks[i].used_sectors = 0;
304 part->blocks[i].erases++;
306 rc = part->mbd.mtd->write(part->mbd.mtd,
307 part->blocks[i].offset, sizeof(magic), &retlen,
316 part->mbd.mtd->name,
317 part->blocks[i].offset);
318 part->blocks[i].state = BLOCK_FAILED;
321 part->blocks[i].state = BLOCK_OK;
326 static int erase_block(struct partition *part, int block)
335 erase->mtd = part->mbd.mtd;
337 erase->addr = part->blocks[block].offset;
338 erase->len = part->block_size;
339 erase->priv = (u_long)part;
341 part->blocks[block].state = BLOCK_ERASING;
342 part->blocks[block].free_sectors = 0;
344 rc = part->mbd.mtd->erase(part->mbd.mtd, erase);
349 (unsigned long long)erase->len, part->mbd.mtd->name);
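
Erasing is asynchronous. erase_block() (lines 326-349) fills in a struct erase_info, marks the block BLOCK_ERASING with no free sectors (so nobody writes into it meanwhile), and hands it to the MTD layer; the callback (lines 270-321) fires on completion, resets the block's counters, bumps its erase count, and stamps RFD_MAGIC back into the block's first two bytes so it is recognized as formatted on the next scan. A condensed sketch of the completion half; RFD_MAGIC as a u16 signature at offset 0 is an assumption, and the error reporting is trimmed:

    static void erase_callback_sketch(struct erase_info *erase)
    {
            struct partition *part = (struct partition*)erase->priv;
            u16 magic = cpu_to_le16(RFD_MAGIC);
            int i = (u32)erase->addr / part->block_size;
            size_t retlen;

            if (erase->state != MTD_ERASE_DONE) {
                    /* a failed erase takes the block out of service */
                    part->blocks[i].state = BLOCK_FAILED;
                    part->blocks[i].free_sectors = 0;
                    part->blocks[i].used_sectors = 0;
                    return;
            }

            part->blocks[i].state = BLOCK_ERASED;
            part->blocks[i].free_sectors = part->data_sectors_per_block;
            part->blocks[i].used_sectors = 0;
            part->blocks[i].erases++;

            /* re-stamp the signature; only then is the block usable again */
            if (part->mbd.mtd->write(part->mbd.mtd, part->blocks[i].offset,
                            sizeof(magic), &retlen, (u_char*)&magic))
                    part->blocks[i].state = BLOCK_FAILED;
            else
                    part->blocks[i].state = BLOCK_OK;
    }
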
357 static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
364 part->is_reclaiming = 1;
370 map = kmalloc(part->header_size, GFP_KERNEL);
374 rc = part->mbd.mtd->read(part->mbd.mtd,
375 part->blocks[block_no].offset, part->header_size,
378 if (!rc && retlen != part->header_size)
383 "0x%lx\n", part->mbd.mtd->name,
384 part->blocks[block_no].offset);
389 for (i=0; i<part->data_sectors_per_block; i++) {
401 if (entry >= part->sector_count)
404 addr = part->blocks[block_no].offset +
405 (i + part->header_sectors_per_block) * SECTOR_SIZE;
409 if (!part->blocks[block_no].used_sectors--) {
410 rc = erase_block(part, block_no);
415 rc = part->mbd.mtd->read(part->mbd.mtd, addr,
424 part->mbd.mtd->name);
429 rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
441 part->is_reclaiming = 0;
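
move_block_contents() (lines 357-441) is the copy half of garbage collection. It sets is_reclaiming (line 364) so the nested write below cannot trigger another reclaim, re-reads the victim's header from flash (the cache may belong to a different block), then walks the map and pushes every live sector back through the normal write path, which relocates it into a block with free space. Once the in-memory count of live sectors reaches zero the rest of the map need not be walked and the block is erased on the spot (lines 409-410). A simplified sketch of the loop; the sentinel map values are assumptions:

    /* Assumed sentinels: 0xffff = slot never written, 0x0000 = deleted. */
    #define SECTOR_FREE    0xffff
    #define SECTOR_DELETED 0x0000

    static int move_live_sectors(struct partition *part, int block_no, u16 *map)
    {
            u_char sector_data[SECTOR_SIZE];
            size_t retlen;
            u_long addr;
            int i, rc = 0;

            for (i = 0; i < part->data_sectors_per_block; i++) {
                    u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);

                    if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
                            continue;
                    if (entry >= part->sector_count)
                            continue;       /* corrupt entry: skip it */

                    addr = part->blocks[block_no].offset +
                            (i + part->header_sectors_per_block) * SECTOR_SIZE;
                    rc = part->mbd.mtd->read(part->mbd.mtd, addr, SECTOR_SIZE,
                                    &retlen, sector_data);
                    if (rc)
                            break;

                    /* rewriting through the FTL relocates the sector */
                    rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
                                    entry, (char*)sector_data);
                    if (rc)
                            break;
            }
            return rc;
    }
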
446 static int reclaim_block(struct partition *part, u_long *old_sector)
452 if (part->mbd.mtd->sync)
453 part->mbd.mtd->sync(part->mbd.mtd);
458 old_sector_block = *old_sector / part->block_size;
462 for (block=0; block<part->total_blocks; block++) {
465 if (block == part->reserved_block)
473 if (part->blocks[block].free_sectors)
476 this_score = part->blocks[block].used_sectors;
482 if (part->blocks[block].used_sectors ==
483 part->data_sectors_per_block)
487 this_score += part->blocks[block].erases;
498 part->current_block = -1;
499 part->reserved_block = best_block;
503 part->blocks[best_block].used_sectors,
504 part->blocks[best_block].free_sectors);
506 if (part->blocks[best_block].used_sectors)
507 rc = move_block_contents(part, best_block, old_sector);
509 rc = erase_block(part, best_block);
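
reclaim_block() (lines 446-509) picks the victim with a cost heuristic: a block's score is the number of live sectors that would have to be copied plus its erase count, and the lowest score wins, so the driver prefers cheap-to-move, lightly-worn blocks. The reserve is never a candidate (line 465), a completely full block is skipped since reclaiming it frees nothing (lines 482-483), except when it holds the sector about to be overwritten (that copy dies anyway, so its cost is one lower), and the scan appears to be cut short while any block still has free sectors, since reclaiming can wait until the disk is genuinely full. The action taken at line 473 is elided, so the model below treats that early-out as an assumption:

    #include <stdio.h>

    struct blk { int used_sectors, free_sectors, erases; };

    /* Model of the victim scoring; returns -1 when reclaim should wait. */
    static int pick_victim(const struct blk *b, int n, int reserved,
                           int old_block, int data_sectors_per_block)
    {
            int best = -1, best_score = 0x7fffffff;

            for (int i = 0; i < n; i++) {
                    int score;

                    if (i == reserved)
                            continue;
                    if (b[i].free_sectors)
                            return -1;      /* assumed: postpone while space remains */

                    score = b[i].used_sectors;
                    if (i == old_block)
                            score--;        /* that sector dies anyway */
                    else if (b[i].used_sectors == data_sectors_per_block)
                            continue;       /* full block: nothing to gain */

                    score += b[i].erases;
                    if (score < best_score) {
                            best = i;
                            best_score = score;
                    }
            }
            return best;
    }

    int main(void)
    {
            struct blk b[3] = { {127, 0, 9}, {40, 0, 2}, {80, 0, 1} };
            printf("victim: block %d\n", pick_victim(b, 3, 0, -1, 127));  /* 1 */
            return 0;
    }

Once a victim is chosen, current_block is invalidated and the victim is lined up as the new reserve (lines 498-499); it is then either copied out via move_block_contents() or, if it holds no live data, erased directly (lines 506-509).
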
519 static int find_free_block(struct partition *part)
523 block = part->current_block == -1 ?
524 jiffies % part->total_blocks : part->current_block;
528 if (part->blocks[block].free_sectors &&
529 block != part->reserved_block)
532 if (part->blocks[block].state == BLOCK_UNUSED)
533 erase_block(part, block);
535 if (++block >= part->total_blocks)
543 static int find_writable_block(struct partition *part, u_long *old_sector)
548 block = find_free_block(part);
551 if (!part->is_reclaiming) {
552 rc = reclaim_block(part, old_sector);
556 block = find_free_block(part);
565 rc = part->mbd.mtd->read(part->mbd.mtd, part->blocks[block].offset,
566 part->header_size, &retlen, (u_char*)part->header_cache);
568 if (!rc && retlen != part->header_size)
573 "0x%lx\n", part->mbd.mtd->name,
574 part->blocks[block].offset);
578 part->current_block = block;
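
find_free_block() (lines 519-535) starts its scan at jiffies % total_blocks rather than at block 0, so fresh writes are spread across the device instead of always favoring the lowest-numbered blocks, and blocks still in factory state (BLOCK_UNUSED) are formatted on first touch (lines 532-533). find_writable_block() (lines 543-578) retries after a reclaim pass if nothing is free, then loads the winner's header into header_cache and makes it current. A model of the rotating scan, with a pseudo-random start standing in for jiffies:

    #include <stdlib.h>

    /* Rotating scan for a block with free sectors, skipping the reserve. */
    static int find_free_block_model(const int *free_sectors, int total_blocks,
                                     int reserved_block, int current_block)
    {
            int block = current_block == -1 ?
                            rand() % total_blocks : current_block;
            int stop = block;

            do {
                    if (free_sectors[block] && block != reserved_block)
                            return block;
                    if (++block >= total_blocks)
                            block = 0;
            } while (block != stop);

            return -1;      /* nothing free: the caller must reclaim first */
    }
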
584 static int mark_sector_deleted(struct partition *part, u_long old_addr)
591 block = old_addr / part->block_size;
592 offset = (old_addr % part->block_size) / SECTOR_SIZE -
593 part->header_sectors_per_block;
595 addr = part->blocks[block].offset +
597 rc = part->mbd.mtd->write(part->mbd.mtd, addr,
605 "0x%lx\n", part->mbd.mtd->name, addr);
609 if (block == part->current_block)
610 part->header_cache[offset + HEADER_MAP_OFFSET] = del;
612 part->blocks[block].used_sectors--;
614 if (!part->blocks[block].used_sectors &&
615 !part->blocks[block].free_sectors)
616 rc = erase_block(part, block);
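
mark_sector_deleted() (lines 584-616) works backwards from a physical address: the owning block is old_addr / block_size, and the map slot is the sector's index within the block minus the header sectors (lines 591-593). It then overwrites that single u16 map entry on flash with a deleted marker, which needs no erase because the write only clears bits, fixes up the cached header if the block happens to be current (lines 609-610), and erases the block outright once it has neither live nor free sectors left (lines 614-616). The address arithmetic as a sketch; the continuation of line 595 and the marker value are assumptions:

    #define SECTOR_DELETED 0x0000   /* assumed: all bits cleared = deleted */

    /* Flash address of the u16 map entry describing the data at old_addr. */
    static u_long map_entry_addr(const struct partition *part, u_long old_addr)
    {
            int block  = old_addr / part->block_size;
            int offset = (old_addr % part->block_size) / SECTOR_SIZE -
                            part->header_sectors_per_block;

            return part->blocks[block].offset +
                            (HEADER_MAP_OFFSET + offset) * sizeof(u16);
    }
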
622 static int find_free_sector(const struct partition *part, const struct block *block)
626 i = stop = part->data_sectors_per_block - block->free_sectors;
629 if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
633 if (++i == part->data_sectors_per_block)
643 struct partition *part = (struct partition*)dev;
651 if (part->current_block == -1 ||
652 !part->blocks[part->current_block].free_sectors) {
654 rc = find_writable_block(part, old_addr);
659 block = &part->blocks[part->current_block];
661 i = find_free_sector(part, block);
668 addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
670 rc = part->mbd.mtd->write(part->mbd.mtd,
678 part->mbd.mtd->name, addr);
683 part->sector_map[sector] = addr;
687 part->header_cache[i + HEADER_MAP_OFFSET] = entry;
690 rc = part->mbd.mtd->write(part->mbd.mtd, addr,
698 part->mbd.mtd->name, addr);
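
The low-level write helper (lines 643-698) is careful about ordering. It first makes sure current_block has room, reclaiming if necessary (lines 651-654), asks find_free_sector() (lines 622-633) for a slot, which scans the cached header starting just past the slots already handed out, then writes the 512 bytes of data (line 670), updates the in-memory sector_map (line 683), and only afterwards publishes the map entry in the on-flash header (lines 687-690). A crash between the two flash writes therefore leaves an unreferenced data slot, never a map entry pointing at garbage. Condensed sketch; note the real driver must also encode logical sector 0 specially, since a map value of 0 is assumed above to mean deleted:

    static int do_writesect_sketch(struct partition *part, u_long sector,
                                   const char *buf)
    {
            struct block *block;
            size_t retlen;
            u_long addr;
            u16 entry;
            int i, rc;

            /* make sure the current block can take one more sector */
            if (part->current_block == -1 ||
                            !part->blocks[part->current_block].free_sectors) {
                    rc = find_writable_block(part, NULL);
                    if (rc)
                            return rc;
            }
            block = &part->blocks[part->current_block];

            i = find_free_sector(part, block);
            if (i < 0)
                    return -ENOSPC;

            /* 1: the data itself */
            addr = block->offset +
                            (i + part->header_sectors_per_block) * SECTOR_SIZE;
            rc = part->mbd.mtd->write(part->mbd.mtd, addr, SECTOR_SIZE,
                            &retlen, (u_char*)buf);
            if (rc)
                    return rc;

            part->sector_map[sector] = addr;

            /* 2: only now the on-flash map entry (and the cached copy) */
            entry = cpu_to_le16(sector);    /* sector 0 needs a special code */
            part->header_cache[i + HEADER_MAP_OFFSET] = entry;
            addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
            rc = part->mbd.mtd->write(part->mbd.mtd, addr, sizeof(entry),
                            &retlen, (u_char*)&entry);
            if (!rc) {
                    block->free_sectors--;
                    block->used_sectors++;
            }
            return rc;
    }
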
711 struct partition *part = (struct partition*)dev;
718 if (part->reserved_block == -1) {
723 if (sector >= part->sector_count) {
728 old_addr = part->sector_map[sector];
741 part->sector_map[sector] = -1;
744 rc = mark_sector_deleted(part, old_addr);
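
The write entry point (lines 711-744) layers two policies on top of that helper: writes are refused while there is no reserve block (line 718), because without one a reclaim could never complete, and, by all appearances, a buffer containing only zero bytes is never written at all; its mapping is just dropped (line 741), matching the read path's zero fill. Either way, the sector's previous physical copy is marked deleted afterwards (line 744). The elided lines between 728 and 741 hide the actual test, so the shortcut below is an assumption:

    /* Assumed all-zero shortcut: blank sectors stay unmapped on flash. */
    static int sector_is_blank(const char *buf)
    {
            int i;

            for (i = 0; i < SECTOR_SIZE; i++)
                    if (buf[i])
                            return 0;
            return 1;
    }

    /* ...used roughly like this in the entry point:
     *
     *      old_addr = part->sector_map[sector];
     *      if (sector_is_blank(buf))
     *              part->sector_map[sector] = -1;
     *      else
     *              rc = do_writesect(dev, sector, buf, &old_addr);
     *      if (!rc && old_addr != -1)
     *              rc = mark_sector_deleted(part, old_addr);
     */
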
752 struct partition *part = (struct partition*)dev;
756 geo->cylinders = part->cylinders;
763 struct partition *part;
768 part = kzalloc(sizeof(struct partition), GFP_KERNEL);
769 if (!part)
772 part->mbd.mtd = mtd;
775 part->block_size = block_size;
781 part->block_size = mtd->erasesize;
784 if (scan_header(part) == 0) {
785 part->mbd.size = part->sector_count;
786 part->mbd.tr = tr;
787 part->mbd.devnum = -1;
789 part->mbd.readonly = 1;
790 else if (part->errors) {
793 part->mbd.readonly = 1;
799 if (!add_mtd_blktrans_dev((void*)part))
803 kfree(part);
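
The probe routine (lines 763-803) allocates a struct partition, points it at the MTD, and picks the erase-unit size, seemingly a module parameter when set and mtd->erasesize otherwise (lines 772-781, partly elided). Registration happens only if scan_header() succeeds; the device is forced read-only when the MTD itself is not writable or when the scan recorded inconsistencies (lines 789-793), and on any failure the allocation is freed and the MTD left unclaimed. A condensed sketch of that decision flow, with block_size as the assumed module parameter:

    static void add_mtd_sketch(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
    {
            struct partition *part;

            part = kzalloc(sizeof(struct partition), GFP_KERNEL);
            if (!part)
                    return;

            part->mbd.mtd = mtd;
            part->block_size = block_size ? block_size : mtd->erasesize;

            if (scan_header(part) == 0) {
                    part->mbd.size = part->sector_count;
                    part->mbd.tr = tr;
                    part->mbd.devnum = -1;

                    if (!(mtd->flags & MTD_WRITEABLE) || part->errors)
                            part->mbd.readonly = 1;

                    if (!add_mtd_blktrans_dev((void*)part))
                            return;         /* registered: keep the allocation */
            }
            kfree(part);
    }

The teardown at lines 808-819 is the mirror image: it logs each block's erase count, then frees the same three allocations scan_header() made.
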
808 struct partition *part = (struct partition*)dev;
811 for (i=0; i<part->total_blocks; i++) {
813 part->mbd.mtd->name, i, part->blocks[i].erases);
817 vfree(part->sector_map);
818 kfree(part->header_cache);
819 kfree(part->blocks);