Lines matching defs:cxt, only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/mtd/

Each match below is one source line from the mtdoops driver (mtdoops.c), shown with its original line number. mtdoops saves kernel oops and panic messages into a circular buffer of record_size pages on an MTD flash partition; cxt always points at the driver's single static struct mtdoops_context, oops_cxt.

72 static void mark_page_used(struct mtdoops_context *cxt, int page)
74 set_bit(page, cxt->oops_page_used);
77 static void mark_page_unused(struct mtdoops_context *cxt, int page)
79 clear_bit(page, cxt->oops_page_used);
82 static int page_is_used(struct mtdoops_context *cxt, int page)
84 return test_bit(page, cxt->oops_page_used);
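Lines 72-84 are the complete bodies of three one-line helpers that maintain oops_page_used, a bitmap with one bit per record page telling whether that page still holds a saved oops. Reassembled from the fragments above, using only the standard kernel bitops:

    static void mark_page_used(struct mtdoops_context *cxt, int page)
    {
            set_bit(page, cxt->oops_page_used);
    }

    static void mark_page_unused(struct mtdoops_context *cxt, int page)
    {
            clear_bit(page, cxt->oops_page_used);
    }

    static int page_is_used(struct mtdoops_context *cxt, int page)
    {
            return test_bit(page, cxt->oops_page_used);
    }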
93 static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
95 struct mtd_info *mtd = cxt->mtd;
130 mark_page_unused(cxt, page);
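mtdoops_erase_block() (lines 93-130) erases one flash eraseblock and then clears the used bit of every record page inside it, so those pages can be recycled. A sketch of its shape with the synchronous erase plumbing elided; the page arithmetic is an assumption based on mainline 2.6.36, which this vendor tree appears to track closely:

    static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
    {
            struct mtd_info *mtd = cxt->mtd;
            u32 start_page = (offset / mtd->erasesize) * (mtd->erasesize / record_size);
            u32 erase_pages = mtd->erasesize / record_size;
            u32 page;

            /* ... fill in a struct erase_info for the block at 'offset',
             * call mtd->erase() and sleep until its callback fires,
             * returning early on failure (elided) ... */

            /* Every record page in the erased block is reusable (line 130) */
            for (page = start_page; page < start_page + erase_pages; page++)
                    mark_page_unused(cxt, page);
            return 0;
    }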
135 static void mtdoops_inc_counter(struct mtdoops_context *cxt)
137 cxt->nextpage++;
138 if (cxt->nextpage >= cxt->oops_pages)
139 cxt->nextpage = 0;
140 cxt->nextcount++;
141 if (cxt->nextcount == 0xffffffff)
142 cxt->nextcount = 0;
144 if (page_is_used(cxt, cxt->nextpage)) {
145 schedule_work(&cxt->work_erase);
150 cxt->nextpage, cxt->nextcount);
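mtdoops_inc_counter() (lines 135-150) advances the circular buffer after each saved record: nextpage wraps at oops_pages, and the 32-bit sequence counter nextcount skips 0xffffffff, which is the erased-flash pattern and therefore means "no record". If the new target page still holds an old record, the erase work item is scheduled so the page is clean before the next crash. A reconstruction consistent with the fragments; the printk text is borrowed from mainline 2.6.36 and is an assumption:

    static void mtdoops_inc_counter(struct mtdoops_context *cxt)
    {
            cxt->nextpage++;
            if (cxt->nextpage >= cxt->oops_pages)
                    cxt->nextpage = 0;
            cxt->nextcount++;
            if (cxt->nextcount == 0xffffffff)
                    cxt->nextcount = 0;

            if (page_is_used(cxt, cxt->nextpage)) {
                    /* Old record in the way: erase it in the background */
                    schedule_work(&cxt->work_erase);
                    return;
            }

            printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n",
                   cxt->nextpage, cxt->nextcount);
    }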
156 struct mtdoops_context *cxt =
158 struct mtd_info *mtd = cxt->mtd;
165 mod = (cxt->nextpage * record_size) % mtd->erasesize;
167 cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size);
168 if (cxt->nextpage >= cxt->oops_pages)
169 cxt->nextpage = 0;
173 ret = mtd->block_isbad(mtd, cxt->nextpage * record_size);
182 cxt->nextpage * record_size);
184 cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size);
185 if (cxt->nextpage >= cxt->oops_pages)
186 cxt->nextpage = 0;
187 if (i == cxt->oops_pages / (mtd->erasesize / record_size)) {
194 ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);
198 cxt->nextpage, cxt->nextcount);
203 ret = mtd->block_markbad(mtd, cxt->nextpage * record_size);
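mtdoops_workfunc_erase() (lines 156-203) is the deferred-work handler that prepares the next eraseblock. Reading the fragments in order: round nextpage up to an eraseblock boundary (165-169), probe the block with mtd->block_isbad() (173), skip a whole eraseblock's worth of record pages per bad block (184-186), bail out once every block has been tried (187), then erase (194) and, if the erase fails, retire the block with mtd->block_markbad() (203). A condensed sketch, with the printks and the erase-retry loop of mainline 2.6.36 elided:

    static void mtdoops_workfunc_erase(struct work_struct *work)
    {
            struct mtdoops_context *cxt =
                            container_of(work, struct mtdoops_context, work_erase);
            struct mtd_info *mtd = cxt->mtd;
            int i = 0, mod, ret;

            if (!mtd)
                    return;

            /* Align nextpage to the start of the next eraseblock (165-169) */
            mod = (cxt->nextpage * record_size) % mtd->erasesize;
            if (mod != 0) {
                    cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size);
                    if (cxt->nextpage >= cxt->oops_pages)
                            cxt->nextpage = 0;
            }

            /* Skip bad blocks; give up if every block is bad (173-187) */
            while (mtd->block_isbad &&
                   mtd->block_isbad(mtd, cxt->nextpage * record_size) > 0) {
                    cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size);
                    if (cxt->nextpage >= cxt->oops_pages)
                            cxt->nextpage = 0;
                    if (++i == cxt->oops_pages / (mtd->erasesize / record_size))
                            return;
            }

            ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);
            if (ret < 0 && mtd->block_markbad)
                    mtd->block_markbad(mtd, cxt->nextpage * record_size);
    }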
212 static void mtdoops_write(struct mtdoops_context *cxt, int panic)
214 struct mtd_info *mtd = cxt->mtd;
220 hdr = cxt->oops_buf;
221 hdr[0] = cxt->nextcount;
225 ret = mtd->panic_write(mtd, cxt->nextpage * record_size,
226 record_size, &retlen, cxt->oops_buf);
228 ret = mtd->write(mtd, cxt->nextpage * record_size,
229 record_size, &retlen, cxt->oops_buf);
233 cxt->nextpage * record_size, retlen, record_size, ret);
234 mark_page_used(cxt, cxt->nextpage);
235 memset(cxt->oops_buf, 0xff, record_size);
237 mtdoops_inc_counter(cxt);
242 struct mtdoops_context *cxt =
245 mtdoops_write(cxt, 0);
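mtdoops_write() (lines 212-237) stamps the record header with the sequence counter and writes one record_size chunk to flash at the page offset. The panic flag picks mtd->panic_write(), the only path that is safe when the kernel can no longer schedule; otherwise the ordinary mtd->write() is used. The handler at lines 242-245, mtdoops_workfunc_write(), simply resolves cxt via container_of() and calls mtdoops_write(cxt, 0). A reconstruction; hdr[1], the record magic of mainline 2.6.36, is not visible in the fragments and is an assumption:

    static void mtdoops_write(struct mtdoops_context *cxt, int panic)
    {
            struct mtd_info *mtd = cxt->mtd;
            size_t retlen;
            u32 *hdr;
            int ret;

            /* Header: sequence counter, plus (assumed) a magic word */
            hdr = cxt->oops_buf;
            hdr[0] = cxt->nextcount;
            hdr[1] = MTDOOPS_KERNMSG_MAGIC;

            if (panic)
                    /* No sleeping or scheduling allowed in panic context */
                    ret = mtd->panic_write(mtd, cxt->nextpage * record_size,
                                           record_size, &retlen, cxt->oops_buf);
            else
                    ret = mtd->write(mtd, cxt->nextpage * record_size,
                                     record_size, &retlen, cxt->oops_buf);

            if (retlen != record_size || ret < 0)
                    printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
                           cxt->nextpage * record_size, retlen, record_size, ret);

            mark_page_used(cxt, cxt->nextpage);
            /* Reset the buffer to the erased-flash pattern for the next oops */
            memset(cxt->oops_buf, 0xff, record_size);

            mtdoops_inc_counter(cxt);
    }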
248 static void find_next_position(struct mtdoops_context *cxt)
250 struct mtd_info *mtd = cxt->mtd;
255 for (page = 0; page < cxt->oops_pages; page++) {
257 mark_page_used(cxt, page);
269 mark_page_unused(cxt, page);
288 cxt->nextpage = 0;
289 cxt->nextcount = 1;
290 schedule_work(&cxt->work_erase);
294 cxt->nextpage = maxpos;
295 cxt->nextcount = maxcount;
297 mtdoops_inc_counter(cxt);
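find_next_position() (lines 248-297) runs when the driver attaches to the MTD device: it reads the header of every record page to rebuild the used-page bitmap and locate the newest record. A condensed sketch; the header-read error handling and the wraparound-aware counter comparison of mainline 2.6.36 are simplified here:

    static void find_next_position(struct mtdoops_context *cxt)
    {
            struct mtd_info *mtd = cxt->mtd;
            u32 count[2], maxcount = 0xffffffff;
            int page, maxpos = 0;
            size_t retlen;

            for (page = 0; page < cxt->oops_pages; page++) {
                    /* Assume used until the header proves otherwise (257) */
                    mark_page_used(cxt, page);
                    mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
                              &retlen, (u_char *)&count[0]);
                    if (count[0] == 0xffffffff && count[1] == 0xffffffff)
                            mark_page_unused(cxt, page);    /* erased (269) */
                    if (count[0] == 0xffffffff)
                            continue;
                    /* Track the newest record; mainline adds wraparound
                     * handling for counters near 0xffffffff (simplified) */
                    if (maxcount == 0xffffffff || count[0] > maxcount) {
                            maxcount = count[0];
                            maxpos = page;
                    }
            }

            if (maxcount == 0xffffffff) {
                    /* Nothing stored yet: start at page 0, erasing it first (288-290) */
                    cxt->nextpage = 0;
                    cxt->nextcount = 1;
                    schedule_work(&cxt->work_erase);
                    return;
            }

            /* Resume one page past the newest record (294-297) */
            cxt->nextpage = maxpos;
            cxt->nextcount = maxcount;
            mtdoops_inc_counter(cxt);
    }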
304 struct mtdoops_context *cxt = container_of(dumper,
314 dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */
326 if (!cxt->mtd->panic_write)
329 mtdoops_write(cxt, 1);
334 schedule_work(&cxt->work_write);
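mtdoops_do_dump() (lines 304-334) is the kmsg_dump callback that fires on an oops or panic. It copies the tail of the kernel log into oops_buf just past the header (314), then picks a write path: a panic must be written immediately, which only works if the MTD driver implements panic_write (326-329), while a plain oops leaves the system running, so the flash write is deferred to the work item (334). A sketch assuming the two-segment kmsg_dump callback signature of the 2.6.x kernels:

    static void mtdoops_do_dump(struct kmsg_dumper *dumper,
                    enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
                    const char *s2, unsigned long l2)
    {
            struct mtdoops_context *cxt = container_of(dumper,
                            struct mtdoops_context, dump);
            char *dst;

            dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */
            /* ... copy at most record_size - MTDOOPS_HEADER_SIZE bytes from
             * the tails of the two log segments s1 and s2 into dst (elided) ... */

            if (reason != KMSG_DUMP_OOPS) {
                    /* Panic: write now or never (326-329) */
                    if (cxt->mtd->panic_write)
                            mtdoops_write(cxt, 1);
                    return;
            }

            /* Oops: the system survives, so write from process context (334) */
            schedule_work(&cxt->work_write);
    }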
339 struct mtdoops_context *cxt = &oops_cxt;
344 cxt->mtd_index = mtd->index;
346 if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
366 cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
368 if (!cxt->oops_page_used) {
373 cxt->dump.dump = mtdoops_do_dump;
374 err = kmsg_dump_register(&cxt->dump);
377 vfree(cxt->oops_page_used);
378 cxt->oops_page_used = NULL;
382 cxt->mtd = mtd;
383 cxt->oops_pages = (int)mtd->size / record_size;
384 find_next_position(cxt);
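mtdoops_notify_add() (lines 339-384) runs when an MTD device appears. In mainline 2.6.36 it first matches the device name against a mtddev= module parameter, which would explain line 344, then bails out unless this is the configured device (346). It allocates the used-page bitmap sized in whole longs (366), registers the kmsg dumper and unwinds the allocation on failure (373-378), and finally binds the device and scans it (382-384). A sketch along those lines, with the printks and the name matching elided:

    static void mtdoops_notify_add(struct mtd_info *mtd)
    {
            struct mtdoops_context *cxt = &oops_cxt;
            u64 mtdoops_pages = div_u64(mtd->size, record_size);
            int err;

            if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
                    return;

            /* One bit per record page, rounded up to whole longs (366) */
            cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
                            BITS_PER_LONG) * sizeof(unsigned long));
            if (!cxt->oops_page_used)
                    return;

            cxt->dump.dump = mtdoops_do_dump;
            err = kmsg_dump_register(&cxt->dump);
            if (err) {
                    vfree(cxt->oops_page_used);
                    cxt->oops_page_used = NULL;
                    return;
            }

            cxt->mtd = mtd;
            cxt->oops_pages = (int)mtd->size / record_size;
            find_next_position(cxt);
    }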
390 struct mtdoops_context *cxt = &oops_cxt;
392 if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
395 if (kmsg_dump_unregister(&cxt->dump) < 0)
398 cxt->mtd = NULL;
410 struct mtdoops_context *cxt = &oops_cxt;
428 cxt->mtd_index = -1;
431 cxt->mtd_index = mtd_index;
433 cxt->oops_buf = vmalloc(record_size);
434 if (!cxt->oops_buf) {
438 memset(cxt->oops_buf, 0xff, record_size);
440 INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
441 INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
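mtdoops_init() (lines 410-441) sets up everything that does not yet need the device. mtd_index starts at -1, meaning "not attached" (428); mainline parses a mtddev= module parameter and, when it is purely numeric, stores it as the index, which would be line 431. The record buffer is allocated once and pre-filled with 0xff, the erased-flash pattern (433-438), and the two work items are wired to their handlers (440-441). A sketch, with the parameter parsing and MTD-notifier registration elided:

    static int __init mtdoops_init(void)
    {
            struct mtdoops_context *cxt = &oops_cxt;

            cxt->mtd_index = -1;    /* no device bound yet (428) */
            /* ... if the mtddev= parameter is numeric, adopt it:
             * cxt->mtd_index = mtd_index;  (431) ... */

            cxt->oops_buf = vmalloc(record_size);
            if (!cxt->oops_buf)
                    return -ENOMEM;
            memset(cxt->oops_buf, 0xff, record_size);

            INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
            INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);

            /* ... register the MTD user notifier so notify_add/notify_remove
             * run as devices come and go (elided) ... */
            return 0;
    }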
449 struct mtdoops_context *cxt = &oops_cxt;
452 vfree(cxt->oops_buf);
453 vfree(cxt->oops_page_used);