#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <linux/types.h>

#include "libmtd.h"
#include "common.h"

struct erase_info_user {
	__u32 start;
	__u32 length;
};

/**
 * MTD operation modes
 *
 * @MTD_OPS_PLACE_OOB:	OOB data are placed at the given offset (default)
 * @MTD_OPS_AUTO_OOB:	OOB data are automatically placed at the free areas
 *			which are defined by the internal ecclayout
 * @MTD_OPS_RAW:	data are transferred as-is, with no error correction;
 *			this mode implies %MTD_OPS_PLACE_OOB
 *
 * These modes can be passed to ioctl(MEMWRITE) and are also used internally.
 * See notes on "MTD file modes" for discussion on %MTD_OPS_RAW vs.
 * %MTD_FILE_MODE_RAW.
 */
enum {
	MTD_OPS_PLACE_OOB = 0,
	MTD_OPS_AUTO_OOB = 1,
	MTD_OPS_RAW = 2,
};
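/* Only MTD_OPS_PLACE_OOB is used below: mtd_write() is called with no OOB data. */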

#define MEMGETINFO	_IOR('M', 1, struct mtd_dev_info)
#define MEMERASE	_IOW('M', 2, struct erase_info_user)
/* Check if an eraseblock is bad */
#define MEMGETBADBLOCK	_IOW('M', 11, __kernel_loff_t)

/* Append log messages to /dev/console; the stream stays open for the process lifetime. */
void dni_libmtd_log(const char *fmt, ...)
{
	va_list ap;
	static FILE *filp;

	if ((filp == NULL) && (filp = fopen("/dev/console", "a")) == NULL)
		return;

	va_start(ap, fmt);
	vfprintf(filp, fmt, ap);
	fputs("\n", filp);
	va_end(ap);
}

static void erase_buffer(void *buffer, size_t size)
{
	const uint8_t kEraseByte = 0xff;

	if (buffer != NULL && size > 0)
		memset(buffer, kEraseByte, size);
}

/*
 * mtd_device is the full path of the MTD device node (e.g. /dev/mtdX).
 * If img is non-NULL, the image file is written starting at imgoffset and
 * buf/len are ignored; otherwise len bytes are taken from buf.  Data is
 * written from mtdoffset onwards in whole eraseblock-sized chunks, so a
 * caller-supplied buf must cover len rounded up to a full eraseblock.
 */
int dni_mtd_write(const char *mtd_device, char *img, long long imgoffset, char *buf, int len, long long mtdoffset)
{
	int blockalign = 1; /* default to using actual block size */
	bool writeoob = false;
	bool pad = true;
	bool markbad = false;
	int fd, rfd = -1, ret = -1;
	size_t w, e, offset;
	struct mtd_dev_info mtd;
	long long blockstart = -1;
	long long offs;
	long long imglen = 0;
	long long remain_len = 0;
	int ebsize_aligned;
	bool baderaseblock = false;
	libmtd_t mtd_desc;
	uint8_t write_mode;
	int pagelen;
	size_t filebuf_max = 0;
	ssize_t cnt = 0;
	unsigned char *filebuf = NULL;

	/* Open the device */
	if ((fd = open(mtd_device, O_RDWR | O_SYNC)) == -1) {
		return -1;
	}

	mtd_desc = libmtd_open();
	if (!mtd_desc) {
		fprintf(stderr, "can't initialize libmtd\n");
		goto fin;
	}

	/* Fill in MTD device capability structure */
	if (mtd_get_dev_info(mtd_desc, mtd_device, &mtd) < 0)
		errmsg_die("mtd_get_dev_info failed");

	/*
	 * Pretend erasesize is specified number of blocks - to match jffs2
	 *   (virtual) block size
	 * Use this value throughout unless otherwise necessary
	 */
	ebsize_aligned = mtd.eb_size * blockalign;
	write_mode = MTD_OPS_PLACE_OOB;
	offset = w = e = 0;
	pagelen = mtd.min_io_size + ((writeoob) ? mtd.oob_size : 0);

	/* Determine the input length, from the image file or the caller's buffer */
	if (img) {
		if ((rfd = open(img, O_RDONLY)) < 0) {
			dni_libmtd_log("unable to open file: %s\n", img);
			goto fin;
		}
		struct stat st;
		if (fstat(rfd, &st)) {
			dni_libmtd_log("unable to stat img: %s\n", img);
			goto fin;
		}
		imglen = st.st_size - imgoffset;
		lseek(rfd, imgoffset, SEEK_SET);
	}
	else {
		imglen = len;
	}

	/* Check if the input is page-aligned */
	if (!pad && (imglen % pagelen) != 0) {
		dni_libmtd_log("Input is not page-aligned and padding is disabled\n");
		goto fin;
	}

	/* Check if the length fits into the device */
	if ((imglen / pagelen) * mtd.min_io_size > mtd.size - mtdoffset) {
		dni_libmtd_log("Image %lld bytes, NAND page %d bytes, OOB area %d"
				" bytes, device size %lld bytes\n",
				imglen, pagelen, mtd.oob_size, mtd.size);
		dni_libmtd_log("Input does not fit into device\n");
		goto fin;
	}

	/*
	 * Allocate a buffer big enough to contain all the data (OOB included)
	 * for one eraseblock. The order of operations here matters; if ebsize
	 * and pagelen are large enough, then "ebsize_aligned * pagelen" could
	 * overflow a 32-bit data type.
	 */
	filebuf_max = ebsize_aligned / mtd.min_io_size * pagelen;
	filebuf = xmalloc(filebuf_max);
	erase_buffer(filebuf, filebuf_max);

	offset = 0;
	if (imglen > (long long)filebuf_max)
		remain_len = imglen - filebuf_max;
	else
		remain_len = 0;

	/*
	 * Get data from the input and write it to the device while there is
	 * still input to read and we are still within the device bounds.
	 */
	while (mtdoffset < mtd.size) {
		if (img) {
			erase_buffer(filebuf, filebuf_max);
			if ((cnt = read(rfd, filebuf, filebuf_max)) < 0) {
				dni_libmtd_log("File I/O error: %s\n", img);
				goto fin;
			}
			else if (cnt == 0) {
				dni_libmtd_log("No more data read\n");
				break;
			}
			if ((size_t)cnt < filebuf_max) {
				if (!pad) {
					goto fin;
				}
				erase_buffer(filebuf + cnt, filebuf_max - cnt);
			}
		}

		/*
		 * New eraseblock, check for bad block(s).
		 * Stay in the loop to be sure that, if mtdoffset changes because
		 * of a bad block, the next block that will be written to
		 * is also checked. Thus, we avoid errors if the block(s) after the
		 * skipped block(s) are also bad (number of blocks depending on
		 * the blockalign).
		 */
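		/*
		 * Note: mtdoffset & (~ebsize_aligned + 1) is mtdoffset & -ebsize_aligned,
		 * i.e. mtdoffset rounded down to the start of its eraseblock
		 * (assuming ebsize_aligned is a power of two).
		 */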
		while (blockstart != (mtdoffset & (~ebsize_aligned + 1))) {
			blockstart = mtdoffset & (~ebsize_aligned + 1);
			offs = blockstart;

			baderaseblock = false;
			/* Check all the blocks in an erase block for bad blocks */
			do {
				ret = mtd_is_bad(&mtd, fd, offs / ebsize_aligned);
				if (ret < 0) {
					goto fin;
				} else if (ret == 1) {
					baderaseblock = true;
				}

				if (baderaseblock) {
					mtdoffset = blockstart + ebsize_aligned;

					if (mtdoffset > mtd.size) {
						errmsg("too many bad blocks, cannot complete request");
						goto fin;
					}
				} else {
					if (mtd_erase(mtd_desc, &mtd, fd, offs / ebsize_aligned)) {
						int errno_tmp = errno;
						if (errno_tmp != EIO)
							goto fin;
					}
				}

				offs += ebsize_aligned / blockalign;
			} while (offs < blockstart + ebsize_aligned);
		}

		/* Write out data */
		ret = mtd_write(mtd_desc, &mtd, fd, mtdoffset / mtd.eb_size,
				mtdoffset % mtd.eb_size,
				img ? filebuf : &buf[offset],
				filebuf_max,
				NULL,
				0,
				write_mode);
		if (ret) {
			long long i;
			if (errno != EIO) {
				goto fin;
			}

			dni_libmtd_log("Erasing failed write from %#08llx to %#08llx\n",
				blockstart, blockstart + ebsize_aligned - 1);
			for (i = blockstart; i < blockstart + ebsize_aligned; i += mtd.eb_size) {
				if (mtd_erase(mtd_desc, &mtd, fd, i / mtd.eb_size)) {
					int errno_tmp = errno;
					dni_libmtd_log("%s: MTD Erase failure", mtd_device);
					if (errno_tmp != EIO)
						goto fin;
					if (markbad) {
						dni_libmtd_log("Marking block at %08llx bad\n",
							i & (~mtd.eb_size + 1));
						if (mtd_mark_bad(&mtd, fd, i / mtd.eb_size)) {
							dni_libmtd_log("%s: MTD Mark bad block failure", mtd_device);
							goto fin;
						}
					}
				}
			}

			if (markbad) {
				dni_libmtd_log("Marking block at %08llx bad\n",
						mtdoffset & (~mtd.eb_size + 1));
				if (mtd_mark_bad(&mtd, fd, mtdoffset / mtd.eb_size)) {
					dni_libmtd_log("%s: MTD Mark bad block failure", mtd_device);
					goto fin;
				}
			}
			mtdoffset = blockstart + ebsize_aligned;
			continue;
		}
		mtdoffset += mtd.eb_size;
		/* Advance to the next eraseblock-sized chunk of input data */
		if (remain_len > 0) {
			offset += filebuf_max;
			remain_len -= filebuf_max;
		}
		else {
			break;
		}
	}
	ret = 0;
fin:
	close(fd);
	if (rfd >= 0)
		close(rfd);
	if (mtd_desc)
		libmtd_close(mtd_desc);
	free(filebuf);
	return ret;
}
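
/*
 * Example (illustrative only): a minimal sketch of how dni_mtd_write() might
 * be called.  The device node and image path are hypothetical placeholders,
 * and the guard macro below is not defined anywhere in this tree.
 */
#ifdef DNI_MTD_EXAMPLE
static int example_write_image(void)
{
	/* File-based path: stream the whole image into the partition,
	 * starting at offset 0 of both the file and the device.
	 * A non-NULL img means buf/len are ignored. */
	return dni_mtd_write("/dev/mtd5", "/tmp/firmware.img", 0, NULL, 0, 0);
}
#endif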
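/*
 * Read len bytes from the start of mtd_device into buf, skipping bad
 * eraseblocks.  Data is copied in whole eraseblock-sized chunks, so buf must
 * be large enough for len rounded up to a full eraseblock.  Returns the
 * number of bytes actually copied (a multiple of the chunk size), 0 if
 * nothing was read, or -1 if the device cannot be opened.
 */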
int dni_mtd_read(const char *mtd_device, char *buf, int len)
{
	long long mtdoffset = 0;
	int blockalign = 1; /* default to using actual block size */
	bool writeoob = false;
	bool pad = false;
	int fd, rlen = 0, ret = -1;
	size_t w, e, offset;
	struct mtd_dev_info mtd;
	long long blockstart = -1;
	long long offs;
	long long imglen = 0;
	long long remain_len = 0;
	int ebsize_aligned;
	bool baderaseblock = false;
	libmtd_t mtd_desc;
	int pagelen;
	size_t filebuf_max = 0;

	if ((fd = open(mtd_device, O_RDONLY | O_SYNC)) < 0)
		return -1;

	mtd_desc = libmtd_open();
	if (!mtd_desc) {
		fprintf(stderr, "can't initialize libmtd\n");
		goto fin;
	}

	/* Fill in MTD device capability structure */
	if (mtd_get_dev_info(mtd_desc, mtd_device, &mtd) < 0)
		errmsg_die("mtd_get_dev_info failed");

	/*
	 * Pretend erasesize is specified number of blocks - to match jffs2
	 *   (virtual) block size
	 * Use this value throughout unless otherwise necessary
	 */
	ebsize_aligned = mtd.eb_size * blockalign;
	offset = w = e = 0;
	imglen = len;
	pagelen = mtd.min_io_size + ((writeoob) ? mtd.oob_size : 0);

	/* Check if the requested length is page-aligned */
	if (!pad && (imglen % pagelen) != 0) {
		dni_libmtd_log("Requested length is not page-aligned\n");
		goto fin;
	}

	/* Check if the requested length fits into the device */
	if ((imglen / pagelen) * mtd.min_io_size > mtd.size - mtdoffset) {
		dni_libmtd_log("Request %lld bytes, NAND page %d bytes, OOB area %d"
				" bytes, device size %lld bytes\n",
				imglen, pagelen, mtd.oob_size, mtd.size);
		dni_libmtd_log("Requested length does not fit into device\n");
		goto fin;
	}

	/*
	 * One eraseblock's worth of data (OOB included) is read per iteration.
	 * The order of operations here matters; if ebsize and pagelen are
	 * large enough, then "ebsize_aligned * pagelen" could overflow a
	 * 32-bit data type.
	 */
	filebuf_max = ebsize_aligned / mtd.min_io_size * pagelen;
	offset = 0;
	if ((size_t)len > filebuf_max)
		remain_len = len - filebuf_max;
	else
		remain_len = 0;

	/*
	 * Read from the device into the caller's buffer while we are still
	 * within the device bounds.
	 */
	while (mtdoffset < mtd.size) {
		/*
		 * New eraseblock, check for bad block(s).
		 * Stay in the loop to be sure that, if mtdoffset changes because
		 * of a bad block, the next block that will be read from
		 * is also checked. Thus, we avoid errors if the block(s) after the
		 * skipped block(s) are also bad (number of blocks depending on
		 * the blockalign).
		 */
		while (blockstart != (mtdoffset & (~ebsize_aligned + 1))) {
			blockstart = mtdoffset & (~ebsize_aligned + 1);
			offs = blockstart;

			baderaseblock = false;
			/* Check all the blocks in an erase block for bad blocks */
			do {
				ret = mtd_is_bad(&mtd, fd, offs / ebsize_aligned);
				if (ret < 0) {
					goto fin;
				} else if (ret == 1) {
					baderaseblock = true;
				}

				if (baderaseblock) {
					mtdoffset = blockstart + ebsize_aligned;

					if (mtdoffset > mtd.size) {
						errmsg("too many bad blocks, cannot complete request");
						goto fin;
					}
				}
				offs += ebsize_aligned / blockalign;
			} while (offs < blockstart + ebsize_aligned);
		}

		/* Read one eraseblock of data and exit on failure */
		if (mtd_read(&mtd, fd, mtdoffset / mtd.eb_size,
				mtdoffset % mtd.eb_size, &buf[offset], filebuf_max)) {
			errmsg("mtd_read failed");
			goto fin;
		}
		rlen += filebuf_max;

		mtdoffset += mtd.eb_size;
		/* Advance to the next eraseblock-sized chunk of the buffer */
		if (remain_len > 0) {
			offset += filebuf_max;
			remain_len -= filebuf_max;
		}
		else {
			break;
		}
	}
fin:
	close(fd);
	if (mtd_desc)
		libmtd_close(mtd_desc);
	return rlen;
}
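
/*
 * Example (illustrative only): a minimal sketch of how dni_mtd_read() might be
 * used.  The device node, the 128 KiB eraseblock size and the guard macro are
 * hypothetical placeholders; size the destination for your actual device.
 */
#ifdef DNI_MTD_EXAMPLE
static int example_read_header(void)
{
	/* The buffer covers a whole (assumed) eraseblock because dni_mtd_read()
	 * copies eraseblock-sized chunks into it, even for a short request. */
	static char dst[128 * 1024];

	return dni_mtd_read("/dev/mtd5", dst, 2048) > 0 ? 0 : -1;
}
#endif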