1/*-
2 * Copyright (c) 2007, Juniper Networks, Inc.
3 * Copyright (c) 2012-2013, SRI International
4 * All rights reserved.
5 *
6 * Portions of this software were developed by SRI International and the
7 * University of Cambridge Computer Laboratory under DARPA/AFRL contract
8 * (FA8750-10-C-0237) ("CTSRD"), as part of the DARPA CRASH research
9 * programme.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 3. Neither the name of the author nor the names of any co-contributors
20 *    may be used to endorse or promote products derived from this software
21 *    without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
36#include <sys/cdefs.h>
37__FBSDID("$FreeBSD$");
38
39#include "opt_cfi.h"
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/bus.h>
44#include <sys/conf.h>
45#include <sys/endian.h>
46#include <sys/kenv.h>
47#include <sys/kernel.h>
48#include <sys/malloc.h>
49#include <sys/module.h>
50#include <sys/rman.h>
51#include <sys/sysctl.h>
52
53#include <machine/bus.h>
54
55#include <dev/cfi/cfi_reg.h>
56#include <dev/cfi/cfi_var.h>
57
/* Forward declaration; defined below, after cfi_attach(). */
static void cfi_add_sysctls(struct cfi_softc *);

/* Character-device switch for /dev/cfi%u, defined in the cfi dev module. */
extern struct cdevsw cfi_cdevsw;

/* Shared with the bus front-ends and the cfid disk driver. */
char cfi_driver_name[] = "cfi";
devclass_t cfi_devclass;
devclass_t cfi_diskclass;
65
66uint32_t
67cfi_read_raw(struct cfi_softc *sc, u_int ofs)
68{
69	uint32_t val;
70
71	ofs &= ~(sc->sc_width - 1);
72	switch (sc->sc_width) {
73	case 1:
74		val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
75		break;
76	case 2:
77		val = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
78		break;
79	case 4:
80		val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
81		break;
82	default:
83		val = ~0;
84		break;
85	}
86	return (val);
87}
88
/*
 * Read one data word at 'ofs' and convert it from the flash's
 * little-endian byte order to host order.  The offset is aligned down
 * to the port width.  Returns ~0 for unsupported widths.
 */
uint32_t
cfi_read(struct cfi_softc *sc, u_int ofs)
{
	uint32_t val;
	uint16_t sval;

	ofs &= ~(sc->sc_width - 1);
	switch (sc->sc_width) {
	case 1:
		/* A single byte needs no endian conversion. */
		val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
		break;
	case 2:
		sval = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
		val = le16toh(sval);
		break;
	case 4:
		val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
		val = le32toh(val);
		break;
	default:
		val = ~0;
		break;
	}
	return (val);
}
114
115static void
116cfi_write(struct cfi_softc *sc, u_int ofs, u_int val)
117{
118
119	ofs &= ~(sc->sc_width - 1);
120	switch (sc->sc_width) {
121	case 1:
122		bus_space_write_1(sc->sc_tag, sc->sc_handle, ofs, val);
123		break;
124	case 2:
125		bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, htole16(val));
126		break;
127	case 4:
128		bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, htole32(val));
129		break;
130	}
131}
132
/*
 * Read one byte of the CFI query structure: enter query mode, read the
 * word at query offset 'ofs' (scaled by the port width), then return
 * the chip to read-array mode.  Only the low byte is returned.
 */
uint8_t
cfi_read_qry(struct cfi_softc *sc, u_int ofs)
{
	uint8_t val;
 
	cfi_write(sc, CFI_QRY_CMD_ADDR * sc->sc_width, CFI_QRY_CMD_DATA);
	val = cfi_read(sc, ofs * sc->sc_width);
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return (val);
}
143
/*
 * Issue an AMD-style command: the two-cycle unlock sequence followed
 * by the command write proper, all relative to base offset 'ofs'.
 */
static void
cfi_amd_write(struct cfi_softc *sc, u_int ofs, u_int addr, u_int data)
{

	cfi_write(sc, ofs + AMD_ADDR_START, CFI_AMD_UNLOCK);
	cfi_write(sc, ofs + AMD_ADDR_ACK, CFI_AMD_UNLOCK_ACK);
	cfi_write(sc, ofs + addr, data);
}
152
/*
 * Format a byte count as a short human-readable string ("512B", "1KB",
 * "64MB", ...).  Returns a pointer to a static buffer, so the result
 * is only valid until the next call and the function is not reentrant.
 */
static char *
cfi_fmtsize(uint32_t sz)
{
	static char buf[8];
	static const char *sfx[] = { "", "K", "M", "G" };
	int sfxidx;

	sfxidx = 0;
	while (sfxidx < 3 && sz > 1023) {
		sz /= 1024;
		sfxidx++;
	}

	/* snprintf instead of sprintf: bound the write to the buffer. */
	snprintf(buf, sizeof(buf), "%u%sB", sz, sfx[sfxidx]);
	return (buf);
}
169
/*
 * Probe for a CFI flash device.  The memory resource is mapped just
 * long enough to look for the "QRY" signature, record the vendor
 * command set and device size, and sanity-check the interface width;
 * it is always released again before returning (attach re-allocates
 * it).  Returns 0 on success or ENXIO/EINVAL.
 */
int
cfi_probe(device_t dev)
{
	char desc[80];
	struct cfi_softc *sc;
	char *vend_str;
	int error;
	uint16_t iface, vend;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_rid = 0;
	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
	    RF_ACTIVE);
	if (sc->sc_res == NULL)
		return (ENXIO);

	sc->sc_tag = rman_get_bustag(sc->sc_res);
	sc->sc_handle = rman_get_bushandle(sc->sc_res);

	if (sc->sc_width == 0) {
		/*
		 * Port width was not pre-set by the bus front-end:
		 * try 1-, 2- and 4-byte accesses until 'Q' shows up.
		 */
		sc->sc_width = 1;
		while (sc->sc_width <= 4) {
			if (cfi_read_qry(sc, CFI_QRY_IDENT) == 'Q')
				break;
			sc->sc_width <<= 1;
		}
	} else if (cfi_read_qry(sc, CFI_QRY_IDENT) != 'Q') {
		error = ENXIO;
		goto out;
	}
	if (sc->sc_width > 4) {
		/* Auto-detection exhausted all widths: not a CFI part. */
		error = ENXIO;
		goto out;
	}

	/* We got a Q. Check if we also have the R and the Y. */
	if (cfi_read_qry(sc, CFI_QRY_IDENT + 1) != 'R' ||
	    cfi_read_qry(sc, CFI_QRY_IDENT + 2) != 'Y') {
		error = ENXIO;
		goto out;
	}

	/* Get the vendor and command set (16-bit little-endian). */
	vend = cfi_read_qry(sc, CFI_QRY_VEND) |
	    (cfi_read_qry(sc, CFI_QRY_VEND + 1) << 8);

	sc->sc_cmdset = vend;

	switch (vend) {
	case CFI_VEND_AMD_ECS:
	case CFI_VEND_AMD_SCS:
		vend_str = "AMD/Fujitsu";
		break;
	case CFI_VEND_INTEL_ECS:
		vend_str = "Intel/Sharp";
		break;
	case CFI_VEND_INTEL_SCS:
		vend_str = "Intel";
		break;
	case CFI_VEND_MITSUBISHI_ECS:
	case CFI_VEND_MITSUBISHI_SCS:
		vend_str = "Mitsubishi";
		break;
	default:
		vend_str = "Unknown vendor";
		break;
	}

	/* Get the device size (stored as a power-of-two exponent). */
	sc->sc_size = 1U << cfi_read_qry(sc, CFI_QRY_SIZE);

	/* Sanity-check the I/F */
	iface = cfi_read_qry(sc, CFI_QRY_IFACE) |
	    (cfi_read_qry(sc, CFI_QRY_IFACE + 1) << 8);

	/*
	 * Adding 1 to iface will give us a bit-wise "switch"
	 * that allows us to test for the interface width by
	 * testing a single bit.
	 */
	iface++;

	error = (iface & sc->sc_width) ? 0 : EINVAL;
	if (error)
		goto out;

	snprintf(desc, sizeof(desc), "%s - %s", vend_str,
	    cfi_fmtsize(sc->sc_size));
	device_set_desc_copy(dev, desc);

 out:
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
	return (error);
}
266
267int
268cfi_attach(device_t dev)
269{
270	struct cfi_softc *sc;
271	u_int blksz, blocks;
272	u_int r, u;
273	uint64_t mtoexp, ttoexp;
274#ifdef CFI_SUPPORT_STRATAFLASH
275	uint64_t ppr;
276	char name[KENV_MNAMELEN], value[32];
277#endif
278
279	sc = device_get_softc(dev);
280	sc->sc_dev = dev;
281
282	sc->sc_rid = 0;
283	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
284#ifndef ATSE_CFI_HACK
285	    RF_ACTIVE);
286#else
287	    RF_ACTIVE | RF_SHAREABLE);
288#endif
289	if (sc->sc_res == NULL)
290		return (ENXIO);
291
292	sc->sc_tag = rman_get_bustag(sc->sc_res);
293	sc->sc_handle = rman_get_bushandle(sc->sc_res);
294
295	/* Get time-out values for erase, write, and buffer write. */
296	ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_ERASE);
297	mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_ERASE);
298	if (ttoexp == 0) {
299		device_printf(dev, "erase timeout == 0, using 2^16ms\n");
300		ttoexp = 16;
301	}
302	if (ttoexp > 41) {
303		device_printf(dev, "insane timeout: 2^%jdms\n", ttoexp);
304		return (EINVAL);
305	}
306	if (mtoexp == 0) {
307		device_printf(dev, "max erase timeout == 0, using 2^%jdms\n",
308		    ttoexp + 4);
309		mtoexp = 4;
310	}
311	if (ttoexp + mtoexp > 41) {
312		device_printf(dev, "insane max erase timeout: 2^%jd\n",
313		    ttoexp + mtoexp);
314		return (EINVAL);
315	}
316	sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] = SBT_1MS * (1ULL << ttoexp);
317	sc->sc_max_timeouts[CFI_TIMEOUT_ERASE] =
318	    sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] * (1ULL << mtoexp);
319
320	ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_WRITE);
321	mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_WRITE);
322	if (ttoexp == 0) {
323		device_printf(dev, "write timeout == 0, using 2^18ns\n");
324		ttoexp = 18;
325	}
326	if (ttoexp > 51) {
327		device_printf(dev, "insane write timeout: 2^%jdus\n", ttoexp);
328		return (EINVAL);
329	}
330	if (mtoexp == 0) {
331		device_printf(dev, "max write timeout == 0, using 2^%jdms\n",
332		    ttoexp + 4);
333		mtoexp = 4;
334	}
335	if (ttoexp + mtoexp > 51) {
336		device_printf(dev, "insane max write timeout: 2^%jdus\n",
337		    ttoexp + mtoexp);
338		return (EINVAL);
339	}
340	sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] = SBT_1US * (1ULL << ttoexp);
341	sc->sc_max_timeouts[CFI_TIMEOUT_WRITE] =
342	    sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] * (1ULL << mtoexp);
343
344	ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_BUFWRITE);
345	mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_BUFWRITE);
346	/* Don't check for 0, it means not-supported. */
347	if (ttoexp > 51) {
348		device_printf(dev, "insane write timeout: 2^%jdus\n", ttoexp);
349		return (EINVAL);
350	}
351	if (ttoexp + mtoexp > 51) {
352		device_printf(dev, "insane max write timeout: 2^%jdus\n",
353		    ttoexp + mtoexp);
354		return (EINVAL);
355	}
356	sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] =
357	    SBT_1US * (1ULL << cfi_read_qry(sc, CFI_QRY_TTO_BUFWRITE));
358	sc->sc_max_timeouts[CFI_TIMEOUT_BUFWRITE] =
359	    sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] *
360	    (1ULL << cfi_read_qry(sc, CFI_QRY_MTO_BUFWRITE));
361
362	/* Get the maximum size of a multibyte program */
363	if (sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] != 0)
364		sc->sc_maxbuf = 1 << (cfi_read_qry(sc, CFI_QRY_MAXBUF) |
365		    cfi_read_qry(sc, CFI_QRY_MAXBUF) << 8);
366	else
367		sc->sc_maxbuf = 0;
368
369	/* Get erase regions. */
370	sc->sc_regions = cfi_read_qry(sc, CFI_QRY_NREGIONS);
371	sc->sc_region = malloc(sc->sc_regions * sizeof(struct cfi_region),
372	    M_TEMP, M_WAITOK | M_ZERO);
373	for (r = 0; r < sc->sc_regions; r++) {
374		blocks = cfi_read_qry(sc, CFI_QRY_REGION(r)) |
375		    (cfi_read_qry(sc, CFI_QRY_REGION(r) + 1) << 8);
376		sc->sc_region[r].r_blocks = blocks + 1;
377
378		blksz = cfi_read_qry(sc, CFI_QRY_REGION(r) + 2) |
379		    (cfi_read_qry(sc, CFI_QRY_REGION(r) + 3) << 8);
380		sc->sc_region[r].r_blksz = (blksz == 0) ? 128 :
381		    blksz * 256;
382	}
383
384	/* Reset the device to a default state. */
385	cfi_write(sc, 0, CFI_BCS_CLEAR_STATUS);
386
387	if (bootverbose) {
388		device_printf(dev, "[");
389		for (r = 0; r < sc->sc_regions; r++) {
390			printf("%ux%s%s", sc->sc_region[r].r_blocks,
391			    cfi_fmtsize(sc->sc_region[r].r_blksz),
392			    (r == sc->sc_regions - 1) ? "]\n" : ",");
393		}
394	}
395
396	u = device_get_unit(dev);
397	sc->sc_nod = make_dev(&cfi_cdevsw, u, UID_ROOT, GID_WHEEL, 0600,
398	    "%s%u", cfi_driver_name, u);
399	sc->sc_nod->si_drv1 = sc;
400
401	cfi_add_sysctls(sc);
402
403#ifdef CFI_SUPPORT_STRATAFLASH
404	/*
405	 * Store the Intel factory PPR in the environment.  In some
406	 * cases it is the most unique ID on a board.
407	 */
408	if (cfi_intel_get_factory_pr(sc, &ppr) == 0) {
409		if (snprintf(name, sizeof(name), "%s.factory_ppr",
410		    device_get_nameunit(dev)) < (sizeof(name) - 1) &&
411		    snprintf(value, sizeof(value), "0x%016jx", ppr) <
412		    (sizeof(value) - 1))
413			(void) setenv(name, value);
414	}
415#endif
416
417	device_add_child(dev, "cfid", -1);
418	bus_generic_attach(dev);
419
420	return (0);
421}
422
/*
 * Export the timeout-exceeded counters under the device's sysctl tree.
 * The buffered-write counters are only added when the chip supports
 * multibyte programming (sc_maxbuf > 0).
 *
 * NOTE(review): the node names spell "timout" (sic).  They are
 * user-visible sysctl names, so renaming them would break existing
 * consumers -- left as-is deliberately.
 */
static void
cfi_add_sysctls(struct cfi_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(sc->sc_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "typical_erase_timout_count",
	    CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_ERASE],
	    0, "Number of times the typical erase timeout was exceeded");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "max_erase_timout_count",
	    CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_ERASE], 0,
	    "Number of times the maximum erase timeout was exceeded");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "typical_write_timout_count",
	    CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_WRITE], 0,
	    "Number of times the typical write timeout was exceeded");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "max_write_timout_count",
	    CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_WRITE], 0,
	    "Number of times the maximum write timeout was exceeded");
	if (sc->sc_maxbuf > 0) {
		SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		    "typical_bufwrite_timout_count",
		    CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_BUFWRITE], 0,
		    "Number of times the typical buffered write timeout was "
		    "exceeded");
		SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		    "max_bufwrite_timout_count",
		    CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_BUFWRITE], 0,
		    "Number of times the maximum buffered write timeout was "
		    "exceeded");
	}
}
461
462int
463cfi_detach(device_t dev)
464{
465	struct cfi_softc *sc;
466
467	sc = device_get_softc(dev);
468
469	destroy_dev(sc->sc_nod);
470	free(sc->sc_region, M_TEMP);
471	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
472	return (0);
473}
474
/*
 * Busy-wait for the chip to finish the operation in progress at 'ofs'.
 * 'start' is the sbinuptime() timestamp taken when the operation was
 * issued and 'cmd' selects which timeout pair (erase/write/bufwrite)
 * applies.  Returns 0, an errno derived from the Intel status bits,
 * or ETIMEDOUT.
 *
 * NOTE(review): exceeding the max timeout only bumps sc_mto_counts;
 * it does not abort the wait, so the loop exits only once the chip
 * reports completion (or an unknown cmdset never exits) and the
 * trailing ETIMEDOUT assignment looks unreachable -- TODO confirm
 * whether this is intentional.
 */
static int
cfi_wait_ready(struct cfi_softc *sc, u_int ofs, sbintime_t start,
    enum cfi_wait_cmd cmd)
{
	int done, error, tto_exceeded;
	uint32_t st0 = 0, st = 0;
	sbintime_t now;

	done = 0;
	error = 0;
	tto_exceeded = 0;
	while (!done && !error) {
		/*
		 * Save time before we start so we always do one check
		 * after the timeout has expired.
		 */
		now = sbinuptime();

		switch (sc->sc_cmdset) {
		case CFI_VEND_INTEL_ECS:
		case CFI_VEND_INTEL_SCS:
			/* Intel: poll the status register for WSMS (ready). */
			st = cfi_read(sc, ofs);
			done = (st & CFI_INTEL_STATUS_WSMS);
			if (done) {
				/* NB: bit 0 is reserved */
				st &= ~(CFI_INTEL_XSTATUS_RSVD |
					CFI_INTEL_STATUS_WSMS |
					CFI_INTEL_STATUS_RSVD);
				if (st & CFI_INTEL_STATUS_DPS)
					error = EPERM;
				else if (st & CFI_INTEL_STATUS_PSLBS)
					error = EIO;
				else if (st & CFI_INTEL_STATUS_ECLBS)
					error = ENXIO;
				else if (st)
					error = EACCES;
			}
			break;
		case CFI_VEND_AMD_SCS:
		case CFI_VEND_AMD_ECS:
			/*
			 * AMD: bit 6 toggles between consecutive reads
			 * while the operation is still in progress.
			 */
			st0 = cfi_read(sc, ofs);
			st = cfi_read(sc, ofs);
			done = ((st & 0x40) == (st0 & 0x40)) ? 1 : 0;
			break;
		}

		if (tto_exceeded ||
		    now > start + sc->sc_typical_timeouts[cmd]) {
			if (!tto_exceeded) {
				/* Count the typical-timeout overrun once. */
				tto_exceeded = 1;
				sc->sc_tto_counts[cmd]++;
#ifdef CFI_DEBUG_TIMEOUT
				device_printf(sc->sc_dev,
				    "typical timeout exceeded (cmd %d)", cmd);
#endif
			}
			if (now > start + sc->sc_max_timeouts[cmd]) {
				sc->sc_mto_counts[cmd]++;
#ifdef CFI_DEBUG_TIMEOUT
				device_printf(sc->sc_dev,
				    "max timeout exceeded (cmd %d)", cmd);
#endif
			}
		}
	}
	if (!done && !error)
		error = ETIMEDOUT;
	if (error)
		printf("\nerror=%d (st 0x%x st0 0x%x)\n", error, st, st0);
	return (error);
}
546
547int
548cfi_write_block(struct cfi_softc *sc)
549{
550	union {
551		uint8_t		*x8;
552		uint16_t	*x16;
553		uint32_t	*x32;
554	} ptr, cpyprt;
555	register_t intr;
556	int error, i, neederase = 0;
557	uint32_t st;
558	u_int wlen;
559	sbintime_t start;
560
561	/* Intel flash must be unlocked before modification */
562	switch (sc->sc_cmdset) {
563	case CFI_VEND_INTEL_ECS:
564	case CFI_VEND_INTEL_SCS:
565		cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LBS);
566		cfi_write(sc, sc->sc_wrofs, CFI_INTEL_UB);
567		cfi_write(sc, sc->sc_wrofs, CFI_BCS_READ_ARRAY);
568		break;
569	}
570
571	/* Check if an erase is required. */
572	for (i = 0; i < sc->sc_wrbufsz; i++)
573		if ((sc->sc_wrbuf[i] & sc->sc_wrbufcpy[i]) != sc->sc_wrbuf[i]) {
574			neederase = 1;
575			break;
576		}
577
578	if (neederase) {
579		intr = intr_disable();
580		start = sbinuptime();
581		/* Erase the block. */
582		switch (sc->sc_cmdset) {
583		case CFI_VEND_INTEL_ECS:
584		case CFI_VEND_INTEL_SCS:
585			cfi_write(sc, sc->sc_wrofs, CFI_BCS_BLOCK_ERASE);
586			cfi_write(sc, sc->sc_wrofs, CFI_BCS_CONFIRM);
587			break;
588		case CFI_VEND_AMD_SCS:
589		case CFI_VEND_AMD_ECS:
590			cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START,
591			    CFI_AMD_ERASE_SECTOR);
592			cfi_amd_write(sc, sc->sc_wrofs, 0, CFI_AMD_BLOCK_ERASE);
593			break;
594		default:
595			/* Better safe than sorry... */
596			intr_restore(intr);
597			return (ENODEV);
598		}
599		intr_restore(intr);
600		error = cfi_wait_ready(sc, sc->sc_wrofs, start,
601		    CFI_TIMEOUT_ERASE);
602		if (error)
603			goto out;
604	} else
605		error = 0;
606
607	/* Write the block using a multibyte write if supported. */
608	ptr.x8 = sc->sc_wrbuf;
609	cpyprt.x8 = sc->sc_wrbufcpy;
610	if (sc->sc_maxbuf > sc->sc_width) {
611		switch (sc->sc_cmdset) {
612		case CFI_VEND_INTEL_ECS:
613		case CFI_VEND_INTEL_SCS:
614			for (i = 0; i < sc->sc_wrbufsz; i += wlen) {
615				wlen = MIN(sc->sc_maxbuf, sc->sc_wrbufsz - i);
616
617				intr = intr_disable();
618
619				start = sbinuptime();
620				do {
621					cfi_write(sc, sc->sc_wrofs + i,
622					    CFI_BCS_BUF_PROG_SETUP);
623					if (sbinuptime() > start + sc->sc_max_timeouts[CFI_TIMEOUT_BUFWRITE]) {
624						error = ETIMEDOUT;
625						goto out;
626					}
627					st = cfi_read(sc, sc->sc_wrofs + i);
628				} while (! (st & CFI_INTEL_STATUS_WSMS));
629
630				cfi_write(sc, sc->sc_wrofs + i,
631				    (wlen / sc->sc_width) - 1);
632				switch (sc->sc_width) {
633				case 1:
634					bus_space_write_region_1(sc->sc_tag,
635					    sc->sc_handle, sc->sc_wrofs + i,
636					    ptr.x8 + i, wlen);
637					break;
638				case 2:
639					bus_space_write_region_2(sc->sc_tag,
640					    sc->sc_handle, sc->sc_wrofs + i,
641					    ptr.x16 + i / 2, wlen / 2);
642					break;
643				case 4:
644					bus_space_write_region_4(sc->sc_tag,
645					    sc->sc_handle, sc->sc_wrofs + i,
646					    ptr.x32 + i / 4, wlen / 4);
647					break;
648				}
649
650				cfi_write(sc, sc->sc_wrofs + i,
651				    CFI_BCS_CONFIRM);
652
653				intr_restore(intr);
654
655				error = cfi_wait_ready(sc, sc->sc_wrofs + i,
656				    start, CFI_TIMEOUT_BUFWRITE);
657				if (error != 0)
658					goto out;
659			}
660			goto out;
661		default:
662			/* Fall through to single word case */
663			break;
664		}
665
666	}
667
668	/* Write the block one byte/word at a time. */
669	for (i = 0; i < sc->sc_wrbufsz; i += sc->sc_width) {
670
671		/* Avoid writing unless we are actually changing bits */
672		if (!neederase) {
673			switch (sc->sc_width) {
674			case 1:
675				if(*(ptr.x8 + i) == *(cpyprt.x8 + i))
676					continue;
677				break;
678			case 2:
679				if(*(ptr.x16 + i / 2) == *(cpyprt.x16 + i / 2))
680					continue;
681				break;
682			case 4:
683				if(*(ptr.x32 + i / 4) == *(cpyprt.x32 + i / 4))
684					continue;
685				break;
686			}
687		}
688
689		/*
690		 * Make sure the command to start a write and the
691		 * actual write happens back-to-back without any
692		 * excessive delays.
693		 */
694		intr = intr_disable();
695
696		start = sbinuptime();
697		switch (sc->sc_cmdset) {
698		case CFI_VEND_INTEL_ECS:
699		case CFI_VEND_INTEL_SCS:
700			cfi_write(sc, sc->sc_wrofs + i, CFI_BCS_PROGRAM);
701			break;
702		case CFI_VEND_AMD_SCS:
703		case CFI_VEND_AMD_ECS:
704			cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_PROGRAM);
705			break;
706		}
707		switch (sc->sc_width) {
708		case 1:
709			bus_space_write_1(sc->sc_tag, sc->sc_handle,
710			    sc->sc_wrofs + i, *(ptr.x8 + i));
711			break;
712		case 2:
713			bus_space_write_2(sc->sc_tag, sc->sc_handle,
714			    sc->sc_wrofs + i, *(ptr.x16 + i / 2));
715			break;
716		case 4:
717			bus_space_write_4(sc->sc_tag, sc->sc_handle,
718			    sc->sc_wrofs + i, *(ptr.x32 + i / 4));
719			break;
720		}
721
722		intr_restore(intr);
723
724		error = cfi_wait_ready(sc, sc->sc_wrofs, start,
725		   CFI_TIMEOUT_WRITE);
726		if (error)
727			goto out;
728	}
729
730	/* error is 0. */
731
732 out:
733	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
734
735	/* Relock Intel flash */
736	switch (sc->sc_cmdset) {
737	case CFI_VEND_INTEL_ECS:
738	case CFI_VEND_INTEL_SCS:
739		cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LBS);
740		cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LB);
741		cfi_write(sc, sc->sc_wrofs, CFI_BCS_READ_ARRAY);
742		break;
743	}
744	return (error);
745}
746
747#ifdef CFI_SUPPORT_STRATAFLASH
748/*
749 * Intel StrataFlash Protection Register Support.
750 *
751 * The memory includes a 128-bit Protection Register that can be
752 * used for security.  There are two 64-bit segments; one is programmed
753 * at the factory with a unique 64-bit number which is immutable.
754 * The other segment is left blank for User (OEM) programming.
755 * The User/OEM segment is One Time Programmable (OTP).  It can also
756 * be locked to prevent any further writes by setting bit 0 of the
757 * Protection Lock Register (PLR).  The PLR can written only once.
758 */
759
760static uint16_t
761cfi_get16(struct cfi_softc *sc, int off)
762{
763	uint16_t v = bus_space_read_2(sc->sc_tag, sc->sc_handle, off<<1);
764	return v;
765}
766
#ifdef CFI_ARMEDANDDANGEROUS
/* Store a 16-bit word; 'off' is a word offset (see cfi_get16()). */
static void
cfi_put16(struct cfi_softc *sc, int off, uint16_t v)
{
	bus_space_write_2(sc->sc_tag, sc->sc_handle, off<<1, v);
}
#endif
774
775/*
776 * Read the factory-defined 64-bit segment of the PR.
777 */
778int
779cfi_intel_get_factory_pr(struct cfi_softc *sc, uint64_t *id)
780{
781	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
782		return EOPNOTSUPP;
783	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
784
785	cfi_write(sc, 0, CFI_INTEL_READ_ID);
786	*id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(0)))<<48 |
787	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(1)))<<32 |
788	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(2)))<<16 |
789	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(3)));
790	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
791	return 0;
792}
793
794/*
795 * Read the User/OEM 64-bit segment of the PR.
796 */
797int
798cfi_intel_get_oem_pr(struct cfi_softc *sc, uint64_t *id)
799{
800	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
801		return EOPNOTSUPP;
802	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
803
804	cfi_write(sc, 0, CFI_INTEL_READ_ID);
805	*id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(4)))<<48 |
806	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(5)))<<32 |
807	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(6)))<<16 |
808	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(7)));
809	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
810	return 0;
811}
812
813/*
814 * Write the User/OEM 64-bit segment of the PR.
815 * XXX should allow writing individual words/bytes
816 */
817int
818cfi_intel_set_oem_pr(struct cfi_softc *sc, uint64_t id)
819{
820#ifdef CFI_ARMEDANDDANGEROUS
821	register_t intr;
822	int i, error;
823	sbintime_t start;
824#endif
825
826	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
827		return EOPNOTSUPP;
828	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
829
830#ifdef CFI_ARMEDANDDANGEROUS
831	for (i = 7; i >= 4; i--, id >>= 16) {
832		intr = intr_disable();
833		start = sbinuptime();
834		cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
835		cfi_put16(sc, CFI_INTEL_PR(i), id&0xffff);
836		intr_restore(intr);
837		error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start,
838		    CFI_TIMEOUT_WRITE);
839		if (error)
840			break;
841	}
842	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
843	return error;
844#else
845	device_printf(sc->sc_dev, "%s: OEM PR not set, "
846	    "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
847	return ENXIO;
848#endif
849}
850
851/*
852 * Read the contents of the Protection Lock Register.
853 */
854int
855cfi_intel_get_plr(struct cfi_softc *sc, uint32_t *plr)
856{
857	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
858		return EOPNOTSUPP;
859	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
860
861	cfi_write(sc, 0, CFI_INTEL_READ_ID);
862	*plr = cfi_get16(sc, CFI_INTEL_PLR);
863	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
864	return 0;
865}
866
867/*
868 * Write the Protection Lock Register to lock down the
869 * user-settable segment of the Protection Register.
870 * NOTE: this operation is not reversible.
871 */
872int
873cfi_intel_set_plr(struct cfi_softc *sc)
874{
875#ifdef CFI_ARMEDANDDANGEROUS
876	register_t intr;
877	int error;
878	sbintime_t start;
879#endif
880	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
881		return EOPNOTSUPP;
882	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
883
884#ifdef CFI_ARMEDANDDANGEROUS
885	/* worthy of console msg */
886	device_printf(sc->sc_dev, "set PLR\n");
887	intr = intr_disable();
888	binuptime(&start);
889	cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
890	cfi_put16(sc, CFI_INTEL_PLR, 0xFFFD);
891	intr_restore(intr);
892	error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start,
893	    CFI_TIMEOUT_WRITE);
894	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
895	return error;
896#else
897	device_printf(sc->sc_dev, "%s: PLR not set, "
898	    "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
899	return ENXIO;
900#endif
901}
902#endif /* CFI_SUPPORT_STRATAFLASH */
903