cfi_core.c — changes from revision 193936 (deleted lines) to revision 233553 (added lines)
1/*-
2 * Copyright (c) 2007, Juniper Networks, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the author nor the names of any co-contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
22 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
24 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/dev/cfi/cfi_core.c 193936 2009-06-10 17:41:24Z imp $");
31__FBSDID("$FreeBSD: head/sys/dev/cfi/cfi_core.c 233553 2012-03-27 15:13:12Z jchandra $");
32
33#include "opt_cfi.h"
34
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/bus.h>
38#include <sys/conf.h>
39#include <sys/endian.h>
40#include <sys/kernel.h>
41#include <sys/malloc.h>
42#include <sys/module.h>
43#include <sys/rman.h>
44#include <sys/sysctl.h>
45
46#include <machine/bus.h>
47
48#include <dev/cfi/cfi_reg.h>
49#include <dev/cfi/cfi_var.h>
50
51extern struct cdevsw cfi_cdevsw;
52
53char cfi_driver_name[] = "cfi";
54devclass_t cfi_devclass;
55devclass_t cfi_diskclass;
56
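/*
 * In r233553 the old cfi_read() was renamed cfi_read_raw(): it returns the
 * bus value unmodified, while the new cfi_read() below converts 2- and
 * 4-byte reads from the flash's little-endian layout to host order
 * (le16toh/le32toh), matching the htole16/htole32 conversions added to
 * cfi_write().
 */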
57uint32_t
57cfi_read(struct cfi_softc *sc, u_int ofs)
58cfi_read_raw(struct cfi_softc *sc, u_int ofs)
59{
60 uint32_t val;
61
62 ofs &= ~(sc->sc_width - 1);
63 switch (sc->sc_width) {
64 case 1:
65 val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
66 break;
67 case 2:
68 val = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
69 break;
70 case 4:
71 val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
72 break;
73 default:
74 val = ~0;
75 break;
76 }
77 return (val);
78}
79
80uint32_t
81cfi_read(struct cfi_softc *sc, u_int ofs)
82{
83 uint32_t val;
84 uint16_t sval;
85
86 ofs &= ~(sc->sc_width - 1);
87 switch (sc->sc_width) {
88 case 1:
89 val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
90 break;
91 case 2:
92 sval = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
93 val = le16toh(sval);
94 break;
95 case 4:
96 val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
97 val = le32toh(val);
98 break;
99 default:
100 val = ~0;
101 break;
102 }
103 return (val);
104}
105
106static void
107cfi_write(struct cfi_softc *sc, u_int ofs, u_int val)
108{
109
110 ofs &= ~(sc->sc_width - 1);
111 switch (sc->sc_width) {
112 case 1:
113 bus_space_write_1(sc->sc_tag, sc->sc_handle, ofs, val);
114 break;
115 case 2:
89 bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, val);
116 bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, htole16(val));
117 break;
118 case 4:
92 bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, val);
119 bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, htole32(val));
120 break;
121 }
122}
123
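/*
 * Read one byte from the CFI query table: switch the part into query mode,
 * read the requested offset (scaled by the device width), then return it to
 * normal read-array mode.
 */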
124uint8_t
125cfi_read_qry(struct cfi_softc *sc, u_int ofs)
126{
127 uint8_t val;
128
129 cfi_write(sc, CFI_QRY_CMD_ADDR * sc->sc_width, CFI_QRY_CMD_DATA);
130 val = cfi_read(sc, ofs * sc->sc_width);
131 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
132 return (val);
133}
134
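/*
 * Issue an AMD-style command: write the two-cycle unlock sequence and then
 * the command itself at the given address.
 */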
135static void
136cfi_amd_write(struct cfi_softc *sc, u_int ofs, u_int addr, u_int data)
137{
138
139 cfi_write(sc, ofs + AMD_ADDR_START, CFI_AMD_UNLOCK);
140 cfi_write(sc, ofs + AMD_ADDR_ACK, CFI_AMD_UNLOCK_ACK);
141 cfi_write(sc, ofs + addr, data);
142}
143
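/*
 * Format a byte count as a short human-readable string ("64KB", "8MB", ...).
 * The result lives in a static buffer, so it is only valid until the next
 * call and the function is not reentrant.
 */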
144static char *
145cfi_fmtsize(uint32_t sz)
146{
147 static char buf[8];
148 static const char *sfx[] = { "", "K", "M", "G" };
149 int sfxidx;
150
151 sfxidx = 0;
152 while (sfxidx < 3 && sz > 1023) {
153 sz /= 1024;
154 sfxidx++;
155 }
156
157 sprintf(buf, "%u%sB", sz, sfx[sfxidx]);
158 return (buf);
159}
160
161int
162cfi_probe(device_t dev)
163{
164 char desc[80];
165 struct cfi_softc *sc;
166 char *vend_str;
167 int error;
168 uint16_t iface, vend;
169
170 sc = device_get_softc(dev);
171 sc->sc_dev = dev;
172
173 sc->sc_rid = 0;
174 sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
175 RF_ACTIVE);
176 if (sc->sc_res == NULL)
177 return (ENXIO);
178
179 sc->sc_tag = rman_get_bustag(sc->sc_res);
180 sc->sc_handle = rman_get_bushandle(sc->sc_res);
181
182 if (sc->sc_width == 0) {
183 sc->sc_width = 1;
184 while (sc->sc_width <= 4) {
185 if (cfi_read_qry(sc, CFI_QRY_IDENT) == 'Q')
186 break;
187 sc->sc_width <<= 1;
188 }
189 } else if (cfi_read_qry(sc, CFI_QRY_IDENT) != 'Q') {
190 error = ENXIO;
191 goto out;
192 }
193 if (sc->sc_width > 4) {
194 error = ENXIO;
195 goto out;
196 }
197
198 /* We got a Q. Check if we also have the R and the Y. */
199 if (cfi_read_qry(sc, CFI_QRY_IDENT + 1) != 'R' ||
200 cfi_read_qry(sc, CFI_QRY_IDENT + 2) != 'Y') {
201 error = ENXIO;
202 goto out;
203 }
204
205 /* Get the vendor and command set. */
206 vend = cfi_read_qry(sc, CFI_QRY_VEND) |
207 (cfi_read_qry(sc, CFI_QRY_VEND + 1) << 8);
208
209 sc->sc_cmdset = vend;
210
211 switch (vend) {
212 case CFI_VEND_AMD_ECS:
213 case CFI_VEND_AMD_SCS:
214 vend_str = "AMD/Fujitsu";
215 break;
216 case CFI_VEND_INTEL_ECS:
217 vend_str = "Intel/Sharp";
218 break;
219 case CFI_VEND_INTEL_SCS:
220 vend_str = "Intel";
221 break;
222 case CFI_VEND_MITSUBISHI_ECS:
223 case CFI_VEND_MITSUBISHI_SCS:
224 vend_str = "Mitsubishi";
225 break;
226 default:
227 vend_str = "Unknown vendor";
228 break;
229 }
230
231 /* Get the device size. */
232 sc->sc_size = 1U << cfi_read_qry(sc, CFI_QRY_SIZE);
233
234 /* Sanity-check the I/F */
235 iface = cfi_read_qry(sc, CFI_QRY_IFACE) |
236 (cfi_read_qry(sc, CFI_QRY_IFACE + 1) << 8);
237
238 /*
239 * Adding 1 to iface will give us a bit-wise "switch"
240 * that allows us to test for the interface width by
241 * testing a single bit.
242 */
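	/*
	 * Illustrative arithmetic (values chosen for the example, not read
	 * from a device): an iface code of 2 becomes 3 (binary 011) after
	 * the increment, so a sc_width of 1 or 2 passes the
	 * (iface & sc_width) test below, while a sc_width of 4 does not.
	 */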
243 iface++;
244
245 error = (iface & sc->sc_width) ? 0 : EINVAL;
246 if (error)
247 goto out;
248
249 snprintf(desc, sizeof(desc), "%s - %s", vend_str,
250 cfi_fmtsize(sc->sc_size));
251 device_set_desc_copy(dev, desc);
252
253 out:
254 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
255 return (error);
256}
257
258int
259cfi_attach(device_t dev)
260{
261 struct cfi_softc *sc;
262 u_int blksz, blocks;
263 u_int r, u;
264
265 sc = device_get_softc(dev);
266 sc->sc_dev = dev;
267
268 sc->sc_rid = 0;
269 sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
270 RF_ACTIVE);
271 if (sc->sc_res == NULL)
272 return (ENXIO);
273
274 sc->sc_tag = rman_get_bustag(sc->sc_res);
275 sc->sc_handle = rman_get_bushandle(sc->sc_res);
276
277 /* Get time-out values for erase and write. */
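	/*
	 * The CFI query area encodes these as powers of two: a typical
	 * time-out of 2^n and a maximum expressed as a further power-of-two
	 * multiple of the typical value, hence the shift-and-multiply below.
	 */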
278 sc->sc_write_timeout = 1 << cfi_read_qry(sc, CFI_QRY_TTO_WRITE);
279 sc->sc_erase_timeout = 1 << cfi_read_qry(sc, CFI_QRY_TTO_ERASE);
280 sc->sc_write_timeout *= 1 << cfi_read_qry(sc, CFI_QRY_MTO_WRITE);
281 sc->sc_erase_timeout *= 1 << cfi_read_qry(sc, CFI_QRY_MTO_ERASE);
282
283 /* Get erase regions. */
284 sc->sc_regions = cfi_read_qry(sc, CFI_QRY_NREGIONS);
285 sc->sc_region = malloc(sc->sc_regions * sizeof(struct cfi_region),
286 M_TEMP, M_WAITOK | M_ZERO);
287 for (r = 0; r < sc->sc_regions; r++) {
288 blocks = cfi_read_qry(sc, CFI_QRY_REGION(r)) |
289 (cfi_read_qry(sc, CFI_QRY_REGION(r) + 1) << 8);
290 sc->sc_region[r].r_blocks = blocks + 1;
291
292 blksz = cfi_read_qry(sc, CFI_QRY_REGION(r) + 2) |
293 (cfi_read_qry(sc, CFI_QRY_REGION(r) + 3) << 8);
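		/*
		 * The CFI erase block size field is expressed in units of
		 * 256 bytes, with a value of zero meaning 128-byte blocks.
		 */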
294 sc->sc_region[r].r_blksz = (blksz == 0) ? 128 :
295 blksz * 256;
296 }
297
298 /* Reset the device to a default state. */
299 cfi_write(sc, 0, CFI_BCS_CLEAR_STATUS);
300
301 if (bootverbose) {
302 device_printf(dev, "[");
303 for (r = 0; r < sc->sc_regions; r++) {
304 printf("%ux%s%s", sc->sc_region[r].r_blocks,
305 cfi_fmtsize(sc->sc_region[r].r_blksz),
306 (r == sc->sc_regions - 1) ? "]\n" : ",");
307 }
308 }
309
310 u = device_get_unit(dev);
311 sc->sc_nod = make_dev(&cfi_cdevsw, u, UID_ROOT, GID_WHEEL, 0600,
312 "%s%u", cfi_driver_name, u);
313 sc->sc_nod->si_drv1 = sc;
314
315 device_add_child(dev, "cfid", -1);
316 bus_generic_attach(dev);
317
318 return (0);
319}
320
321int
322cfi_detach(device_t dev)
323{
324 struct cfi_softc *sc;
325
326 sc = device_get_softc(dev);
327
328 destroy_dev(sc->sc_nod);
329 free(sc->sc_region, M_TEMP);
330 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
331 return (0);
332}
333
334static int
335cfi_wait_ready(struct cfi_softc *sc, u_int ofs, u_int timeout)
336{
337 int done, error;
338 uint32_t st0 = 0, st = 0;
339
340 done = 0;
341 error = 0;
342 timeout *= 10;
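	/*
	 * Each unit of the caller's time-out is stretched into ten polls of
	 * 100 microseconds each, i.e. roughly one millisecond of wall-clock
	 * time per unit.
	 */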
343 while (!done && !error && timeout) {
344 DELAY(100);
345 timeout--;
346
347 switch (sc->sc_cmdset) {
348 case CFI_VEND_INTEL_ECS:
349 case CFI_VEND_INTEL_SCS:
350 st = cfi_read(sc, ofs);
351 done = (st & CFI_INTEL_STATUS_WSMS);
352 if (done) {
353 /* NB: bit 0 is reserved */
354 st &= ~(CFI_INTEL_XSTATUS_RSVD |
355 CFI_INTEL_STATUS_WSMS |
356 CFI_INTEL_STATUS_RSVD);
357 if (st & CFI_INTEL_STATUS_DPS)
358 error = EPERM;
359 else if (st & CFI_INTEL_STATUS_PSLBS)
360 error = EIO;
361 else if (st & CFI_INTEL_STATUS_ECLBS)
362 error = ENXIO;
363 else if (st)
364 error = EACCES;
365 }
366 break;
367 case CFI_VEND_AMD_SCS:
368 case CFI_VEND_AMD_ECS:
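			/*
			 * AMD-style parts report status via the DQ6 toggle
			 * bit: bit 6 toggles between successive reads while
			 * an erase or program operation is in progress and
			 * stops toggling when it completes, so two reads
			 * returning the same bit 6 value mean the part is
			 * ready.
			 */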
369 st0 = cfi_read(sc, ofs);
370 st = cfi_read(sc, ofs);
371 done = ((st & 0x40) == (st0 & 0x40)) ? 1 : 0;
372 break;
373 }
374 }
375 if (!done && !error)
376 error = ETIMEDOUT;
377 if (error)
378 printf("\nerror=%d (st 0x%x st0 0x%x)\n", error, st, st0);
379 return (error);
380}
381
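/*
 * Rewrite one erase block: erase the block at sc_wrofs using the
 * vendor-specific command sequence, then program it back, one device word at
 * a time, from the staging buffer sc_wrbuf.
 */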
382int
383cfi_write_block(struct cfi_softc *sc)
384{
385 union {
386 uint8_t *x8;
387 uint16_t *x16;
388 uint32_t *x32;
389 } ptr;
390 register_t intr;
391 int error, i;
392
393 /* Erase the block. */
394 switch (sc->sc_cmdset) {
395 case CFI_VEND_INTEL_ECS:
396 case CFI_VEND_INTEL_SCS:
397 cfi_write(sc, sc->sc_wrofs, CFI_BCS_BLOCK_ERASE);
398 cfi_write(sc, sc->sc_wrofs, CFI_BCS_CONFIRM);
399 break;
400 case CFI_VEND_AMD_SCS:
401 case CFI_VEND_AMD_ECS:
402 cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START,
403 CFI_AMD_ERASE_SECTOR);
404 cfi_amd_write(sc, sc->sc_wrofs, 0, CFI_AMD_BLOCK_ERASE);
405 break;
406 default:
407 /* Better safe than sorry... */
408 return (ENODEV);
409 }
410 error = cfi_wait_ready(sc, sc->sc_wrofs, sc->sc_erase_timeout);
411 if (error)
412 goto out;
413
414 /* Write the block. */
415 ptr.x8 = sc->sc_wrbuf;
416 for (i = 0; i < sc->sc_wrbufsz; i += sc->sc_width) {
417
418 /*
419 * Make sure the command to start a write and the
 420 * actual write happen back-to-back without any
421 * excessive delays.
422 */
423 intr = intr_disable();
424
425 switch (sc->sc_cmdset) {
426 case CFI_VEND_INTEL_ECS:
427 case CFI_VEND_INTEL_SCS:
428 cfi_write(sc, sc->sc_wrofs + i, CFI_BCS_PROGRAM);
429 break;
430 case CFI_VEND_AMD_SCS:
431 case CFI_VEND_AMD_ECS:
432 cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_PROGRAM);
433 break;
434 }
435 switch (sc->sc_width) {
436 case 1:
437 bus_space_write_1(sc->sc_tag, sc->sc_handle,
438 sc->sc_wrofs + i, *(ptr.x8)++);
439 break;
440 case 2:
441 bus_space_write_2(sc->sc_tag, sc->sc_handle,
442 sc->sc_wrofs + i, *(ptr.x16)++);
443 break;
444 case 4:
445 bus_space_write_4(sc->sc_tag, sc->sc_handle,
446 sc->sc_wrofs + i, *(ptr.x32)++);
447 break;
448 }
449
450 intr_restore(intr);
451
452 error = cfi_wait_ready(sc, sc->sc_wrofs, sc->sc_write_timeout);
453 if (error)
454 goto out;
455 }
456
457 /* error is 0. */
458
459 out:
460 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
461 return (error);
462}
463
464#ifdef CFI_SUPPORT_STRATAFLASH
465/*
466 * Intel StrataFlash Protection Register Support.
467 *
468 * The memory includes a 128-bit Protection Register that can be
469 * used for security. There are two 64-bit segments; one is programmed
470 * at the factory with a unique 64-bit number which is immutable.
471 * The other segment is left blank for User (OEM) programming.
472 * The User/OEM segment is One Time Programmable (OTP). It can also
473 * be locked to prevent any further writes by setting bit 0 of the
 474 * Protection Lock Register (PLR). The PLR can be written only once.
475 */
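/*
 * Hypothetical usage sketch (not part of this driver): a caller that holds
 * the softc could read the two 64-bit segments with the accessors defined
 * below, e.g.
 *
 *	uint64_t factory, oem;
 *
 *	if (cfi_intel_get_factory_pr(sc, &factory) == 0 &&
 *	    cfi_intel_get_oem_pr(sc, &oem) == 0)
 *		printf("PR: factory 0x%016jx oem 0x%016jx\n",
 *		    (uintmax_t)factory, (uintmax_t)oem);
 */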
476
477static uint16_t
478cfi_get16(struct cfi_softc *sc, int off)
479{
480 uint16_t v = bus_space_read_2(sc->sc_tag, sc->sc_handle, off<<1);
481 return v;
482}
483
484#ifdef CFI_ARMEDANDDANGEROUS
485static void
486cfi_put16(struct cfi_softc *sc, int off, uint16_t v)
487{
488 bus_space_write_2(sc->sc_tag, sc->sc_handle, off<<1, v);
489}
490#endif
491
492/*
493 * Read the factory-defined 64-bit segment of the PR.
494 */
495int
496cfi_intel_get_factory_pr(struct cfi_softc *sc, uint64_t *id)
497{
498 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
499 return EOPNOTSUPP;
500 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
501
502 cfi_write(sc, 0, CFI_INTEL_READ_ID);
503 *id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(0)))<<48 |
504 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(1)))<<32 |
505 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(2)))<<16 |
506 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(3)));
507 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
508 return 0;
509}
510
511/*
512 * Read the User/OEM 64-bit segment of the PR.
513 */
514int
515cfi_intel_get_oem_pr(struct cfi_softc *sc, uint64_t *id)
516{
517 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
518 return EOPNOTSUPP;
519 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
520
521 cfi_write(sc, 0, CFI_INTEL_READ_ID);
522 *id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(4)))<<48 |
523 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(5)))<<32 |
524 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(6)))<<16 |
525 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(7)));
526 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
527 return 0;
528}
529
530/*
531 * Write the User/OEM 64-bit segment of the PR.
532 * XXX should allow writing individual words/bytes
533 */
534int
535cfi_intel_set_oem_pr(struct cfi_softc *sc, uint64_t id)
536{
537#ifdef CFI_ARMEDANDDANGEROUS
538 register_t intr;
539 int i, error;
540#endif
541
542 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
543 return EOPNOTSUPP;
544 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
545
546#ifdef CFI_ARMEDANDDANGEROUS
547 for (i = 7; i >= 4; i--, id >>= 16) {
548 intr = intr_disable();
549 cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
550 cfi_put16(sc, CFI_INTEL_PR(i), id&0xffff);
551 intr_restore(intr);
552 error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS,
553 sc->sc_write_timeout);
554 if (error)
555 break;
556 }
557 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
558 return error;
559#else
560 device_printf(sc->sc_dev, "%s: OEM PR not set, "
561 "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
562 return ENXIO;
563#endif
564}
565
566/*
567 * Read the contents of the Protection Lock Register.
568 */
569int
570cfi_intel_get_plr(struct cfi_softc *sc, uint32_t *plr)
571{
572 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
573 return EOPNOTSUPP;
574 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
575
576 cfi_write(sc, 0, CFI_INTEL_READ_ID);
577 *plr = cfi_get16(sc, CFI_INTEL_PLR);
578 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
579 return 0;
580}
581
582/*
583 * Write the Protection Lock Register to lock down the
584 * user-settable segment of the Protection Register.
585 * NOTE: this operation is not reversible.
586 */
587int
588cfi_intel_set_plr(struct cfi_softc *sc)
589{
590#ifdef CFI_ARMEDANDDANGEROUS
591 register_t intr;
592 int error;
593#endif
594 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
595 return EOPNOTSUPP;
596 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
597
598#ifdef CFI_ARMEDANDDANGEROUS
599 /* worthy of console msg */
600 device_printf(sc->sc_dev, "set PLR\n");
601 intr = intr_disable();
602 cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
603 cfi_put16(sc, CFI_INTEL_PLR, 0xFFFD);
604 intr_restore(intr);
605 error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, sc->sc_write_timeout);
606 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
607 return error;
608#else
609 device_printf(sc->sc_dev, "%s: PLR not set, "
610 "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
611 return ENXIO;
612#endif
613}
614#endif /* CFI_SUPPORT_STRATAFLASH */