cfi_core.c (251118) → cfi_core.c (255207)
1/*-
2 * Copyright (c) 2007, Juniper Networks, Inc.
1/*-
2 * Copyright (c) 2007, Juniper Networks, Inc.
3 * Copyright (c) 2012-2013, SRI International
3 * All rights reserved.
4 *
4 * All rights reserved.
5 *
6 * Portions of this software were developed by SRI International and the
7 * University of Cambridge Computer Laboratory under DARPA/AFRL contract
8 * (FA8750-10-C-0237) ("CTSRD"), as part of the DARPA CRASH research
9 * programme.
10 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.

--- 10 unchanged lines hidden ---

23 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
24 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30#include <sys/cdefs.h>
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.

--- 10 unchanged lines hidden ---

29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
36#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/dev/cfi/cfi_core.c 251118 2013-05-30 01:22:50Z brooks $");
37__FBSDID("$FreeBSD: head/sys/dev/cfi/cfi_core.c 255207 2013-09-04 17:19:21Z brooks $");
32
33#include "opt_cfi.h"
34
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/bus.h>
38#include <sys/conf.h>
39#include <sys/endian.h>

--- 4 unchanged lines hidden ---

44#include <sys/rman.h>
45#include <sys/sysctl.h>
46
47#include <machine/bus.h>
48
49#include <dev/cfi/cfi_reg.h>
50#include <dev/cfi/cfi_var.h>
51
38
39#include "opt_cfi.h"
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/bus.h>
44#include <sys/conf.h>
45#include <sys/endian.h>

--- 4 unchanged lines hidden ---

50#include <sys/rman.h>
51#include <sys/sysctl.h>
52
53#include <machine/bus.h>
54
55#include <dev/cfi/cfi_reg.h>
56#include <dev/cfi/cfi_var.h>
57
58static void cfi_add_sysctls(struct cfi_softc *);
59
52extern struct cdevsw cfi_cdevsw;
53
54char cfi_driver_name[] = "cfi";
55devclass_t cfi_devclass;
56devclass_t cfi_diskclass;
57
58uint32_t
59cfi_read_raw(struct cfi_softc *sc, u_int ofs)

--- 197 unchanged lines hidden ---

257}
258
259int
260cfi_attach(device_t dev)
261{
262 struct cfi_softc *sc;
263 u_int blksz, blocks;
264 u_int r, u;
60extern struct cdevsw cfi_cdevsw;
61
62char cfi_driver_name[] = "cfi";
63devclass_t cfi_devclass;
64devclass_t cfi_diskclass;
65
66uint32_t
67cfi_read_raw(struct cfi_softc *sc, u_int ofs)

--- 197 unchanged lines hidden ---

265}
266
267int
268cfi_attach(device_t dev)
269{
270 struct cfi_softc *sc;
271 u_int blksz, blocks;
272 u_int r, u;
273 uint64_t mtoexp, ttoexp;
265#ifdef CFI_SUPPORT_STRATAFLASH
266 uint64_t ppr;
267 char name[KENV_MNAMELEN], value[32];
268#endif
269
270 sc = device_get_softc(dev);
271 sc->sc_dev = dev;
272
273 sc->sc_rid = 0;
274 sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
275 RF_ACTIVE);
276 if (sc->sc_res == NULL)
277 return (ENXIO);
278
279 sc->sc_tag = rman_get_bustag(sc->sc_res);
280 sc->sc_handle = rman_get_bushandle(sc->sc_res);
281
274#ifdef CFI_SUPPORT_STRATAFLASH
275 uint64_t ppr;
276 char name[KENV_MNAMELEN], value[32];
277#endif
278
279 sc = device_get_softc(dev);
280 sc->sc_dev = dev;
281
282 sc->sc_rid = 0;
283 sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
284 RF_ACTIVE);
285 if (sc->sc_res == NULL)
286 return (ENXIO);
287
288 sc->sc_tag = rman_get_bustag(sc->sc_res);
289 sc->sc_handle = rman_get_bushandle(sc->sc_res);
290
282 /* Get time-out values for erase and write. */
283 sc->sc_write_timeout = 1 << cfi_read_qry(sc, CFI_QRY_TTO_WRITE);
284 sc->sc_erase_timeout = 1 << cfi_read_qry(sc, CFI_QRY_TTO_ERASE);
285 sc->sc_write_timeout *= 1 << cfi_read_qry(sc, CFI_QRY_MTO_WRITE);
286 sc->sc_erase_timeout *= 1 << cfi_read_qry(sc, CFI_QRY_MTO_ERASE);
291 /* Get time-out values for erase, write, and buffer write. */
292 ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_ERASE);
293 mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_ERASE);
294 if (ttoexp == 0) {
295 device_printf(dev, "erase timeout == 0, using 2^16ms\n");
296 ttoexp = 16;
297 }
298 if (ttoexp > 41) {
299 device_printf(dev, "insane timeout: 2^%jdms\n", ttoexp);
300 return (EINVAL);
301 }
302 if (mtoexp == 0) {
303 device_printf(dev, "max erase timeout == 0, using 2^%jdms\n",
304 ttoexp + 4);
305 mtoexp = 4;
306 }
307 if (ttoexp + mtoexp > 41) {
308 device_printf(dev, "insane max erase timeout: 2^%jd\n",
309 ttoexp + mtoexp);
310 return (EINVAL);
311 }
312 sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] = SBT_1MS * (1ULL << ttoexp);
313 sc->sc_max_timeouts[CFI_TIMEOUT_ERASE] =
314 sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] * (1ULL << mtoexp);
287
315
316 ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_WRITE);
317 mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_WRITE);
318 if (ttoexp == 0) {
319 device_printf(dev, "write timeout == 0, using 2^18ns\n");
320 ttoexp = 18;
321 }
322 if (ttoexp > 51) {
323 device_printf(dev, "insane write timeout: 2^%jdus\n", ttoexp);
324 return (EINVAL);
325 }
326 if (mtoexp == 0) {
327 device_printf(dev, "max write timeout == 0, using 2^%jdms\n",
328 ttoexp + 4);
329 mtoexp = 4;
330 }
331 if (ttoexp + mtoexp > 51) {
332 device_printf(dev, "insane max write timeout: 2^%jdus\n",
333 ttoexp + mtoexp);
334 return (EINVAL);
335 }
336 sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] = SBT_1US * (1ULL << ttoexp);
337 sc->sc_max_timeouts[CFI_TIMEOUT_WRITE] =
338 sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] * (1ULL << mtoexp);
339
340 ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_BUFWRITE);
341 mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_BUFWRITE);
342 /* Don't check for 0, it means not-supported. */
343 if (ttoexp > 51) {
344 device_printf(dev, "insane write timeout: 2^%jdus\n", ttoexp);
345 return (EINVAL);
346 }
347 if (ttoexp + mtoexp > 51) {
348 device_printf(dev, "insane max write timeout: 2^%jdus\n",
349 ttoexp + mtoexp);
350 return (EINVAL);
351 }
352 sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] =
353 SBT_1US * (1ULL << cfi_read_qry(sc, CFI_QRY_TTO_BUFWRITE));
354 sc->sc_max_timeouts[CFI_TIMEOUT_BUFWRITE] =
355 sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] *
356 (1ULL << cfi_read_qry(sc, CFI_QRY_MTO_BUFWRITE));
357
358 /* Get the maximum size of a multibyte program */
359 if (sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] != 0)
360 sc->sc_maxbuf = 1 << (cfi_read_qry(sc, CFI_QRY_MAXBUF) |
361 cfi_read_qry(sc, CFI_QRY_MAXBUF) << 8);
362 else
363 sc->sc_maxbuf = 0;
364
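
The added block above derives the driver's timeout budgets from the CFI query table: the typical value is 2^ttoexp time units (SBT_1MS for erase, SBT_1US for write and buffered write) and the maximum is the typical value scaled by a further 2^mtoexp. A minimal standalone sketch of that arithmetic follows; the exponent values are hypothetical examples, not anything read from a device, and the sketch is not part of the diff.

	/*
	 * Illustrative only: how the QRY timeout exponents scale into
	 * typical and maximum budgets (erase timeouts are in ms).
	 */
	#include <stdio.h>
	#include <stdint.h>

	int
	main(void)
	{
		uint64_t ttoexp = 10;	/* hypothetical: typical erase = 2^10 ms */
		uint64_t mtoexp = 4;	/* hypothetical: max = typical * 2^4 */
		uint64_t typical_ms = 1ULL << ttoexp;			/* 1024 ms */
		uint64_t max_ms = typical_ms * (1ULL << mtoexp);	/* 16384 ms */

		printf("typical erase timeout %ju ms, max %ju ms\n",
		    (uintmax_t)typical_ms, (uintmax_t)max_ms);
		return (0);
	}
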
288 /* Get erase regions. */
289 sc->sc_regions = cfi_read_qry(sc, CFI_QRY_NREGIONS);
290 sc->sc_region = malloc(sc->sc_regions * sizeof(struct cfi_region),
291 M_TEMP, M_WAITOK | M_ZERO);
292 for (r = 0; r < sc->sc_regions; r++) {
293 blocks = cfi_read_qry(sc, CFI_QRY_REGION(r)) |
294 (cfi_read_qry(sc, CFI_QRY_REGION(r) + 1) << 8);
295 sc->sc_region[r].r_blocks = blocks + 1;

--- 16 unchanged lines hidden ---

312 }
313 }
314
315 u = device_get_unit(dev);
316 sc->sc_nod = make_dev(&cfi_cdevsw, u, UID_ROOT, GID_WHEEL, 0600,
317 "%s%u", cfi_driver_name, u);
318 sc->sc_nod->si_drv1 = sc;
319
365 /* Get erase regions. */
366 sc->sc_regions = cfi_read_qry(sc, CFI_QRY_NREGIONS);
367 sc->sc_region = malloc(sc->sc_regions * sizeof(struct cfi_region),
368 M_TEMP, M_WAITOK | M_ZERO);
369 for (r = 0; r < sc->sc_regions; r++) {
370 blocks = cfi_read_qry(sc, CFI_QRY_REGION(r)) |
371 (cfi_read_qry(sc, CFI_QRY_REGION(r) + 1) << 8);
372 sc->sc_region[r].r_blocks = blocks + 1;

--- 16 unchanged lines hidden ---

389 }
390 }
391
392 u = device_get_unit(dev);
393 sc->sc_nod = make_dev(&cfi_cdevsw, u, UID_ROOT, GID_WHEEL, 0600,
394 "%s%u", cfi_driver_name, u);
395 sc->sc_nod->si_drv1 = sc;
396
397 cfi_add_sysctls(sc);
398
320#ifdef CFI_SUPPORT_STRATAFLASH
321 /*
322 * Store the Intel factory PPR in the environment. In some
323 * cases it is the most unique ID on a board.
324 */
325 if (cfi_intel_get_factory_pr(sc, &ppr) == 0) {
326 if (snprintf(name, sizeof(name), "%s.factory_ppr",
327 device_get_nameunit(dev)) < (sizeof(name) - 1) &&

--- 4 unchanged lines hidden ---

332#endif
333
334 device_add_child(dev, "cfid", -1);
335 bus_generic_attach(dev);
336
337 return (0);
338}
339
399#ifdef CFI_SUPPORT_STRATAFLASH
400 /*
401 * Store the Intel factory PPR in the environment. In some
402 * cases it is the most unique ID on a board.
403 */
404 if (cfi_intel_get_factory_pr(sc, &ppr) == 0) {
405 if (snprintf(name, sizeof(name), "%s.factory_ppr",
406 device_get_nameunit(dev)) < (sizeof(name) - 1) &&

--- 4 unchanged lines hidden ---

411#endif
412
413 device_add_child(dev, "cfid", -1);
414 bus_generic_attach(dev);
415
416 return (0);
417}
418
419static void
420cfi_add_sysctls(struct cfi_softc *sc)
421{
422 struct sysctl_ctx_list *ctx;
423 struct sysctl_oid_list *children;
424
425 ctx = device_get_sysctl_ctx(sc->sc_dev);
426 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));
427
428 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
429 "typical_erase_timout_count",
430 CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_ERASE],
431 0, "Number of times the typical erase timeout was exceeded");
432 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
433 "max_erase_timout_count",
434 CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_ERASE], 0,
435 "Number of times the maximum erase timeout was exceeded");
436 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
437 "typical_write_timout_count",
438 CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_WRITE], 0,
439 "Number of times the typical write timeout was exceeded");
440 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
441 "max_write_timout_count",
442 CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_WRITE], 0,
443 "Number of times the maximum write timeout was exceeded");
444 if (sc->sc_maxbuf > 0) {
445 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
446 "typical_bufwrite_timout_count",
447 CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_BUFWRITE], 0,
448 "Number of times the typical buffered write timeout was "
449 "exceeded");
450 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
451 "max_bufwrite_timout_count",
452 CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_BUFWRITE], 0,
453 "Number of times the maximum buffered write timeout was "
454 "exceeded");
455 }
456}
457
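
The counters registered by cfi_add_sysctls() hang off the device's sysctl tree. A minimal userland sketch for reading one of them is shown below; it is not part of the diff, it assumes unit 0 and the usual dev.cfi.<unit> node layout, and the counter name keeps the source's "timout" spelling.

	#include <sys/types.h>
	#include <sys/sysctl.h>
	#include <stdio.h>

	int
	main(void)
	{
		unsigned int count;
		size_t len = sizeof(count);

		/* Node path is an assumption; adjust the unit as needed. */
		if (sysctlbyname("dev.cfi.0.typical_erase_timout_count",
		    &count, &len, NULL, 0) == 0)
			printf("typical erase timeout exceeded %u times\n",
			    count);
		return (0);
	}
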
340int
341cfi_detach(device_t dev)
342{
343 struct cfi_softc *sc;
344
345 sc = device_get_softc(dev);
346
347 destroy_dev(sc->sc_nod);
348 free(sc->sc_region, M_TEMP);
349 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
350 return (0);
351}
352
353static int
458int
459cfi_detach(device_t dev)
460{
461 struct cfi_softc *sc;
462
463 sc = device_get_softc(dev);
464
465 destroy_dev(sc->sc_nod);
466 free(sc->sc_region, M_TEMP);
467 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
468 return (0);
469}
470
471static int
354cfi_wait_ready(struct cfi_softc *sc, u_int ofs, u_int timeout)
472cfi_wait_ready(struct cfi_softc *sc, u_int ofs, sbintime_t start,
473 enum cfi_wait_cmd cmd)
355{
474{
356 int done, error;
475 int done, error, tto_exceeded;
357 uint32_t st0 = 0, st = 0;
476 uint32_t st0 = 0, st = 0;
477 sbintime_t now;
358
359 done = 0;
360 error = 0;
478
479 done = 0;
480 error = 0;
361 timeout *= 10;
362 while (!done && !error && timeout) {
363 DELAY(100);
364 timeout--;
481 tto_exceeded = 0;
482 while (!done && !error) {
483 /*
484 * Save time before we start so we always do one check
485 * after the timeout has expired.
486 */
487 now = sbinuptime();
365
366 switch (sc->sc_cmdset) {
367 case CFI_VEND_INTEL_ECS:
368 case CFI_VEND_INTEL_SCS:
369 st = cfi_read(sc, ofs);
370 done = (st & CFI_INTEL_STATUS_WSMS);
371 if (done) {
372 /* NB: bit 0 is reserved */

--- 12 unchanged lines hidden ---

385 break;
386 case CFI_VEND_AMD_SCS:
387 case CFI_VEND_AMD_ECS:
388 st0 = cfi_read(sc, ofs);
389 st = cfi_read(sc, ofs);
390 done = ((st & 0x40) == (st0 & 0x40)) ? 1 : 0;
391 break;
392 }
488
489 switch (sc->sc_cmdset) {
490 case CFI_VEND_INTEL_ECS:
491 case CFI_VEND_INTEL_SCS:
492 st = cfi_read(sc, ofs);
493 done = (st & CFI_INTEL_STATUS_WSMS);
494 if (done) {
495 /* NB: bit 0 is reserved */

--- 12 unchanged lines hidden ---

508 break;
509 case CFI_VEND_AMD_SCS:
510 case CFI_VEND_AMD_ECS:
511 st0 = cfi_read(sc, ofs);
512 st = cfi_read(sc, ofs);
513 done = ((st & 0x40) == (st0 & 0x40)) ? 1 : 0;
514 break;
515 }
516
517 if (tto_exceeded ||
518 now > start + sc->sc_typical_timeouts[cmd]) {
519 if (!tto_exceeded) {
520 tto_exceeded = 1;
521 sc->sc_tto_counts[cmd]++;
522#ifdef CFI_DEBUG_TIMEOUT
523 device_printf(sc->sc_dev,
524 "typical timeout exceeded (cmd %d)", cmd);
525#endif
526 }
527 if (now > start + sc->sc_max_timeouts[cmd]) {
528 sc->sc_mto_counts[cmd]++;
529#ifdef CFI_DEBUG_TIMEOUT
530 device_printf(sc->sc_dev,
531 "max timeout exceeded (cmd %d)", cmd);
532#endif
533 }
534 }
393 }
394 if (!done && !error)
395 error = ETIMEDOUT;
396 if (error)
397 printf("\nerror=%d (st 0x%x st0 0x%x)\n", error, st, st0);
398 return (error);
399}
400
401int
402cfi_write_block(struct cfi_softc *sc)
403{
404 union {
405 uint8_t *x8;
406 uint16_t *x16;
407 uint32_t *x32;
535 }
536 if (!done && !error)
537 error = ETIMEDOUT;
538 if (error)
539 printf("\nerror=%d (st 0x%x st0 0x%x)\n", error, st, st0);
540 return (error);
541}
542
543int
544cfi_write_block(struct cfi_softc *sc)
545{
546 union {
547 uint8_t *x8;
548 uint16_t *x16;
549 uint32_t *x32;
408 } ptr;
550 } ptr, cpyprt;
409 register_t intr;
551 register_t intr;
410 int error, i;
552 int error, i, neederase = 0;
553 uint32_t st;
554 u_int wlen;
555 sbintime_t start;
411
412 /* Intel flash must be unlocked before modification */
413 switch (sc->sc_cmdset) {
414 case CFI_VEND_INTEL_ECS:
415 case CFI_VEND_INTEL_SCS:
416 cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LBS);
417 cfi_write(sc, sc->sc_wrofs, CFI_INTEL_UB);
418 cfi_write(sc, sc->sc_wrofs, CFI_BCS_READ_ARRAY);
419 break;
420 }
421
556
557 /* Intel flash must be unlocked before modification */
558 switch (sc->sc_cmdset) {
559 case CFI_VEND_INTEL_ECS:
560 case CFI_VEND_INTEL_SCS:
561 cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LBS);
562 cfi_write(sc, sc->sc_wrofs, CFI_INTEL_UB);
563 cfi_write(sc, sc->sc_wrofs, CFI_BCS_READ_ARRAY);
564 break;
565 }
566
422 /* Erase the block. */
423 switch (sc->sc_cmdset) {
424 case CFI_VEND_INTEL_ECS:
425 case CFI_VEND_INTEL_SCS:
426 cfi_write(sc, sc->sc_wrofs, CFI_BCS_BLOCK_ERASE);
427 cfi_write(sc, sc->sc_wrofs, CFI_BCS_CONFIRM);
428 break;
429 case CFI_VEND_AMD_SCS:
430 case CFI_VEND_AMD_ECS:
431 cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START,
432 CFI_AMD_ERASE_SECTOR);
433 cfi_amd_write(sc, sc->sc_wrofs, 0, CFI_AMD_BLOCK_ERASE);
434 break;
435 default:
436 /* Better safe than sorry... */
437 return (ENODEV);
438 }
439 error = cfi_wait_ready(sc, sc->sc_wrofs, sc->sc_erase_timeout);
440 if (error)
441 goto out;
567 /* Check if an erase is required. */
568 for (i = 0; i < sc->sc_wrbufsz; i++)
569 if ((sc->sc_wrbuf[i] & sc->sc_wrbufcpy[i]) != sc->sc_wrbuf[i]) {
570 neederase = 1;
571 break;
572 }
442
573
443 /* Write the block. */
574 if (neederase) {
575 intr = intr_disable();
576 start = sbinuptime();
577 /* Erase the block. */
578 switch (sc->sc_cmdset) {
579 case CFI_VEND_INTEL_ECS:
580 case CFI_VEND_INTEL_SCS:
581 cfi_write(sc, sc->sc_wrofs, CFI_BCS_BLOCK_ERASE);
582 cfi_write(sc, sc->sc_wrofs, CFI_BCS_CONFIRM);
583 break;
584 case CFI_VEND_AMD_SCS:
585 case CFI_VEND_AMD_ECS:
586 cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START,
587 CFI_AMD_ERASE_SECTOR);
588 cfi_amd_write(sc, sc->sc_wrofs, 0, CFI_AMD_BLOCK_ERASE);
589 break;
590 default:
591 /* Better safe than sorry... */
592 intr_restore(intr);
593 return (ENODEV);
594 }
595 intr_restore(intr);
596 error = cfi_wait_ready(sc, sc->sc_wrofs, start,
597 CFI_TIMEOUT_ERASE);
598 if (error)
599 goto out;
600 } else
601 error = 0;
602
603 /* Write the block using a multibyte write if supported. */
444 ptr.x8 = sc->sc_wrbuf;
604 ptr.x8 = sc->sc_wrbuf;
605 cpyprt.x8 = sc->sc_wrbufcpy;
606 if (sc->sc_maxbuf > sc->sc_width) {
607 switch (sc->sc_cmdset) {
608 case CFI_VEND_INTEL_ECS:
609 case CFI_VEND_INTEL_SCS:
610 for (i = 0; i < sc->sc_wrbufsz; i += wlen) {
611 wlen = MIN(sc->sc_maxbuf, sc->sc_wrbufsz - i);
612
613 intr = intr_disable();
614
615 start = sbinuptime();
616 do {
617 cfi_write(sc, sc->sc_wrofs + i,
618 CFI_BCS_BUF_PROG_SETUP);
619 if (sbinuptime() > start + sc->sc_max_timeouts[CFI_TIMEOUT_BUFWRITE]) {
620 error = ETIMEDOUT;
621 goto out;
622 }
623 st = cfi_read(sc, sc->sc_wrofs + i);
624 } while (! (st & CFI_INTEL_STATUS_WSMS));
625
626 cfi_write(sc, sc->sc_wrofs + i,
627 (wlen / sc->sc_width) - 1);
628 switch (sc->sc_width) {
629 case 1:
630 bus_space_write_region_1(sc->sc_tag,
631 sc->sc_handle, sc->sc_wrofs + i,
632 ptr.x8 + i, wlen);
633 break;
634 case 2:
635 bus_space_write_region_2(sc->sc_tag,
636 sc->sc_handle, sc->sc_wrofs + i,
637 ptr.x16 + i / 2, wlen / 2);
638 break;
639 case 4:
640 bus_space_write_region_4(sc->sc_tag,
641 sc->sc_handle, sc->sc_wrofs + i,
642 ptr.x32 + i / 4, wlen / 4);
643 break;
644 }
645
646 cfi_write(sc, sc->sc_wrofs + i,
647 CFI_BCS_CONFIRM);
648
649 intr_restore(intr);
650
651 error = cfi_wait_ready(sc, sc->sc_wrofs + i,
652 start, CFI_TIMEOUT_BUFWRITE);
653 if (error != 0)
654 goto out;
655 }
656 goto out;
657 default:
658 /* Fall through to single word case */
659 break;
660 }
661
662 }
663
664 /* Write the block one byte/word at a time. */
445 for (i = 0; i < sc->sc_wrbufsz; i += sc->sc_width) {
446
665 for (i = 0; i < sc->sc_wrbufsz; i += sc->sc_width) {
666
667 /* Avoid writing unless we are actually changing bits */
668 if (!neederase) {
669 switch (sc->sc_width) {
670 case 1:
671 if(*(ptr.x8 + i) == *(cpyprt.x8 + i))
672 continue;
673 break;
674 case 2:
675 if(*(ptr.x16 + i / 2) == *(cpyprt.x16 + i / 2))
676 continue;
677 break;
678 case 4:
679 if(*(ptr.x32 + i / 4) == *(cpyprt.x32 + i / 4))
680 continue;
681 break;
682 }
683 }
684
447 /*
448 * Make sure the command to start a write and the
449 * actual write happens back-to-back without any
450 * excessive delays.
451 */
452 intr = intr_disable();
453
685 /*
686 * Make sure the command to start a write and the
687 * actual write happens back-to-back without any
688 * excessive delays.
689 */
690 intr = intr_disable();
691
692 start = sbinuptime();
454 switch (sc->sc_cmdset) {
455 case CFI_VEND_INTEL_ECS:
456 case CFI_VEND_INTEL_SCS:
457 cfi_write(sc, sc->sc_wrofs + i, CFI_BCS_PROGRAM);
458 break;
459 case CFI_VEND_AMD_SCS:
460 case CFI_VEND_AMD_ECS:
461 cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_PROGRAM);
462 break;
463 }
464 switch (sc->sc_width) {
465 case 1:
466 bus_space_write_1(sc->sc_tag, sc->sc_handle,
693 switch (sc->sc_cmdset) {
694 case CFI_VEND_INTEL_ECS:
695 case CFI_VEND_INTEL_SCS:
696 cfi_write(sc, sc->sc_wrofs + i, CFI_BCS_PROGRAM);
697 break;
698 case CFI_VEND_AMD_SCS:
699 case CFI_VEND_AMD_ECS:
700 cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_PROGRAM);
701 break;
702 }
703 switch (sc->sc_width) {
704 case 1:
705 bus_space_write_1(sc->sc_tag, sc->sc_handle,
467 sc->sc_wrofs + i, *(ptr.x8)++);
706 sc->sc_wrofs + i, *(ptr.x8 + i));
468 break;
469 case 2:
470 bus_space_write_2(sc->sc_tag, sc->sc_handle,
707 break;
708 case 2:
709 bus_space_write_2(sc->sc_tag, sc->sc_handle,
471 sc->sc_wrofs + i, *(ptr.x16)++);
710 sc->sc_wrofs + i, *(ptr.x16 + i / 2));
472 break;
473 case 4:
474 bus_space_write_4(sc->sc_tag, sc->sc_handle,
711 break;
712 case 4:
713 bus_space_write_4(sc->sc_tag, sc->sc_handle,
475 sc->sc_wrofs + i, *(ptr.x32)++);
714 sc->sc_wrofs + i, *(ptr.x32 + i / 4));
476 break;
477 }
715 break;
716 }
478
717
479 intr_restore(intr);
480
718 intr_restore(intr);
719
481 error = cfi_wait_ready(sc, sc->sc_wrofs, sc->sc_write_timeout);
720 error = cfi_wait_ready(sc, sc->sc_wrofs, start,
721 CFI_TIMEOUT_WRITE);
482 if (error)
483 goto out;
484 }
485
486 /* error is 0. */
487
488 out:
489 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);

--- 81 unchanged lines hidden ---

571 * XXX should allow writing individual words/bytes
572 */
573int
574cfi_intel_set_oem_pr(struct cfi_softc *sc, uint64_t id)
575{
576#ifdef CFI_ARMEDANDDANGEROUS
577 register_t intr;
578 int i, error;
722 if (error)
723 goto out;
724 }
725
726 /* error is 0. */
727
728 out:
729 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);

--- 81 unchanged lines hidden ---

811 * XXX should allow writing individual words/bytes
812 */
813int
814cfi_intel_set_oem_pr(struct cfi_softc *sc, uint64_t id)
815{
816#ifdef CFI_ARMEDANDDANGEROUS
817 register_t intr;
818 int i, error;
819 sbintime_t start;
579#endif
580
581 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
582 return EOPNOTSUPP;
583 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
584
585#ifdef CFI_ARMEDANDDANGEROUS
586 for (i = 7; i >= 4; i--, id >>= 16) {
587 intr = intr_disable();
820#endif
821
822 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
823 return EOPNOTSUPP;
824 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
825
826#ifdef CFI_ARMEDANDDANGEROUS
827 for (i = 7; i >= 4; i--, id >>= 16) {
828 intr = intr_disable();
829 start = sbinuptime();
588 cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
589 cfi_put16(sc, CFI_INTEL_PR(i), id&0xffff);
590 intr_restore(intr);
830 cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
831 cfi_put16(sc, CFI_INTEL_PR(i), id&0xffff);
832 intr_restore(intr);
591 error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS,
592 sc->sc_write_timeout);
833 error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start,
834 CFI_TIMEOUT_WRITE);
593 if (error)
594 break;
595 }
596 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
597 return error;
598#else
599 device_printf(sc->sc_dev, "%s: OEM PR not set, "
600 "CFI_ARMEDANDDANGEROUS not configured\n", __func__);

--- 23 unchanged lines hidden ---

624 * NOTE: this operation is not reversible.
625 */
626int
627cfi_intel_set_plr(struct cfi_softc *sc)
628{
629#ifdef CFI_ARMEDANDDANGEROUS
630 register_t intr;
631 int error;
835 if (error)
836 break;
837 }
838 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
839 return error;
840#else
841 device_printf(sc->sc_dev, "%s: OEM PR not set, "
842 "CFI_ARMEDANDDANGEROUS not configured\n", __func__);

--- 23 unchanged lines hidden ---

866 * NOTE: this operation is not reversible.
867 */
868int
869cfi_intel_set_plr(struct cfi_softc *sc)
870{
871#ifdef CFI_ARMEDANDDANGEROUS
872 register_t intr;
873 int error;
874 sbintime_t start;
632#endif
633 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
634 return EOPNOTSUPP;
635 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
636
637#ifdef CFI_ARMEDANDDANGEROUS
638 /* worthy of console msg */
639 device_printf(sc->sc_dev, "set PLR\n");
640 intr = intr_disable();
875#endif
876 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
877 return EOPNOTSUPP;
878 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
879
880#ifdef CFI_ARMEDANDDANGEROUS
881 /* worthy of console msg */
882 device_printf(sc->sc_dev, "set PLR\n");
883 intr = intr_disable();
884 binuptime(&start);
641 cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
642 cfi_put16(sc, CFI_INTEL_PLR, 0xFFFD);
643 intr_restore(intr);
885 cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
886 cfi_put16(sc, CFI_INTEL_PLR, 0xFFFD);
887 intr_restore(intr);
644 error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, sc->sc_write_timeout);
888 error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start,
889 CFI_TIMEOUT_WRITE);
645 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
646 return error;
647#else
648 device_printf(sc->sc_dev, "%s: PLR not set, "
649 "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
650 return ENXIO;
651#endif
652}
653#endif /* CFI_SUPPORT_STRATAFLASH */
890 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
891 return error;
892#else
893 device_printf(sc->sc_dev, "%s: PLR not set, "
894 "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
895 return ENXIO;
896#endif
897}
898#endif /* CFI_SUPPORT_STRATAFLASH */