/* $NetBSD: arspi.c,v 1.5 2007/02/28 04:21:53 thorpej Exp $ */

/*-
 * Copyright (c) 2006 Urbana-Champaign Independent Media Center.
 * Copyright (c) 2006 Garrett D'Amore.
 * All rights reserved.
 *
 * Portions of this code were written by Garrett D'Amore for the
 * Champaign-Urbana Community Wireless Network Project.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgements:
 *      This product includes software developed by the Urbana-Champaign
 *      Independent Media Center.
 *      This product includes software developed by Garrett D'Amore.
 * 4. Urbana-Champaign Independent Media Center's name and Garrett
 *    D'Amore's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: arspi.c,v 1.5 2007/02/28 04:21:53 thorpej Exp $");

#include "locators.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/queue.h>

#include <machine/bus.h>
#include <machine/cpu.h>

#include <mips/atheros/include/ar5315reg.h>
#include <mips/atheros/include/ar531xvar.h>
#include <mips/atheros/include/arbusvar.h>

#include <mips/atheros/dev/arspireg.h>

#include <dev/spi/spiflash.h>
#include <dev/spi/spivar.h>
/*
 * This device is intended to operate only with specific SPI flash
 * parts, and is not a general purpose SPI host.  (Or at least if it
 * is, the Linux and eCos sources do not show how to use it as such.)
 * The lack of documentation on the Atheros SoCs is less than helpful.
 *
 * So for now we just "emulate" enough of the host bus framework to
 * make the SPI flash drivers happy.
 */

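/*
 * Each SPI transfer is parsed into an arspi_job, which records the
 * flash opcode, address, and how much data remains to be shifted in
 * or out.  The controller only moves up to four data bytes per
 * operation, so a single job may be run as a series of controller
 * operations; for example, a SPIFLASH_CMD_READ of 16 bytes is run as
 * four operations of four data bytes each, re-issuing the opcode and
 * an advanced address every time.  For writes, RDSR polling (waiting
 * for the WIP bit to clear) and WREN commands are interleaved between
 * the data operations.
 */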
struct arspi_job {
	uint8_t			job_opcode;
	struct spi_chunk	*job_chunk;
	uint32_t		job_flags;
	uint32_t		job_addr;
	uint32_t		job_data;
	int			job_rxcnt;
	int			job_txcnt;
	int			job_addrcnt;
	int			job_rresid;
	int			job_wresid;
};

#define	JOB_READ		0x1
#define	JOB_WRITE		0x2
#define	JOB_LAST		0x4
#define	JOB_WAIT		0x8	/* job must wait for WIP bits */
#define	JOB_WREN		0x10	/* WREN needed */

struct arspi_softc {
	struct device		sc_dev;
	struct spi_controller	sc_spi;
	void			*sc_ih;
	bool			sc_interrupts;

	struct spi_transfer	*sc_transfer;
	struct spi_chunk	*sc_wchunk;	/* for partial writes */
	struct spi_transq	sc_transq;
	bus_space_tag_t		sc_st;
	bus_space_handle_t	sc_sh;
	bus_size_t		sc_size;
};

#define	STATIC

STATIC int arspi_match(struct device *, struct cfdata *, void *);
STATIC void arspi_attach(struct device *, struct device *, void *);
STATIC void arspi_interrupts(struct device *);
STATIC int arspi_intr(void *);
/* SPI service routines */
STATIC int arspi_configure(void *, int, int, int);
STATIC int arspi_transfer(void *, struct spi_transfer *);
/* internal support */
STATIC void arspi_poll(struct arspi_softc *);
STATIC void arspi_done(struct arspi_softc *, int);
STATIC void arspi_sched(struct arspi_softc *);
STATIC int arspi_get_byte(struct spi_chunk **, uint8_t *);
STATIC int arspi_put_byte(struct spi_chunk **, uint8_t);
STATIC int arspi_make_job(struct spi_transfer *);
STATIC void arspi_update_job(struct spi_transfer *);
STATIC void arspi_finish_job(struct spi_transfer *);


CFATTACH_DECL(arspi, sizeof(struct arspi_softc),
    arspi_match, arspi_attach, NULL, NULL);

#define	GETREG(sc, o)		bus_space_read_4(sc->sc_st, sc->sc_sh, o)
#define	PUTREG(sc, o, v)	bus_space_write_4(sc->sc_st, sc->sc_sh, o, v)

int
arspi_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct arbus_attach_args *aa = aux;

	if (strcmp(aa->aa_name, cf->cf_name) != 0)
		return 0;
	return 1;
}

void
arspi_attach(struct device *parent, struct device *self, void *aux)
{
	struct arspi_softc *sc = device_private(self);
	struct spibus_attach_args sba;
	struct arbus_attach_args *aa = aux;

	/*
	 * Map registers.
	 */
	sc->sc_st = aa->aa_bst;
	sc->sc_size = aa->aa_size;
	if (bus_space_map(sc->sc_st, aa->aa_addr, sc->sc_size, 0,
	    &sc->sc_sh) != 0) {
		printf(": unable to map registers!\n");
		return;
	}

	aprint_normal(": Atheros SPI controller\n");

	/*
	 * Initialize SPI controller.
	 */
	sc->sc_spi.sct_cookie = sc;
	sc->sc_spi.sct_configure = arspi_configure;
	sc->sc_spi.sct_transfer = arspi_transfer;
	sc->sc_spi.sct_nslaves = 1;

	/*
	 * Initialize the queue.
	 */
	spi_transq_init(&sc->sc_transq);

	/*
	 * Enable device interrupts.
	 */
	sc->sc_ih = arbus_intr_establish(aa->aa_cirq, aa->aa_mirq,
	    arspi_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: couldn't establish interrupt\n",
		    device_xname(self));
		/* just leave it in polled mode */
	} else
		config_interrupts(self, arspi_interrupts);

	/*
	 * Initialize the bus attach args and attach the SPI bus.
	 */
	sba.sba_controller = &sc->sc_spi;
	(void) config_found_ia(&sc->sc_dev, "spibus", &sba, spibus_print);
}

void
arspi_interrupts(struct device *self)
{
	/*
	 * We never leave polling mode, because we are apparently
	 * missing some information about how to drive the SPI
	 * controller in interrupt mode.
	 */
#if 0
	struct arspi_softc *sc = device_private(self);
	int	s;

	s = splserial();
	sc->sc_interrupts = true;
	splx(s);
#endif
}

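/*
 * Interrupt handler, also used as the body of the polling loop: spin
 * until the controller clears its BUSY bit, then complete the current
 * operation.
 */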
int
arspi_intr(void *arg)
{
	struct arspi_softc *sc = arg;

	while (GETREG(sc, ARSPI_REG_CTL) & ARSPI_CTL_BUSY)
		continue;

	arspi_done(sc, 0);

	return 1;
}

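/*
 * Run the current transfer to completion by calling the interrupt
 * handler by hand; used while interrupts are not (yet) enabled.
 */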
void
arspi_poll(struct arspi_softc *sc)
{

	while (sc->sc_transfer) {
		arspi_intr(sc);
	}
}

int
arspi_configure(void *cookie, int slave, int mode, int speed)
{

	/*
	 * We don't support the full SPI protocol, and hopefully the
	 * firmware has programmed a reasonable mode already.  So
	 * just a couple of quick sanity checks, then bail.
	 */
	if ((mode != 0) || (slave != 0))
		return EINVAL;

	return 0;
}

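/*
 * Queue a transfer.  The transfer is first parsed into a job; if the
 * controller is idle, the queue is kicked immediately, and in polled
 * mode we wait here until the queue has drained.
 */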
int
arspi_transfer(void *cookie, struct spi_transfer *st)
{
	struct arspi_softc *sc = cookie;
	int rv;
	int s;

	st->st_busprivate = NULL;
	if ((rv = arspi_make_job(st)) != 0) {
		if (st->st_busprivate) {
			free(st->st_busprivate, M_DEVBUF);
			st->st_busprivate = NULL;
		}
		spi_done(st, rv);
		return rv;
	}

	s = splserial();
	spi_transq_enqueue(&sc->sc_transq, st);
	if (sc->sc_transfer == NULL) {
		arspi_sched(sc);
		if (!sc->sc_interrupts)
			arspi_poll(sc);
	}
	splx(s);
	return 0;
}

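/*
 * Start the next controller operation for the transfer at the head of
 * the queue: either an RDSR status poll or a WREN on behalf of a
 * write job, or the next piece of opcode, address, and data.
 */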
void
arspi_sched(struct arspi_softc *sc)
{
	struct spi_transfer *st;
	struct arspi_job *job;
	uint32_t ctl, cnt;

	for (;;) {
		if ((st = sc->sc_transfer) == NULL) {
			if ((st = spi_transq_first(&sc->sc_transq)) == NULL) {
				/* no work left to do */
				break;
			}
			spi_transq_dequeue(&sc->sc_transq);
			sc->sc_transfer = st;
		}

		arspi_update_job(st);
		job = st->st_busprivate;

		/* there shouldn't be anything running, but ensure it */
		do {
			ctl = GETREG(sc, ARSPI_REG_CTL);
		} while (ctl & ARSPI_CTL_BUSY);
		/* clear all of the tx and rx bits */
		ctl &= ~(ARSPI_CTL_TXCNT_MASK | ARSPI_CTL_RXCNT_MASK);

		if (job->job_flags & JOB_WAIT) {
			PUTREG(sc, ARSPI_REG_OPCODE, SPIFLASH_CMD_RDSR);
			/* only the opcode for tx */
			ctl |= (1 << ARSPI_CTL_TXCNT_SHIFT);
			/* and one rx byte */
			ctl |= (1 << ARSPI_CTL_RXCNT_SHIFT);
		} else if (job->job_flags & JOB_WREN) {
			PUTREG(sc, ARSPI_REG_OPCODE, SPIFLASH_CMD_WREN);
			/* just the opcode */
			ctl |= (1 << ARSPI_CTL_TXCNT_SHIFT);
			/* no rx bytes */
		} else {
			/* set the data */
			PUTREG(sc, ARSPI_REG_DATA, job->job_data);

			/* set the opcode and the address */
			PUTREG(sc, ARSPI_REG_OPCODE, job->job_opcode |
			    (job->job_addr << 8));

			/* now set txcnt */
			cnt = 1;	/* opcode */
			cnt += job->job_addrcnt + job->job_txcnt;
			ctl |= (cnt << ARSPI_CTL_TXCNT_SHIFT);

			/* now set rxcnt */
			cnt = job->job_rxcnt;
			ctl |= (cnt << ARSPI_CTL_RXCNT_SHIFT);
		}

		/* set the start bit */
		ctl |= ARSPI_CTL_START;

		PUTREG(sc, ARSPI_REG_CTL, ctl);
		break;
	}
}

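/*
 * Complete the operation that just finished: collect any received
 * data, update the job's wait/write-enable state, and when the last
 * piece of the transfer is done (or an error occurred), hand the
 * transfer back to the SPI framework.  Always reschedules the
 * controller afterwards.
 */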
void
arspi_done(struct arspi_softc *sc, int err)
{
	struct spi_transfer *st;
	struct arspi_job *job;

	if ((st = sc->sc_transfer) != NULL) {
		job = st->st_busprivate;

		if (job->job_flags & JOB_WAIT) {
			if (err == 0) {
				if ((GETREG(sc, ARSPI_REG_DATA) &
				    SPIFLASH_SR_BUSY) == 0) {
					/* intermediate wait done */
					job->job_flags &= ~JOB_WAIT;
					goto done;
				}
			}
		} else if (job->job_flags & JOB_WREN) {
			if (err == 0) {
				job->job_flags &= ~JOB_WREN;
				goto done;
			}
		} else if (err == 0) {
			/*
			 * When breaking up write jobs, we have to wait until
			 * the WIP bit is clear, and we have to separately
			 * send WREN for each chunk.  These flags facilitate
			 * that.
			 */
			if (job->job_flags & JOB_WRITE)
				job->job_flags |= (JOB_WAIT | JOB_WREN);
			job->job_data = GETREG(sc, ARSPI_REG_DATA);
			arspi_finish_job(st);
		}

		if (err || (job->job_flags & JOB_LAST)) {
			sc->sc_transfer = NULL;
			st->st_busprivate = NULL;
			spi_done(st, err);
			free(job, M_DEVBUF);
		}
	}
done:
	arspi_sched(sc);
}

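/*
 * Fetch the next byte to transmit from the transfer's chunk list,
 * advancing *chunkp past exhausted chunks.
 */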
int
arspi_get_byte(struct spi_chunk **chunkp, uint8_t *bytep)
{
	struct spi_chunk *chunk;

	chunk = *chunkp;

	/* skip leading empty (or already consumed) chunks */
	while (chunk && chunk->chunk_wresid == 0)
		chunk = chunk->chunk_next;

	if (chunk == NULL) {
		return ENODATA;
	}

	/*
	 * chunk must be write only.  SPI flash doesn't support
	 * any full duplex operations.
	 */
	if ((chunk->chunk_rptr) || !(chunk->chunk_wptr)) {
		return EINVAL;
	}

	*bytep = *chunk->chunk_wptr;
	chunk->chunk_wptr++;
	chunk->chunk_wresid--;
	chunk->chunk_rresid--;
	/* clearing wptr and rptr makes sanity checks later easier */
	if (chunk->chunk_wresid == 0)
		chunk->chunk_wptr = NULL;
	if (chunk->chunk_rresid == 0)
		chunk->chunk_rptr = NULL;
	while (chunk && chunk->chunk_wresid == 0)
		chunk = chunk->chunk_next;

	*chunkp = chunk;
	return 0;
}

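/*
 * Store a received byte into the transfer's chunk list, advancing
 * *chunkp past exhausted chunks.
 */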
int
arspi_put_byte(struct spi_chunk **chunkp, uint8_t byte)
{
	struct spi_chunk *chunk;

	chunk = *chunkp;

	/* skip leading empty (or already consumed) chunks */
	while (chunk && chunk->chunk_rresid == 0)
		chunk = chunk->chunk_next;

	if (chunk == NULL) {
		return EOVERFLOW;
	}

	/*
	 * chunk must be read only.  SPI flash doesn't support
	 * any full duplex operations.
	 */
	if ((chunk->chunk_wptr) || !(chunk->chunk_rptr)) {
		return EINVAL;
	}

	*chunk->chunk_rptr = byte;
	chunk->chunk_rptr++;
	chunk->chunk_wresid--;	/* technically this was done at send time */
	chunk->chunk_rresid--;
	while (chunk && chunk->chunk_rresid == 0)
		chunk = chunk->chunk_next;

	*chunkp = chunk;
	return 0;
}

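/*
 * Parse the leading bytes of a transfer into an arspi_job: the flash
 * opcode, the address bytes it implies (if any), and the expected
 * receive/transmit counts.  Only the SPI flash commands handled in
 * the switch below are understood; anything else gets EINVAL.
 */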
int
arspi_make_job(struct spi_transfer *st)
{
	struct arspi_job *job;
	struct spi_chunk *chunk;
	uint8_t byte;
	int i, rv;

	job = malloc(sizeof (struct arspi_job), M_DEVBUF, M_ZERO);
	if (job == NULL) {
		return ENOMEM;
	}

	st->st_busprivate = job;

	/* skip any leading empty chunks (should not be any!) */
	chunk = st->st_chunks;

	/* get transfer opcode */
	if ((rv = arspi_get_byte(&chunk, &byte)) != 0)
		return rv;

	job->job_opcode = byte;
	switch (job->job_opcode) {
	case SPIFLASH_CMD_WREN:
	case SPIFLASH_CMD_WRDI:
	case SPIFLASH_CMD_CHIPERASE:
		break;
	case SPIFLASH_CMD_RDJI:
		job->job_rxcnt = 3;
		break;
	case SPIFLASH_CMD_RDSR:
		job->job_rxcnt = 1;
		break;
	case SPIFLASH_CMD_WRSR:
		/*
		 * is this in data, or in address?  stick it in data
		 * for now.
		 */
		job->job_txcnt = 1;
		break;
	case SPIFLASH_CMD_RDID:
		job->job_addrcnt = 3;	/* 3 dummy bytes */
		job->job_rxcnt = 1;
		break;
	case SPIFLASH_CMD_ERASE:
		job->job_addrcnt = 3;
		break;
	case SPIFLASH_CMD_READ:
		job->job_addrcnt = 3;
		job->job_flags |= JOB_READ;
		break;
	case SPIFLASH_CMD_PROGRAM:
		job->job_addrcnt = 3;
		job->job_flags |= JOB_WRITE;
		break;
	case SPIFLASH_CMD_READFAST:
		/*
		 * This is a pain in the arse to support, so we will
		 * rewrite as an ordinary read.  But later, after we
		 * obtain the address.
		 */
		job->job_addrcnt = 3;	/* 3 address */
		job->job_flags |= JOB_READ;
		break;
	default:
		return EINVAL;
	}

	for (i = 0; i < job->job_addrcnt; i++) {
		if ((rv = arspi_get_byte(&chunk, &byte)) != 0)
			return rv;
		job->job_addr <<= 8;
		job->job_addr |= byte;
	}

	if (job->job_opcode == SPIFLASH_CMD_READFAST) {
		/* eat the dummy timing byte */
		if ((rv = arspi_get_byte(&chunk, &byte)) != 0)
			return rv;
		/* rewrite this as a read */
		job->job_opcode = SPIFLASH_CMD_READ;
	}

	job->job_chunk = chunk;

	/*
	 * Now quickly check a few other things.  Namely, we are not
	 * allowed to have both READ and WRITE.
	 */
	for (chunk = job->job_chunk; chunk; chunk = chunk->chunk_next) {
		if (chunk->chunk_wptr) {
			job->job_wresid += chunk->chunk_wresid;
		}
		if (chunk->chunk_rptr) {
			job->job_rresid += chunk->chunk_rresid;
		}
	}

	if (job->job_rresid && job->job_wresid) {
		return EINVAL;
	}

	return 0;
}

/*
 * NB: The Atheros SPI controller runs in little endian mode.  So all
 * data accesses must be swapped appropriately.
 *
 * The controller auto-swaps read accesses done through the mapped memory
 * region, but when using SPI directly, we have to do the right thing to
 * swap to or from little endian.
 */

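/*
 * Load the next piece of the job: up to four bytes of transmit data
 * packed into the data word, and the matching transmit/receive counts
 * for the controller.  Marks the job as the last piece when no
 * residual data remains.
 */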
void
arspi_update_job(struct spi_transfer *st)
{
	struct arspi_job *job = st->st_busprivate;
	uint8_t byte;
	int i;

	if (job->job_flags & (JOB_WAIT|JOB_WREN))
		return;

	job->job_rxcnt = 0;
	job->job_txcnt = 0;
	job->job_data = 0;

	job->job_txcnt = min(job->job_wresid, 4);
	job->job_rxcnt = min(job->job_rresid, 4);

	job->job_wresid -= job->job_txcnt;
	job->job_rresid -= job->job_rxcnt;

	for (i = 0; i < job->job_txcnt; i++) {
		arspi_get_byte(&job->job_chunk, &byte);
		job->job_data |= (byte << (i * 8));
	}

	if ((!job->job_wresid) && (!job->job_rresid)) {
		job->job_flags |= JOB_LAST;
	}
}

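/*
 * Unload the result of the piece that just completed: advance the
 * flash address past the bytes transferred and copy any received
 * bytes back into the transfer's chunk list.
 */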
void
arspi_finish_job(struct spi_transfer *st)
{
	struct arspi_job *job = st->st_busprivate;
	uint8_t	byte;
	int i;

	job->job_addr += job->job_rxcnt;
	job->job_addr += job->job_txcnt;
	for (i = 0; i < job->job_rxcnt; i++) {
		byte = job->job_data & 0xff;
		job->job_data >>= 8;
		arspi_put_byte(&job->job_chunk, byte);
	}
}