1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2011
5 *	Ben Gray <ben.r.gray@gmail.com>.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD$");
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/bus.h>
36#include <sys/kernel.h>
37#include <sys/lock.h>
38#include <sys/interrupt.h>
39#include <sys/module.h>
40#include <sys/malloc.h>
41#include <sys/mutex.h>
42#include <sys/rman.h>
43#include <sys/queue.h>
44#include <sys/taskqueue.h>
45#include <sys/timetc.h>
46#include <machine/bus.h>
47#include <machine/intr.h>
48
49#include <dev/ofw/openfirm.h>
50#include <dev/ofw/ofw_bus.h>
51#include <dev/ofw/ofw_bus_subr.h>
52
53#include <arm/ti/ti_cpuid.h>
54#include <arm/ti/ti_sysc.h>
55#include <arm/ti/ti_sdma.h>
56#include <arm/ti/ti_sdmareg.h>
57
58/**
59 *	Kernel functions for using the DMA controller
60 *
61 *
62 *	DMA TRANSFERS:
63 *	A DMA transfer block consists of a number of frames (FN). Each frame
64 *	consists of a number of elements, and each element can have a size of 8, 16,
65 *	or 32 bits.
66 *
67 *	OMAP44xx and newer chips support linked list (aka scatter gather) transfers,
68 *	where a linked list of source/destination pairs can be placed in memory
69 *	for the H/W to process.  Earlier chips only allowed you to chain multiple
70 *	channels together.  However currently this linked list feature is not
71 *	supported by the driver.
72 *
73 */
74
75/**
76 *	Data structure per DMA channel.
77 *
78 *
79 */
80struct ti_sdma_channel {
81	/*
82	 * The configuration registers for the given channel, these are modified
83	 * by the set functions and only written to the actual registers when a
84	 * transaction is started.
85	 */
86	uint32_t		reg_csdp;
87	uint32_t		reg_ccr;
88	uint32_t		reg_cicr;
89
90	/* Set when one of the configuration registers above change */
91	uint32_t		need_reg_write;
92
93	/* Callback function used when an interrupt is tripped on the given channel */
94	void (*callback)(unsigned int ch, uint32_t ch_status, void *data);
95
96	/* Callback data passed in the callback ... duh */
97	void*			callback_data;
98
99};
100
101/**
102 *	DMA driver context, allocated and stored globally, this driver is not
 *	intended to ever be unloaded (see ti_sdma_sc).
104 *
105 */
106struct ti_sdma_softc {
107	device_t		sc_dev;
108	struct resource*	sc_irq_res;
109	struct resource*	sc_mem_res;
110
111	/*
112	 * I guess in theory we should have a mutex per DMA channel for register
113	 * modifications. But since we know we are never going to be run on a SMP
114	 * system, we can use just the single lock for all channels.
115	 */
116	struct mtx		sc_mtx;
117
118	/* Stores the H/W revision read from the registers */
119	uint32_t		sc_hw_rev;
120
121	/*
122	 * Bits in the sc_active_channels data field indicate if the channel has
123	 * been activated.
124	 */
125	uint32_t		sc_active_channels;
126
127	struct ti_sdma_channel sc_channel[NUM_DMA_CHANNELS];
128
129};
130
131static struct ti_sdma_softc *ti_sdma_sc = NULL;
132
133/**
134 *	Macros for driver mutex locking
135 */
136#define TI_SDMA_LOCK(_sc)             mtx_lock_spin(&(_sc)->sc_mtx)
137#define TI_SDMA_UNLOCK(_sc)           mtx_unlock_spin(&(_sc)->sc_mtx)
138#define TI_SDMA_LOCK_INIT(_sc) \
139	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \
140	         "ti_sdma", MTX_SPIN)
141#define TI_SDMA_LOCK_DESTROY(_sc)     mtx_destroy(&_sc->sc_mtx);
142#define TI_SDMA_ASSERT_LOCKED(_sc)    mtx_assert(&_sc->sc_mtx, MA_OWNED);
143#define TI_SDMA_ASSERT_UNLOCKED(_sc)  mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
144
145/**
146 *	Function prototypes
147 *
148 */
149static void ti_sdma_intr(void *);
150
151/**
152 *	ti_sdma_read_4 - reads a 32-bit value from one of the DMA registers
153 *	@sc: DMA device context
154 *	@off: The offset of a register from the DMA register address range
155 *
156 *
157 *	RETURNS:
158 *	32-bit value read from the register.
159 */
160static inline uint32_t
161ti_sdma_read_4(struct ti_sdma_softc *sc, bus_size_t off)
162{
163	return bus_read_4(sc->sc_mem_res, off);
164}
165
/**
 *	ti_sdma_write_4 - writes a 32-bit value to one of the DMA registers
 *	@sc: DMA device context
 *	@off: The offset of a register from the DMA register address range
 *	@val: The 32-bit value to write into the register
 *
 *
 *	RETURNS:
 *	nothing
 */
static inline void
ti_sdma_write_4(struct ti_sdma_softc *sc, bus_size_t off, uint32_t val)
{
	bus_write_4(sc->sc_mem_res, off, val);
}
180
181/**
182 *	ti_sdma_is_omap3_rev - returns true if H/W is from OMAP3 series
183 *	@sc: DMA device context
184 *
185 */
static inline int
ti_sdma_is_omap3_rev(struct ti_sdma_softc *sc)
{
	/* sc_hw_rev is cached from the revision register at attach time */
	return (sc->sc_hw_rev == DMA4_OMAP3_REV);
}
191
192/**
193 *	ti_sdma_is_omap4_rev - returns true if H/W is from OMAP4 series
194 *	@sc: DMA device context
195 *
196 */
static inline int
ti_sdma_is_omap4_rev(struct ti_sdma_softc *sc)
{
	/* sc_hw_rev is cached from the revision register at attach time */
	return (sc->sc_hw_rev == DMA4_OMAP4_REV);
}
202
203/**
204 *	ti_sdma_intr - interrupt handler for all 4 DMA IRQs
205 *	@arg: ignored
206 *
207 *	Called when any of the four DMA IRQs are triggered.
208 *
209 *	LOCKING:
210 *	DMA registers protected by internal mutex
211 *
212 *	RETURNS:
213 *	nothing
214 */
static void
ti_sdma_intr(void *arg)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	uint32_t intr;
	uint32_t csr;
	unsigned int ch, j;
	struct ti_sdma_channel* channel;

	TI_SDMA_LOCK(sc);

	/* The controller exposes several IRQ lines; scan each of them */
	for (j = 0; j < NUM_DMA_IRQS; j++) {
		/* Only act on status bits whose interrupt is enabled on this line */
		intr = ti_sdma_read_4(sc, DMA4_IRQSTATUS_L(j));
		intr &= ti_sdma_read_4(sc, DMA4_IRQENABLE_L(j));
		if (intr == 0x00000000)
			continue;

		/* Each set bit in the pending mask identifies one channel */
		for (ch = 0; ch < NUM_DMA_CHANNELS; ch++) {
			if (intr & (1 << ch)) {
				channel = &sc->sc_channel[ch];

				/* Read the CSR register and verify we don't have a spurious IRQ */
				csr = ti_sdma_read_4(sc, DMA4_CSR(ch));
				if (csr == 0) {
					device_printf(sc->sc_dev, "Spurious DMA IRQ for channel "
					              "%d\n", ch);
					continue;
				}

				/* Sanity check this channel is active */
				if ((sc->sc_active_channels & (1 << ch)) == 0) {
					device_printf(sc->sc_dev, "IRQ %d for a non-activated "
					              "channel %d\n", j, ch);
					continue;
				}

				/* Report (but don't otherwise act on) the error conditions */
				if (csr & DMA4_CSR_DROP)
					device_printf(sc->sc_dev, "Synchronization event drop "
					              "occurred during the transfer on channel %u\n",
								  ch);
				if (csr & DMA4_CSR_SECURE_ERR)
					device_printf(sc->sc_dev, "Secure transaction error event "
					              "on channel %u\n", ch);
				if (csr & DMA4_CSR_MISALIGNED_ADRS_ERR)
					device_printf(sc->sc_dev, "Misaligned address error event "
					              "on channel %u\n", ch);
				if (csr & DMA4_CSR_TRANS_ERR) {
					device_printf(sc->sc_dev, "Transaction error event on "
					              "channel %u\n", ch);
					/*
					 * Apparently according to linux code, there is an errata
					 * that says the channel is not disabled upon this error.
					 * They explicitly disable the channel here .. since I
					 * haven't seen the errata, I'm going to ignore for now.
					 */
				}

				/* Ack: clear the channel status, then the IRQ line status bit */
				ti_sdma_write_4(sc, DMA4_CSR(ch), DMA4_CSR_CLEAR_MASK);
				ti_sdma_write_4(sc, DMA4_IRQSTATUS_L(j), (1 << ch));

				/* Call the client's callback; note it runs with the driver
				 * spin mutex held */
				if (channel->callback)
					channel->callback(ch, csr, channel->callback_data);
			}
		}
	}

	TI_SDMA_UNLOCK(sc);

	return;
}
290
291/**
292 *	ti_sdma_activate_channel - activates a DMA channel
293 *	@ch: upon return contains the channel allocated
294 *	@callback: a callback function to associate with the channel
295 *	@data: optional data supplied when the callback is called
296 *
 *	Simply activates a channel by enabling and writing default values to the
298 *	channel's register set.  It doesn't start a transaction, just populates the
299 *	internal data structures and sets defaults.
300 *
301 *	Note this function doesn't enable interrupts, for that you need to call
302 *	ti_sdma_enable_channel_irq(). If not using IRQ to detect the end of the
303 *	transfer, you can use ti_sdma_status_poll() to detect a change in the
304 *	status.
305 *
306 *	A channel must be activated before any of the other DMA functions can be
307 *	called on it.
308 *
309 *	LOCKING:
310 *	DMA registers protected by internal mutex
311 *
312 *	RETURNS:
313 *	0 on success, otherwise an error code
314 */
315int
316ti_sdma_activate_channel(unsigned int *ch,
317                          void (*callback)(unsigned int ch, uint32_t status, void *data),
318                          void *data)
319{
320	struct ti_sdma_softc *sc = ti_sdma_sc;
321	struct ti_sdma_channel *channel = NULL;
322	uint32_t addr;
323	unsigned int i;
324
325	/* Sanity check */
326	if (sc == NULL)
327		return (ENOMEM);
328
329	if (ch == NULL)
330		return (EINVAL);
331
332	TI_SDMA_LOCK(sc);
333
334	/* Check to see if all channels are in use */
335	if (sc->sc_active_channels == 0xffffffff) {
336		TI_SDMA_UNLOCK(sc);
337		return (ENOMEM);
338	}
339
340	/* Find the first non-active channel */
341	for (i = 0; i < NUM_DMA_CHANNELS; i++) {
342		if (!(sc->sc_active_channels & (0x1 << i))) {
343			sc->sc_active_channels |= (0x1 << i);
344			*ch = i;
345			break;
346		}
347	}
348
349	/* Get the channel struct and populate the fields */
350	channel = &sc->sc_channel[*ch];
351
352	channel->callback = callback;
353	channel->callback_data = data;
354
355	channel->need_reg_write = 1;
356
357	/* Set the default configuration for the DMA channel */
358	channel->reg_csdp = DMA4_CSDP_DATA_TYPE(0x2)
359		| DMA4_CSDP_SRC_BURST_MODE(0)
360		| DMA4_CSDP_DST_BURST_MODE(0)
361		| DMA4_CSDP_SRC_ENDIANISM(0)
362		| DMA4_CSDP_DST_ENDIANISM(0)
363		| DMA4_CSDP_WRITE_MODE(0)
364		| DMA4_CSDP_SRC_PACKED(0)
365		| DMA4_CSDP_DST_PACKED(0);
366
367	channel->reg_ccr = DMA4_CCR_DST_ADDRESS_MODE(1)
368		| DMA4_CCR_SRC_ADDRESS_MODE(1)
369		| DMA4_CCR_READ_PRIORITY(0)
370		| DMA4_CCR_WRITE_PRIORITY(0)
371		| DMA4_CCR_SYNC_TRIGGER(0)
372		| DMA4_CCR_FRAME_SYNC(0)
373		| DMA4_CCR_BLOCK_SYNC(0);
374
375	channel->reg_cicr = DMA4_CICR_TRANS_ERR_IE
376		| DMA4_CICR_SECURE_ERR_IE
377		| DMA4_CICR_SUPERVISOR_ERR_IE
378		| DMA4_CICR_MISALIGNED_ADRS_ERR_IE;
379
380	/* Clear all the channel registers, this should abort any transaction */
381	for (addr = DMA4_CCR(*ch); addr <= DMA4_COLOR(*ch); addr += 4)
382		ti_sdma_write_4(sc, addr, 0x00000000);
383
384	TI_SDMA_UNLOCK(sc);
385
386	return 0;
387}
388
389/**
390 *	ti_sdma_deactivate_channel - deactivates a channel
391 *	@ch: the channel to deactivate
392 *
393 *
394 *
395 *	LOCKING:
396 *	DMA registers protected by internal mutex
397 *
398 *	RETURNS:
 *	0 on success, otherwise an error code
400 */
401int
402ti_sdma_deactivate_channel(unsigned int ch)
403{
404	struct ti_sdma_softc *sc = ti_sdma_sc;
405	unsigned int j;
406	unsigned int addr;
407
408	/* Sanity check */
409	if (sc == NULL)
410		return (ENOMEM);
411
412	TI_SDMA_LOCK(sc);
413
414	/* First check if the channel is currently active */
415	if ((sc->sc_active_channels & (1 << ch)) == 0) {
416		TI_SDMA_UNLOCK(sc);
417		return (EBUSY);
418	}
419
420	/* Mark the channel as inactive */
421	sc->sc_active_channels &= ~(1 << ch);
422
423	/* Disable all DMA interrupts for the channel. */
424	ti_sdma_write_4(sc, DMA4_CICR(ch), 0);
425
426	/* Make sure the DMA transfer is stopped. */
427	ti_sdma_write_4(sc, DMA4_CCR(ch), 0);
428
429	/* Clear the CSR register and IRQ status register */
430	ti_sdma_write_4(sc, DMA4_CSR(ch), DMA4_CSR_CLEAR_MASK);
431	for (j = 0; j < NUM_DMA_IRQS; j++) {
432		ti_sdma_write_4(sc, DMA4_IRQSTATUS_L(j), (1 << ch));
433	}
434
435	/* Clear all the channel registers, this should abort any transaction */
436	for (addr = DMA4_CCR(ch); addr <= DMA4_COLOR(ch); addr += 4)
437		ti_sdma_write_4(sc, addr, 0x00000000);
438
439	TI_SDMA_UNLOCK(sc);
440
441	return 0;
442}
443
444/**
445 *	ti_sdma_disable_channel_irq - disables IRQ's on the given channel
446 *	@ch: the channel to disable IRQ's on
447 *
448 *	Disable interrupt generation for the given channel.
449 *
450 *	LOCKING:
451 *	DMA registers protected by internal mutex
452 *
453 *	RETURNS:
 *	0 on success, otherwise an error code
455 */
456int
457ti_sdma_disable_channel_irq(unsigned int ch)
458{
459	struct ti_sdma_softc *sc = ti_sdma_sc;
460	uint32_t irq_enable;
461	unsigned int j;
462
463	/* Sanity check */
464	if (sc == NULL)
465		return (ENOMEM);
466
467	TI_SDMA_LOCK(sc);
468
469	if ((sc->sc_active_channels & (1 << ch)) == 0) {
470		TI_SDMA_UNLOCK(sc);
471		return (EINVAL);
472	}
473
474	/* Disable all the individual error conditions */
475	sc->sc_channel[ch].reg_cicr = 0x0000;
476	ti_sdma_write_4(sc, DMA4_CICR(ch), 0x0000);
477
478	/* Disable the channel interrupt enable */
479	for (j = 0; j < NUM_DMA_IRQS; j++) {
480		irq_enable = ti_sdma_read_4(sc, DMA4_IRQENABLE_L(j));
481		irq_enable &= ~(1 << ch);
482
483		ti_sdma_write_4(sc, DMA4_IRQENABLE_L(j), irq_enable);
484	}
485
486	/* Indicate the registers need to be rewritten on the next transaction */
487	sc->sc_channel[ch].need_reg_write = 1;
488
489	TI_SDMA_UNLOCK(sc);
490
491	return (0);
492}
493
494/**
 *	ti_sdma_enable_channel_irq - enables IRQ's on the given channel
 *	@ch: the channel to enable IRQ's on
497 *	@flags: bitmask of interrupt types to enable
498 *
499 *	Flags can be a bitmask of the following options:
500 *		DMA_IRQ_FLAG_DROP
501 *		DMA_IRQ_FLAG_HALF_FRAME_COMPL
502 *		DMA_IRQ_FLAG_FRAME_COMPL
503 *		DMA_IRQ_FLAG_START_LAST_FRAME
504 *		DMA_IRQ_FLAG_BLOCK_COMPL
505 *		DMA_IRQ_FLAG_ENDOF_PKT
506 *		DMA_IRQ_FLAG_DRAIN
507 *
508 *
509 *	LOCKING:
510 *	DMA registers protected by internal mutex
511 *
512 *	RETURNS:
 *	0 on success, otherwise an error code
514 */
515int
516ti_sdma_enable_channel_irq(unsigned int ch, uint32_t flags)
517{
518	struct ti_sdma_softc *sc = ti_sdma_sc;
519	uint32_t irq_enable;
520
521	/* Sanity check */
522	if (sc == NULL)
523		return (ENOMEM);
524
525	TI_SDMA_LOCK(sc);
526
527	if ((sc->sc_active_channels & (1 << ch)) == 0) {
528		TI_SDMA_UNLOCK(sc);
529		return (EINVAL);
530	}
531
532	/* Always enable the error interrupts if we have interrupts enabled */
533	flags |= DMA4_CICR_TRANS_ERR_IE | DMA4_CICR_SECURE_ERR_IE |
534	         DMA4_CICR_SUPERVISOR_ERR_IE | DMA4_CICR_MISALIGNED_ADRS_ERR_IE;
535
536	sc->sc_channel[ch].reg_cicr = flags;
537
538	/* Write the values to the register */
539	ti_sdma_write_4(sc, DMA4_CICR(ch), flags);
540
541	/* Enable the channel interrupt enable */
542	irq_enable = ti_sdma_read_4(sc, DMA4_IRQENABLE_L(0));
543	irq_enable |= (1 << ch);
544
545	ti_sdma_write_4(sc, DMA4_IRQENABLE_L(0), irq_enable);
546
547	/* Indicate the registers need to be rewritten on the next transaction */
548	sc->sc_channel[ch].need_reg_write = 1;
549
550	TI_SDMA_UNLOCK(sc);
551
552	return (0);
553}
554
555/**
556 *	ti_sdma_get_channel_status - returns the status of a given channel
557 *	@ch: the channel number to get the status of
558 *	@status: upon return will contain the status bitmask, see below for possible
559 *	         values.
560 *
561 *	      DMA_STATUS_DROP
562 *	      DMA_STATUS_HALF
563 *	      DMA_STATUS_FRAME
564 *	      DMA_STATUS_LAST
565 *	      DMA_STATUS_BLOCK
566 *	      DMA_STATUS_SYNC
567 *	      DMA_STATUS_PKT
568 *	      DMA_STATUS_TRANS_ERR
569 *	      DMA_STATUS_SECURE_ERR
570 *	      DMA_STATUS_SUPERVISOR_ERR
571 *	      DMA_STATUS_MISALIGNED_ADRS_ERR
572 *	      DMA_STATUS_DRAIN_END
573 *
574 *
575 *	LOCKING:
576 *	DMA registers protected by internal mutex
577 *
578 *	RETURNS:
 *	0 on success, otherwise an error code
580 */
581int
582ti_sdma_get_channel_status(unsigned int ch, uint32_t *status)
583{
584	struct ti_sdma_softc *sc = ti_sdma_sc;
585	uint32_t csr;
586
587	/* Sanity check */
588	if (sc == NULL)
589		return (ENOMEM);
590
591	TI_SDMA_LOCK(sc);
592
593	if ((sc->sc_active_channels & (1 << ch)) == 0) {
594		TI_SDMA_UNLOCK(sc);
595		return (EINVAL);
596	}
597
598	TI_SDMA_UNLOCK(sc);
599
600	csr = ti_sdma_read_4(sc, DMA4_CSR(ch));
601
602	if (status != NULL)
603		*status = csr;
604
605	return (0);
606}
607
608/**
609 *	ti_sdma_start_xfer - starts a DMA transfer
610 *	@ch: the channel number to set the endianness of
 *	@src_paddr: the source physical address
 *	@dst_paddr: the destination physical address
613 *	@frmcnt: the number of frames per block
614 *	@elmcnt: the number of elements in a frame, an element is either an 8, 16
615 *           or 32-bit value as defined by ti_sdma_set_xfer_burst()
616 *
617 *
618 *	LOCKING:
619 *	DMA registers protected by internal mutex
620 *
621 *	RETURNS:
 *	0 on success, otherwise an error code
623 */
int
ti_sdma_start_xfer(unsigned int ch, unsigned int src_paddr,
                    unsigned long dst_paddr,
                    unsigned int frmcnt, unsigned int elmcnt)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	struct ti_sdma_channel *channel;
	uint32_t ccr;

	/* Sanity check the driver has attached */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	/* Only activated channels may start a transfer */
	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	channel = &sc->sc_channel[ch];

	/* a) Write the CSDP register */
	/* NOTE(review): written unconditionally here, whereas
	 * ti_sdma_start_xfer_packet() skips it when !need_reg_write */
	ti_sdma_write_4(sc, DMA4_CSDP(ch),
	    channel->reg_csdp | DMA4_CSDP_WRITE_MODE(1));

	/* b) Set the number of element per frame CEN[23:0] */
	ti_sdma_write_4(sc, DMA4_CEN(ch), elmcnt);

	/* c) Set the number of frame per block CFN[15:0] */
	ti_sdma_write_4(sc, DMA4_CFN(ch), frmcnt);

	/* d) Set the Source/dest start address index CSSA[31:0]/CDSA[31:0] */
	ti_sdma_write_4(sc, DMA4_CSSA(ch), src_paddr);
	ti_sdma_write_4(sc, DMA4_CDSA(ch), dst_paddr);

	/* e) Write the CCR register (cached config, start bit not yet set) */
	ti_sdma_write_4(sc, DMA4_CCR(ch), channel->reg_ccr);

	/* f)  - Set the source element index increment CSEI[15:0] */
	ti_sdma_write_4(sc, DMA4_CSE(ch), 0x0001);

	/*     - Set the source frame index increment CSFI[15:0] */
	ti_sdma_write_4(sc, DMA4_CSF(ch), 0x0001);

	/*     - Set the destination element index increment CDEI[15:0]*/
	ti_sdma_write_4(sc, DMA4_CDE(ch), 0x0001);

	/* - Set the destination frame index increment CDFI[31:0] */
	ti_sdma_write_4(sc, DMA4_CDF(ch), 0x0001);

	/* Clear the status register (0x1FFE covers the event/error bits) */
	ti_sdma_write_4(sc, DMA4_CSR(ch), 0x1FFE);

	/* Write the start-bit and away we go */
	/* bit 7 of CCR is the channel enable — presumably DMA4_CCR_ENABLE;
	 * confirm against ti_sdmareg.h */
	ccr = ti_sdma_read_4(sc, DMA4_CCR(ch));
	ccr |= (1 << 7);
	ti_sdma_write_4(sc, DMA4_CCR(ch), ccr);

	/* Config registers are now in sync with the cached copies */
	channel->need_reg_write = 0;

	TI_SDMA_UNLOCK(sc);

	return (0);
}
690
691/**
692 *	ti_sdma_start_xfer_packet - starts a packet DMA transfer
693 *	@ch: the channel number to use for the transfer
694 *	@src_paddr: the source physical address
695 *	@dst_paddr: the destination physical address
696 *	@frmcnt: the number of frames to transfer
697 *	@elmcnt: the number of elements in a frame, an element is either an 8, 16
698 *           or 32-bit value as defined by ti_sdma_set_xfer_burst()
699 *	@pktsize: the number of elements in each transfer packet
700 *
701 *	The @frmcnt and @elmcnt define the overall number of bytes to transfer,
702 *	typically @frmcnt is 1 and @elmcnt contains the total number of elements.
703 *	@pktsize is the size of each individual packet, there might be multiple
704 *	packets per transfer.  i.e. for the following with element size of 32-bits
705 *
706 *		frmcnt = 1, elmcnt = 512, pktsize = 128
707 *
708 *	       Total transfer bytes = 1 * 512 = 512 elements or 2048 bytes
 *	       Packets transferred   = 512 / 128 = 4
710 *
711 *
712 *	LOCKING:
713 *	DMA registers protected by internal mutex
714 *
715 *	RETURNS:
 *	0 on success, otherwise an error code
717 */
int
ti_sdma_start_xfer_packet(unsigned int ch, unsigned int src_paddr,
                           unsigned long dst_paddr, unsigned int frmcnt,
                           unsigned int elmcnt, unsigned int pktsize)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	struct ti_sdma_channel *channel;
	uint32_t ccr;

	/* Sanity check the driver has attached */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	/* Only activated channels may start a transfer */
	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	channel = &sc->sc_channel[ch];

	/* a) Write the CSDP register (only if the cached config changed) */
	if (channel->need_reg_write)
		ti_sdma_write_4(sc, DMA4_CSDP(ch),
		    channel->reg_csdp | DMA4_CSDP_WRITE_MODE(1));

	/* b) Set the number of elements to transfer CEN[23:0] */
	ti_sdma_write_4(sc, DMA4_CEN(ch), elmcnt);

	/* c) Set the number of frames to transfer CFN[15:0] */
	ti_sdma_write_4(sc, DMA4_CFN(ch), frmcnt);

	/* d) Set the Source/dest start address index CSSA[31:0]/CDSA[31:0] */
	ti_sdma_write_4(sc, DMA4_CSSA(ch), src_paddr);
	ti_sdma_write_4(sc, DMA4_CDSA(ch), dst_paddr);

	/* e) Write the CCR register with packet-transfer mode selected */
	ti_sdma_write_4(sc, DMA4_CCR(ch),
	    channel->reg_ccr | DMA4_CCR_PACKET_TRANS);

	/* f)  - Set the source element index increment CSEI[15:0] */
	ti_sdma_write_4(sc, DMA4_CSE(ch), 0x0001);

	/*     - Set the packet size; which frame-index register takes it
	 *       depends on whether the sync trigger is on the source side */
	if (channel->reg_ccr & DMA4_CCR_SEL_SRC_DST_SYNC(1))
		ti_sdma_write_4(sc, DMA4_CSF(ch), pktsize);
	else
		ti_sdma_write_4(sc, DMA4_CDF(ch), pktsize);

	/* - Set the destination element index increment CDEI[15:0] */
	ti_sdma_write_4(sc, DMA4_CDE(ch), 0x0001);

	/* Clear the status register (0x1FFE covers the event/error bits) */
	ti_sdma_write_4(sc, DMA4_CSR(ch), 0x1FFE);

	/* Write the start-bit and away we go */
	/* bit 7 of CCR is the channel enable — presumably DMA4_CCR_ENABLE;
	 * confirm against ti_sdmareg.h */
	ccr = ti_sdma_read_4(sc, DMA4_CCR(ch));
	ccr |= (1 << 7);
	ti_sdma_write_4(sc, DMA4_CCR(ch), ccr);

	/* Config registers are now in sync with the cached copies */
	channel->need_reg_write = 0;

	TI_SDMA_UNLOCK(sc);

	return (0);
}
786
787/**
788 *	ti_sdma_stop_xfer - stops any currently active transfers
789 *	@ch: the channel number to set the endianness of
790 *
791 *	This function call is effectively a NOP if no transaction is in progress.
792 *
793 *	LOCKING:
794 *	DMA registers protected by internal mutex
795 *
796 *	RETURNS:
 *	0 on success, otherwise an error code
798 */
799int
800ti_sdma_stop_xfer(unsigned int ch)
801{
802	struct ti_sdma_softc *sc = ti_sdma_sc;
803	unsigned int j;
804
805	/* Sanity check */
806	if (sc == NULL)
807		return (ENOMEM);
808
809	TI_SDMA_LOCK(sc);
810
811	if ((sc->sc_active_channels & (1 << ch)) == 0) {
812		TI_SDMA_UNLOCK(sc);
813		return (EINVAL);
814	}
815
816	/* Disable all DMA interrupts for the channel. */
817	ti_sdma_write_4(sc, DMA4_CICR(ch), 0);
818
819	/* Make sure the DMA transfer is stopped. */
820	ti_sdma_write_4(sc, DMA4_CCR(ch), 0);
821
822	/* Clear the CSR register and IRQ status register */
823	ti_sdma_write_4(sc, DMA4_CSR(ch), DMA4_CSR_CLEAR_MASK);
824	for (j = 0; j < NUM_DMA_IRQS; j++) {
825		ti_sdma_write_4(sc, DMA4_IRQSTATUS_L(j), (1 << ch));
826	}
827
828	/* Configuration registers need to be re-written on the next xfer */
829	sc->sc_channel[ch].need_reg_write = 1;
830
831	TI_SDMA_UNLOCK(sc);
832
833	return (0);
834}
835
836/**
837 *	ti_sdma_set_xfer_endianess - sets the endianness of subsequent transfers
838 *	@ch: the channel number to set the endianness of
839 *	@src: the source endianness (either DMA_ENDIAN_LITTLE or DMA_ENDIAN_BIG)
840 *	@dst: the destination endianness (either DMA_ENDIAN_LITTLE or DMA_ENDIAN_BIG)
841 *
842 *
843 *	LOCKING:
844 *	DMA registers protected by internal mutex
845 *
846 *	RETURNS:
 *	0 on success, otherwise an error code
848 */
849int
850ti_sdma_set_xfer_endianess(unsigned int ch, unsigned int src, unsigned int dst)
851{
852	struct ti_sdma_softc *sc = ti_sdma_sc;
853
854	/* Sanity check */
855	if (sc == NULL)
856		return (ENOMEM);
857
858	TI_SDMA_LOCK(sc);
859
860	if ((sc->sc_active_channels & (1 << ch)) == 0) {
861		TI_SDMA_UNLOCK(sc);
862		return (EINVAL);
863	}
864
865	sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_SRC_ENDIANISM(1);
866	sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_SRC_ENDIANISM(src);
867
868	sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_DST_ENDIANISM(1);
869	sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_DST_ENDIANISM(dst);
870
871	sc->sc_channel[ch].need_reg_write = 1;
872
873	TI_SDMA_UNLOCK(sc);
874
875	return 0;
876}
877
878/**
879 *	ti_sdma_set_xfer_burst - sets the source and destination element size
880 *	@ch: the channel number to set the burst settings of
 *	@src: the source burst mode (either DMA_BURST_NONE, DMA_BURST_16, DMA_BURST_32
 *	      or DMA_BURST_64)
 *	@dst: the destination burst mode (either DMA_BURST_NONE, DMA_BURST_16,
 *	      DMA_BURST_32 or DMA_BURST_64)
885 *
886 *	This function sets the size of the elements for all subsequent transfers.
887 *
888 *	LOCKING:
889 *	DMA registers protected by internal mutex
890 *
891 *	RETURNS:
 *	0 on success, otherwise an error code
893 */
894int
895ti_sdma_set_xfer_burst(unsigned int ch, unsigned int src, unsigned int dst)
896{
897	struct ti_sdma_softc *sc = ti_sdma_sc;
898
899	/* Sanity check */
900	if (sc == NULL)
901		return (ENOMEM);
902
903	TI_SDMA_LOCK(sc);
904
905	if ((sc->sc_active_channels & (1 << ch)) == 0) {
906		TI_SDMA_UNLOCK(sc);
907		return (EINVAL);
908	}
909
910	sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_SRC_BURST_MODE(0x3);
911	sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_SRC_BURST_MODE(src);
912
913	sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_DST_BURST_MODE(0x3);
914	sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_DST_BURST_MODE(dst);
915
916	sc->sc_channel[ch].need_reg_write = 1;
917
918	TI_SDMA_UNLOCK(sc);
919
920	return 0;
921}
922
923/**
 *	ti_sdma_set_xfer_data_type - sets the element data type for transfers
925 *	@ch: the channel number to set the endianness of
926 *	@type: the xfer data type (either DMA_DATA_8BITS_SCALAR, DMA_DATA_16BITS_SCALAR
927 *	       or DMA_DATA_32BITS_SCALAR)
928 *
929 *
930 *	LOCKING:
931 *	DMA registers protected by internal mutex
932 *
933 *	RETURNS:
 *	0 on success, otherwise an error code
935 */
936int
937ti_sdma_set_xfer_data_type(unsigned int ch, unsigned int type)
938{
939	struct ti_sdma_softc *sc = ti_sdma_sc;
940
941	/* Sanity check */
942	if (sc == NULL)
943		return (ENOMEM);
944
945	TI_SDMA_LOCK(sc);
946
947	if ((sc->sc_active_channels & (1 << ch)) == 0) {
948		TI_SDMA_UNLOCK(sc);
949		return (EINVAL);
950	}
951
952	sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_DATA_TYPE(0x3);
953	sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_DATA_TYPE(type);
954
955	sc->sc_channel[ch].need_reg_write = 1;
956
957	TI_SDMA_UNLOCK(sc);
958
959	return 0;
960}
961
962/**
 *	ti_sdma_set_callback - sets the interrupt callback for a channel
 *	@ch: the channel number to set the callback on
965 *
966 *
967 *
968 *	LOCKING:
969 *	DMA registers protected by internal mutex
970 *
971 *	RETURNS:
 *	0 on success, otherwise an error code
973 */
974int
975ti_sdma_set_callback(unsigned int ch,
976                      void (*callback)(unsigned int ch, uint32_t status, void *data),
977                      void *data)
978{
979	struct ti_sdma_softc *sc = ti_sdma_sc;
980
981	/* Sanity check */
982	if (sc == NULL)
983		return (ENOMEM);
984
985	TI_SDMA_LOCK(sc);
986
987	if ((sc->sc_active_channels & (1 << ch)) == 0) {
988		TI_SDMA_UNLOCK(sc);
989		return (EINVAL);
990	}
991
992	sc->sc_channel[ch].callback = callback;
993	sc->sc_channel[ch].callback_data = data;
994
995	sc->sc_channel[ch].need_reg_write = 1;
996
997	TI_SDMA_UNLOCK(sc);
998
999	return 0;
1000}
1001
1002/**
1003 *	ti_sdma_sync_params - sets channel sync settings
1004 *	@ch: the channel number to set the sync on
1005 *	@trigger: the number of the sync trigger, this depends on what other H/W
1006 *	          module is triggering/receiving the DMA transactions
1007 *	@mode: flags describing the sync mode to use, it may have one or more of
1008 *	          the following bits set; TI_SDMA_SYNC_FRAME,
1009 *	          TI_SDMA_SYNC_BLOCK, TI_SDMA_SYNC_TRIG_ON_SRC.
1010 *
1011 *
1012 *
1013 *	LOCKING:
1014 *	DMA registers protected by internal mutex
1015 *
1016 *	RETURNS:
 *	0 on success, otherwise an error code
1018 */
1019int
1020ti_sdma_sync_params(unsigned int ch, unsigned int trigger, unsigned int mode)
1021{
1022	struct ti_sdma_softc *sc = ti_sdma_sc;
1023	uint32_t ccr;
1024
1025	/* Sanity check */
1026	if (sc == NULL)
1027		return (ENOMEM);
1028
1029	TI_SDMA_LOCK(sc);
1030
1031	if ((sc->sc_active_channels & (1 << ch)) == 0) {
1032		TI_SDMA_UNLOCK(sc);
1033		return (EINVAL);
1034	}
1035
1036	ccr = sc->sc_channel[ch].reg_ccr;
1037
1038	ccr &= ~DMA4_CCR_SYNC_TRIGGER(0x7F);
1039	ccr |= DMA4_CCR_SYNC_TRIGGER(trigger + 1);
1040
1041	if (mode & TI_SDMA_SYNC_FRAME)
1042		ccr |= DMA4_CCR_FRAME_SYNC(1);
1043	else
1044		ccr &= ~DMA4_CCR_FRAME_SYNC(1);
1045
1046	if (mode & TI_SDMA_SYNC_BLOCK)
1047		ccr |= DMA4_CCR_BLOCK_SYNC(1);
1048	else
1049		ccr &= ~DMA4_CCR_BLOCK_SYNC(1);
1050
1051	if (mode & TI_SDMA_SYNC_TRIG_ON_SRC)
1052		ccr |= DMA4_CCR_SEL_SRC_DST_SYNC(1);
1053	else
1054		ccr &= ~DMA4_CCR_SEL_SRC_DST_SYNC(1);
1055
1056	sc->sc_channel[ch].reg_ccr = ccr;
1057
1058	sc->sc_channel[ch].need_reg_write = 1;
1059
1060	TI_SDMA_UNLOCK(sc);
1061
1062	return 0;
1063}
1064
1065/**
 *	ti_sdma_set_addr_mode - sets the source/destination addressing modes
1067 *	@ch: the channel number to set the endianness of
1068 *	@rd_mode: the xfer source addressing mode (either DMA_ADDR_CONSTANT,
1069 *	          DMA_ADDR_POST_INCREMENT, DMA_ADDR_SINGLE_INDEX or
1070 *	          DMA_ADDR_DOUBLE_INDEX)
1071 *	@wr_mode: the xfer destination addressing mode (either DMA_ADDR_CONSTANT,
1072 *	          DMA_ADDR_POST_INCREMENT, DMA_ADDR_SINGLE_INDEX or
1073 *	          DMA_ADDR_DOUBLE_INDEX)
1074 *
1075 *
1076 *	LOCKING:
1077 *	DMA registers protected by internal mutex
1078 *
1079 *	RETURNS:
 *	0 on success, otherwise an error code
1081 */
1082int
1083ti_sdma_set_addr_mode(unsigned int ch, unsigned int src_mode,
1084                       unsigned int dst_mode)
1085{
1086	struct ti_sdma_softc *sc = ti_sdma_sc;
1087	uint32_t ccr;
1088
1089	/* Sanity check */
1090	if (sc == NULL)
1091		return (ENOMEM);
1092
1093	TI_SDMA_LOCK(sc);
1094
1095	if ((sc->sc_active_channels & (1 << ch)) == 0) {
1096		TI_SDMA_UNLOCK(sc);
1097		return (EINVAL);
1098	}
1099
1100	ccr = sc->sc_channel[ch].reg_ccr;
1101
1102	ccr &= ~DMA4_CCR_SRC_ADDRESS_MODE(0x3);
1103	ccr |= DMA4_CCR_SRC_ADDRESS_MODE(src_mode);
1104
1105	ccr &= ~DMA4_CCR_DST_ADDRESS_MODE(0x3);
1106	ccr |= DMA4_CCR_DST_ADDRESS_MODE(dst_mode);
1107
1108	sc->sc_channel[ch].reg_ccr = ccr;
1109
1110	sc->sc_channel[ch].need_reg_write = 1;
1111
1112	TI_SDMA_UNLOCK(sc);
1113
1114	return 0;
1115}
1116
1117/**
1118 *	ti_sdma_probe - driver probe function
1119 *	@dev: dma device handle
1120 *
1121 *
1122 *
1123 *	RETURNS:
1124 *	Always returns 0.
1125 */
1126static int
1127ti_sdma_probe(device_t dev)
1128{
1129
1130	if (!ofw_bus_status_okay(dev))
1131		return (ENXIO);
1132
1133	if (!ofw_bus_is_compatible(dev, "ti,omap4430-sdma"))
1134		return (ENXIO);
1135
1136	device_set_desc(dev, "TI sDMA Controller");
1137	return (0);
1138}
1139
1140/**
1141 *	ti_sdma_attach - driver attach function
1142 *	@dev: dma device handle
1143 *
1144 *	Initialises memory mapping/pointers to the DMA register set and requests
1145 *	IRQs. This is effectively the setup function for the driver.
1146 *
1147 *	RETURNS:
1148 *	0 on success or a negative error code failure.
1149 */
1150static int
1151ti_sdma_attach(device_t dev)
1152{
1153	struct ti_sdma_softc *sc = device_get_softc(dev);
1154	unsigned int timeout;
1155	unsigned int i;
1156	int      rid;
1157	void    *ihl;
1158	int      err;
1159
1160	/* Setup the basics */
1161	sc->sc_dev = dev;
1162
1163	/* No channels active at the moment */
1164	sc->sc_active_channels = 0x00000000;
1165
1166	/* Mutex to protect the shared data structures */
1167	TI_SDMA_LOCK_INIT(sc);
1168
1169	/* Get the memory resource for the register mapping */
1170	rid = 0;
1171	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
1172	if (sc->sc_mem_res == NULL)
1173		panic("%s: Cannot map registers", device_get_name(dev));
1174
1175	/* Enable the interface and functional clocks */
1176	ti_sysc_clock_enable(device_get_parent(dev));
1177
1178	/* Read the sDMA revision register and sanity check it's known */
1179	sc->sc_hw_rev = ti_sdma_read_4(sc,
1180	    ti_sysc_get_rev_address_offset_host(device_get_parent(dev)));
1181	device_printf(dev, "sDMA revision %08x\n", sc->sc_hw_rev);
1182
1183	if (!ti_sdma_is_omap4_rev(sc) && !ti_sdma_is_omap3_rev(sc)) {
1184		device_printf(sc->sc_dev, "error - unknown sDMA H/W revision\n");
1185		return (EINVAL);
1186	}
1187
1188	/* Disable all interrupts */
1189	for (i = 0; i < NUM_DMA_IRQS; i++) {
1190		ti_sdma_write_4(sc, DMA4_IRQENABLE_L(i), 0x00000000);
1191	}
1192
1193	/* Soft-reset is only supported on pre-OMAP44xx devices */
1194	if (ti_sdma_is_omap3_rev(sc)) {
1195		/* Soft-reset */
1196		ti_sdma_write_4(sc, DMA4_OCP_SYSCONFIG, 0x0002);
1197
1198		/* Set the timeout to 100ms*/
1199		timeout = (hz < 10) ? 1 : ((100 * hz) / 1000);
1200
1201		/* Wait for DMA reset to complete */
1202		while ((ti_sdma_read_4(sc, DMA4_SYSSTATUS) & 0x1) == 0x0) {
1203			/* Sleep for a tick */
1204			pause("DMARESET", 1);
1205
1206			if (timeout-- == 0) {
1207				device_printf(sc->sc_dev, "sDMA reset operation timed out\n");
1208				return (EINVAL);
1209			}
1210		}
1211	}
1212
1213	/*
1214	 * Install interrupt handlers for the for possible interrupts. Any channel
1215	 * can trip one of the four IRQs
1216	 */
1217	rid = 0;
1218	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1219	    RF_ACTIVE | RF_SHAREABLE);
1220	if (sc->sc_irq_res == NULL)
1221		panic("Unable to setup the dma irq handler.\n");
1222
1223	err = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
1224	    NULL, ti_sdma_intr, NULL, &ihl);
1225	if (err)
1226		panic("%s: Cannot register IRQ", device_get_name(dev));
1227
1228	/* Store the DMA structure globally ... this driver should never be unloaded */
1229	ti_sdma_sc = sc;
1230
1231	return (0);
1232}
1233
1234static device_method_t ti_sdma_methods[] = {
1235	DEVMETHOD(device_probe, ti_sdma_probe),
1236	DEVMETHOD(device_attach, ti_sdma_attach),
1237	{0, 0},
1238};
1239
static driver_t ti_sdma_driver = {
	"ti_sdma",			/* device name */
	ti_sdma_methods,		/* device method table */
	sizeof(struct ti_sdma_softc),	/* per-instance softc size */
};
static devclass_t ti_sdma_devclass;

/* Attach below simplebus; ti_sysc must come up first (clock control) */
DRIVER_MODULE(ti_sdma, simplebus, ti_sdma_driver, ti_sdma_devclass, 0, 0);
MODULE_DEPEND(ti_sdma, ti_sysc, 1, 1, 1);