// SPDX-License-Identifier: GPL-2.0
/*
 * MIPS Jazz DMA controller support
 * Copyright (C) 1995, 1996 by Andreas Busse
 *
 * NOTE: Some of the argument checking could be removed when
 * things have settled down. Also, instead of returning 0xffffffff
 * on failure of vdma_alloc() one could leave page #0 unused
 * and return the more usual NULL pointer as logical address.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/dma-map-ops.h>
#include <asm/mipsregs.h>
#include <asm/jazz.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/dma.h>
#include <asm/jazzdma.h>

/*
 * Set this to one to enable additional vdma debug code.
 */
#define CONF_DEBUG_VDMA 0

static VDMA_PGTBL_ENTRY *pgtbl;

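/* vdma_lock serializes vdma_alloc()'s page-table search and updates */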
static DEFINE_SPINLOCK(vdma_lock);

/*
 * Debug stuff
 */
#define vdma_debug     ((CONF_DEBUG_VDMA) ? debuglvl : 0)

static int debuglvl = 3;

/*
 * Initialize the pagetable with a one-to-one mapping of
 * the first 16 Mbytes of main memory and declare all
 * entries to be unused. Using this method will at least
 * allow some early device driver operations to work.
 */
static inline void vdma_pgtbl_init(void)
{
	unsigned long paddr = 0;
	int i;

	for (i = 0; i < VDMA_PGTBL_ENTRIES; i++) {
		pgtbl[i].frame = paddr;
		pgtbl[i].owner = VDMA_PAGE_EMPTY;
		paddr += VDMA_PAGESIZE;
	}
}

/*
 * Initialize the Jazz R4030 dma controller
 */
static int __init vdma_init(void)
{
	/*
	 * Allocate 32k of memory for DMA page tables. This needs to be page
	 * aligned and should be uncached to avoid cache flushing after every
	 * update.
	 */
	pgtbl = (VDMA_PGTBL_ENTRY *)__get_free_pages(GFP_KERNEL | GFP_DMA,
						    get_order(VDMA_PGTBL_SIZE));
	BUG_ON(!pgtbl);
	dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
	pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);

	/*
	 * Clear the R4030 translation table
	 */
	vdma_pgtbl_init();

	r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
			  CPHYSADDR((unsigned long)pgtbl));
	r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
	r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);

	printk(KERN_INFO "VDMA: R4030 DMA pagetables initialized.\n");
	return 0;
}
arch_initcall(vdma_init);

/*
 * Allocate DMA pagetables using a simple first-fit algorithm
 */
unsigned long vdma_alloc(unsigned long paddr, unsigned long size)
{
	int first, last, pages, frame, i;
	unsigned long laddr, flags;

	/* check arguments */

	if (paddr > 0x1fffffff) {
		if (vdma_debug)
			printk("vdma_alloc: Invalid physical address: %08lx\n",
			       paddr);
		return DMA_MAPPING_ERROR;	/* invalid physical address */
	}
	if (size > 0x400000 || size == 0) {
		if (vdma_debug)
			printk("vdma_alloc: Invalid size: %08lx\n", size);
		return DMA_MAPPING_ERROR;	/* invalid size */
	}

	spin_lock_irqsave(&vdma_lock, flags);
	/*
	 * Find free chunk
	 */
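	/* Number of page-table entries needed to cover paddr..paddr + size */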
	pages = VDMA_PAGE(paddr + size) - VDMA_PAGE(paddr) + 1;
	first = 0;
	while (1) {
		while (first < VDMA_PGTBL_ENTRIES &&
		       pgtbl[first].owner != VDMA_PAGE_EMPTY)
			first++;
		if (first + pages > VDMA_PGTBL_ENTRIES) {	/* nothing free */
			spin_unlock_irqrestore(&vdma_lock, flags);
			return DMA_MAPPING_ERROR;
		}

		last = first + 1;
		while (pgtbl[last].owner == VDMA_PAGE_EMPTY
		       && last - first < pages)
			last++;

		if (last - first == pages)
			break;	/* found */
		first = last + 1;
	}

	/*
	 * Mark pages as allocated
	 */
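	/*
	 * The logical address is the page-table index scaled by the 4 KB
	 * page size (the << 12 assumes VDMA_PAGESIZE == 4096) plus the
	 * offset within the first page.
	 */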
	laddr = (first << 12) + (paddr & (VDMA_PAGESIZE - 1));
	frame = paddr & ~(VDMA_PAGESIZE - 1);

	for (i = first; i < last; i++) {
		pgtbl[i].frame = frame;
		pgtbl[i].owner = laddr;
		frame += VDMA_PAGESIZE;
	}

	/*
	 * Update translation table and return logical start address
	 */
	r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);

	if (vdma_debug > 1)
		printk("vdma_alloc: Allocated %d pages starting from %08lx\n",
		       pages, laddr);

	if (vdma_debug > 2) {
		printk("LADDR: ");
		for (i = first; i < last; i++)
			printk("%08x ", i << 12);
		printk("\nPADDR: ");
		for (i = first; i < last; i++)
			printk("%08x ", pgtbl[i].frame);
		printk("\nOWNER: ");
		for (i = first; i < last; i++)
			printk("%08x ", pgtbl[i].owner);
		printk("\n");
	}

	spin_unlock_irqrestore(&vdma_lock, flags);

	return laddr;
}

EXPORT_SYMBOL(vdma_alloc);
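
/*
 * Illustrative sketch (not in the original file): roughly how a driver
 * could combine the vdma_* helpers for a single transfer. 'channel',
 * 'buf' and 'len' are hypothetical, and error handling is abbreviated:
 *
 *	unsigned long laddr = vdma_alloc(CPHYSADDR(buf), len);
 *
 *	if (laddr == DMA_MAPPING_ERROR)
 *		return -ENOMEM;
 *	vdma_set_mode(channel, DMA_MODE_READ);
 *	vdma_set_addr(channel, laddr);
 *	vdma_set_count(channel, len);
 *	vdma_enable(channel);
 *	... wait for the device to signal completion ...
 *	vdma_disable(channel);
 *	vdma_free(laddr);
 */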

/*
 * Free previously allocated DMA translation pages.
 * Note that this does NOT change the translation table,
 * it just marks the freed pages as unused!
 */
int vdma_free(unsigned long laddr)
{
	int i;

	i = laddr >> 12;

	if (pgtbl[i].owner != laddr) {
		printk("vdma_free: trying to free other's dma pages, laddr=%8lx\n",
		       laddr);
		return -1;
	}

	while (i < VDMA_PGTBL_ENTRIES && pgtbl[i].owner == laddr) {
		pgtbl[i].owner = VDMA_PAGE_EMPTY;
		i++;
	}

	if (vdma_debug > 1)
		printk("vdma_free: freed %ld pages starting from %08lx\n",
		       i - (laddr >> 12), laddr);

	return 0;
}

EXPORT_SYMBOL(vdma_free);

/*
 * Translate a physical address to a logical address.
 * This will return the logical address of the first
 * match.
 */
unsigned long vdma_phys2log(unsigned long paddr)
{
	int i;
	int frame;

	frame = paddr & ~(VDMA_PAGESIZE - 1);

	for (i = 0; i < VDMA_PGTBL_ENTRIES; i++) {
		if (pgtbl[i].frame == frame)
			break;
	}

	if (i == VDMA_PGTBL_ENTRIES)
		return ~0UL;

	return (i << 12) + (paddr & (VDMA_PAGESIZE - 1));
}

EXPORT_SYMBOL(vdma_phys2log);

/*
 * Translate a logical DMA address to a physical address
 */
unsigned long vdma_log2phys(unsigned long laddr)
{
	return pgtbl[laddr >> 12].frame + (laddr & (VDMA_PAGESIZE - 1));
}

EXPORT_SYMBOL(vdma_log2phys);

/*
 * Print DMA statistics
 */
void vdma_stats(void)
{
	int i;

	printk("vdma_stats: CONFIG: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_CONFIG));
	printk("R4030 translation table base: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_TRSTBL_BASE));
	printk("R4030 translation table limit: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_TRSTBL_LIM));
	printk("vdma_stats: INV_ADDR: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_INV_ADDR));
	printk("vdma_stats: R_FAIL_ADDR: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_R_FAIL_ADDR));
	printk("vdma_stats: M_FAIL_ADDR: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_M_FAIL_ADDR));
	printk("vdma_stats: IRQ_SOURCE: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_IRQ_SOURCE));
	printk("vdma_stats: I386_ERROR: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_I386_ERROR));
	printk("vdma_chnl_modes:   ");
	for (i = 0; i < 8; i++)
		printk("%04x ",
		       (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_MODE +
						   (i << 5)));
	printk("\n");
	printk("vdma_chnl_enables: ");
	for (i = 0; i < 8; i++)
		printk("%04x ",
		       (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
						   (i << 5)));
	printk("\n");
}

/*
 * DMA transfer functions
 */

/*
 * Enable a DMA channel. Also clear any error conditions.
 */
void vdma_enable(int channel)
{
	int status;

	if (vdma_debug)
		printk("vdma_enable: channel %d\n", channel);

	/*
	 * Check error conditions first
	 */
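	/*
	 * The 0x400 and 0x200 bits are the address-error and memory-error
	 * flags (they appear to correspond to R4030_ADDR_INTR and
	 * R4030_MEM_INTR, which are cleared below).
	 */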
	status = r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5));
	if (status & 0x400)
		printk("VDMA: Channel %d: Address error!\n", channel);
	if (status & 0x200)
		printk("VDMA: Channel %d: Memory error!\n", channel);

	/*
	 * Clear all interrupt flags
	 */
	r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
			  r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
					   (channel << 5)) | R4030_TC_INTR
			  | R4030_MEM_INTR | R4030_ADDR_INTR);

	/*
	 * Enable the desired channel
	 */
	r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
			  r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
					   (channel << 5)) |
			  R4030_CHNL_ENABLE);
}

EXPORT_SYMBOL(vdma_enable);

/*
 * Disable a DMA channel
 */
void vdma_disable(int channel)
{
	if (vdma_debug) {
		int status =
		    r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
				     (channel << 5));

		printk("vdma_disable: channel %d\n", channel);
		printk("VDMA: channel %d status: %04x (%s) mode: "
		       "%02x addr: %06x count: %06x\n",
		       channel, status,
		       ((status & 0x600) ? "ERROR" : "OK"),
		       (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_MODE +
						   (channel << 5)),
		       (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_ADDR +
						   (channel << 5)),
		       (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_COUNT +
						   (channel << 5)));
	}

	r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
			  r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
					   (channel << 5)) &
			  ~R4030_CHNL_ENABLE);

	/*
	 * After disabling a DMA channel a remote bus register should be
	 * read to ensure that the current DMA acknowledge cycle is completed.
	 */
	*((volatile unsigned int *) JAZZ_DUMMY_DEVICE);
}

EXPORT_SYMBOL(vdma_disable);

/*
 * Set DMA mode. This function accepts the mode values used
 * to set a PC-style DMA controller. For the SCSI and FDC
 * channels, we also set the default modes each time we're
 * called.
 * NOTE: The FAST and BURST dma modes are supported by the
 * R4030 Rev. 2 and PICA chipsets only. I leave them disabled
 * for now.
 */
void vdma_set_mode(int channel, int mode)
{
	if (vdma_debug)
		printk("vdma_set_mode: channel %d, mode 0x%x\n", channel,
		       mode);

	switch (channel) {
	case JAZZ_SCSI_DMA:	/* scsi */
		r4030_write_reg32(JAZZ_R4030_CHNL_MODE + (channel << 5),
/*			  R4030_MODE_FAST | */
/*			  R4030_MODE_BURST | */
				  R4030_MODE_INTR_EN |
				  R4030_MODE_WIDTH_16 |
				  R4030_MODE_ATIME_80);
		break;

	case JAZZ_FLOPPY_DMA:	/* floppy */
		r4030_write_reg32(JAZZ_R4030_CHNL_MODE + (channel << 5),
/*			  R4030_MODE_FAST | */
/*			  R4030_MODE_BURST | */
				  R4030_MODE_INTR_EN |
				  R4030_MODE_WIDTH_8 |
				  R4030_MODE_ATIME_120);
		break;

	case JAZZ_AUDIOL_DMA:
	case JAZZ_AUDIOR_DMA:
		printk("VDMA: Audio DMA not supported yet.\n");
		break;

	default:
		printk("VDMA: vdma_set_mode() called with unsupported channel %d!\n",
		       channel);
	}

	switch (mode) {
	case DMA_MODE_READ:
		r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
				  r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
						   (channel << 5)) &
				  ~R4030_CHNL_WRITE);
		break;

	case DMA_MODE_WRITE:
		r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
				  r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
						   (channel << 5)) |
				  R4030_CHNL_WRITE);
		break;

	default:
		printk("VDMA: vdma_set_mode() called with unknown dma mode 0x%x\n",
		       mode);
	}
}

EXPORT_SYMBOL(vdma_set_mode);

/*
 * Set Transfer Address
 */
void vdma_set_addr(int channel, long addr)
{
	if (vdma_debug)
		printk("vdma_set_addr: channel %d, addr %lx\n", channel,
		       addr);

	r4030_write_reg32(JAZZ_R4030_CHNL_ADDR + (channel << 5), addr);
}

EXPORT_SYMBOL(vdma_set_addr);

/*
 * Set Transfer Count
 */
void vdma_set_count(int channel, int count)
{
	if (vdma_debug)
		printk("vdma_set_count: channel %d, count %08x\n", channel,
		       (unsigned) count);

	r4030_write_reg32(JAZZ_R4030_CHNL_COUNT + (channel << 5), count);
}

EXPORT_SYMBOL(vdma_set_count);

/*
 * Get Residual
 */
int vdma_get_residue(int channel)
{
	int residual;

	residual = r4030_read_reg32(JAZZ_R4030_CHNL_COUNT + (channel << 5));

	if (vdma_debug)
		printk("vdma_get_residue: channel %d: residual=%d\n",
		       channel, residual);

	return residual;
}

/*
 * Get DMA channel enable register
 */
int vdma_get_enable(int channel)
{
	int enable;

	enable = r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5));

	if (vdma_debug)
		printk("vdma_get_enable: channel %d: enable=%d\n", channel,
		       enable);

	return enable;
}

static void *jazz_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	void *ret;

	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	size = PAGE_ALIGN(size);
	page = alloc_pages(gfp, get_order(size));
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = vdma_alloc(virt_to_phys(ret), size);
	if (*dma_handle == DMA_MAPPING_ERROR)
		goto out_free_pages;
	arch_dma_prep_coherent(page, size);
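	/* Hand back an uncached (UNCAC_BASE) alias so CPU accesses bypass the cache */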
	return (void *)(UNCAC_BASE + __pa(ret));

out_free_pages:
	__free_pages(page, get_order(size));
	return NULL;
}

static void jazz_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	vdma_free(dma_handle);
	__free_pages(virt_to_page(vaddr), get_order(size));
}

static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);
	return vdma_alloc(phys, size);
}

static void jazz_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_cpu(vdma_log2phys(dma_addr), size, dir);
	vdma_free(dma_addr);
}

static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nents, i) {
		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			arch_sync_dma_for_device(sg_phys(sg), sg->length,
				dir);
		sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			return -EIO;
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}

static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nents, i) {
		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
		vdma_free(sg->dma_address);
	}
}

static void jazz_dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	arch_sync_dma_for_device(vdma_log2phys(addr), size, dir);
}

static void jazz_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	arch_sync_dma_for_cpu(vdma_log2phys(addr), size, dir);
}

static void jazz_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}

static void jazz_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}

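/*
 * Note: nothing in this file installs these ops; on Jazz systems the
 * architecture code selects jazz_dma_ops as the platform dma_map_ops.
 */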
const struct dma_map_ops jazz_dma_ops = {
	.alloc			= jazz_dma_alloc,
	.free			= jazz_dma_free,
	.map_page		= jazz_dma_map_page,
	.unmap_page		= jazz_dma_unmap_page,
	.map_sg			= jazz_dma_map_sg,
	.unmap_sg		= jazz_dma_unmap_sg,
	.sync_single_for_cpu	= jazz_dma_sync_single_for_cpu,
	.sync_single_for_device	= jazz_dma_sync_single_for_device,
	.sync_sg_for_cpu	= jazz_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= jazz_dma_sync_sg_for_device,
	.mmap			= dma_common_mmap,
	.get_sgtable		= dma_common_get_sgtable,
	.alloc_pages		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
};
EXPORT_SYMBOL(jazz_dma_ops);