/*
 * bfin_dma_5xx.c - Blackfin DMA implementation
 *
 * Copyright 2004-2008 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/param.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/early_printk.h>

struct dma_channel dma_ch[MAX_DMA_CHANNELS];
EXPORT_SYMBOL(dma_ch);

static int __init blackfin_dma_init(void)
{
	int i;

	printk(KERN_INFO "Blackfin DMA Controller\n");

	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		atomic_set(&dma_ch[i].chan_status, 0);
		dma_ch[i].regs = dma_io_base_addr[i];
	}
	/* Mark MEMDMA Channel 0 as requested since we're using it internally */
	request_dma(CH_MEM_STREAM0_DEST, "Blackfin dma_memcpy");
	request_dma(CH_MEM_STREAM0_SRC, "Blackfin dma_memcpy");

#if defined(CONFIG_DEB_DMA_URGENT)
	bfin_write_EBIU_DDRQUE(bfin_read_EBIU_DDRQUE()
			 | DEB1_URGENT | DEB2_URGENT | DEB3_URGENT);
#endif

	return 0;
}
arch_initcall(blackfin_dma_init);

#ifdef CONFIG_PROC_FS
static int proc_dma_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < MAX_DMA_CHANNELS; ++i)
		if (dma_channel_active(i))
			seq_printf(m, "%2d: %s\n", i, dma_ch[i].device_id);

	return 0;
}

static int proc_dma_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_dma_show, NULL);
}

static const struct file_operations proc_dma_operations = {
	.open		= proc_dma_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init proc_dma_init(void)
{
	return proc_create("dma", 0, NULL, &proc_dma_operations) != NULL;
}
late_initcall(proc_dma_init);
#endif
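
/*
 * Illustrative /proc/dma output, given the "%2d: %s" format used by
 * proc_dma_show() above.  With only the two MEMDMA streams claimed in
 * blackfin_dma_init(), the listing would look roughly like this (the
 * channel numbers are hypothetical -- the CH_MEM_STREAM0_* values
 * differ between Blackfin parts):
 *
 *	12: Blackfin dma_memcpy
 *	13: Blackfin dma_memcpy
 */
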
/**
 * request_dma - request a DMA channel
 *
 * Request the specific DMA channel from the system if it's available.
 */
int request_dma(unsigned int channel, const char *device_id)
{
	pr_debug("request_dma() : BEGIN\n");

	if (device_id == NULL)
		printk(KERN_WARNING "request_dma(%u): no device_id given\n", channel);

#if defined(CONFIG_BF561) && ANOMALY_05000182
	if (channel >= CH_IMEM_STREAM0_DEST && channel <= CH_IMEM_STREAM1_DEST) {
		if (get_cclk() > 500000000) {
			printk(KERN_WARNING
			       "Request IMDMA failed due to ANOMALY 05000182\n");
			return -EFAULT;
		}
	}
#endif

	if (atomic_cmpxchg(&dma_ch[channel].chan_status, 0, 1)) {
		pr_debug("DMA CHANNEL IN USE\n");
		return -EBUSY;
	}

#ifdef CONFIG_BF54x
	if (channel >= CH_UART2_RX && channel <= CH_UART3_TX) {
		unsigned int per_map;
		per_map = dma_ch[channel].regs->peripheral_map & 0xFFF;
		if (strncmp(device_id, "BFIN_UART", 9) == 0)
			dma_ch[channel].regs->peripheral_map = per_map |
				((channel - CH_UART2_RX + 0xC) << 12);
		else
			dma_ch[channel].regs->peripheral_map = per_map |
				((channel - CH_UART2_RX + 0x6) << 12);
	}
#endif

	dma_ch[channel].device_id = device_id;
	dma_ch[channel].irq = 0;

	/* Note: a channel must be requested before any operation is
	 * performed on its descriptors or registers.
	 */
	pr_debug("request_dma() : END\n");
	return 0;
}
EXPORT_SYMBOL(request_dma);

int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data)
{
	int ret;
	unsigned int irq;

	BUG_ON(channel >= MAX_DMA_CHANNELS || !callback ||
	       !atomic_read(&dma_ch[channel].chan_status));

	irq = channel2irq(channel);
	ret = request_irq(irq, callback, 0, dma_ch[channel].device_id, data);
	if (ret)
		return ret;

	dma_ch[channel].irq = irq;
	dma_ch[channel].data = data;

	return 0;
}
EXPORT_SYMBOL(set_dma_callback);
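
/*
 * Usage sketch (not compiled): the expected call sequence for the
 * channel API above.  The channel (CH_SPORT0_RX) and the handler are
 * hypothetical; a real driver would substitute its own.  The callback
 * runs as a normal IRQ handler and should ack the channel's interrupt
 * status itself.
 */
#if 0
static irqreturn_t my_dma_handler(int irq, void *dev_id)
{
	clear_dma_irqstat(CH_SPORT0_RX);	/* ack DMA_DONE/DMA_ERR */
	return IRQ_HANDLED;
}

static int my_driver_setup(void)
{
	int ret = request_dma(CH_SPORT0_RX, "my_driver");
	if (ret)
		return ret;
	ret = set_dma_callback(CH_SPORT0_RX, my_dma_handler, NULL);
	if (ret)
		free_dma(CH_SPORT0_RX);
	return ret;
}
#endif
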
/**
 * clear_dma_buffer - clear DMA fifos for specified channel
 *
 * Set the Buffer Clear bit in the Configuration register of specific DMA
 * channel. This will stop the descriptor based DMA operation.
 */
static void clear_dma_buffer(unsigned int channel)
{
	dma_ch[channel].regs->cfg |= RESTART;
	SSYNC();
	dma_ch[channel].regs->cfg &= ~RESTART;
}

void free_dma(unsigned int channel)
{
	pr_debug("free_dma() : BEGIN\n");
	BUG_ON(channel >= MAX_DMA_CHANNELS ||
	       !atomic_read(&dma_ch[channel].chan_status));

	/* Halt the DMA */
	disable_dma(channel);
	clear_dma_buffer(channel);

	if (dma_ch[channel].irq)
		free_irq(dma_ch[channel].irq, dma_ch[channel].data);

	/* Clear the DMA Variable in the Channel */
	atomic_set(&dma_ch[channel].chan_status, 0);

	pr_debug("free_dma() : END\n");
}
EXPORT_SYMBOL(free_dma);

#ifdef CONFIG_PM
# ifndef MAX_DMA_SUSPEND_CHANNELS
# define MAX_DMA_SUSPEND_CHANNELS MAX_DMA_CHANNELS
# endif
int blackfin_dma_suspend(void)
{
	int i;

	for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
		if (dma_ch[i].regs->cfg & DMAEN) {
			printk(KERN_ERR "DMA Channel %d failed to suspend\n", i);
			return -EBUSY;
		}

		if (i < MAX_DMA_SUSPEND_CHANNELS)
			dma_ch[i].saved_peripheral_map = dma_ch[i].regs->peripheral_map;
	}

	return 0;
}

void blackfin_dma_resume(void)
{
	int i;

	for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
		dma_ch[i].regs->cfg = 0;

		if (i < MAX_DMA_SUSPEND_CHANNELS)
			dma_ch[i].regs->peripheral_map = dma_ch[i].saved_peripheral_map;
	}
}
#endif

/**
 * blackfin_dma_early_init - minimal DMA init
 *
 * Setup a few DMA registers so we can safely do DMA transfers early on in
 * the kernel booting process. Really this just means using dma_memcpy().
 */
void __init blackfin_dma_early_init(void)
{
	early_shadow_stamp();
	bfin_write_MDMA_S0_CONFIG(0);
	bfin_write_MDMA_S1_CONFIG(0);
}
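
/*
 * Boot-time usage sketch (not compiled): the early copy helpers below
 * queue work on the two MDMA streams and return immediately, so every
 * batch of early_dma_memcpy() calls must be closed out with
 * early_dma_memcpy_done().  The addresses and sizes here are
 * hypothetical, but must all be 4-byte aligned.
 */
#if 0
	blackfin_dma_early_init();
	early_dma_memcpy(dst1, src1, len1);	/* grabs MDMA stream 0 */
	early_dma_memcpy(dst2, src2, len2);	/* may fall over to stream 1 */
	early_dma_memcpy_done();		/* busy-wait for both */
#endif
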
void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size)
{
	unsigned long dst = (unsigned long)pdst;
	unsigned long src = (unsigned long)psrc;
	struct dma_register *dst_ch, *src_ch;

	early_shadow_stamp();

	/* We assume that everything is 4 byte aligned, so include
	 * a basic sanity check
	 */
	BUG_ON(dst % 4);
	BUG_ON(src % 4);
	BUG_ON(size % 4);

	src_ch = 0;
	/* Find an available memDMA channel */
	while (1) {
		if (src_ch == (struct dma_register *)MDMA_S0_NEXT_DESC_PTR) {
			dst_ch = (struct dma_register *)MDMA_D1_NEXT_DESC_PTR;
			src_ch = (struct dma_register *)MDMA_S1_NEXT_DESC_PTR;
		} else {
			dst_ch = (struct dma_register *)MDMA_D0_NEXT_DESC_PTR;
			src_ch = (struct dma_register *)MDMA_S0_NEXT_DESC_PTR;
		}

		if (!bfin_read16(&src_ch->cfg))
			break;
		else if (bfin_read16(&dst_ch->irq_status) & DMA_DONE) {
			bfin_write16(&src_ch->cfg, 0);
			break;
		}
	}

	/* Force a sync in case a previous config reset on this channel
	 * occurred. This is needed so subsequent writes to DMA registers
	 * are not spuriously lost/corrupted.
	 */
	__builtin_bfin_ssync();

	/* Destination */
	bfin_write32(&dst_ch->start_addr, dst);
	bfin_write16(&dst_ch->x_count, size >> 2);
	bfin_write16(&dst_ch->x_modify, 1 << 2);
	bfin_write16(&dst_ch->irq_status, DMA_DONE | DMA_ERR);

	/* Source */
	bfin_write32(&src_ch->start_addr, src);
	bfin_write16(&src_ch->x_count, size >> 2);
	bfin_write16(&src_ch->x_modify, 1 << 2);
	bfin_write16(&src_ch->irq_status, DMA_DONE | DMA_ERR);

	/* Enable */
	bfin_write16(&src_ch->cfg, DMAEN | WDSIZE_32);
	bfin_write16(&dst_ch->cfg, WNR | DI_EN | DMAEN | WDSIZE_32);

	__builtin_bfin_ssync();
}

void __init early_dma_memcpy_done(void)
{
	early_shadow_stamp();

	while ((bfin_read_MDMA_S0_CONFIG() && !(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) ||
	       (bfin_read_MDMA_S1_CONFIG() && !(bfin_read_MDMA_D1_IRQ_STATUS() & DMA_DONE)))
		continue;

	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
	bfin_write_MDMA_D1_IRQ_STATUS(DMA_DONE | DMA_ERR);
	/*
	 * Now that DMA is done, we would normally flush cache, but
	 * i/d cache isn't running this early, so we don't bother,
	 * and just clear out the DMA channel for next time
	 */
	bfin_write_MDMA_S0_CONFIG(0);
	bfin_write_MDMA_S1_CONFIG(0);
	bfin_write_MDMA_D0_CONFIG(0);
	bfin_write_MDMA_D1_CONFIG(0);

	__builtin_bfin_ssync();
}

/**
 * __dma_memcpy - program the MDMA registers
 *
 * Actually program MDMA0 and wait for the transfer to finish. Disable IRQs
 * while programming registers so that everything is fully configured. Wait
 * for DMA to finish with IRQs enabled. If interrupted, the initial DMA_DONE
 * check will make sure we don't clobber any existing transfer.
 */
static void __dma_memcpy(u32 daddr, s16 dmod, u32 saddr, s16 smod, size_t cnt, u32 conf)
{
	static DEFINE_SPINLOCK(mdma_lock);
	unsigned long flags;

	spin_lock_irqsave(&mdma_lock, flags);

	/* Force a sync in case a previous config reset on this channel
	 * occurred. This is needed so subsequent writes to DMA registers
	 * are not spuriously lost/corrupted. Do it under irq lock and
	 * without the anomaly version (because we are atomic already).
	 */
	__builtin_bfin_ssync();

	if (bfin_read_MDMA_S0_CONFIG())
		while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
			continue;

	if (conf & DMA2D) {
		/* For larger bit sizes, we've already divided down cnt so it
		 * is no longer a multiple of 64k. So we have to break down
		 * the limit here so it is a multiple of the incoming size.
		 * There is no limitation here in terms of total size other
		 * than the hardware though as the bits lost in the shift are
		 * made up by MODIFY (== we can hit the whole address space).
		 * X: (2^(16 - 0)) * 1 == (2^(16 - 1)) * 2 == (2^(16 - 2)) * 4
		 */
		u32 shift = abs(dmod) >> 1;
		size_t ycnt = cnt >> (16 - shift);
		cnt = 1 << (16 - shift);
		bfin_write_MDMA_D0_Y_COUNT(ycnt);
		bfin_write_MDMA_S0_Y_COUNT(ycnt);
		bfin_write_MDMA_D0_Y_MODIFY(dmod);
		bfin_write_MDMA_S0_Y_MODIFY(smod);
	}

	bfin_write_MDMA_D0_START_ADDR(daddr);
	bfin_write_MDMA_D0_X_COUNT(cnt);
	bfin_write_MDMA_D0_X_MODIFY(dmod);
	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);

	bfin_write_MDMA_S0_START_ADDR(saddr);
	bfin_write_MDMA_S0_X_COUNT(cnt);
	bfin_write_MDMA_S0_X_MODIFY(smod);
	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);

	bfin_write_MDMA_S0_CONFIG(DMAEN | conf);
	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | conf);

	spin_unlock_irqrestore(&mdma_lock, flags);

	SSYNC();

	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
		if (bfin_read_MDMA_S0_CONFIG())
			continue;
		else
			return;

	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);

	bfin_write_MDMA_S0_CONFIG(0);
	bfin_write_MDMA_D0_CONFIG(0);
}
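
/*
 * Worked example of the DMA2D carve-up in __dma_memcpy() above
 * (illustrative numbers): a 1 MiB (0x100000 byte) 32-bit copy arrives
 * as cnt = 0x40000 elements with dmod = 4.  That overflows the 16-bit
 * X_COUNT register, so with shift = abs(4) >> 1 = 2 the transfer is
 * reshaped to X_COUNT = 1 << (16 - 2) = 0x4000 and
 * Y_COUNT = 0x40000 >> 14 = 0x10; 0x4000 * 0x10 * 4 bytes covers the
 * full 0x100000.
 */
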
/**
 * _dma_memcpy - translate C memcpy settings into MDMA settings
 *
 * Handle all the high level steps before we touch the MDMA registers. So
 * handle direction, tweaking of sizes, and formatting of addresses.
 */
static void *_dma_memcpy(void *pdst, const void *psrc, size_t size)
{
	u32 conf, shift;
	s16 mod;
	unsigned long dst = (unsigned long)pdst;
	unsigned long src = (unsigned long)psrc;

	if (size == 0)
		return NULL;

	if (dst % 4 == 0 && src % 4 == 0 && size % 4 == 0) {
		conf = WDSIZE_32;
		shift = 2;
	} else if (dst % 2 == 0 && src % 2 == 0 && size % 2 == 0) {
		conf = WDSIZE_16;
		shift = 1;
	} else {
		conf = WDSIZE_8;
		shift = 0;
	}

	/* If the two memory regions have a chance of overlapping, make
	 * sure the memcpy still works as expected. Do this by having the
	 * copy run backwards instead.
	 */
	mod = 1 << shift;
	if (src < dst) {
		mod *= -1;
		dst += size + mod;
		src += size + mod;
	}
	size >>= shift;

	if (size > 0x10000)
		conf |= DMA2D;

	__dma_memcpy(dst, mod, src, mod, size, conf);

	return pdst;
}

/**
 * dma_memcpy - DMA memcpy under spinlock
 *
 * Do not check arguments before starting the DMA memcpy. Break the transfer
 * up into two pieces. The first transfer is in multiples of 64k and the
 * second transfer is the piece smaller than 64k.
 */
void *dma_memcpy(void *pdst, const void *psrc, size_t size)
{
	unsigned long dst = (unsigned long)pdst;
	unsigned long src = (unsigned long)psrc;

	if (bfin_addr_dcacheable(src))
		blackfin_dcache_flush_range(src, src + size);

	if (bfin_addr_dcacheable(dst))
		blackfin_dcache_invalidate_range(dst, dst + size);

	return dma_memcpy_nocache(pdst, psrc, size);
}
EXPORT_SYMBOL(dma_memcpy);
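
/*
 * Usage sketch (not compiled): dma_memcpy() keeps memcpy() semantics,
 * picking the MDMA word size from the alignment of both pointers and
 * the length, and copying backwards when the regions overlap with
 * src < dst.  The buffers here are hypothetical.
 */
#if 0
	dma_memcpy(dst_buf, src_buf, 0x10000);	/* 32-bit MDMA if all 4-byte aligned */
	dma_memcpy(buf + 1, buf, 0x1000);	/* overlap: runs backwards, 8-bit MDMA */
#endif
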
/**
 * dma_memcpy_nocache - DMA memcpy under spinlock
 * - No cache flush/invalidate
 *
 * Do not check arguments before starting the DMA memcpy. Break the transfer
 * up into two pieces. The first transfer is in multiples of 64k and the
 * second transfer is the piece smaller than 64k.
 */
void *dma_memcpy_nocache(void *pdst, const void *psrc, size_t size)
{
	size_t bulk, rest;

	bulk = size & ~0xffff;
	rest = size - bulk;
	if (bulk)
		_dma_memcpy(pdst, psrc, bulk);
	_dma_memcpy(pdst + bulk, psrc + bulk, rest);
	return pdst;
}
EXPORT_SYMBOL(dma_memcpy_nocache);

/**
 * safe_dma_memcpy - DMA memcpy w/argument checking
 *
 * Verify arguments are safe before heading to dma_memcpy().
 */
void *safe_dma_memcpy(void *dst, const void *src, size_t size)
{
	if (!access_ok(VERIFY_WRITE, dst, size))
		return NULL;
	if (!access_ok(VERIFY_READ, src, size))
		return NULL;
	return dma_memcpy(dst, src, size);
}
EXPORT_SYMBOL(safe_dma_memcpy);

static void _dma_out(unsigned long addr, unsigned long buf, unsigned short len,
		     u16 size, u16 dma_size)
{
	blackfin_dcache_flush_range(buf, buf + len * size);
	__dma_memcpy(addr, 0, buf, size, len, dma_size);
}

static void _dma_in(unsigned long addr, unsigned long buf, unsigned short len,
		    u16 size, u16 dma_size)
{
	blackfin_dcache_invalidate_range(buf, buf + len * size);
	__dma_memcpy(buf, size, addr, 0, len, dma_size);
}

#define MAKE_DMA_IO(io, bwl, isize, dmasize, cnst) \
void dma_##io##s##bwl(unsigned long addr, cnst void *buf, unsigned short len) \
{ \
	_dma_##io(addr, (unsigned long)buf, len, isize, WDSIZE_##dmasize); \
} \
EXPORT_SYMBOL(dma_##io##s##bwl)
MAKE_DMA_IO(out, b, 1,  8, const);
MAKE_DMA_IO(in,  b, 1,  8, );
MAKE_DMA_IO(out, w, 2, 16, const);
MAKE_DMA_IO(in,  w, 2, 16, );
MAKE_DMA_IO(out, l, 4, 32, const);
MAKE_DMA_IO(in,  l, 4, 32, );
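
/*
 * For reference, the first MAKE_DMA_IO() invocation above expands to
 * (whitespace added):
 *
 *	void dma_outsb(unsigned long addr, const void *buf,
 *		       unsigned short len)
 *	{
 *		_dma_out(addr, (unsigned long)buf, len, 1, WDSIZE_8);
 *	}
 *	EXPORT_SYMBOL(dma_outsb);
 *
 * i.e. the dma_outs{b,w,l}()/dma_ins{b,w,l}() family mirrors the
 * classic outs{b,w,l}()/ins{b,w,l}() string I/O helpers, but moves the
 * data with MDMA0 and handles the dcache around the transfer.
 */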