1// SPDX-License-Identifier: GPL-2.0 2/* 3 * Xilinx AXIS FIFO: interface to the Xilinx AXI-Stream FIFO IP core 4 * 5 * Copyright (C) 2018 Jacob Feder 6 * 7 * Authors: Jacob Feder <jacobsfeder@gmail.com> 8 * 9 * See Xilinx PG080 document for IP details 10 */ 11 12/* ---------------------------- 13 * includes 14 * ---------------------------- 15 */ 16 17#include <linux/kernel.h> 18#include <linux/of.h> 19#include <linux/platform_device.h> 20#include <linux/wait.h> 21#include <linux/mutex.h> 22#include <linux/device.h> 23#include <linux/cdev.h> 24#include <linux/init.h> 25#include <linux/module.h> 26#include <linux/slab.h> 27#include <linux/io.h> 28#include <linux/moduleparam.h> 29#include <linux/interrupt.h> 30#include <linux/param.h> 31#include <linux/fs.h> 32#include <linux/types.h> 33#include <linux/uaccess.h> 34#include <linux/jiffies.h> 35#include <linux/miscdevice.h> 36 37/* ---------------------------- 38 * driver parameters 39 * ---------------------------- 40 */ 41 42#define DRIVER_NAME "axis_fifo" 43 44#define READ_BUF_SIZE 128U /* read buffer length in words */ 45#define WRITE_BUF_SIZE 128U /* write buffer length in words */ 46 47/* ---------------------------- 48 * IP register offsets 49 * ---------------------------- 50 */ 51 52#define XLLF_ISR_OFFSET 0x00000000 /* Interrupt Status */ 53#define XLLF_IER_OFFSET 0x00000004 /* Interrupt Enable */ 54 55#define XLLF_TDFR_OFFSET 0x00000008 /* Transmit Reset */ 56#define XLLF_TDFV_OFFSET 0x0000000c /* Transmit Vacancy */ 57#define XLLF_TDFD_OFFSET 0x00000010 /* Transmit Data */ 58#define XLLF_TLR_OFFSET 0x00000014 /* Transmit Length */ 59 60#define XLLF_RDFR_OFFSET 0x00000018 /* Receive Reset */ 61#define XLLF_RDFO_OFFSET 0x0000001c /* Receive Occupancy */ 62#define XLLF_RDFD_OFFSET 0x00000020 /* Receive Data */ 63#define XLLF_RLR_OFFSET 0x00000024 /* Receive Length */ 64#define XLLF_SRR_OFFSET 0x00000028 /* Local Link Reset */ 65#define XLLF_TDR_OFFSET 0x0000002C /* Transmit Destination */ 66#define 
XLLF_RDR_OFFSET 0x00000030 /* Receive Destination */ 67 68/* ---------------------------- 69 * reset register masks 70 * ---------------------------- 71 */ 72 73#define XLLF_RDFR_RESET_MASK 0x000000a5 /* receive reset value */ 74#define XLLF_TDFR_RESET_MASK 0x000000a5 /* Transmit reset value */ 75#define XLLF_SRR_RESET_MASK 0x000000a5 /* Local Link reset value */ 76 77/* ---------------------------- 78 * interrupt masks 79 * ---------------------------- 80 */ 81 82#define XLLF_INT_RPURE_MASK 0x80000000 /* Receive under-read */ 83#define XLLF_INT_RPORE_MASK 0x40000000 /* Receive over-read */ 84#define XLLF_INT_RPUE_MASK 0x20000000 /* Receive underrun (empty) */ 85#define XLLF_INT_TPOE_MASK 0x10000000 /* Transmit overrun */ 86#define XLLF_INT_TC_MASK 0x08000000 /* Transmit complete */ 87#define XLLF_INT_RC_MASK 0x04000000 /* Receive complete */ 88#define XLLF_INT_TSE_MASK 0x02000000 /* Transmit length mismatch */ 89#define XLLF_INT_TRC_MASK 0x01000000 /* Transmit reset complete */ 90#define XLLF_INT_RRC_MASK 0x00800000 /* Receive reset complete */ 91#define XLLF_INT_TFPF_MASK 0x00400000 /* Tx FIFO Programmable Full */ 92#define XLLF_INT_TFPE_MASK 0x00200000 /* Tx FIFO Programmable Empty */ 93#define XLLF_INT_RFPF_MASK 0x00100000 /* Rx FIFO Programmable Full */ 94#define XLLF_INT_RFPE_MASK 0x00080000 /* Rx FIFO Programmable Empty */ 95#define XLLF_INT_ALL_MASK 0xfff80000 /* All the ints */ 96#define XLLF_INT_ERROR_MASK 0xf2000000 /* Error status ints */ 97#define XLLF_INT_RXERROR_MASK 0xe0000000 /* Receive Error status ints */ 98#define XLLF_INT_TXERROR_MASK 0x12000000 /* Transmit Error status ints */ 99 100/* ---------------------------- 101 * globals 102 * ---------------------------- 103 */ 104static long read_timeout = 1000; /* ms to wait before read() times out */ 105static long write_timeout = 1000; /* ms to wait before write() times out */ 106 107/* ---------------------------- 108 * module command-line arguments 109 * ---------------------------- 110 */ 111 
112module_param(read_timeout, long, 0444); 113MODULE_PARM_DESC(read_timeout, "ms to wait before blocking read() timing out; set to -1 for no timeout"); 114module_param(write_timeout, long, 0444); 115MODULE_PARM_DESC(write_timeout, "ms to wait before blocking write() timing out; set to -1 for no timeout"); 116 117/* ---------------------------- 118 * types 119 * ---------------------------- 120 */ 121 122struct axis_fifo { 123 int irq; /* interrupt */ 124 void __iomem *base_addr; /* kernel space memory */ 125 126 unsigned int rx_fifo_depth; /* max words in the receive fifo */ 127 unsigned int tx_fifo_depth; /* max words in the transmit fifo */ 128 int has_rx_fifo; /* whether the IP has the rx fifo enabled */ 129 int has_tx_fifo; /* whether the IP has the tx fifo enabled */ 130 131 wait_queue_head_t read_queue; /* wait queue for asynchronos read */ 132 struct mutex read_lock; /* lock for reading */ 133 wait_queue_head_t write_queue; /* wait queue for asynchronos write */ 134 struct mutex write_lock; /* lock for writing */ 135 unsigned int write_flags; /* write file flags */ 136 unsigned int read_flags; /* read file flags */ 137 138 struct device *dt_device; /* device created from the device tree */ 139 struct miscdevice miscdev; 140}; 141 142/* ---------------------------- 143 * sysfs entries 144 * ---------------------------- 145 */ 146 147static ssize_t sysfs_write(struct device *dev, const char *buf, 148 size_t count, unsigned int addr_offset) 149{ 150 struct axis_fifo *fifo = dev_get_drvdata(dev); 151 unsigned long tmp; 152 int rc; 153 154 rc = kstrtoul(buf, 0, &tmp); 155 if (rc < 0) 156 return rc; 157 158 iowrite32(tmp, fifo->base_addr + addr_offset); 159 160 return count; 161} 162 163static ssize_t sysfs_read(struct device *dev, char *buf, 164 unsigned int addr_offset) 165{ 166 struct axis_fifo *fifo = dev_get_drvdata(dev); 167 unsigned int read_val; 168 169 read_val = ioread32(fifo->base_addr + addr_offset); 170 return sysfs_emit(buf, "0x%x\n", read_val); 171} 
/* Per-register sysfs accessors; exported under the "ip_registers" group.
 * Each store writes the parsed value to one register, each show reads one.
 * See Xilinx PG080 for register semantics.
 */

static ssize_t isr_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_ISR_OFFSET);
}

static ssize_t isr_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_ISR_OFFSET);
}

static DEVICE_ATTR_RW(isr);

static ssize_t ier_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_IER_OFFSET);
}

static ssize_t ier_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_IER_OFFSET);
}

static DEVICE_ATTR_RW(ier);

static ssize_t tdfr_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_TDFR_OFFSET);
}

static DEVICE_ATTR_WO(tdfr);

static ssize_t tdfv_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_TDFV_OFFSET);
}

static DEVICE_ATTR_RO(tdfv);

static ssize_t tdfd_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_TDFD_OFFSET);
}

static DEVICE_ATTR_WO(tdfd);

static ssize_t tlr_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_TLR_OFFSET);
}

static DEVICE_ATTR_WO(tlr);

static ssize_t rdfr_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_RDFR_OFFSET);
}

static DEVICE_ATTR_WO(rdfr);

static ssize_t rdfo_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_RDFO_OFFSET);
}

static DEVICE_ATTR_RO(rdfo);

static ssize_t rdfd_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_RDFD_OFFSET);
}

static DEVICE_ATTR_RO(rdfd);

static ssize_t rlr_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_RLR_OFFSET);
}

static DEVICE_ATTR_RO(rlr);

static ssize_t srr_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_SRR_OFFSET);
}

static DEVICE_ATTR_WO(srr);

static ssize_t tdr_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return sysfs_write(dev, buf, count, XLLF_TDR_OFFSET);
}

static DEVICE_ATTR_WO(tdr);

static ssize_t rdr_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sysfs_read(dev, buf, XLLF_RDR_OFFSET);
}

static DEVICE_ATTR_RO(rdr);

/* All register attributes, published via miscdev.groups in probe */
static struct attribute *axis_fifo_attrs[] = {
	&dev_attr_isr.attr,
	&dev_attr_ier.attr,
	&dev_attr_tdfr.attr,
	&dev_attr_tdfv.attr,
	&dev_attr_tdfd.attr,
	&dev_attr_tlr.attr,
	&dev_attr_rdfr.attr,
	&dev_attr_rdfo.attr,
	&dev_attr_rdfd.attr,
	&dev_attr_rlr.attr,
	&dev_attr_srr.attr,
	&dev_attr_tdr.attr,
	&dev_attr_rdr.attr,
	NULL,
};

static const struct attribute_group axis_fifo_attrs_group = {
	.name = "ip_registers",
	.attrs = axis_fifo_attrs,
};

static const struct attribute_group *axis_fifo_attrs_groups[] = {
	&axis_fifo_attrs_group,
	NULL,
};

/* ----------------------------
 * implementation
 * ----------------------------
 */

/* Fully reset the IP (local link, tx and rx paths), re-enable the
 * interrupts this driver services, and clear any stale interrupt status.
 * Used at probe time and as the recovery path after fatal fifo errors.
 */
static void reset_ip_core(struct axis_fifo *fifo)
{
	iowrite32(XLLF_SRR_RESET_MASK, fifo->base_addr + XLLF_SRR_OFFSET);
	iowrite32(XLLF_TDFR_RESET_MASK, fifo->base_addr + XLLF_TDFR_OFFSET);
	iowrite32(XLLF_RDFR_RESET_MASK, fifo->base_addr + XLLF_RDFR_OFFSET);
	/* enable completion and error interrupts only; the programmable
	 * full/empty and reset-complete interrupts stay masked
	 */
	iowrite32(XLLF_INT_TC_MASK | XLLF_INT_RC_MASK | XLLF_INT_RPURE_MASK |
		  XLLF_INT_RPORE_MASK | XLLF_INT_RPUE_MASK |
		  XLLF_INT_TPOE_MASK | XLLF_INT_TSE_MASK,
		  fifo->base_addr + XLLF_IER_OFFSET);
	/* write-1-to-clear all pending status bits */
	iowrite32(XLLF_INT_ALL_MASK, fifo->base_addr + XLLF_ISR_OFFSET);
}

/**
 * axis_fifo_read() - Read a packet from AXIS-FIFO character device.
 * @f: Open file.
 * @buf: User space buffer to read to.
 * @len: User space buffer length.
 * @off: Buffer offset.
 *
 * As defined by the device's documentation, we need to check the device's
 * occupancy before reading the length register and then the data. All these
 * operations must be executed atomically, in order and one after the other
 * without missing any.
 *
 * Returns the number of bytes read from the device or negative error code
 * on failure.
 */
static ssize_t axis_fifo_read(struct file *f, char __user *buf,
			      size_t len, loff_t *off)
{
	struct axis_fifo *fifo = (struct axis_fifo *)f->private_data;
	size_t bytes_available;
	unsigned int words_available;
	unsigned int copied;
	unsigned int copy;
	unsigned int i;
	int ret;
	u32 tmp_buf[READ_BUF_SIZE];

	if (fifo->read_flags & O_NONBLOCK) {
		/*
		 * Device opened in non-blocking mode. Try to lock it and then
		 * check if any packet is available.
		 */
		if (!mutex_trylock(&fifo->read_lock))
			return -EAGAIN;

		/* RDFO == 0 means no complete packet has been received */
		if (!ioread32(fifo->base_addr + XLLF_RDFO_OFFSET)) {
			ret = -EAGAIN;
			goto end_unlock;
		}
	} else {
		/* opened in blocking mode
		 * wait for a packet available interrupt (or timeout)
		 * if nothing is currently available
		 */
		mutex_lock(&fifo->read_lock);
		/* read_timeout was converted to jiffies (or
		 * MAX_SCHEDULE_TIMEOUT) in axis_fifo_init()
		 */
		ret = wait_event_interruptible_timeout(fifo->read_queue,
			ioread32(fifo->base_addr + XLLF_RDFO_OFFSET),
			read_timeout);

		if (ret <= 0) {
			/* 0 = timed out; -ERESTARTSYS = interrupted by a
			 * signal and is passed through for syscall restart
			 */
			if (ret == 0) {
				ret = -EAGAIN;
			} else if (ret != -ERESTARTSYS) {
				dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in read (ret=%i)\n",
					ret);
			}

			goto end_unlock;
		}
	}

	/* reading RLR pops the length of the next packet; it must only be
	 * read after RDFO reported a packet (sequence mandated by the IP)
	 */
	bytes_available = ioread32(fifo->base_addr + XLLF_RLR_OFFSET);
	if (!bytes_available) {
		dev_err(fifo->dt_device, "received a packet of length 0 - fifo core will be reset\n");
		reset_ip_core(fifo);
		ret = -EIO;
		goto end_unlock;
	}

	if (bytes_available > len) {
		/* the packet has already been popped from the length fifo,
		 * so it cannot be left for a retry - reset instead
		 */
		dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu) - fifo core will be reset\n",
			bytes_available, len);
		reset_ip_core(fifo);
		ret = -EINVAL;
		goto end_unlock;
	}

	if (bytes_available % sizeof(u32)) {
		/* this probably can't happen unless IP
		 * registers were previously mishandled
		 */
		dev_err(fifo->dt_device, "received a packet that isn't word-aligned - fifo core will be reset\n");
		reset_ip_core(fifo);
		ret = -EIO;
		goto end_unlock;
	}

	words_available = bytes_available / sizeof(u32);

	/* read data into an intermediate buffer, copying the contents
	 * to userspace when the buffer is full
	 */
	copied = 0;
	while (words_available > 0) {
		copy = min(words_available, READ_BUF_SIZE);

		for (i = 0; i < copy; i++) {
			tmp_buf[i] = ioread32(fifo->base_addr +
					      XLLF_RDFD_OFFSET);
		}

		if (copy_to_user(buf + copied * sizeof(u32), tmp_buf,
				 copy * sizeof(u32))) {
			/* words were already popped from the fifo, so the
			 * packet cannot be re-read - reset to stay in sync
			 */
			reset_ip_core(fifo);
			ret = -EFAULT;
			goto end_unlock;
		}

		copied += copy;
		words_available -= copy;
	}

	ret = bytes_available;

end_unlock:
	mutex_unlock(&fifo->read_lock);

	return ret;
}

/**
 * axis_fifo_write() - Write buffer to AXIS-FIFO character device.
 * @f: Open file.
 * @buf: User space buffer to write to the device.
 * @len: User space buffer length.
 * @off: Buffer offset.
 *
 * As defined by the device's documentation, we need to write to the device's
 * data buffer then to the device's packet length register atomically. Also,
 * we need to lock before checking if the device has available space to avoid
 * any concurrency issue.
 *
 * Returns the number of bytes written to the device or negative error code
 * on failure.
 */
static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
			       size_t len, loff_t *off)
{
	struct axis_fifo *fifo = (struct axis_fifo *)f->private_data;
	unsigned int words_to_write;
	unsigned int copied;
	unsigned int copy;
	unsigned int i;
	int ret;
	u32 tmp_buf[WRITE_BUF_SIZE];

	/* the IP transfers whole 32-bit words only */
	if (len % sizeof(u32)) {
		dev_err(fifo->dt_device,
			"tried to send a packet that isn't word-aligned\n");
		return -EINVAL;
	}

	words_to_write = len / sizeof(u32);

	if (!words_to_write) {
		dev_err(fifo->dt_device,
			"tried to send a packet of length 0\n");
		return -EINVAL;
	}

	/* a packet larger than the whole tx fifo could never be sent */
	if (words_to_write > fifo->tx_fifo_depth) {
		dev_err(fifo->dt_device, "tried to write more words [%u] than slots in the fifo buffer [%u]\n",
			words_to_write, fifo->tx_fifo_depth);
		return -EINVAL;
	}

	if (fifo->write_flags & O_NONBLOCK) {
		/*
		 * Device opened in non-blocking mode. Try to lock it and then
		 * check if there is any room to write the given buffer.
		 */
		if (!mutex_trylock(&fifo->write_lock))
			return -EAGAIN;

		/* TDFV = current vacancy in words */
		if (words_to_write > ioread32(fifo->base_addr +
					      XLLF_TDFV_OFFSET)) {
			ret = -EAGAIN;
			goto end_unlock;
		}
	} else {
		/* opened in blocking mode */

		/* wait for an interrupt (or timeout) if there isn't
		 * currently enough room in the fifo
		 */
		mutex_lock(&fifo->write_lock);
		ret = wait_event_interruptible_timeout(fifo->write_queue,
			ioread32(fifo->base_addr + XLLF_TDFV_OFFSET)
				 >= words_to_write,
			write_timeout);

		if (ret <= 0) {
			if (ret == 0) {
				ret = -EAGAIN;
			} else if (ret != -ERESTARTSYS) {
				dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in write (ret=%i)\n",
					ret);
			}

			goto end_unlock;
		}
	}

	/* write data from an intermediate buffer into the fifo IP, refilling
	 * the buffer with userspace data as needed
	 */
	copied = 0;
	while (words_to_write > 0) {
		copy = min(words_to_write, WRITE_BUF_SIZE);

		if (copy_from_user(tmp_buf, buf + copied * sizeof(u32),
				   copy * sizeof(u32))) {
			/* a partial packet may already be in the tx fifo;
			 * reset so no truncated packet is ever sent
			 */
			reset_ip_core(fifo);
			ret = -EFAULT;
			goto end_unlock;
		}

		for (i = 0; i < copy; i++)
			iowrite32(tmp_buf[i], fifo->base_addr +
				  XLLF_TDFD_OFFSET);

		copied += copy;
		words_to_write -= copy;
	}

	ret = copied * sizeof(u32);

	/* write packet size to fifo */
	iowrite32(ret, fifo->base_addr + XLLF_TLR_OFFSET);

end_unlock:
	mutex_unlock(&fifo->write_lock);

	return ret;
}

/* Interrupt handler. Loops while any enabled interrupt is pending,
 * servicing (and write-1-clearing in the ISR) one condition per
 * iteration. RC/TC wake blocked readers/writers; error conditions are
 * logged; everything else is just acknowledged.
 */
static irqreturn_t axis_fifo_irq(int irq, void *dw)
{
	struct axis_fifo *fifo = (struct axis_fifo *)dw;
	unsigned int pending_interrupts;

	do {
		/* only consider interrupts that are both raised (ISR)
		 * and enabled (IER)
		 */
		pending_interrupts = ioread32(fifo->base_addr +
					      XLLF_IER_OFFSET) &
					      ioread32(fifo->base_addr
						       + XLLF_ISR_OFFSET);
		if (pending_interrupts & XLLF_INT_RC_MASK) {
			/* packet received */

			/* wake the reader process if it is waiting */
			wake_up(&fifo->read_queue);

			/* clear interrupt */
			iowrite32(XLLF_INT_RC_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TC_MASK) {
			/* packet sent */

			/* wake the writer process if it is waiting */
			wake_up(&fifo->write_queue);

			iowrite32(XLLF_INT_TC_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TFPF_MASK) {
			/* transmit fifo programmable full */

			iowrite32(XLLF_INT_TFPF_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TFPE_MASK) {
			/* transmit fifo programmable empty */

			iowrite32(XLLF_INT_TFPE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RFPF_MASK) {
			/* receive fifo programmable full */

			iowrite32(XLLF_INT_RFPF_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RFPE_MASK) {
			/* receive fifo programmable empty */

			iowrite32(XLLF_INT_RFPE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TRC_MASK) {
			/* transmit reset complete interrupt */

			iowrite32(XLLF_INT_TRC_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RRC_MASK) {
			/* receive reset complete interrupt */

			iowrite32(XLLF_INT_RRC_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RPURE_MASK) {
			/* receive fifo under-read error interrupt */
			dev_err(fifo->dt_device,
				"receive under-read interrupt\n");

			iowrite32(XLLF_INT_RPURE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RPORE_MASK) {
			/* receive over-read error interrupt */
			dev_err(fifo->dt_device,
				"receive over-read interrupt\n");

			iowrite32(XLLF_INT_RPORE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_RPUE_MASK) {
			/* receive underrun error interrupt */
			dev_err(fifo->dt_device,
				"receive underrun error interrupt\n");

			iowrite32(XLLF_INT_RPUE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TPOE_MASK) {
			/* transmit overrun error interrupt */
			dev_err(fifo->dt_device,
				"transmit overrun error interrupt\n");

			iowrite32(XLLF_INT_TPOE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts & XLLF_INT_TSE_MASK) {
			/* transmit length mismatch error interrupt */
			dev_err(fifo->dt_device,
				"transmit length mismatch error interrupt\n");

			iowrite32(XLLF_INT_TSE_MASK & XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		} else if (pending_interrupts) {
			/* unknown interrupt type */
			dev_err(fifo->dt_device,
				"unknown interrupt(s) 0x%x\n",
				pending_interrupts);

			iowrite32(XLLF_INT_ALL_MASK,
				  fifo->base_addr + XLLF_ISR_OFFSET);
		}
	} while (pending_interrupts);

	return IRQ_HANDLED;
}

/* open(): recover the axis_fifo from the miscdevice pointer that misc
 * core put in private_data, stash it back for read/write, and reject
 * access modes for which the corresponding fifo is not present in the IP.
 */
static int axis_fifo_open(struct inode *inod, struct file *f)
{
	struct axis_fifo *fifo = container_of(f->private_data,
					      struct axis_fifo, miscdev);
	f->private_data = fifo;

	if (((f->f_flags & O_ACCMODE) == O_WRONLY) ||
	    ((f->f_flags & O_ACCMODE) == O_RDWR)) {
		if (fifo->has_tx_fifo) {
			/* remember flags so write() can honor O_NONBLOCK */
			fifo->write_flags = f->f_flags;
		} else {
			dev_err(fifo->dt_device, "tried to open device for write but the transmit fifo is disabled\n");
			return -EPERM;
		}
	}

	if (((f->f_flags & O_ACCMODE) == O_RDONLY) ||
	    ((f->f_flags & O_ACCMODE) == O_RDWR)) {
		if (fifo->has_rx_fifo) {
			/* remember flags so read() can honor O_NONBLOCK */
			fifo->read_flags = f->f_flags;
		} else {
			dev_err(fifo->dt_device, "tried to open device for read but the receive fifo is disabled\n");
			return -EPERM;
		}
	}

	return 0;
}

/* release(): nothing to free; just drop the back-pointer */
static int axis_fifo_close(struct inode *inod, struct file *f)
{
	f->private_data = NULL;

	return 0;
}

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = axis_fifo_open,
	.release = axis_fifo_close,
	.read = axis_fifo_read,
	.write = axis_fifo_write
};

/* read named property from the device tree */
static int get_dts_property(struct axis_fifo *fifo,
			    char *name, unsigned int *var)
{
	int rc;

	rc = of_property_read_u32(fifo->dt_device->of_node, name, var);
	if (rc) {
		dev_err(fifo->dt_device, "couldn't read IP dts property '%s'",
			name);
		return rc;
	}
	dev_dbg(fifo->dt_device, "dts property '%s' = %u\n",
		name, *var);

	return 0;
}

/* Read and validate all required device-tree properties.
 * Only 32-bit stream data widths are supported.
 * Returns 0 on success or a negative errno.
 */
static int axis_fifo_parse_dt(struct axis_fifo *fifo)
{
	int ret;
	unsigned int value;

	ret = get_dts_property(fifo, "xlnx,axi-str-rxd-tdata-width", &value);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,axi-str-rxd-tdata-width property\n");
		goto end;
	} else if (value != 32) {
		dev_err(fifo->dt_device, "xlnx,axi-str-rxd-tdata-width only supports 32 bits\n");
		ret = -EIO;
		goto end;
	}

	ret = get_dts_property(fifo, "xlnx,axi-str-txd-tdata-width", &value);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,axi-str-txd-tdata-width property\n");
		goto end;
	} else if (value != 32) {
		dev_err(fifo->dt_device, "xlnx,axi-str-txd-tdata-width only supports 32 bits\n");
		ret = -EIO;
		goto end;
	}

	ret = get_dts_property(fifo, "xlnx,rx-fifo-depth",
			       &fifo->rx_fifo_depth);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,rx-fifo-depth property\n");
		ret = -EIO;
		goto end;
	}

	ret = get_dts_property(fifo, "xlnx,tx-fifo-depth",
			       &fifo->tx_fifo_depth);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,tx-fifo-depth property\n");
		ret = -EIO;
		goto end;
	}

	/* IP sets TDFV to fifo depth - 4 so we will do the same */
	fifo->tx_fifo_depth -= 4;

	ret = get_dts_property(fifo, "xlnx,use-rx-data", &fifo->has_rx_fifo);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,use-rx-data property\n");
		ret = -EIO;
		goto end;
	}

	ret = get_dts_property(fifo, "xlnx,use-tx-data", &fifo->has_tx_fifo);
	if (ret) {
		dev_err(fifo->dt_device, "missing xlnx,use-tx-data property\n");
		ret = -EIO;
		goto end;
	}

end:
	return ret;
}

/* Probe: allocate state, map registers, parse the device tree, reset the
 * IP, hook the interrupt, and register the misc character device. All
 * resources are devm-managed, so the error path needs no manual teardown.
 */
static int axis_fifo_probe(struct platform_device *pdev)
{
	struct resource *r_mem; /* IO mem resources */
	struct device *dev = &pdev->dev; /* OS device (from device tree) */
	struct axis_fifo *fifo = NULL;
	char *device_name;
	int rc = 0; /* error return value */

	/* ----------------------------
	 * init wrapper device
	 * ----------------------------
	 */

	device_name = devm_kzalloc(dev, 32, GFP_KERNEL);
	if (!device_name)
		return -ENOMEM;

	/* allocate device wrapper memory */
	fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
	if (!fifo)
		return -ENOMEM;

	dev_set_drvdata(dev, fifo);
	fifo->dt_device = dev;

	init_waitqueue_head(&fifo->read_queue);
	init_waitqueue_head(&fifo->write_queue);

	mutex_init(&fifo->read_lock);
	mutex_init(&fifo->write_lock);

	/* ----------------------------
	 * init device memory space
	 * ----------------------------
	 */

	/* get iospace for the device and request physical memory */
	fifo->base_addr = devm_platform_get_and_ioremap_resource(pdev, 0, &r_mem);
	if (IS_ERR(fifo->base_addr)) {
		rc = PTR_ERR(fifo->base_addr);
		goto err_initial;
	}

	dev_dbg(fifo->dt_device, "remapped memory to 0x%p\n", fifo->base_addr);

	/* create unique device name */
	snprintf(device_name, 32, "%s_%pa", DRIVER_NAME, &r_mem->start);
	dev_dbg(fifo->dt_device, "device name [%s]\n", device_name);

	/* ----------------------------
	 * init IP
	 * ----------------------------
	 */

	rc = axis_fifo_parse_dt(fifo);
	if (rc)
		goto err_initial;

	reset_ip_core(fifo);

	/* ----------------------------
	 * init device interrupts
	 * ----------------------------
	 */

	/* get IRQ resource */
	rc = platform_get_irq(pdev, 0);
	if (rc < 0)
		goto err_initial;

	/* request IRQ */
	fifo->irq = rc;
	rc = devm_request_irq(fifo->dt_device, fifo->irq, &axis_fifo_irq, 0,
			      DRIVER_NAME, fifo);
	if (rc) {
		dev_err(fifo->dt_device, "couldn't allocate interrupt %i\n",
			fifo->irq);
		goto err_initial;
	}

	/* ----------------------------
	 * init char device
	 * ----------------------------
	 */

	/* create character device */
	fifo->miscdev.fops = &fops;
	fifo->miscdev.minor = MISC_DYNAMIC_MINOR;
	fifo->miscdev.name = device_name;
	fifo->miscdev.groups = axis_fifo_attrs_groups;
	fifo->miscdev.parent = dev;
	rc = misc_register(&fifo->miscdev);
	if (rc < 0)
		goto err_initial;

	return 0;

err_initial:
	dev_set_drvdata(dev, NULL);
	return rc;
}

static void axis_fifo_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct axis_fifo *fifo = dev_get_drvdata(dev);

	misc_deregister(&fifo->miscdev);
	dev_set_drvdata(dev, NULL);
}

static const struct of_device_id axis_fifo_of_match[] = {
	{ .compatible = "xlnx,axi-fifo-mm-s-4.1", },
	{},
};
MODULE_DEVICE_TABLE(of, axis_fifo_of_match);

static struct platform_driver axis_fifo_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = axis_fifo_of_match,
	},
	.probe = axis_fifo_probe,
	.remove_new = axis_fifo_remove,
};

/* Module init: convert the millisecond timeout parameters to jiffies
 * once (negative means wait forever), then register the driver.
 */
static int __init axis_fifo_init(void)
{
	if (read_timeout >= 0)
		read_timeout = msecs_to_jiffies(read_timeout);
	else
		read_timeout = MAX_SCHEDULE_TIMEOUT;

	if (write_timeout >= 0)
		write_timeout = msecs_to_jiffies(write_timeout);
	else
		write_timeout = MAX_SCHEDULE_TIMEOUT;

	pr_info("axis-fifo driver loaded with parameters read_timeout = %li, write_timeout = %li\n",
		read_timeout, write_timeout);
	return platform_driver_register(&axis_fifo_driver);
}

module_init(axis_fifo_init);

static void __exit axis_fifo_exit(void)
{
	platform_driver_unregister(&axis_fifo_driver);
}

module_exit(axis_fifo_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jacob Feder <jacobsfeder@gmail.com>");
MODULE_DESCRIPTION("Xilinx AXI-Stream FIFO v4.1 IP core driver");