/*
 * Copyright 2020, Data61
 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
 * ABN 41 687 119 230.
 *
 * This software may be distributed and modified according to the terms of
 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
 * See "LICENSE_GPLv2.txt" for details.
 *
 * @TAG(DATA61_GPL)
 */

#include <platsupport/fdt.h>
#include <platsupport/driver_module.h>
#include <ethdrivers/tx2.h>
#include <ethdrivers/raw.h>
#include <ethdrivers/helpers.h>
#include <string.h>
#include <utils/util.h>
#include <stdio.h>
#include "uboot/tx2_configs.h"
#include "tx2.h"
#include "io.h"

#include "uboot/dwc_eth_qos.h"

static void free_desc_ring(struct tx2_eth_data *dev, ps_dma_man_t *dma_man)
{
    if (dev->rx_ring != NULL) {
        dma_unpin_free(dma_man, (void *)dev->rx_ring, sizeof(struct eqos_desc) * dev->rx_size);
        dev->rx_ring = NULL;
    }

    if (dev->tx_ring != NULL) {
        dma_unpin_free(dma_man, (void *)dev->tx_ring, sizeof(struct eqos_desc) * dev->tx_size);
        dev->tx_ring = NULL;
    }

    if (dev->rx_cookies != NULL) {
        free(dev->rx_cookies);
        dev->rx_cookies = NULL;
    }

    if (dev->tx_cookies != NULL) {
        free(dev->tx_cookies);
        dev->tx_cookies = NULL;
    }

    if (dev->tx_lengths != NULL) {
        free(dev->tx_lengths);
        dev->tx_lengths = NULL;
    }
}

static int initialize_desc_ring(struct tx2_eth_data *dev, ps_dma_man_t *dma_man, struct eth_driver *eth_driver)
{
    dma_addr_t rx_ring = dma_alloc_pin(dma_man, ALIGN_UP(sizeof(struct eqos_desc) * dev->rx_size, ARCH_DMA_MINALIGN), 0,
                                       ARCH_DMA_MINALIGN);
    if (!rx_ring.phys) {
        LOG_ERROR("Failed to allocate rx_ring");
        return -1;
    }
    dev->rx_ring = rx_ring.virt;
    dev->rx_ring_phys = rx_ring.phys;

    dma_addr_t tx_ring = dma_alloc_pin(dma_man, ALIGN_UP(sizeof(struct eqos_desc) * dev->tx_size, ARCH_DMA_MINALIGN), 0,
                                       ARCH_DMA_MINALIGN);
    if (!tx_ring.phys) {
        LOG_ERROR("Failed to allocate tx_ring");
        free_desc_ring(dev, dma_man);
        return -1;
    }
    dev->tx_ring = tx_ring.virt;
    dev->tx_ring_phys = tx_ring.phys;

    ps_dma_cache_clean_invalidate(dma_man, rx_ring.virt, sizeof(struct eqos_desc) * dev->rx_size);
    ps_dma_cache_clean_invalidate(dma_man, tx_ring.virt, sizeof(struct eqos_desc) * dev->tx_size);

    dev->rx_cookies = calloc(dev->rx_size, sizeof(void *));
    dev->tx_cookies = calloc(dev->tx_size, sizeof(void *));
    dev->tx_lengths = calloc(dev->tx_size, sizeof(unsigned int));

    if (dev->rx_cookies == NULL || dev->tx_cookies == NULL || dev->tx_lengths == NULL) {
        LOG_ERROR("Failed to allocate ring bookkeeping arrays");
        /* free_desc_ring() frees whichever of these allocations succeeded and
         * NULLs the pointers, so freeing them here as well would be a double
         * free. */
        free_desc_ring(dev, dma_man);
        return -1;
    }

    /* Remaining needs to be 2 less than size as we cannot actually enqueue size many descriptors,
     * since then the head and tail pointers would be equal, indicating empty. */
    dev->rx_remain = dev->rx_size;
    dev->tx_remain = dev->tx_size;

    dev->rdt = dev->rdh = dev->tdt = dev->tdh = 0;

    /* zero both rings */
    memset((void *)dev->tx_ring, 0, sizeof(struct eqos_desc) * dev->tx_size);
    memset((void *)dev->rx_ring, 0, sizeof(struct eqos_desc) * dev->rx_size);

    __sync_synchronize();

    return 0;
}
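
/*
 * Ring bookkeeping, as used by the functions below: rdh/tdh are the software
 * head indices (the next descriptor to reclaim) and rdt/tdt are the tail
 * indices (the next descriptor to hand to the DMA engine). A descriptor is
 * handed over by setting EQOS_DESC3_OWN; the engine clears that bit when it
 * writes the descriptor back. rx_remain/tx_remain count how many descriptors
 * software may still enqueue.
 */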

static void fill_rx_bufs(struct eth_driver *driver)
{
    struct tx2_eth_data *dev = (struct tx2_eth_data *)driver->eth_data;

    while (dev->rx_remain > 0) {

        void *cookie = NULL;
        /* request a buffer */
        uintptr_t phys = driver->i_cb.allocate_rx_buf ? driver->i_cb.allocate_rx_buf(driver->cb_cookie, EQOS_MAX_PACKET_SIZE,
                                                                                     &cookie) : 0;

        if (!phys) {
            break;
        }

        if (dev->rx_cookies[dev->rdt] != NULL) {
            ZF_LOGF("Overwriting a descriptor at dev->rdt %d", dev->rdt);
        }

        dev->rx_cookies[dev->rdt] = cookie;
        dev->rx_ring[dev->rdt].des0 = phys;
        dev->rx_ring[dev->rdt].des1 = 0;
        dev->rx_ring[dev->rdt].des2 = 0;
        dev->rx_ring[dev->rdt].des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;

        dev->rdt = (dev->rdt + 1) % dev->rx_size;
        dev->rx_remain--;
    }
    __sync_synchronize();

    if (dev->rx_remain != dev->rx_size) {
        /* We've refilled some buffers, so update the tail pointer so that the DMA controller knows */
        eqos_set_rx_tail_pointer(dev);
    }

    __sync_synchronize();
}

static void complete_rx(struct eth_driver *eth_driver)
{
    struct tx2_eth_data *dev = (struct tx2_eth_data *)eth_driver->eth_data;
    unsigned int num_in_ring = dev->rx_size - dev->rx_remain;

    for (unsigned int i = 0; i < num_in_ring; i++) {
        unsigned int status = dev->rx_ring[dev->rdh].des3;

        /* Ensure no memory references get ordered before we have checked that the descriptor was written back */
        __sync_synchronize();
        if (status & EQOS_DESC3_OWN) {
            /* not complete yet */
            break;
        }

        /* TBD: do we need to handle frames that span multiple buffers? */
        void *cookie = dev->rx_cookies[dev->rdh];
        dev->rx_cookies[dev->rdh] = 0;
        /* the packet length lives in the low 15 bits of the write-back descriptor */
        unsigned int len = status & 0x7fff;

        dev->rx_remain++;
        /* update rdh */
        dev->rdh = (dev->rdh + 1) % dev->rx_size;

        /* Give the buffer back */
        eth_driver->i_cb.rx_complete(eth_driver->cb_cookie, 1, &cookie, &len);
    }
}
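
/*
 * A single transmit may in principle occupy several consecutive descriptors;
 * tx_lengths[tdh] records how many were used, so complete_tx() only reclaims
 * a packet once every one of its descriptors has been written back. With
 * raw_tx() asserting num == 1 below, each packet currently occupies exactly
 * one descriptor.
 */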

static void complete_tx(struct eth_driver *driver)
{
    struct tx2_eth_data *dev = (struct tx2_eth_data *)driver->eth_data;
    volatile struct eqos_desc *tx_desc;

    while ((dev->tx_size - dev->tx_remain) > 0) {
        uint32_t i;
        for (i = 0; i < dev->tx_lengths[dev->tdh]; i++) {
            uint32_t ring_pos = (i + dev->tdh) % dev->tx_size;
            tx_desc = &dev->tx_ring[ring_pos];
            if ((tx_desc->des3 & EQOS_DESC3_OWN)) {
                /* not all parts complete */
                return;
            }
        }

        /* do not let memory loads happen before our checking of the descriptor write-back */
        __sync_synchronize();

        /* advance the TX descriptor head */
        void *cookie = dev->tx_cookies[dev->tdh];
        dev->tx_remain += dev->tx_lengths[dev->tdh];
        dev->tdh = (dev->tdh + dev->tx_lengths[dev->tdh]) % dev->tx_size;

        /* give the buffer back */
        driver->i_cb.tx_complete(driver->cb_cookie, cookie);
    }
}

static void handle_irq(struct eth_driver *driver, int irq)
{
    struct tx2_eth_data *eth_data = (struct tx2_eth_data *)driver->eth_data;
    uint32_t val = eqos_handle_irq(eth_data, irq);

    if (val & TX_IRQ) {
        eqos_dma_disable_txirq(eth_data);
        complete_tx(driver);
        eqos_dma_enable_txirq(eth_data);
    }

    if (val & RX_IRQ) {
        eqos_dma_disable_rxirq(eth_data);
        complete_rx(driver);
        fill_rx_bufs(driver);
        /*
         * The RX IRQ was disabled while the IRQ was being checked, and thus
         * needs to be re-enabled
         */
        eqos_dma_enable_rxirq(eth_data);
    }

    if (val == 0) {
        ZF_LOGD("No TX or RX IRQ, ignoring this interrupt");
    }
}

static void print_state(struct eth_driver *eth_driver)
{
    ZF_LOGF("print_state not implemented");
}

static void low_level_init(struct eth_driver *driver, uint8_t *mac, int *mtu)
{
    ZF_LOGF("low_level_init not implemented");
}

static int raw_tx(struct eth_driver *driver, unsigned int num, uintptr_t *phys,
                  unsigned int *len, void *cookie)
{
    assert(num == 1);
    struct tx2_eth_data *dev = (struct tx2_eth_data *)driver->eth_data;
    int err;
    /* If the ring is filling up, try to reclaim completed descriptors to make room */
    if ((dev->tx_size - dev->tx_remain) > 32) {
        complete_tx(driver);
        if (dev->tx_remain < num) {
            ZF_LOGE("Raw TX failed");
            return ETHIF_TX_FAILED;
        }
    }
    __sync_synchronize();

    uint32_t i;
    for (i = 0; i < num; i++) {
        dev->tx_cookies[dev->tdt] = cookie;
        dev->tx_lengths[dev->tdt] = num;
        err = eqos_send(dev, (void *)phys[i], len[i]);
        if (err == -ETIMEDOUT) {
            ZF_LOGF("send timed out");
        }
        dev->tdt = (dev->tdt + 1) % dev->tx_size;
    }

    dev->tx_remain -= num;

    return ETHIF_TX_ENQUEUED;
}

static void raw_poll(struct eth_driver *driver)
{
    complete_rx(driver);
    complete_tx(driver);
    fill_rx_bufs(driver);
}

static void get_mac(struct eth_driver *driver, uint8_t *mac)
{
    memcpy(mac, TX2_DEFAULT_MAC, 6);
}

static struct raw_iface_funcs iface_fns = {
    .raw_handleIRQ = handle_irq,
    .print_state = print_state,
    .low_level_init = low_level_init,
    .raw_tx = raw_tx,
    .raw_poll = raw_poll,
    .get_mac = get_mac
};
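
/*
 * ethif_tx2_init() below installs this vtable on the generic eth_driver
 * handle, so consumers drive the NIC through i_fn. A minimal transmit sketch,
 * assuming a DMA-pinned buffer `buf` and an opaque `cookie` (both names
 * hypothetical):
 *
 *     unsigned int len = frame_len;
 *     if (driver->i_fn.raw_tx(driver, 1, &buf.phys, &len, cookie) == ETHIF_TX_ENQUEUED) {
 *         // completion is signalled later via the tx_complete callback
 *     }
 */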

int ethif_tx2_init(struct eth_driver *eth_driver, ps_io_ops_t io_ops, void *config)
{
    int err;
    struct arm_eth_plat_config *plat_config = (struct arm_eth_plat_config *)config;
    struct tx2_eth_data *eth_data = NULL;
    void *eth_dev;

    if (config == NULL) {
        LOG_ERROR("Cannot get platform info; passed-in config pointer is NULL");
        goto error;
    }

    eth_data = (struct tx2_eth_data *)malloc(sizeof(struct tx2_eth_data));
    if (eth_data == NULL) {
        LOG_ERROR("Failed to allocate eth data struct");
        goto error;
    }

    uintptr_t base_addr = (uintptr_t)plat_config->buffer_addr;

    eth_data->tx_size = EQOS_DESCRIPTORS_TX;
    eth_data->rx_size = EQOS_DESCRIPTORS_RX;
    eth_driver->dma_alignment = ARCH_DMA_MINALIGN;
    eth_driver->eth_data = eth_data;
    eth_driver->i_fn = iface_fns;

    /* Initialize descriptors */
    err = initialize_desc_ring(eth_data, &io_ops.dma_manager, eth_driver);
    if (err) {
        LOG_ERROR("Failed to allocate descriptor rings");
        goto error;
    }

    eth_dev = tx2_initialise(base_addr, &io_ops);
    if (NULL == eth_dev) {
        LOG_ERROR("Failed to initialize TX2 ethernet device");
        goto error;
    }
    eth_data->eth_dev = eth_dev;

    fill_rx_bufs(eth_driver);

    err = eqos_start(eth_data);
    if (err) {
        goto error;
    }
    return 0;
error:
    if (eth_data != NULL) {
        /* release the rings before freeing eth_data: free_desc_ring()
         * dereferences it, so freeing it first would be a use after free */
        free_desc_ring(eth_data, &io_ops.dma_manager);
        free(eth_data);
    }
    return -1;
}

static void eth_irq_handle(void *data, ps_irq_acknowledge_fn_t acknowledge_fn, void *ack_data)
{
    struct eth_driver *eth = data;

    handle_irq(eth, 0);

    int error = acknowledge_fn(ack_data);
    if (error) {
        LOG_ERROR("Failed to acknowledge IRQ");
    }
}

typedef struct {
    void *addr;
    ps_io_ops_t *io_ops;
    struct eth_driver *eth_driver;
} callback_args_t;

static int allocate_register_callback(pmem_region_t pmem, unsigned curr_num, size_t num_regs, void *token)
{
    if (token == NULL) {
        return -EINVAL;
    }

    callback_args_t *args = token;
    /* map only the first register region */
    if (curr_num == 0) {
        args->addr = ps_pmem_map(args->io_ops, pmem, false, PS_MEM_NORMAL);
        if (!args->addr) {
            ZF_LOGE("Failed to map the eth device");
            return -EIO;
        }
    }
    return 0;
}

static int allocate_irq_callback(ps_irq_t irq, unsigned curr_num, size_t num_irqs, void *token)
{
    if (token == NULL) {
        return -EINVAL;
    }
    callback_args_t *args = token;
    /* Skip all interrupts except the first */
    if (curr_num != 0) {
        return 0;
    }

    int res = ps_irq_register(&args->io_ops->irq_ops, irq, eth_irq_handle, args->eth_driver);
    if (res < 0) {
        return -EIO;
    }

    return 0;
}
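
/*
 * Module entry point: looks the device up in the flattened device tree, maps
 * its first register region, registers a handler for its first interrupt, and
 * then hands the mapped base address to ethif_tx2_init() via the plat config.
 */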

int ethif_tx2_init_module(ps_io_ops_t *io_ops, const char *device_path)
{
    struct arm_eth_plat_config plat_config;
    struct eth_driver *eth_driver;
    int error = ps_calloc(&io_ops->malloc_ops, 1, sizeof(*eth_driver), (void **)&eth_driver);
    if (error) {
        ZF_LOGE("Failed to allocate struct for eth_driver");
        return -1;
    }

    ps_fdt_cookie_t *cookie = NULL;
    callback_args_t args = { .io_ops = io_ops, .eth_driver = eth_driver };
    /* read the ethernet device's path in the DTB */
    error = ps_fdt_read_path(&io_ops->io_fdt, &io_ops->malloc_ops, device_path, &cookie);
    if (error) {
        return -ENODEV;
    }

    /* walk the registers and allocate them */
    error = ps_fdt_walk_registers(&io_ops->io_fdt, cookie, allocate_register_callback, &args);
    if (error) {
        return -ENODEV;
    }
    if (args.addr == NULL) {
        return -ENODEV;
    }

    /* walk the interrupts and allocate the first */
    error = ps_fdt_walk_irqs(&io_ops->io_fdt, cookie, allocate_irq_callback, &args);
    if (error) {
        return -ENODEV;
    }

    error = ps_fdt_cleanup_cookie(&io_ops->malloc_ops, cookie);
    if (error) {
        return -ENODEV;
    }

    plat_config.buffer_addr = args.addr;
    plat_config.prom_mode = 1;

    error = ethif_tx2_init(eth_driver, *io_ops, &plat_config);
    if (error) {
        return -ENODEV;
    }

    return ps_interface_register(&io_ops->interface_registration_ops, PS_ETHERNET_INTERFACE, eth_driver, NULL);
}

static const char *compatible_strings[] = {
    "nvidia,eqos",
    NULL
};

PS_DRIVER_MODULE_DEFINE(tx2_ether_qos, compatible_strings, ethif_tx2_init_module);
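
/*
 * PS_DRIVER_MODULE_DEFINE records this module together with its compatible
 * strings, so a platsupport driver environment that matches "nvidia,eqos"
 * nodes in the device tree can find the module and call
 * ethif_tx2_init_module() with the node's path.
 */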