Lines matching refs: dt — every source line that references the struct dma_test pointer "dt", listed with its line number in the file (the code is recognizably the Linux Thunderbolt DMA traffic test driver, drivers/thunderbolt/dma_test.c, or a close relative of it).

122 static void dma_test_free_rings(struct dma_test *dt)
124 if (dt->rx_ring) {
125 tb_xdomain_release_in_hopid(dt->xd, dt->rx_hopid);
126 tb_ring_free(dt->rx_ring);
127 dt->rx_ring = NULL;
129 if (dt->tx_ring) {
130 tb_xdomain_release_out_hopid(dt->xd, dt->tx_hopid);
131 tb_ring_free(dt->tx_ring);
132 dt->tx_ring = NULL;
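These fragments cover essentially the whole teardown helper. A minimal reconstruction, filling in only the braces and if-bodies implied by lines 122-132 (every call and field below is taken from the listing), could look like this:

static void dma_test_free_rings(struct dma_test *dt)
{
        if (dt->rx_ring) {
                /* give back the inbound HopID reserved on the XDomain link */
                tb_xdomain_release_in_hopid(dt->xd, dt->rx_hopid);
                tb_ring_free(dt->rx_ring);
                dt->rx_ring = NULL;
        }
        if (dt->tx_ring) {
                /* same for the outbound direction */
                tb_xdomain_release_out_hopid(dt->xd, dt->tx_hopid);
                tb_ring_free(dt->tx_ring);
                dt->tx_ring = NULL;
        }
}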
136 static int dma_test_start_rings(struct dma_test *dt)
139 struct tb_xdomain *xd = dt->xd;
148 if (dt->packets_to_send && dt->packets_to_receive)
151 if (dt->packets_to_send) {
157 dt->tx_ring = ring;
162 dma_test_free_rings(dt);
166 dt->tx_hopid = ret;
169 if (dt->packets_to_receive) {
179 dma_test_free_rings(dt);
183 dt->rx_ring = ring;
187 dma_test_free_rings(dt);
191 dt->rx_hopid = ret;
194 ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid,
195 dt->tx_ring ? dt->tx_ring->hop : -1,
196 dt->rx_hopid,
197 dt->rx_ring ? dt->rx_ring->hop : -1);
199 dma_test_free_rings(dt);
203 if (dt->tx_ring)
204 tb_ring_start(dt->tx_ring);
205 if (dt->rx_ring)
206 tb_ring_start(dt->rx_ring);
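The tail of dma_test_start_rings() is fully visible above (lines 194-206): only after the optional TX and RX rings and their HopIDs have been set up does the driver program the inter-domain paths, and only then are the rings started. A sketch of that final sequence, with the preceding allocation steps (lines 151-191, only partially visible in the listing) summarized as a comment:

        /*
         * Earlier in the function: when packets_to_send is set, allocate a
         * TX ring and an output HopID (dt->tx_hopid); when
         * packets_to_receive is set, allocate an RX ring and an input
         * HopID (dt->rx_hopid).  Any failure unwinds through
         * dma_test_free_rings(dt).
         */
        ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid,
                                      dt->tx_ring ? dt->tx_ring->hop : -1,
                                      dt->rx_hopid,
                                      dt->rx_ring ? dt->rx_ring->hop : -1);
        if (ret) {
                dma_test_free_rings(dt);
                return ret;
        }

        /* start only the rings that were actually allocated */
        if (dt->tx_ring)
                tb_ring_start(dt->tx_ring);
        if (dt->rx_ring)
                tb_ring_start(dt->rx_ring);

        return 0;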
211 static void dma_test_stop_rings(struct dma_test *dt)
215 if (dt->rx_ring)
216 tb_ring_stop(dt->rx_ring);
217 if (dt->tx_ring)
218 tb_ring_stop(dt->tx_ring);
220 ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid,
221 dt->tx_ring ? dt->tx_ring->hop : -1,
222 dt->rx_hopid,
223 dt->rx_ring ? dt->rx_ring->hop : -1);
225 dev_warn(&dt->svc->dev, "failed to disable DMA paths\n");
227 dma_test_free_rings(dt);
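Stopping is the mirror image: quiesce the rings first, then tear down the paths (a failure here is only worth a warning since we are cleaning up anyway), and finally release rings and HopIDs. Reconstructed from lines 211-227, with only the local variable and the if around the warning filled in:

static void dma_test_stop_rings(struct dma_test *dt)
{
        int ret;

        if (dt->rx_ring)
                tb_ring_stop(dt->rx_ring);
        if (dt->tx_ring)
                tb_ring_stop(dt->tx_ring);

        ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid,
                                       dt->tx_ring ? dt->tx_ring->hop : -1,
                                       dt->rx_hopid,
                                       dt->rx_ring ? dt->rx_ring->hop : -1);
        if (ret)
                dev_warn(&dt->svc->dev, "failed to disable DMA paths\n");

        dma_test_free_rings(dt);
}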
234 struct dma_test *dt = tf->dma_test;
235 struct device *dma_dev = tb_ring_dma_device(dt->rx_ring);
246 dt->packets_received++;
247 dev_dbg(&dt->svc->dev, "packet %u/%u received\n", dt->packets_received,
248 dt->packets_to_receive);
251 dt->crc_errors++;
253 dt->buffer_overflow_errors++;
257 if (dt->packets_received == dt->packets_to_receive)
258 complete(&dt->complete);
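Lines 234-258 come from the RX ring completion callback: each completed frame bumps packets_received, frames the hardware flagged as bad bump crc_errors or buffer_overflow_errors, and once everything expected has arrived the test thread waiting on dt->complete is woken. A condensed sketch of that accounting; the unmap/free of the frame buffer against dma_dev and the exact frame flags used to classify errors are not visible in the listing, so the two predicates below are hypothetical placeholders:

        struct dma_test *dt = tf->dma_test;
        struct device *dma_dev = tb_ring_dma_device(dt->rx_ring);

        /* (unmap tf's payload buffer against dma_dev and free it here) */

        dt->packets_received++;
        dev_dbg(&dt->svc->dev, "packet %u/%u received\n", dt->packets_received,
                dt->packets_to_receive);

        if (frame_had_crc_error)                /* hypothetical check */
                dt->crc_errors++;
        else if (frame_had_buffer_overflow)     /* hypothetical check */
                dt->buffer_overflow_errors++;

        /* wake the waiter in the test handler once the run is complete */
        if (dt->packets_received == dt->packets_to_receive)
                complete(&dt->complete);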
261 static int dma_test_submit_rx(struct dma_test *dt, size_t npackets)
263 struct device *dma_dev = tb_ring_dma_device(dt->rx_ring);
290 tf->dma_test = dt;
293 tb_ring_rx(dt->rx_ring, &tf->frame);
303 struct dma_test *dt = tf->dma_test;
304 struct device *dma_dev = tb_ring_dma_device(dt->tx_ring);
312 static int dma_test_submit_tx(struct dma_test *dt, size_t npackets)
314 struct device *dma_dev = tb_ring_dma_device(dt->tx_ring);
326 tf->dma_test = dt;
348 dt->packets_sent++;
349 dev_dbg(&dt->svc->dev, "packet %u/%u sent\n", dt->packets_sent,
350 dt->packets_to_send);
352 tb_ring_tx(dt->tx_ring, &tf->frame);
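dma_test_submit_rx() and dma_test_submit_tx() share the same shape: for each of npackets they allocate a per-packet wrapper, point its dma_test back-reference at dt, DMA-map a payload buffer against the ring's DMA device, and queue the frame with tb_ring_rx()/tb_ring_tx(); the TX side also counts and logs each packet as it is queued. A sketch of the TX variant; the layout of struct dma_test_frame is assumed from the tf->dma_test and tf->frame uses above, and the buffer allocation/mapping step is only summarized:

struct dma_test_frame {
        struct dma_test *dma_test;      /* back-pointer used by the ring callbacks */
        void *data;                     /* payload, DMA-mapped per packet (assumed) */
        struct ring_frame frame;        /* descriptor handed to the ring */
};

static int dma_test_submit_tx(struct dma_test *dt, size_t npackets)
{
        struct device *dma_dev = tb_ring_dma_device(dt->tx_ring);
        int i;

        for (i = 0; i < npackets; i++) {
                struct dma_test_frame *tf;

                tf = kzalloc(sizeof(*tf), GFP_KERNEL);
                if (!tf)
                        return -ENOMEM;
                tf->dma_test = dt;

                /*
                 * Not visible in the listing: allocate and fill tf->data,
                 * dma_map_single() it against dma_dev and record the
                 * mapping plus the completion callback in tf->frame.
                 */

                dt->packets_sent++;
                dev_dbg(&dt->svc->dev, "packet %u/%u sent\n", dt->packets_sent,
                        dt->packets_to_send);

                tb_ring_tx(dt->tx_ring, &tf->frame);
        }

        return 0;
}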
362 struct dma_test *dt = tb_service_get_drvdata(svc); \
365 ret = mutex_lock_interruptible(&dt->lock); \
368 __get(dt, val); \
369 mutex_unlock(&dt->lock); \
375 struct dma_test *dt = tb_service_get_drvdata(svc); \
381 ret = mutex_lock_interruptible(&dt->lock); \
384 __set(dt, val); \
385 mutex_unlock(&dt->lock); \
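Lines 362-385 are the body of the driver's debugfs attribute macro: it stamps out a _show/_store pair per attribute, each of which resolves the struct dma_test from the service drvdata, takes dt->lock interruptibly, and calls a per-attribute getter or setter. A sketch of the pattern; the macro name, its parameter list, the validation hook and the DEFINE_DEBUGFS_ATTRIBUTE() wiring at the end are assumptions here, only the locking and the __get/__set calls are visible in the listing:

#define DMA_TEST_DEBUGFS_ATTR(__fops, __get, __validate, __set)        \
static int __fops ## _show(void *data, u64 *val)                       \
{                                                                      \
        struct tb_service *svc = data;                                 \
        struct dma_test *dt = tb_service_get_drvdata(svc);             \
        int ret;                                                       \
                                                                       \
        ret = mutex_lock_interruptible(&dt->lock);                     \
        if (ret)                                                       \
                return ret;                                            \
        __get(dt, val);                                                \
        mutex_unlock(&dt->lock);                                       \
        return 0;                                                      \
}                                                                      \
static int __fops ## _store(void *data, u64 val)                       \
{                                                                      \
        struct tb_service *svc = data;                                 \
        struct dma_test *dt = tb_service_get_drvdata(svc);             \
        int ret;                                                       \
                                                                       \
        ret = __validate(val);                                         \
        if (ret)                                                       \
                return ret;                                            \
        ret = mutex_lock_interruptible(&dt->lock);                     \
        if (ret)                                                       \
                return ret;                                            \
        __set(dt, val);                                                \
        mutex_unlock(&dt->lock);                                       \
        return 0;                                                      \
}                                                                      \
DEFINE_DEBUGFS_ATTRIBUTE(__fops, __fops ## _show, __fops ## _store, "%llu\n")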
391 static void lanes_get(const struct dma_test *dt, u64 *val)
393 *val = dt->link_width;
401 static void lanes_set(struct dma_test *dt, u64 val)
403 dt->link_width = val;
407 static void speed_get(const struct dma_test *dt, u64 *val)
409 *val = dt->link_speed;
425 static void speed_set(struct dma_test *dt, u64 val)
427 dt->link_speed = val;
431 static void packets_to_receive_get(const struct dma_test *dt, u64 *val)
433 *val = dt->packets_to_receive;
441 static void packets_to_receive_set(struct dma_test *dt, u64 val)
443 dt->packets_to_receive = val;
448 static void packets_to_send_get(const struct dma_test *dt, u64 *val)
450 *val = dt->packets_to_send;
458 static void packets_to_send_set(struct dma_test *dt, u64 val)
460 dt->packets_to_send = val;
465 static int dma_test_set_bonding(struct dma_test *dt)
467 switch (dt->link_width) {
469 return tb_xdomain_lane_bonding_enable(dt->xd);
471 tb_xdomain_lane_bonding_disable(dt->xd);
478 static bool dma_test_validate_config(struct dma_test *dt)
480 if (!dt->packets_to_send && !dt->packets_to_receive)
482 if (dt->packets_to_send && dt->packets_to_receive &&
483 dt->packets_to_send != dt->packets_to_receive)
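The two small helpers above gate a test run. dma_test_set_bonding() maps the requested link_width onto the XDomain lane-bonding calls, and dma_test_validate_config() rejects a configuration that neither sends nor receives, or that sends and receives different packet counts. Reconstructed from lines 465-483; the switch case labels themselves are not visible in the listing, but 2 lanes enabling bonding and 1 lane disabling it is the natural reading:

static int dma_test_set_bonding(struct dma_test *dt)
{
        switch (dt->link_width) {
        case 2:
                return tb_xdomain_lane_bonding_enable(dt->xd);
        case 1:
                tb_xdomain_lane_bonding_disable(dt->xd);
                fallthrough;
        default:
                return 0;
        }
}

static bool dma_test_validate_config(struct dma_test *dt)
{
        if (!dt->packets_to_send && !dt->packets_to_receive)
                return false;
        if (dt->packets_to_send && dt->packets_to_receive &&
            dt->packets_to_send != dt->packets_to_receive)
                return false;
        return true;
}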
488 static void dma_test_check_errors(struct dma_test *dt, int ret)
490 if (!dt->error_code) {
491 if (dt->link_speed && dt->xd->link_speed != dt->link_speed) {
492 dt->error_code = DMA_TEST_SPEED_ERROR;
493 } else if (dt->link_width && dt->link_width != dt->xd->link_width) {
494 dt->error_code = DMA_TEST_WIDTH_ERROR;
495 } else if (dt->packets_to_send != dt->packets_sent ||
496 dt->packets_to_receive != dt->packets_received ||
497 dt->crc_errors || dt->buffer_overflow_errors) {
498 dt->error_code = DMA_TEST_PACKET_ERROR;
504 dt->result = DMA_TEST_FAIL;
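dma_test_check_errors() turns the gathered state into a verdict: if no error code was set during the run, it compares the negotiated link speed and width against the requested ones and the sent/received counters against the configured ones, and any mismatch (or any CRC/overflow error) selects an error code; every error path downgrades the result to DMA_TEST_FAIL. Reconstructed from lines 488-504, with only the braces and the early return on the all-good path filled in:

static void dma_test_check_errors(struct dma_test *dt, int ret)
{
        if (!dt->error_code) {
                if (dt->link_speed && dt->xd->link_speed != dt->link_speed) {
                        dt->error_code = DMA_TEST_SPEED_ERROR;
                } else if (dt->link_width && dt->link_width != dt->xd->link_width) {
                        dt->error_code = DMA_TEST_WIDTH_ERROR;
                } else if (dt->packets_to_send != dt->packets_sent ||
                           dt->packets_to_receive != dt->packets_received ||
                           dt->crc_errors || dt->buffer_overflow_errors) {
                        dt->error_code = DMA_TEST_PACKET_ERROR;
                } else {
                        return;         /* everything matched, keep DMA_TEST_SUCCESS */
                }
        }

        dt->result = DMA_TEST_FAIL;
}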
510 struct dma_test *dt = tb_service_get_drvdata(svc);
516 ret = mutex_lock_interruptible(&dt->lock);
520 dt->packets_sent = 0;
521 dt->packets_received = 0;
522 dt->crc_errors = 0;
523 dt->buffer_overflow_errors = 0;
524 dt->result = DMA_TEST_SUCCESS;
525 dt->error_code = DMA_TEST_NO_ERROR;
528 if (dt->link_speed)
529 dev_dbg(&svc->dev, "link_speed: %u Gb/s\n", dt->link_speed);
530 if (dt->link_width)
531 dev_dbg(&svc->dev, "link_width: %u\n", dt->link_width);
532 dev_dbg(&svc->dev, "packets_to_send: %u\n", dt->packets_to_send);
533 dev_dbg(&svc->dev, "packets_to_receive: %u\n", dt->packets_to_receive);
535 if (!dma_test_validate_config(dt)) {
537 dt->error_code = DMA_TEST_CONFIG_ERROR;
541 ret = dma_test_set_bonding(dt);
544 dt->error_code = DMA_TEST_BONDING_ERROR;
548 ret = dma_test_start_rings(dt);
551 dt->error_code = DMA_TEST_DMA_ERROR;
555 if (dt->packets_to_receive) {
556 reinit_completion(&dt->complete);
557 ret = dma_test_submit_rx(dt, dt->packets_to_receive);
560 dt->error_code = DMA_TEST_BUFFER_ERROR;
565 if (dt->packets_to_send) {
566 ret = dma_test_submit_tx(dt, dt->packets_to_send);
569 dt->error_code = DMA_TEST_BUFFER_ERROR;
574 if (dt->packets_to_receive) {
575 ret = wait_for_completion_interruptible(&dt->complete);
577 dt->error_code = DMA_TEST_INTERRUPTED;
583 dma_test_stop_rings(dt);
585 dma_test_check_errors(dt, ret);
586 mutex_unlock(&dt->lock);
588 dev_dbg(&svc->dev, "DMA test %s\n", dma_test_result_names[dt->result]);
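Lines 510-588 are the handler behind the write-only "test" file and show the whole run sequence: take dt->lock, reset the per-run counters, log the configuration, then validate the config, apply lane bonding, start the rings, queue the RX and/or TX packets, wait for the receiver to finish, stop the rings and compute the verdict, with each failing step recording its own error code. A condensed sketch of that flow; the function name, its debugfs wiring and the exact goto structure are assumptions, the individual steps and error codes are from the listing:

static int dma_test_run(struct tb_service *svc)         /* hypothetical name */
{
        struct dma_test *dt = tb_service_get_drvdata(svc);
        int ret;

        ret = mutex_lock_interruptible(&dt->lock);
        if (ret)
                return ret;

        /* reset per-run statistics */
        dt->packets_sent = 0;
        dt->packets_received = 0;
        dt->crc_errors = 0;
        dt->buffer_overflow_errors = 0;
        dt->result = DMA_TEST_SUCCESS;
        dt->error_code = DMA_TEST_NO_ERROR;

        /* (debug-log the requested speed/width/packet counts here, lines 528-533) */

        if (!dma_test_validate_config(dt)) {
                dt->error_code = DMA_TEST_CONFIG_ERROR;
                goto out_unlock;
        }

        ret = dma_test_set_bonding(dt);
        if (ret) {
                dt->error_code = DMA_TEST_BONDING_ERROR;
                goto out_unlock;
        }

        ret = dma_test_start_rings(dt);
        if (ret) {
                dt->error_code = DMA_TEST_DMA_ERROR;
                goto out_unlock;
        }

        if (dt->packets_to_receive) {
                reinit_completion(&dt->complete);
                ret = dma_test_submit_rx(dt, dt->packets_to_receive);
                if (ret) {
                        dt->error_code = DMA_TEST_BUFFER_ERROR;
                        goto out_stop;
                }
        }

        if (dt->packets_to_send) {
                ret = dma_test_submit_tx(dt, dt->packets_to_send);
                if (ret) {
                        dt->error_code = DMA_TEST_BUFFER_ERROR;
                        goto out_stop;
                }
        }

        /* the RX callback completes dt->complete when all packets are in */
        if (dt->packets_to_receive) {
                ret = wait_for_completion_interruptible(&dt->complete);
                if (ret)
                        dt->error_code = DMA_TEST_INTERRUPTED;
        }

out_stop:
        dma_test_stop_rings(dt);
out_unlock:
        dma_test_check_errors(dt, ret);
        mutex_unlock(&dt->lock);

        dev_dbg(&svc->dev, "DMA test %s\n", dma_test_result_names[dt->result]);
        return 0;
}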
596 struct dma_test *dt = tb_service_get_drvdata(svc);
599 ret = mutex_lock_interruptible(&dt->lock);
603 seq_printf(s, "result: %s\n", dma_test_result_names[dt->result]);
604 if (dt->result == DMA_TEST_NOT_RUN)
607 seq_printf(s, "packets received: %u\n", dt->packets_received);
608 seq_printf(s, "packets sent: %u\n", dt->packets_sent);
609 seq_printf(s, "CRC errors: %u\n", dt->crc_errors);
611 dt->buffer_overflow_errors);
612 seq_printf(s, "error: %s\n", dma_test_error_names[dt->error_code]);
615 mutex_unlock(&dt->lock);
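The read-only "status" file is a seq_file that dumps the result of the most recent run: if no test has been run yet it prints only that, otherwise it lists the packet and error counters followed by the symbolic error code. Reconstructed from lines 596-615; how the tb_service is recovered from the seq_file, the early-exit label and the label text on the wrapped seq_printf line are assumed:

static int status_show(struct seq_file *s, void *not_used)
{
        struct tb_service *svc = s->private;    /* assumed */
        struct dma_test *dt = tb_service_get_drvdata(svc);
        int ret;

        ret = mutex_lock_interruptible(&dt->lock);
        if (ret)
                return ret;

        seq_printf(s, "result: %s\n", dma_test_result_names[dt->result]);
        if (dt->result == DMA_TEST_NOT_RUN)
                goto out_unlock;

        seq_printf(s, "packets received: %u\n", dt->packets_received);
        seq_printf(s, "packets sent: %u\n", dt->packets_sent);
        seq_printf(s, "CRC errors: %u\n", dt->crc_errors);
        seq_printf(s, "buffer overflow errors: %u\n",  /* label assumed */
                   dt->buffer_overflow_errors);
        seq_printf(s, "error: %s\n", dma_test_error_names[dt->error_code]);

out_unlock:
        mutex_unlock(&dt->lock);
        return 0;
}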
622 struct dma_test *dt = tb_service_get_drvdata(svc);
624 dt->debugfs_dir = debugfs_create_dir("dma_test", svc->debugfs_dir);
626 debugfs_create_file("lanes", 0600, dt->debugfs_dir, svc, &lanes_fops);
627 debugfs_create_file("speed", 0600, dt->debugfs_dir, svc, &speed_fops);
628 debugfs_create_file("packets_to_receive", 0600, dt->debugfs_dir, svc,
630 debugfs_create_file("packets_to_send", 0600, dt->debugfs_dir, svc,
632 debugfs_create_file("status", 0400, dt->debugfs_dir, svc, &status_fops);
633 debugfs_create_file("test", 0200, dt->debugfs_dir, svc, &test_fops);
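The per-service debugfs directory exposes four read-write tuning knobs (lanes, speed, packets_to_receive, packets_to_send; mode 0600), a read-only status file (0400) and a write-only test trigger (0200). Reconstructed from lines 622-633; the function name and the two fops names on the wrapped lines are assumed to follow the visible pattern:

static void dma_test_debugfs_init(struct tb_service *svc)       /* name assumed */
{
        struct dma_test *dt = tb_service_get_drvdata(svc);

        dt->debugfs_dir = debugfs_create_dir("dma_test", svc->debugfs_dir);

        debugfs_create_file("lanes", 0600, dt->debugfs_dir, svc, &lanes_fops);
        debugfs_create_file("speed", 0600, dt->debugfs_dir, svc, &speed_fops);
        debugfs_create_file("packets_to_receive", 0600, dt->debugfs_dir, svc,
                            &packets_to_receive_fops);
        debugfs_create_file("packets_to_send", 0600, dt->debugfs_dir, svc,
                            &packets_to_send_fops);
        debugfs_create_file("status", 0400, dt->debugfs_dir, svc, &status_fops);
        debugfs_create_file("test", 0200, dt->debugfs_dir, svc, &test_fops);
}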
639 struct dma_test *dt;
641 dt = devm_kzalloc(&svc->dev, sizeof(*dt), GFP_KERNEL);
642 if (!dt)
645 dt->svc = svc;
646 dt->xd = xd;
647 mutex_init(&dt->lock);
648 init_completion(&dt->complete);
650 tb_service_set_drvdata(svc, dt);
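Probe allocates the per-service state with devm so it is freed automatically, records the service and the parent XDomain, initializes the lock and the completion used by the RX path, and stashes the pointer as service drvdata (lines 639-650). A sketch; how xd is obtained and the trailing debugfs setup call are not visible in the listing and are assumptions:

static int dma_test_probe(struct tb_service *svc, const struct tb_service_id *id)
{
        struct tb_xdomain *xd = tb_service_parent(svc);        /* assumed */
        struct dma_test *dt;

        dt = devm_kzalloc(&svc->dev, sizeof(*dt), GFP_KERNEL);
        if (!dt)
                return -ENOMEM;

        dt->svc = svc;
        dt->xd = xd;
        mutex_init(&dt->lock);
        init_completion(&dt->complete);

        tb_service_set_drvdata(svc, dt);
        dma_test_debugfs_init(svc);     /* assumed, see the debugfs sketch above */

        return 0;
}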
658 struct dma_test *dt = tb_service_get_drvdata(svc);
660 mutex_lock(&dt->lock);
661 debugfs_remove_recursive(dt->debugfs_dir);
662 mutex_unlock(&dt->lock);
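Removal is symmetric and minimal: drop the debugfs directory under the lock so a concurrent attribute access cannot race with the teardown; the devm allocation from probe is released automatically. Reconstructed from lines 658-662, with the function name assumed:

static void dma_test_remove(struct tb_service *svc)
{
        struct dma_test *dt = tb_service_get_drvdata(svc);

        mutex_lock(&dt->lock);
        debugfs_remove_recursive(dt->debugfs_dir);
        mutex_unlock(&dt->lock);
}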