Lines matching refs:peer

64  * Eg: start performance test with peer (index 0) and get the test metrics
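
This fragment is from the driver's usage header; its neighbouring lines are not matched here since they lack the word "peer". The test is driven entirely through debugfs. Assuming the standard ntb_perf debugfs layout, the quoted example expands to:

    DBG_DIR=/sys/kernel/debug/ntb_perf/<device>
    echo 0 > $DBG_DIR/run    # start the test against peer index 0
    cat $DBG_DIR/run         # read back the measured metrics
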
204 int (*cmd_send)(struct perf_peer *peer, enum perf_cmd cmd, u64 data);
249 static inline bool perf_link_is_up(struct perf_peer *peer)
253 link = ntb_link_is_up(peer->perf->ntb, NULL, NULL);
254 return !!(link & BIT_ULL_MASK(peer->pidx));
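
Lines 249-254 form a complete helper; reconstructed (the u64 local holding the link bitmap is truncated out of the matches), it reads the NTB link-state bitmap and tests this peer's port bit:

    static inline bool perf_link_is_up(struct perf_peer *peer)
    {
        u64 link;

        /* ntb_link_is_up() returns a bitmap of per-port link states */
        link = ntb_link_is_up(peer->perf->ntb, NULL, NULL);
        return !!(link & BIT_ULL_MASK(peer->pidx));
    }
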
257 static int perf_spad_cmd_send(struct perf_peer *peer, enum perf_cmd cmd,
260 struct perf_ctx *perf = peer->perf;
274 if (!perf_link_is_up(peer))
277 sts = ntb_peer_spad_read(perf->ntb, peer->pidx,
284 ntb_peer_spad_write(perf->ntb, peer->pidx,
287 ntb_peer_spad_write(perf->ntb, peer->pidx,
290 ntb_peer_spad_write(perf->ntb, peer->pidx,
293 ntb_peer_db_set(perf->ntb, PERF_SPAD_NOTIFY(peer->gidx));
295 dev_dbg(&perf->ntb->dev, "DB ring peer %#llx\n",
296 PERF_SPAD_NOTIFY(peer->gidx));
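
Lines 257-296 are the scratchpad flavour of the command channel. The matches truncate the retry loop and the scratchpad index argument, so the following is a hedged sketch of one send attempt (the index is assumed to be perf->gidx, the local device's slot on the peer side): poll until the peer has consumed the previous command, write the 64-bit payload as two 32-bit halves, write the command code last, then ring the peer's doorbell:

    if (!perf_link_is_up(peer))
        return -ENOLINK;

    sts = ntb_peer_spad_read(perf->ntb, peer->pidx,
                             PERF_SPAD_CMD(perf->gidx));
    if (sts != PERF_CMD_INVAL)
        return -EAGAIN;    /* previous command not yet consumed */

    ntb_peer_spad_write(perf->ntb, peer->pidx,
                        PERF_SPAD_LDATA(perf->gidx), lower_32_bits(data));
    ntb_peer_spad_write(perf->ntb, peer->pidx,
                        PERF_SPAD_HDATA(perf->gidx), upper_32_bits(data));
    ntb_peer_spad_write(perf->ntb, peer->pidx,
                        PERF_SPAD_CMD(perf->gidx), cmd);

    /* the doorbell tells the peer a command is pending */
    ntb_peer_db_set(perf->ntb, PERF_SPAD_NOTIFY(peer->gidx));
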
307 struct perf_peer *peer;
314 * by any peer. Yes, it makes peer with smaller index being
319 peer = &perf->peers[*pidx];
321 if (!perf_link_is_up(peer))
324 val = ntb_spad_read(perf->ntb, PERF_SPAD_CMD(peer->gidx));
330 val = ntb_spad_read(perf->ntb, PERF_SPAD_LDATA(peer->gidx));
333 val = ntb_spad_read(perf->ntb, PERF_SPAD_HDATA(peer->gidx));
337 ntb_spad_write(perf->ntb, PERF_SPAD_CMD(peer->gidx),
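
The receive side (lines 307-337) runs after a doorbell fires. Since any peer may have rung it, the scan restarts from index 0 every time, which, as the quoted comment concedes, services lower-indexed peers with higher priority. Draining one slot pairs with the sender above; a hedged sketch:

    val = ntb_spad_read(perf->ntb, PERF_SPAD_CMD(peer->gidx));
    if (val == PERF_CMD_INVAL)
        continue;    /* nothing pending from this peer */
    *cmd = val;

    *data = ntb_spad_read(perf->ntb, PERF_SPAD_LDATA(peer->gidx));
    *data |= (u64)ntb_spad_read(perf->ntb,
                                PERF_SPAD_HDATA(peer->gidx)) << 32;

    /* writing INVAL back frees the slot for the next command */
    ntb_spad_write(perf->ntb, PERF_SPAD_CMD(peer->gidx), PERF_CMD_INVAL);
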
348 static int perf_msg_cmd_send(struct perf_peer *peer, enum perf_cmd cmd,
351 struct perf_ctx *perf = peer->perf;
366 if (!perf_link_is_up(peer))
373 ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_LDATA,
381 ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_HDATA,
384 /* This call shall trigger peer message event */
385 ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_CMD, cmd);
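
Lines 348-385 are the message-register flavour. No doorbell is involved: as the quoted comment states, the final write to PERF_MSG_CMD itself raises the message event on the peer, so the payload halves must be written before the command register. A hedged sketch with the retry and status checks trimmed:

    if (!perf_link_is_up(peer))
        return -ENOLINK;

    ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_LDATA,
                       lower_32_bits(data));
    ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_HDATA,
                       upper_32_bits(data));
    /* this write triggers the peer's message event */
    ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_CMD, cmd);
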
421 static int perf_cmd_send(struct perf_peer *peer, enum perf_cmd cmd, u64 data)
423 struct perf_ctx *perf = peer->perf;
426 return perf->cmd_send(peer, cmd, data);
432 static int perf_cmd_exec(struct perf_peer *peer, enum perf_cmd cmd)
442 dev_err(&peer->perf->ntb->dev, "Exec invalid command\n");
447 set_bit(cmd, &peer->sts);
449 dev_dbg(&peer->perf->ntb->dev, "CMD exec: %d\n", cmd);
451 (void)queue_work(system_highpri_wq, &peer->service);
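
perf_cmd_send (line 421) merely forwards through the perf->cmd_send hook declared at line 204, which init code binds to either the scratchpad or the message implementation above (that selection is outside this match set). perf_cmd_exec (line 432) is the deferred path: after validating the command it latches it as a bit in peer->sts and queues the per-peer service work, roughly:

    set_bit(cmd, &peer->sts);    /* bit ops are atomic; no extra lock */
    (void)queue_work(system_highpri_wq, &peer->service);
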
458 struct perf_peer *peer;
463 peer = &perf->peers[pidx];
467 peer->inbuf_size = data;
468 return perf_cmd_exec(peer, PERF_CMD_RSIZE);
470 peer->outbuf_xlat = data;
471 return perf_cmd_exec(peer, PERF_CMD_RXLAT);
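
Lines 458-471 translate a received command into its local counterpart: the peer's "send size" becomes our "receive size", its "send xlat" our "receive xlat". Condensed:

    switch (cmd) {
    case PERF_CMD_SSIZE:    /* peer announced its buffer size */
        peer->inbuf_size = data;
        return perf_cmd_exec(peer, PERF_CMD_RSIZE);
    case PERF_CMD_SXLAT:    /* peer announced its xlat address */
        peer->outbuf_xlat = data;
        return perf_cmd_exec(peer, PERF_CMD_RXLAT);
    default:
        return -EINVAL;
    }
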
485 struct perf_peer *peer;
490 peer = &perf->peers[pidx];
492 lnk_up = perf_link_is_up(peer);
495 !test_and_set_bit(PERF_STS_LNKUP, &peer->sts)) {
496 perf_cmd_exec(peer, PERF_CMD_SSIZE);
498 test_and_clear_bit(PERF_STS_LNKUP, &peer->sts)) {
499 perf_cmd_exec(peer, PERF_CMD_CLEAR);
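
The link-event handler (lines 485-499) is made edge-triggered by the atomic test-and-set/clear on PERF_STS_LNKUP: SSIZE runs exactly once per link-up and CLEAR exactly once per link-down, however often the event fires. Condensed:

    lnk_up = perf_link_is_up(peer);

    if (lnk_up && !test_and_set_bit(PERF_STS_LNKUP, &peer->sts))
        perf_cmd_exec(peer, PERF_CMD_SSIZE);    /* start the handshake */
    else if (!lnk_up && test_and_clear_bit(PERF_STS_LNKUP, &peer->sts))
        perf_cmd_exec(peer, PERF_CMD_CLEAR);    /* tear everything down */
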
532 static void perf_free_outbuf(struct perf_peer *peer)
534 (void)ntb_peer_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx);
537 static int perf_setup_outbuf(struct perf_peer *peer)
539 struct perf_ctx *perf = peer->perf;
543 ret = ntb_peer_mw_set_trans(perf->ntb, peer->pidx, peer->gidx,
544 peer->outbuf_xlat, peer->outbuf_size);
551 set_bit(PERF_STS_DONE, &peer->sts);
552 complete_all(&peer->init_comp);
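
perf_setup_outbuf (lines 537-552) is the last handshake step on this side: it programs the outbound window translation with the xlat address the peer sent, then publishes readiness; completing init_comp is what later unblocks perf_submit_test (line 1085). Condensed:

    ret = ntb_peer_mw_set_trans(perf->ntb, peer->pidx, peer->gidx,
                                peer->outbuf_xlat, peer->outbuf_size);
    if (ret)
        return ret;

    /* initialization finished; wake anyone waiting to run a test */
    set_bit(PERF_STS_DONE, &peer->sts);
    complete_all(&peer->init_comp);
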
557 static void perf_free_inbuf(struct perf_peer *peer)
559 if (!peer->inbuf)
562 (void)ntb_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx);
563 dma_free_coherent(&peer->perf->ntb->pdev->dev, peer->inbuf_size,
564 peer->inbuf, peer->inbuf_xlat);
565 peer->inbuf = NULL;
568 static int perf_setup_inbuf(struct perf_peer *peer)
571 struct perf_ctx *perf = peer->perf;
575 ret = ntb_mw_get_align(perf->ntb, peer->pidx, perf->gidx,
582 if (peer->inbuf_size > size_max) {
584 &peer->inbuf_size, &size_max);
588 peer->inbuf_size = round_up(peer->inbuf_size, size_align);
590 perf_free_inbuf(peer);
592 peer->inbuf = dma_alloc_coherent(&perf->ntb->pdev->dev,
593 peer->inbuf_size, &peer->inbuf_xlat,
595 if (!peer->inbuf) {
597 &peer->inbuf_size);
600 if (!IS_ALIGNED(peer->inbuf_xlat, xlat_align)) {
606 ret = ntb_mw_set_trans(perf->ntb, peer->pidx, peer->gidx,
607 peer->inbuf_xlat, peer->inbuf_size);
618 (void)perf_cmd_exec(peer, PERF_CMD_SXLAT);
623 perf_free_inbuf(peer);
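
perf_setup_inbuf (lines 568-623) allocates the local receive buffer within the hardware's limits, then hands its address to the peer. The ordering is the point: query the alignment limits, round the size up, allocate, verify the xlat alignment explicitly (dma_alloc_coherent makes no such guarantee), program the inbound window, and only then queue SXLAT. Condensed, with error paths trimmed:

    ret = ntb_mw_get_align(perf->ntb, peer->pidx, perf->gidx,
                           &xlat_align, &size_align, &size_max);

    peer->inbuf_size = round_up(peer->inbuf_size, size_align);

    peer->inbuf = dma_alloc_coherent(&perf->ntb->pdev->dev,
                                     peer->inbuf_size, &peer->inbuf_xlat,
                                     GFP_KERNEL);
    if (!peer->inbuf)
        return -ENOMEM;
    if (!IS_ALIGNED(peer->inbuf_xlat, xlat_align))
        goto err_free_inbuf;    /* allocation missed the HW alignment */

    ret = ntb_mw_set_trans(perf->ntb, peer->pidx, peer->gidx,
                           peer->inbuf_xlat, peer->inbuf_size);

    /* tell the peer where to aim its writes */
    (void)perf_cmd_exec(peer, PERF_CMD_SXLAT);
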
630 struct perf_peer *peer = to_peer_service(work);
632 if (test_and_clear_bit(PERF_CMD_SSIZE, &peer->sts))
633 perf_cmd_send(peer, PERF_CMD_SSIZE, peer->outbuf_size);
635 if (test_and_clear_bit(PERF_CMD_RSIZE, &peer->sts))
636 perf_setup_inbuf(peer);
638 if (test_and_clear_bit(PERF_CMD_SXLAT, &peer->sts))
639 perf_cmd_send(peer, PERF_CMD_SXLAT, peer->inbuf_xlat);
641 if (test_and_clear_bit(PERF_CMD_RXLAT, &peer->sts))
642 perf_setup_outbuf(peer);
644 if (test_and_clear_bit(PERF_CMD_CLEAR, &peer->sts)) {
645 init_completion(&peer->init_comp);
646 clear_bit(PERF_STS_DONE, &peer->sts);
647 if (test_bit(0, &peer->perf->busy_flag) &&
648 peer == peer->perf->test_peer) {
649 dev_warn(&peer->perf->ntb->dev,
651 perf_terminate_test(peer->perf);
653 perf_free_outbuf(peer);
654 perf_free_inbuf(peer);
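
perf_service_work (lines 630-654) is where every latched bit is acted on, and reading it top to bottom yields the whole buffer-negotiation handshake. As a message-flow summary between sides A and B, reconstructed from the matched lines:

    /*
     * A: link up       -> exec SSIZE
     * A: service work  -> send SSIZE(outbuf_size) to B
     * B: recv SSIZE    -> exec RSIZE
     * B: service work  -> perf_setup_inbuf(), which queues SXLAT
     * B: service work  -> send SXLAT(inbuf_xlat) to A
     * A: recv SXLAT    -> exec RXLAT
     * A: service work  -> perf_setup_outbuf(): sets PERF_STS_DONE,
     *                     completes init_comp
     *
     * The same runs with A and B swapped. CLEAR reverses it all and,
     * if this peer is currently under test, aborts the test first.
     */
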
762 struct perf_peer *peer = &perf->peers[pidx];
764 ntb_spad_write(perf->ntb, PERF_SPAD_CMD(peer->gidx), 0);
792 struct perf_peer *peer = pthr->perf->test_peer;
808 vbase = peer->outbuf;
810 dst_dma_addr = peer->dma_dst_addr + (dst_vaddr - vbase);
875 struct perf_peer *peer = pthr->perf->test_peer;
895 peer->dma_dst_addr =
897 peer->out_phys_addr, peer->outbuf_size,
900 peer->dma_dst_addr)) {
903 peer->dma_dst_addr = 0;
909 &peer->out_phys_addr,
910 &peer->dma_dst_addr);
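
Lines 875-910 are the DMA-engine setup of the test path: the peer's MMIO window (out_phys_addr) is mapped with dma_map_resource() so a DMA engine can write into it directly, and perf_copy_chunk (lines 792-810) later turns a virtual destination into a DMA address by offsetting from that mapping. A hedged sketch, with dma_dev standing in for the DMA channel's device (truncated in the matches) and the direction flag assumed:

    peer->dma_dst_addr = dma_map_resource(dma_dev, peer->out_phys_addr,
                                          peer->outbuf_size,
                                          DMA_FROM_DEVICE, 0);
    if (dma_mapping_error(dma_dev, peer->dma_dst_addr)) {
        peer->dma_dst_addr = 0;    /* no mapping, no DMA-based test */
        /* error path trimmed */
    }
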
924 struct perf_peer *peer = pthr->perf->test_peer;
933 chunk_size = min_t(u64, peer->outbuf_size, chunk_size);
936 bnd_dst = peer->outbuf + peer->outbuf_size;
937 flt_dst = peer->outbuf;
954 if (flt_dst >= bnd_dst || flt_dst < peer->outbuf) {
955 flt_dst = peer->outbuf;
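
Lines 924-955 come from the per-thread copy loop: the destination pointer "floats" through the peer buffer in chunk-sized steps and snaps back to the start once it reaches (or would run past) the buffer end, so transfers far larger than outbuf_size simply lap the window; the lower-bound test also catches pointer wrap-around. Condensed:

    chunk_size = min_t(u64, peer->outbuf_size, chunk_size);

    bnd_dst = peer->outbuf + peer->outbuf_size;
    flt_dst = peer->outbuf;

    /* per-iteration advance; the copy call itself is elided */
    flt_dst += chunk_size;
    if (flt_dst >= bnd_dst || flt_dst < peer->outbuf)
        flt_dst = peer->outbuf;    /* wrap to the window start */
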
1079 static int perf_submit_test(struct perf_peer *peer)
1081 struct perf_ctx *perf = peer->perf;
1085 ret = wait_for_completion_interruptible(&peer->init_comp);
1092 perf->test_peer = peer;
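
perf_submit_test (lines 1079-1092) gates test submission on the handshake: it sleeps on init_comp (completed by perf_setup_outbuf above), takes the single-test busy lock (bit 0 of busy_flag, as the CLEAR path at line 647 also checks), and records which peer is under test, roughly:

    ret = wait_for_completion_interruptible(&peer->init_comp);
    if (ret < 0)
        return ret;    /* interrupted before the handshake finished */

    if (test_and_set_bit_lock(0, &perf->busy_flag))
        return -EBUSY;    /* one test at a time */

    perf->test_peer = peer;
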
1186 struct perf_peer *peer;
1215 peer = &perf->peers[pidx];
1219 ntb_peer_port_number(perf->ntb, peer->pidx), peer->pidx,
1220 peer->gidx);
1224 test_bit(PERF_STS_LNKUP, &peer->sts) ? "up" : "down");
1227 "\tOut buffer addr 0x%pK\n", peer->outbuf);
1230 "\tOut buff phys addr %pa[p]\n", &peer->out_phys_addr);
1233 "\tOut buffer size %pa\n", &peer->outbuf_size);
1236 "\tOut buffer xlat 0x%016llx[p]\n", peer->outbuf_xlat);
1238 if (!peer->inbuf) {
1245 "\tIn buffer addr 0x%pK\n", peer->inbuf);
1248 "\tIn buffer size %pa\n", &peer->inbuf_size);
1251 "\tIn buffer xlat %pad[p]\n", &peer->inbuf_xlat);
1291 struct perf_peer *peer;
1301 peer = &perf->peers[pidx];
1303 ret = perf_submit_test(peer);
1409 static int perf_setup_peer_mw(struct perf_peer *peer)
1411 struct perf_ctx *perf = peer->perf;
1417 &peer->outbuf_size);
1421 peer->outbuf = devm_ioremap_wc(&perf->ntb->dev, phys_addr,
1422 peer->outbuf_size);
1423 if (!peer->outbuf)
1426 peer->out_phys_addr = phys_addr;
1428 if (max_mw_size && peer->outbuf_size > max_mw_size) {
1429 peer->outbuf_size = max_mw_size;
1430 dev_warn(&peer->perf->ntb->dev,
1431 "Peer %d outbuf reduced to %pa\n", peer->pidx,
1432 &peer->outbuf_size);
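
perf_setup_peer_mw (lines 1409-1432) maps the outbound window once at probe time: the window's physical address and size come from the NTB core, the range is ioremapped write-combined for throughput, and only then is the advertised size clamped to the max_mw_size module parameter (safe, since the mapping still spans the full window). A hedged sketch; the get-addr call's index argument is truncated in the matches:

    ret = ntb_peer_mw_get_addr(perf->ntb, perf->gidx, &phys_addr,
                               &peer->outbuf_size);
    if (ret)
        return ret;

    /* write-combined: posted MMIO writes batch up well over NTB */
    peer->outbuf = devm_ioremap_wc(&perf->ntb->dev, phys_addr,
                                   peer->outbuf_size);
    if (!peer->outbuf)
        return -ENOMEM;

    peer->out_phys_addr = phys_addr;
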
1440 struct perf_peer *peer;
1446 peer = &perf->peers[pidx];
1448 peer->perf = perf;
1449 peer->pidx = pidx;
1453 peer->gidx = pidx + 1;
1455 peer->gidx = pidx;
1457 INIT_WORK(&peer->service, perf_service_work);
1458 init_completion(&peer->init_comp);
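
Finally, perf_init_peers (lines 1440-1458) explains the pidx/gidx pair used throughout: pidx is the peer's NTB port index, while gidx is its slot in a global, port-ordered numbering that the scratchpads and windows are keyed by. Peers whose port sorts before the local port are shifted up by one so the local device occupies the gap. Only the assignments and the two init calls appear in the matches, so the loop scaffolding and the comparison below are assumptions:

    for (pidx = 0; pidx < perf->pcnt; pidx++) {
        peer = &perf->peers[pidx];

        peer->perf = perf;
        peer->pidx = pidx;
        /* assumed ordering test; the real condition is truncated */
        if (ntb_peer_port_number(perf->ntb, pidx) < ntb_port_number(perf->ntb))
            peer->gidx = pidx + 1;
        else
            peer->gidx = pidx;

        INIT_WORK(&peer->service, perf_service_work);
        init_completion(&peer->init_comp);
    }
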