Lines Matching refs:bu

52     struct bulk_e10k       *bu;
58 struct bulk_e10k *bu;
80 static void idc_request_device_info(struct bulk_e10k *bu);
81 static void idc_register_queue_memory(struct bulk_e10k *bu);
223 struct bulk_e10k *bu = st;
228 b->st = bu;
229 bu->binding = b;
231 idc_request_device_info(bu);
236 static void idc_request_device_info(struct bulk_e10k *bu)
242 err = e10k_request_device_info__tx(bu->binding, NOP_CONT);
248 static void idc_register_queue_memory(struct bulk_e10k *bu)
253 /* r = e10k_register_queue_memory__tx(bu->binding, NOP_CONT, bu->qi, */
254 /* bu->txframe, bu->txhwbframe, bu->rxframe, bu->buffer_size, E10K_HDRSZ, */
255 /* bu->int_vector, bu->int_core, USE_INTERRUPTS, false); */
266 struct bulk_e10k *bu = b->st;
275 bu->mac = macaddr;
284 e10k_initialize(&bu->d, virt);
287 err = allocmap_frame(bu->ring_size * E10K_DESCSZ, &rx, NULL, &bu->rxframe);
289 err = allocmap_frame(bu->ring_size * E10K_DESCSZ, &tx, NULL, &bu->txframe);
291 err = allocmap_frame(0x1000, &txhwb, NULL, &bu->txhwbframe);
294 bu->q = e10k_queue_init(tx, bu->ring_size, txhwb, rx, bu->ring_size,
295 &ops, bu);
299 err = pci_setup_inthandler(interrupt_handler, bu, &bu->int_vector);
301 bu->int_core = disp_get_core_id();
309 idc_register_queue_memory(bu);
315 struct bulk_e10k *bu = b->st;
318 bu->ready = true;
327 struct bulk_e10k *bu = b->st;
329 e10k_queue_bump_rxtail(bu->q);
330 e10k_queue_bump_txtail(bu->q);
341 rxe->bu->received(rxe->bu, &rxe->msg);
342 stack_alloc_free(&rxe->bu->rx_event_alloc, rxe);
346 static bool recv_one(struct bulk_e10k *bu)
357 res = e10k_queue_get_rxbuf(bu->q, &op, &hdrlen, &len, &last, &flags);
360 rxe = stack_alloc_alloc(&bu->rx_event_alloc);
393 event_queue_add(&bu->event_queue, &rxe->eqn,
404 txe->bu->transmitted(txe->bu, txe->op);
405 stack_alloc_free(&txe->bu->tx_event_alloc, txe);
410 static bool check_tx(struct bulk_e10k *bu)
417 if (e10k_tdt_rd(&bu->d, bu->qi) != e10k_tdh_rd(&bu->d, bu->qi)) {
418 DEBUG("Nonempty: %"PRIx32" %"PRIx32"\n", e10k_tdt_rd(&bu->d,
419 bu->qi), e10k_tdh_rd(&bu->d, bu->qi));
422 if (e10k_queue_get_txbuf(bu->q, &op) == 0) {
424 txe = stack_alloc_alloc(&bu->tx_event_alloc);
430 event_queue_add(&bu->event_queue, &txe->eqn,
442 struct bulk_e10k *bu = arg;
444 while (recv_one(bu));
445 while (check_tx(bu));
458 struct bulk_e10k *bu = arg;
463 cur = recv_one(bu);
467 cur = check_tx(bu);
472 waitset_chan_register_polled(bu->waitset, &bu->wscs,
473 MKCLOSURE(ws_event, bu));
479 struct bulk_e10k *bu = wscs_to_e10k(chan);
481 if (e10k_queue_get_txpoll(bu->q) != 0 &&
482 e10k_queue_rxpoll(bu->q) != 0)
495 struct bulk_e10k *bu = arg;
499 found = check_tx(bu);
500 found = recv_one(bu) || found;
514 struct bulk_e10k *bu = opaque;
515 e10k_tdt_wr(&bu->d, bu->qi, tail);
522 struct bulk_e10k *bu = opaque;
523 e10k_rdt_1_wr(&bu->d, bu->qi, tail);
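
The two callback bodies above (source lines 514-523) are what e10k_queue_init() at line 294 receives through &ops: when the software queue bumps a tail index, the callback publishes it to the NIC's TDT/RDT register for this channel's queue. A minimal sketch of that wiring, assuming the callback names, the e10k_queue_ops field names and the errval_t return type, none of which appear in the matched lines:

    /* Sketch only: the listing shows just the callback bodies; the names,
     * ops-struct field names and return type here are assumptions. */
    static errval_t update_txtail(void *opaque, size_t tail)
    {
        struct bulk_e10k *bu = opaque;
        e10k_tdt_wr(&bu->d, bu->qi, tail);   /* publish new TX tail to hardware */
        return SYS_ERR_OK;
    }

    static errval_t update_rxtail(void *opaque, size_t tail)
    {
        struct bulk_e10k *bu = opaque;
        e10k_rdt_1_wr(&bu->d, bu->qi, tail); /* publish new RX tail to hardware */
        return SYS_ERR_OK;
    }

    static struct e10k_queue_ops ops = {
        .update_txtail = update_txtail,
        .update_rxtail = update_rxtail,
    };
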
534 * @param bu Channel struct
543 errval_t bulk_e10k_init(struct bulk_e10k *bu,
561 bu->qi = queue;
562 bu->ready = false;
563 bu->received = received;
564 bu->transmitted = transmitted;
565 bu->buffer_size = buffer_size;
566 bu->ring_size = ring_size;
567 bu->waitset = ws;
570 stack_alloc_init(&bu->rx_event_alloc, ring_size);
571 stack_alloc_init(&bu->tx_event_alloc, ring_size);
575 rxe[i].bu = bu;
576 txe[i].bu = bu;
577 stack_alloc_free(&bu->rx_event_alloc, rxe + i);
578 stack_alloc_free(&bu->tx_event_alloc, txe + i);
592 err = e10k_bind(iref, bind_cb, bu, ws, IDC_BIND_FLAGS_DEFAULT);
595 while (!bu->ready) {
601 event_queue_init(&bu->event_queue, ws, EVENT_QUEUE_CONTINUOUS);
605 waitset_chanstate_init(&bu->wscs, CHANTYPE_BULK_E10K);
606 waitset_chan_register_polled(ws, &bu->wscs,
607 MKCLOSURE(ws_event, bu));
609 thread_create(recv_thread, bu);
619 * @param bu Channel struct
625 errval_t bulk_e10k_rx_add(struct bulk_e10k *bu, uint64_t phys, uint64_t header,
629 bu, phys, header, opaque);
630 int r = e10k_queue_add_rxbuf(bu->q, phys, header, opaque);
632 e10k_queue_bump_rxtail(bu->q);
639 * @param bu Channel struct
642 errval_t bulk_e10k_send(struct bulk_e10k *bu, struct bulk_net_msgdesc *desc)
656 e10k_queue_add_txcontext(bu->q, 0, ETHHDR_LEN, IPHDR_LEN, 0, 0);
657 e10k_queue_add_txbuf_ctx(bu->q, desc->parts[0].phys,
662 e10k_queue_add_txbuf(bu->q, desc->parts[i].phys,
666 e10k_queue_bump_txtail(bu->q);
674 * @param bu Channel struct
677 errval_t bulk_e10k_port_add(struct bulk_e10k *bu, uint16_t port)
682 err = port_bind(0, 0, bu->qi, port);
692 * @param bu Channel struct
696 errval_t bulk_e10k_port_alloc(struct bulk_e10k *bu, uint16_t *port)
698 return port_get(0, 0, bu->qi, port);
704 * @param bu Channel struct
707 errval_t bulk_e10k_ip_info(struct bulk_e10k *bu, uint32_t *ip)
719 * @param bu Channel struct
723 errval_t bulk_e10k_arp_lookup(struct bulk_e10k *bu, uint32_t ip, uint64_t *mac)
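
The exported calls at the end of the listing (bulk_e10k_port_alloc, bulk_e10k_port_add, bulk_e10k_ip_info, bulk_e10k_arp_lookup) give a caller what it needs to set up its own endpoint and resolve a peer. A caller-side sketch, assuming the channel has already been brought up with bulk_e10k_init() (whose full parameter list is not part of the matched lines) and using only the signatures visible above; err_is_fail() is the standard Barrelfish error check:

    /* Hypothetical helper, not part of the listed file. */
    static errval_t resolve_peer(struct bulk_e10k *bu, uint32_t peer_ip,
                                 uint16_t *local_port, uint64_t *peer_mac)
    {
        errval_t err;
        uint32_t local_ip;

        err = bulk_e10k_port_alloc(bu, local_port);  /* allocate a free port */
        if (err_is_fail(err)) {
            return err;
        }

        err = bulk_e10k_ip_info(bu, &local_ip);      /* channel's own IP address */
        if (err_is_fail(err)) {
            return err;
        }

        /* resolve the peer's MAC address via ARP */
        return bulk_e10k_arp_lookup(bu, peer_ip, peer_mac);
    }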