Lines Matching defs:sc

15 static void sc_wait_for_packet_egress(struct send_context *sc, int pause);
508 * given sc.
519 static void cr_group_addresses(struct send_context *sc, dma_addr_t *dma)
521 u32 gc = group_context(sc->hw_context, sc->group);
522 u32 index = sc->hw_context & 0x7;
524 sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
526 &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc];
535 struct send_context *sc;
537 sc = container_of(work, struct send_context, halt_work);
538 sc_restart(sc);
551 u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize)
561 if (sc->credits <= release_credits)
564 threshold = sc->credits - release_credits;
576 u32 sc_percent_to_threshold(struct send_context *sc, u32 percent)
578 return (sc->credits * percent) / 100;
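A minimal standalone sketch of the credit-return threshold arithmetic matched above (551-578) and its use at 771-777. The helper functions, the sample values in main(), and the MTU-to-release-credits conversion (header bytes added, then rounded up to PIO blocks) are assumptions made for illustration; only the credits-minus-release-credits and percent calculations come directly from the matched lines.

/* Sketch of the threshold math behind sc_mtu_to_threshold() and
 * sc_percent_to_threshold(); a standalone model, not the driver code. */
#include <stdint.h>
#include <stdio.h>

#define PIO_BLOCK_SIZE 64u              /* bytes per PIO credit block (model constant) */

/* assumed shape of the elided MTU -> release-credits conversion */
static uint32_t mtu_to_threshold(uint32_t credits, uint32_t mtu,
                                 uint32_t hdrqentsize)
{
        uint32_t release_credits = (mtu + (hdrqentsize << 2) +
                                    PIO_BLOCK_SIZE - 1) / PIO_BLOCK_SIZE;

        /* lines 561-564: never let the threshold drop to zero */
        if (credits <= release_credits)
                return 1;
        return credits - release_credits;
}

static uint32_t percent_to_threshold(uint32_t credits, uint32_t percent)
{
        return (credits * percent) / 100;       /* line 578 */
}

int main(void)
{
        uint32_t credits = 1024, mtu = 8192, hdrqentsize = 32;

        /* lines 776-777: take the smaller of the percent- and MTU-based values */
        uint32_t a = percent_to_threshold(credits, 50);
        uint32_t b = mtu_to_threshold(credits, mtu, hdrqentsize);
        printf("threshold = %u\n", a < b ? a : b);
        return 0;
}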
584 void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
590 spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
592 old_threshold = (sc->credit_ctrl >>
597 sc->credit_ctrl =
598 (sc->credit_ctrl
603 write_kctxt_csr(sc->dd, sc->hw_context,
604 SC(CREDIT_CTRL), sc->credit_ctrl);
610 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
613 sc_return_credits(sc);
619 * Set the CHECK_ENABLE register for the send context 'sc'.
621 void set_pio_integrity(struct send_context *sc)
623 struct hfi1_devdata *dd = sc->dd;
624 u32 hw_context = sc->hw_context;
625 int type = sc->type;
632 static u32 get_buffers_allocated(struct send_context *sc)
638 ret += *per_cpu_ptr(sc->buffers_allocated, cpu);
642 static void reset_buffers_allocated(struct send_context *sc)
647 (*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0;
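The matches at 632-647, together with the this_cpu_inc() at 1451, show a per-CPU allocation counter that is summed on read and zeroed on reset. A small standalone model of that pattern follows; NR_CPUS, the plain array, and buffer_alloc_on() are stand-ins for the kernel's per-CPU machinery, not driver code.

/* Standalone model of the per-CPU buffers_allocated counter
 * (summed in get_buffers_allocated(), zeroed in reset_buffers_allocated()). */
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4                       /* stand-in for the possible-CPU mask */

static uint32_t buffers_allocated[NR_CPUS];

static void buffer_alloc_on(int cpu)   /* models this_cpu_inc() at alloc time */
{
        buffers_allocated[cpu]++;
}

static uint32_t get_buffers_allocated(void)
{
        uint32_t ret = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                ret += buffers_allocated[cpu];
        return ret;
}

static void reset_buffers_allocated(void)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                buffers_allocated[cpu] = 0;
}

int main(void)
{
        buffer_alloc_on(0);
        buffer_alloc_on(2);
        printf("allocated = %u\n", get_buffers_allocated());   /* 2 */
        reset_buffers_allocated();
        printf("after reset = %u\n", get_buffers_allocated()); /* 0 */
        return 0;
}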
658 struct send_context *sc = NULL;
672 sc = kzalloc_node(sizeof(*sc), GFP_KERNEL, numa);
673 if (!sc)
676 sc->buffers_allocated = alloc_percpu(u32);
677 if (!sc->buffers_allocated) {
678 kfree(sc);
689 free_percpu(sc->buffers_allocated);
690 kfree(sc);
695 sci->sc = sc;
697 sc->dd = dd;
698 sc->node = numa;
699 sc->type = type;
700 spin_lock_init(&sc->alloc_lock);
701 spin_lock_init(&sc->release_lock);
702 spin_lock_init(&sc->credit_ctrl_lock);
703 seqlock_init(&sc->waitlock);
704 INIT_LIST_HEAD(&sc->piowait);
705 INIT_WORK(&sc->halt_work, sc_halted);
706 init_waitqueue_head(&sc->halt_wait);
709 sc->group = 0;
711 sc->sw_index = sw_index;
712 sc->hw_context = hw_context;
713 cr_group_addresses(sc, &dma);
714 sc->credits = sci->credits;
715 sc->size = sc->credits * PIO_BLOCK_SIZE;
720 sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK)
730 set_pio_integrity(sc);
771 thresh = sc_percent_to_threshold(sc, 50);
773 thresh = sc_percent_to_threshold(sc,
776 thresh = min(sc_percent_to_threshold(sc, 50),
777 sc_mtu_to_threshold(sc, hfi1_max_mtu,
788 sc->credit_ctrl = reg;
812 sc->sr_size = sci->credits + 1;
813 sc->sr = kcalloc_node(sc->sr_size,
816 if (!sc->sr) {
817 sc_free(sc);
827 sc->group,
828 sc->credits,
829 sc->credit_ctrl,
832 return sc;
836 void sc_free(struct send_context *sc)
843 if (!sc)
846 sc->flags |= SCF_IN_FREE; /* ensure no restarts */
847 dd = sc->dd;
848 if (!list_empty(&sc->piowait))
850 sw_index = sc->sw_index;
851 hw_context = sc->hw_context;
852 sc_disable(sc); /* make sure the HW is disabled */
853 flush_work(&sc->halt_work);
856 dd->send_contexts[sw_index].sc = NULL;
871 kfree(sc->sr);
872 free_percpu(sc->buffers_allocated);
873 kfree(sc);
877 void sc_disable(struct send_context *sc)
883 if (!sc)
887 spin_lock_irq(&sc->alloc_lock);
888 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
890 sc->flags &= ~SCF_ENABLED;
891 sc_wait_for_packet_egress(sc, 1);
892 write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
902 spin_lock(&sc->release_lock);
903 if (sc->sr) { /* this context has a shadow ring */
904 while (sc->sr_tail != sc->sr_head) {
905 pbuf = &sc->sr[sc->sr_tail].pbuf;
908 sc->sr_tail++;
909 if (sc->sr_tail >= sc->sr_size)
910 sc->sr_tail = 0;
913 spin_unlock(&sc->release_lock);
915 write_seqlock(&sc->waitlock);
916 list_splice_init(&sc->piowait, &wake_list);
917 write_sequnlock(&sc->waitlock);
931 spin_unlock_irq(&sc->alloc_lock);
957 * @sc: valid send context
969 static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
971 struct hfi1_devdata *dd = sc->dd;
978 reg = read_csr(dd, sc->hw_context * 8 +
981 if (sc->flags & SCF_HALTED ||
982 is_sc_halted(dd, sc->hw_context) || egress_halted(reg))
994 __func__, sc->sw_index,
995 sc->hw_context, (u32)reg);
1014 struct send_context *sc = dd->send_contexts[i].sc;
1016 if (!sc)
1018 sc_wait_for_packet_egress(sc, 0);
1031 int sc_restart(struct send_context *sc)
1033 struct hfi1_devdata *dd = sc->dd;
1039 if (!(sc->flags & SCF_HALTED) || (sc->flags & SCF_IN_FREE))
1042 dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index,
1043 sc->hw_context);
1053 reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS));
1058 __func__, sc->sw_index, sc->hw_context);
1075 if (sc->type != SC_USER) {
1079 count = get_buffers_allocated(sc);
1085 __func__, sc->sw_index,
1086 sc->hw_context, count);
1102 sc_disable(sc);
1110 return sc_enable(sc);
1120 struct send_context *sc;
1124 sc = dd->send_contexts[i].sc;
1130 if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
1134 sc_disable(sc);
1147 struct send_context *sc;
1151 sc = dd->send_contexts[i].sc;
1152 if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
1154 if (sc->flags & SCF_LINK_DOWN)
1157 sc_enable(sc); /* will clear the sc frozen flag */
1175 struct send_context *sc;
1179 sc = dd->send_contexts[i].sc;
1180 if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER)
1183 sc_enable(sc); /* will clear the sc link down flag */
1243 int sc_enable(struct send_context *sc)
1250 if (!sc)
1252 dd = sc->dd;
1261 spin_lock_irqsave(&sc->alloc_lock, flags);
1262 sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
1268 *sc->hw_free = 0;
1269 sc->free = 0;
1270 sc->alloc_free = 0;
1271 sc->fill = 0;
1272 sc->fill_wrap = 0;
1273 sc->sr_head = 0;
1274 sc->sr_tail = 0;
1275 sc->flags = 0;
1277 reset_buffers_allocated(sc);
1285 reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS));
1287 write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg);
1301 pio = ((sc->hw_context & SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK) <<
1315 sc->sw_index, sc->hw_context, ret);
1323 write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl);
1328 read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
1329 sc->flags |= SCF_ENABLED;
1332 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1338 void sc_return_credits(struct send_context *sc)
1340 if (!sc)
1344 write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE),
1350 read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE));
1352 write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0);
1356 void sc_flush(struct send_context *sc)
1358 if (!sc)
1361 sc_wait_for_packet_egress(sc, 1);
1365 void sc_drop(struct send_context *sc)
1367 if (!sc)
1370 dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n",
1371 __func__, sc->sw_index, sc->hw_context);
1382 void sc_stop(struct send_context *sc, int flag)
1387 spin_lock_irqsave(&sc->alloc_lock, flags);
1389 sc->flags |= flag;
1390 sc->flags &= ~SCF_ENABLED;
1391 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1392 wake_up(&sc->halt_wait);
1401 * @sc: the PIO send context we are allocating from
1409 struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
1420 spin_lock_irqsave(&sc->alloc_lock, flags);
1421 if (!(sc->flags & SCF_ENABLED)) {
1422 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1427 avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free);
1431 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1435 sc->alloc_free = READ_ONCE(sc->free);
1437 (unsigned long)sc->credits -
1438 (sc->fill - sc->alloc_free);
1441 sc_release_update(sc);
1442 sc->alloc_free = READ_ONCE(sc->free);
1451 this_cpu_inc(*sc->buffers_allocated);
1454 head = sc->sr_head;
1457 sc->fill += blocks;
1458 fill_wrap = sc->fill_wrap;
1459 sc->fill_wrap += blocks;
1460 if (sc->fill_wrap >= sc->credits)
1461 sc->fill_wrap = sc->fill_wrap - sc->credits;
1470 pbuf = &sc->sr[head].pbuf;
1471 pbuf->sent_at = sc->fill;
1474 pbuf->sc = sc; /* could be filled in at sc->sr init time */
1479 if (next >= sc->sr_size)
1486 sc->sr_head = next;
1487 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1490 pbuf->start = sc->base_addr + fill_wrap * PIO_BLOCK_SIZE;
1491 pbuf->end = sc->base_addr + sc->size;
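A standalone model of the counters used by sc_buffer_alloc() as matched at 1409-1491: 'fill' and 'free' are free-running block counts, availability is credits minus outstanding blocks, and 'fill_wrap' wraps at the context size to give the new buffer's block offset. The sc_model struct, alloc_blocks(), and the sample sequence in main() are illustrative assumptions; the driver additionally retries after forcing a release update (1441) and turns the wrapped offset into an MMIO address from base_addr (1490).

/* Model of the allocation arithmetic in sc_buffer_alloc(); not driver code. */
#include <stdint.h>
#include <stdio.h>

struct sc_model {
        uint32_t credits;       /* blocks in the context */
        uint64_t fill;          /* free-running blocks handed out */
        uint64_t free;          /* free-running blocks returned by hardware */
        uint64_t alloc_free;    /* allocator's cached copy of 'free' */
        uint32_t fill_wrap;     /* fill modulo credits */
};

/* returns the wrapped block offset of the new buffer, or -1 if full */
static long alloc_blocks(struct sc_model *sc, uint32_t blocks)
{
        uint64_t avail = sc->credits - (sc->fill - sc->alloc_free);

        if (blocks > avail) {
                sc->alloc_free = sc->free;      /* refresh from released credits */
                avail = sc->credits - (sc->fill - sc->alloc_free);
                if (blocks > avail)
                        return -1;
        }

        uint32_t start = sc->fill_wrap;

        sc->fill += blocks;
        sc->fill_wrap += blocks;
        if (sc->fill_wrap >= sc->credits)
                sc->fill_wrap -= sc->credits;   /* lines 1460-1461 */
        return start;
}

int main(void)
{
        struct sc_model sc = { .credits = 8 };

        printf("%ld\n", alloc_blocks(&sc, 3));  /* 0 */
        printf("%ld\n", alloc_blocks(&sc, 3));  /* 3 */
        printf("%ld\n", alloc_blocks(&sc, 3));  /* -1: only 2 blocks left */
        sc.free = 3;                            /* hardware returned 3 blocks */
        printf("%ld\n", alloc_blocks(&sc, 3));  /* 6, fill_wrap wraps to 1 */
        return 0;
}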
1510 void sc_add_credit_return_intr(struct send_context *sc)
1515 spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
1516 if (sc->credit_intr_count == 0) {
1517 sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
1518 write_kctxt_csr(sc->dd, sc->hw_context,
1519 SC(CREDIT_CTRL), sc->credit_ctrl);
1521 sc->credit_intr_count++;
1522 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
1529 void sc_del_credit_return_intr(struct send_context *sc)
1533 WARN_ON(sc->credit_intr_count == 0);
1536 spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
1537 sc->credit_intr_count--;
1538 if (sc->credit_intr_count == 0) {
1539 sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
1540 write_kctxt_csr(sc->dd, sc->hw_context,
1541 SC(CREDIT_CTRL), sc->credit_ctrl);
1543 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
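Lines 1510-1543 show that the credit-return interrupt bit in CREDIT_CTRL is reference counted: the first caller to add sets the bit, the last caller to delete clears it, and deleting at a count of zero warns. A compact standalone model of that counting follows, with a boolean flag standing in for the CSR bit; intr_model and its helpers are hypothetical names.

/* Model of the refcounted enable in sc_add/del_credit_return_intr(). */
#include <assert.h>
#include <stdbool.h>

struct intr_model {
        unsigned int credit_intr_count;
        bool intr_enabled;              /* stands in for the CREDIT_CTRL bit */
};

static void add_credit_return_intr(struct intr_model *m)
{
        if (m->credit_intr_count == 0)
                m->intr_enabled = true;  /* first user turns the interrupt on */
        m->credit_intr_count++;
}

static void del_credit_return_intr(struct intr_model *m)
{
        assert(m->credit_intr_count != 0);      /* WARN_ON at line 1533 */
        m->credit_intr_count--;
        if (m->credit_intr_count == 0)
                m->intr_enabled = false; /* last user turns it back off */
}

int main(void)
{
        struct intr_model m = { 0 };

        add_credit_return_intr(&m);
        add_credit_return_intr(&m);
        del_credit_return_intr(&m);
        assert(m.intr_enabled);         /* still one user left */
        del_credit_return_intr(&m);
        assert(!m.intr_enabled);
        return 0;
}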
1550 void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint)
1553 sc_add_credit_return_intr(sc);
1555 sc_del_credit_return_intr(sc);
1556 trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl);
1558 sc_return_credits(sc);
1563 * @sc: the send context
1569 static void sc_piobufavail(struct send_context *sc)
1571 struct hfi1_devdata *dd = sc->dd;
1579 if (dd->send_contexts[sc->sw_index].type != SC_KERNEL &&
1580 dd->send_contexts[sc->sw_index].type != SC_VL15)
1582 list = &sc->piowait;
1589 write_seqlock_irqsave(&sc->waitlock, flags);
1616 hfi1_sc_wantpiobuf_intr(sc, 0);
1618 hfi1_sc_wantpiobuf_intr(sc, 1);
1620 write_sequnlock_irqrestore(&sc->waitlock, flags);
1656 void sc_release_update(struct send_context *sc)
1667 if (!sc)
1670 spin_lock_irqsave(&sc->release_lock, flags);
1672 hw_free = le64_to_cpu(*sc->hw_free); /* volatile read */
1673 old_free = sc->free;
1678 trace_hfi1_piofree(sc, extra);
1682 head = READ_ONCE(sc->sr_head); /* snapshot the head */
1683 tail = sc->sr_tail;
1685 pbuf = &sc->sr[tail].pbuf;
1698 if (tail >= sc->sr_size)
1701 sc->sr_tail = tail;
1704 sc->free = free;
1705 spin_unlock_irqrestore(&sc->release_lock, flags);
1706 sc_piobufavail(sc);
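A standalone model of the shadow-ring walk in sc_release_update() as matched at 1656-1706: the head is snapshotted, entries from sr_tail onward are completed while their sent_at value has been covered by the freed-credit count, and the tail and cached free count are then published. ring_model, pbuf_model, and the way new_free is passed in are assumptions made for illustration; the driver derives the free count from the credit-return counter read through *sc->hw_free.

/* Model of the release walk; not driver code. */
#include <stdint.h>
#include <stdio.h>

#define SR_SIZE 8

struct pbuf_model {
        uint64_t sent_at;       /* value of 'fill' when the buffer was taken */
};

struct ring_model {
        struct pbuf_model sr[SR_SIZE];
        uint32_t sr_head, sr_tail;
        uint64_t free;          /* free-running count of returned credits */
};

static void release_update(struct ring_model *r, uint64_t new_free)
{
        uint32_t head = r->sr_head;     /* snapshot the head, as at 1682 */
        uint32_t tail = r->sr_tail;

        while (head != tail) {
                struct pbuf_model *pbuf = &r->sr[tail];

                if (pbuf->sent_at > new_free)
                        break;          /* not yet returned by hardware */
                printf("completing buffer sent_at=%llu\n",
                       (unsigned long long)pbuf->sent_at);
                tail++;
                if (tail >= SR_SIZE)    /* lines 1698-1699 */
                        tail = 0;
        }
        r->sr_tail = tail;
        r->free = new_free;
}

int main(void)
{
        struct ring_model r = { .sr_head = 3 };

        r.sr[0].sent_at = 2;
        r.sr[1].sent_at = 5;
        r.sr[2].sent_at = 9;
        release_update(&r, 6);          /* completes sent_at 2 and 5 only */
        printf("tail=%u free=%llu\n", r.sr_tail, (unsigned long long)r.free);
        return 0;
}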
1720 struct send_context *sc;
1731 sc = dd->send_contexts[sw_index].sc;
1732 if (unlikely(!sc))
1735 gc = group_context(hw_context, sc->group);
1736 gc_end = gc + group_size(sc->group);
1745 sc_release_update(dd->send_contexts[sw_index].sc);
1781 return dd->vld[0].sc;
1788 rval = !rval ? dd->vld[0].sc : rval;
1796 * @sc5: the 5-bit sc
1798 * This function returns a send context based on the selector and an sc
1979 dd->vld[15].sc = sc_alloc(dd, SC_VL15,
1981 if (!dd->vld[15].sc)
1984 hfi1_init_ctxt(dd->vld[15].sc);
1993 dd->kernel_send_context[0] = dd->vld[15].sc;
2003 dd->vld[i].sc = sc_alloc(dd, SC_KERNEL,
2005 if (!dd->vld[i].sc)
2007 dd->kernel_send_context[i + 1] = dd->vld[i].sc;
2008 hfi1_init_ctxt(dd->vld[i].sc);
2020 sc_enable(dd->vld[15].sc);
2021 ctxt = dd->vld[15].sc->hw_context;
2026 dd->vld[15].sc->sw_index, ctxt);
2029 sc_enable(dd->vld[i].sc);
2030 ctxt = dd->vld[i].sc->hw_context;
2047 sc_free(dd->vld[i].sc);
2048 dd->vld[i].sc = NULL;
2058 sc_free(dd->vld[15].sc);
2125 struct send_context *sc = sci->sc;
2131 sc->flags, sc->sw_index, sc->hw_context, sc->group);
2133 sc->sr_size, sc->credits, sc->sr_head, sc->sr_tail);
2135 sc->fill, sc->free, sc->fill_wrap, sc->alloc_free);
2137 sc->credit_intr_count, sc->credit_ctrl);
2138 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_STATUS));
2140 (le64_to_cpu(*sc->hw_free) & CR_COUNTER_SMASK) >>