Lines matching refs: sc (every reference to the struct send_context pointer sc in this file; the source line number and enclosing function are shown for each match)
15 static void sc_wait_for_packet_egress(struct send_context *sc, int pause);
519 static void cr_group_addresses(struct send_context *sc, dma_addr_t *dma) in cr_group_addresses() argument
521 u32 gc = group_context(sc->hw_context, sc->group); in cr_group_addresses()
522 u32 index = sc->hw_context & 0x7; in cr_group_addresses()
524 sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index]; in cr_group_addresses()
526 &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc]; in cr_group_addresses()
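The fragments above show how a send context locates its credit-return slot. A minimal sketch of the whole function, reconstructed from the lines shown; group_context() and struct credit_return are not visible in this listing and are taken as given:

/* Each group of up to 8 contexts shares one credit_return entry:
 * gc picks the group's entry, and the low 3 bits of the hardware
 * context pick the per-context counter within it.
 */
static void cr_group_addresses(struct send_context *sc, dma_addr_t *dma)
{
        u32 gc = group_context(sc->hw_context, sc->group);
        u32 index = sc->hw_context & 0x7;

        /* kernel virtual address the driver reads for returned credits */
        sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
        /* matching bus address programmed into the chip */
        *dma = (unsigned long)
               &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc];
}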
535 struct send_context *sc; in sc_halted() local
537 sc = container_of(work, struct send_context, halt_work); in sc_halted()
538 sc_restart(sc); in sc_halted()
551 u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize) in sc_mtu_to_threshold() argument
561 if (sc->credits <= release_credits) in sc_mtu_to_threshold()
564 threshold = sc->credits - release_credits; in sc_mtu_to_threshold()
576 u32 sc_percent_to_threshold(struct send_context *sc, u32 percent) in sc_percent_to_threshold() argument
578 return (sc->credits * percent) / 100; in sc_percent_to_threshold()
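Both threshold helpers reduce to simple credit arithmetic. A sketch assembled from the fragments above; the MTU path's rounding via DIV_ROUND_UP is an assumption, since the intermediate lines are not in this listing:

/* Credits are PIO blocks. Reserve enough of them to hold one
 * MTU-sized packet plus its header, so credits are returned early
 * enough that such a packet can always be built.
 */
u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize)
{
        u32 release_credits;
        u32 threshold;

        mtu += hdrqentsize << 2;        /* add in the header bytes */
        release_credits = DIV_ROUND_UP(mtu, PIO_BLOCK_SIZE);

        if (sc->credits <= release_credits)
                threshold = 1;          /* context too small; keep minimal */
        else
                threshold = sc->credits - release_credits;

        return threshold;
}

/* Percentage variant: for example, 50% of a 64-credit context is 32. */
u32 sc_percent_to_threshold(struct send_context *sc, u32 percent)
{
        return (sc->credits * percent) / 100;
}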
584 void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold) in sc_set_cr_threshold() argument
590 spin_lock_irqsave(&sc->credit_ctrl_lock, flags); in sc_set_cr_threshold()
592 old_threshold = (sc->credit_ctrl >> in sc_set_cr_threshold()
597 sc->credit_ctrl = in sc_set_cr_threshold()
598 (sc->credit_ctrl in sc_set_cr_threshold()
603 write_kctxt_csr(sc->dd, sc->hw_context, in sc_set_cr_threshold()
604 SC(CREDIT_CTRL), sc->credit_ctrl); in sc_set_cr_threshold()
610 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags); in sc_set_cr_threshold()
613 sc_return_credits(sc); in sc_set_cr_threshold()
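sc_set_cr_threshold() is a read-modify-write of the threshold field in CREDIT_CTRL under credit_ctrl_lock, followed by a forced credit return if the value actually changed. A sketch of the elided middle; the THRESHOLD mask/shift macro names are modeled on the SC() usage above and are assumptions:

void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
{
        unsigned long flags;
        u32 old_threshold;
        int force_return = 0;

        spin_lock_irqsave(&sc->credit_ctrl_lock, flags);

        old_threshold = (sc->credit_ctrl >> SC(CREDIT_CTRL_THRESHOLD_SHIFT))
                        & SC(CREDIT_CTRL_THRESHOLD_MASK);
        if (new_threshold != old_threshold) {
                sc->credit_ctrl =
                        (sc->credit_ctrl & ~SC(CREDIT_CTRL_THRESHOLD_SMASK))
                        | ((new_threshold & SC(CREDIT_CTRL_THRESHOLD_MASK))
                           << SC(CREDIT_CTRL_THRESHOLD_SHIFT));
                write_kctxt_csr(sc->dd, sc->hw_context,
                                SC(CREDIT_CTRL), sc->credit_ctrl);
                force_return = 1;       /* apply the new threshold now */
        }

        spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);

        if (force_return)
                sc_return_credits(sc);
}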
621 void set_pio_integrity(struct send_context *sc) in set_pio_integrity() argument
623 struct hfi1_devdata *dd = sc->dd; in set_pio_integrity()
624 u32 hw_context = sc->hw_context; in set_pio_integrity()
625 int type = sc->type; in set_pio_integrity()
632 static u32 get_buffers_allocated(struct send_context *sc) in get_buffers_allocated() argument
638 ret += *per_cpu_ptr(sc->buffers_allocated, cpu); in get_buffers_allocated()
642 static void reset_buffers_allocated(struct send_context *sc) in reset_buffers_allocated() argument
647 (*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0; in reset_buffers_allocated()
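The buffers_allocated counter is per-CPU so the hot allocate path only increments its local copy (see this_cpu_inc() in sc_buffer_alloc() further down); reads are slow-path and sum every CPU. A sketch, assuming the conventional for_each_possible_cpu() iteration:

static u32 get_buffers_allocated(struct send_context *sc)
{
        int cpu;
        u32 ret = 0;

        /* slow path: sum the per-CPU counters */
        for_each_possible_cpu(cpu)
                ret += *per_cpu_ptr(sc->buffers_allocated, cpu);
        return ret;
}

static void reset_buffers_allocated(struct send_context *sc)
{
        int cpu;

        for_each_possible_cpu(cpu)
                (*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0;
}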
658 struct send_context *sc = NULL; in sc_alloc() local
672 sc = kzalloc_node(sizeof(*sc), GFP_KERNEL, numa); in sc_alloc()
673 if (!sc) in sc_alloc()
676 sc->buffers_allocated = alloc_percpu(u32); in sc_alloc()
677 if (!sc->buffers_allocated) { in sc_alloc()
678 kfree(sc); in sc_alloc()
689 free_percpu(sc->buffers_allocated); in sc_alloc()
690 kfree(sc); in sc_alloc()
695 sci->sc = sc; in sc_alloc()
697 sc->dd = dd; in sc_alloc()
698 sc->node = numa; in sc_alloc()
699 sc->type = type; in sc_alloc()
700 spin_lock_init(&sc->alloc_lock); in sc_alloc()
701 spin_lock_init(&sc->release_lock); in sc_alloc()
702 spin_lock_init(&sc->credit_ctrl_lock); in sc_alloc()
703 seqlock_init(&sc->waitlock); in sc_alloc()
704 INIT_LIST_HEAD(&sc->piowait); in sc_alloc()
705 INIT_WORK(&sc->halt_work, sc_halted); in sc_alloc()
706 init_waitqueue_head(&sc->halt_wait); in sc_alloc()
709 sc->group = 0; in sc_alloc()
711 sc->sw_index = sw_index; in sc_alloc()
712 sc->hw_context = hw_context; in sc_alloc()
713 cr_group_addresses(sc, &dma); in sc_alloc()
714 sc->credits = sci->credits; in sc_alloc()
715 sc->size = sc->credits * PIO_BLOCK_SIZE; in sc_alloc()
720 sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK) in sc_alloc()
730 set_pio_integrity(sc); in sc_alloc()
771 thresh = sc_percent_to_threshold(sc, 50); in sc_alloc()
773 thresh = sc_percent_to_threshold(sc, in sc_alloc()
776 thresh = min(sc_percent_to_threshold(sc, 50), in sc_alloc()
777 sc_mtu_to_threshold(sc, hfi1_max_mtu, in sc_alloc()
788 sc->credit_ctrl = reg; in sc_alloc()
812 sc->sr_size = sci->credits + 1; in sc_alloc()
813 sc->sr = kcalloc_node(sc->sr_size, in sc_alloc()
816 if (!sc->sr) { in sc_alloc()
817 sc_free(sc); in sc_alloc()
827 sc->group, in sc_alloc()
828 sc->credits, in sc_alloc()
829 sc->credit_ctrl, in sc_alloc()
832 return sc; in sc_alloc()
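Note the shadow-ring sizing at line 812 above: sr_size is credits + 1. With head/tail indices, one slot is left unused so that head == tail always means empty rather than being ambiguous between empty and full. A minimal sketch of that invariant (these helper names are illustrative, not from the driver):

/* Ring occupancy with one spare slot (size == credits + 1). */
static inline int sr_empty(u32 head, u32 tail)
{
        return head == tail;
}

static inline int sr_full(u32 head, u32 tail, u32 size)
{
        return ((head + 1) % size) == tail;
}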
836 void sc_free(struct send_context *sc) in sc_free() argument
843 if (!sc) in sc_free()
846 sc->flags |= SCF_IN_FREE; /* ensure no restarts */ in sc_free()
847 dd = sc->dd; in sc_free()
848 if (!list_empty(&sc->piowait)) in sc_free()
850 sw_index = sc->sw_index; in sc_free()
851 hw_context = sc->hw_context; in sc_free()
852 sc_disable(sc); /* make sure the HW is disabled */ in sc_free()
853 flush_work(&sc->halt_work); in sc_free()
856 dd->send_contexts[sw_index].sc = NULL; in sc_free()
871 kfree(sc->sr); in sc_free()
872 free_percpu(sc->buffers_allocated); in sc_free()
873 kfree(sc); in sc_free()
877 void sc_disable(struct send_context *sc) in sc_disable() argument
883 if (!sc) in sc_disable()
887 spin_lock_irq(&sc->alloc_lock); in sc_disable()
888 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL)); in sc_disable()
890 sc->flags &= ~SCF_ENABLED; in sc_disable()
891 sc_wait_for_packet_egress(sc, 1); in sc_disable()
892 write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg); in sc_disable()
902 spin_lock(&sc->release_lock); in sc_disable()
903 if (sc->sr) { /* this context has a shadow ring */ in sc_disable()
904 while (sc->sr_tail != sc->sr_head) { in sc_disable()
905 pbuf = &sc->sr[sc->sr_tail].pbuf; in sc_disable()
908 sc->sr_tail++; in sc_disable()
909 if (sc->sr_tail >= sc->sr_size) in sc_disable()
910 sc->sr_tail = 0; in sc_disable()
913 spin_unlock(&sc->release_lock); in sc_disable()
915 write_seqlock(&sc->waitlock); in sc_disable()
916 if (!list_empty(&sc->piowait)) in sc_disable()
917 list_move(&sc->piowait, &wake_list); in sc_disable()
918 write_sequnlock(&sc->waitlock); in sc_disable()
932 spin_unlock_irq(&sc->alloc_lock); in sc_disable()
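sc_disable() clears the enable bit under alloc_lock, waits for egress, then runs any release callbacks still parked in the shadow ring before moving piowait waiters to a local wake list. A sketch of the ring-drain step as a standalone helper; the pbuf->cb/arg callback fields and the PRC_SC_DISABLE completion code are modeled on the release path below and are assumptions here:

/* Drain release callbacks left behind when the context goes down.
 * Caller holds sc->alloc_lock with IRQs disabled (lines 887/932
 * above), so a plain spin_lock on release_lock is sufficient.
 */
static void sc_drain_shadow_ring(struct send_context *sc)
{
        struct pio_buf *pbuf;

        spin_lock(&sc->release_lock);
        if (sc->sr) {                   /* this context has a shadow ring */
                while (sc->sr_tail != sc->sr_head) {
                        pbuf = &sc->sr[sc->sr_tail].pbuf;
                        if (pbuf->cb)
                                (*pbuf->cb)(pbuf->arg, PRC_SC_DISABLE);
                        sc->sr_tail++;
                        if (sc->sr_tail >= sc->sr_size)
                                sc->sr_tail = 0;
                }
        }
        spin_unlock(&sc->release_lock);
}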
970 static void sc_wait_for_packet_egress(struct send_context *sc, int pause) in sc_wait_for_packet_egress() argument
972 struct hfi1_devdata *dd = sc->dd; in sc_wait_for_packet_egress()
979 reg = read_csr(dd, sc->hw_context * 8 + in sc_wait_for_packet_egress()
982 if (sc->flags & SCF_HALTED || in sc_wait_for_packet_egress()
983 is_sc_halted(dd, sc->hw_context) || egress_halted(reg)) in sc_wait_for_packet_egress()
995 __func__, sc->sw_index, in sc_wait_for_packet_egress()
996 sc->hw_context, (u32)reg); in sc_wait_for_packet_egress()
1015 struct send_context *sc = dd->send_contexts[i].sc; in sc_wait() local
1017 if (!sc) in sc_wait()
1019 sc_wait_for_packet_egress(sc, 0); in sc_wait()
1032 int sc_restart(struct send_context *sc) in sc_restart() argument
1034 struct hfi1_devdata *dd = sc->dd; in sc_restart()
1040 if (!(sc->flags & SCF_HALTED) || (sc->flags & SCF_IN_FREE)) in sc_restart()
1043 dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index, in sc_restart()
1044 sc->hw_context); in sc_restart()
1054 reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS)); in sc_restart()
1059 __func__, sc->sw_index, sc->hw_context); in sc_restart()
1076 if (sc->type != SC_USER) { in sc_restart()
1080 count = get_buffers_allocated(sc); in sc_restart()
1086 __func__, sc->sw_index, in sc_restart()
1087 sc->hw_context, count); in sc_restart()
1103 sc_disable(sc); in sc_restart()
1111 return sc_enable(sc); in sc_restart()
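A condensed sketch of the restart sequence implied above, with error logging dropped; the HALTED status mask name and the loop bounds and delays are assumptions:

int sc_restart(struct send_context *sc)
{
        struct hfi1_devdata *dd = sc->dd;
        u32 loop;

        /* only restart halted contexts that are not being freed */
        if (!(sc->flags & SCF_HALTED) || (sc->flags & SCF_IN_FREE))
                return -EINVAL;

        /* step 1: wait for the context to report halted */
        loop = 0;
        while (!(read_kctxt_csr(dd, sc->hw_context, SC(STATUS)) &
                 SC(STATUS_CTXT_HALTED_SMASK))) {
                if (loop++ > 100)
                        return -ETIME;
                udelay(1);
        }

        /* step 2: kernel contexts wait for in-flight buffers to drain */
        if (sc->type != SC_USER) {
                loop = 0;
                while (get_buffers_allocated(sc)) {
                        if (loop++ > 100)
                                return -ETIME;
                        udelay(1);
                }
        }

        /* step 3: disable, then re-enable (resets all internal state) */
        sc_disable(sc);
        return sc_enable(sc);
}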
1121 struct send_context *sc; in pio_freeze() local
1125 sc = dd->send_contexts[i].sc; in pio_freeze()
1131 if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER) in pio_freeze()
1135 sc_disable(sc); in pio_freeze()
1148 struct send_context *sc; in pio_kernel_unfreeze() local
1152 sc = dd->send_contexts[i].sc; in pio_kernel_unfreeze()
1153 if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER) in pio_kernel_unfreeze()
1155 if (sc->flags & SCF_LINK_DOWN) in pio_kernel_unfreeze()
1158 sc_enable(sc); /* will clear the sc frozen flag */ in pio_kernel_unfreeze()
1176 struct send_context *sc; in pio_kernel_linkup() local
1180 sc = dd->send_contexts[i].sc; in pio_kernel_linkup()
1181 if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER) in pio_kernel_linkup()
1184 sc_enable(sc); /* will clear the sc link down flag */ in pio_kernel_linkup()
1244 int sc_enable(struct send_context *sc) in sc_enable() argument
1251 if (!sc) in sc_enable()
1253 dd = sc->dd; in sc_enable()
1262 spin_lock_irqsave(&sc->alloc_lock, flags); in sc_enable()
1263 sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL)); in sc_enable()
1269 *sc->hw_free = 0; in sc_enable()
1270 sc->free = 0; in sc_enable()
1271 sc->alloc_free = 0; in sc_enable()
1272 sc->fill = 0; in sc_enable()
1273 sc->fill_wrap = 0; in sc_enable()
1274 sc->sr_head = 0; in sc_enable()
1275 sc->sr_tail = 0; in sc_enable()
1276 sc->flags = 0; in sc_enable()
1278 reset_buffers_allocated(sc); in sc_enable()
1286 reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS)); in sc_enable()
1288 write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg); in sc_enable()
1302 pio = ((sc->hw_context & SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK) << in sc_enable()
1316 sc->sw_index, sc->hw_context, ret); in sc_enable()
1324 write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl); in sc_enable()
1329 read_kctxt_csr(dd, sc->hw_context, SC(CTRL)); in sc_enable()
1330 sc->flags |= SCF_ENABLED; in sc_enable()
1333 spin_unlock_irqrestore(&sc->alloc_lock, flags); in sc_enable()
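Before setting the hardware enable bit, sc_enable() zeroes every allocator and release counter, including the DMA'd credit-return word, so software and chip start in lockstep. The resets at lines 1269 through 1278 above could be collected into one helper; a sketch (the helper name is illustrative, and the caller holds alloc_lock per line 1262):

static void sc_reset_counters(struct send_context *sc)
{
        *sc->hw_free = 0;       /* clear the DMA'd credit-return word */
        sc->free = 0;           /* credits returned so far */
        sc->alloc_free = 0;     /* allocator's cached copy of ->free */
        sc->fill = 0;           /* free-running fill counter */
        sc->fill_wrap = 0;      /* fill folded into [0, credits) */
        sc->sr_head = 0;        /* shadow ring starts empty */
        sc->sr_tail = 0;
        sc->flags = 0;
        reset_buffers_allocated(sc);
}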
1339 void sc_return_credits(struct send_context *sc) in sc_return_credits() argument
1341 if (!sc) in sc_return_credits()
1345 write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), in sc_return_credits()
1351 read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE)); in sc_return_credits()
1353 write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0); in sc_return_credits()
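sc_return_credits() relies on a 0 -> 1 transition of the force bit: write it, read it back so the write is flushed before proceeding, then clear it so the next call produces another transition. A sketch of the whole function; the FORCE_SMASK macro name is an assumption:

void sc_return_credits(struct send_context *sc)
{
        if (!sc)
                return;

        /* a 0 -> 1 transition schedules a credit return */
        write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE),
                        SC(CREDIT_FORCE_FORCE_SMASK));
        /* read back to flush the write; the transition is what matters */
        read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE));
        /* clear, so the next call makes another 0 -> 1 transition */
        write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0);
}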
1357 void sc_flush(struct send_context *sc) in sc_flush() argument
1359 if (!sc) in sc_flush()
1362 sc_wait_for_packet_egress(sc, 1); in sc_flush()
1366 void sc_drop(struct send_context *sc) in sc_drop() argument
1368 if (!sc) in sc_drop()
1371 dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n", in sc_drop()
1372 __func__, sc->sw_index, sc->hw_context); in sc_drop()
1383 void sc_stop(struct send_context *sc, int flag) in sc_stop() argument
1388 spin_lock_irqsave(&sc->alloc_lock, flags); in sc_stop()
1390 sc->flags |= flag; in sc_stop()
1391 sc->flags &= ~SCF_ENABLED; in sc_stop()
1392 spin_unlock_irqrestore(&sc->alloc_lock, flags); in sc_stop()
1393 wake_up(&sc->halt_wait); in sc_stop()
1410 struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len, in sc_buffer_alloc() argument
1421 spin_lock_irqsave(&sc->alloc_lock, flags); in sc_buffer_alloc()
1422 if (!(sc->flags & SCF_ENABLED)) { in sc_buffer_alloc()
1423 spin_unlock_irqrestore(&sc->alloc_lock, flags); in sc_buffer_alloc()
1428 avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free); in sc_buffer_alloc()
1432 spin_unlock_irqrestore(&sc->alloc_lock, flags); in sc_buffer_alloc()
1436 sc->alloc_free = READ_ONCE(sc->free); in sc_buffer_alloc()
1438 (unsigned long)sc->credits - in sc_buffer_alloc()
1439 (sc->fill - sc->alloc_free); in sc_buffer_alloc()
1442 sc_release_update(sc); in sc_buffer_alloc()
1443 sc->alloc_free = READ_ONCE(sc->free); in sc_buffer_alloc()
1452 this_cpu_inc(*sc->buffers_allocated); in sc_buffer_alloc()
1455 head = sc->sr_head; in sc_buffer_alloc()
1458 sc->fill += blocks; in sc_buffer_alloc()
1459 fill_wrap = sc->fill_wrap; in sc_buffer_alloc()
1460 sc->fill_wrap += blocks; in sc_buffer_alloc()
1461 if (sc->fill_wrap >= sc->credits) in sc_buffer_alloc()
1462 sc->fill_wrap = sc->fill_wrap - sc->credits; in sc_buffer_alloc()
1471 pbuf = &sc->sr[head].pbuf; in sc_buffer_alloc()
1472 pbuf->sent_at = sc->fill; in sc_buffer_alloc()
1475 pbuf->sc = sc; /* could be filled in at sc->sr init time */ in sc_buffer_alloc()
1480 if (next >= sc->sr_size) in sc_buffer_alloc()
1487 sc->sr_head = next; in sc_buffer_alloc()
1488 spin_unlock_irqrestore(&sc->alloc_lock, flags); in sc_buffer_alloc()
1491 pbuf->start = sc->base_addr + fill_wrap * PIO_BLOCK_SIZE; in sc_buffer_alloc()
1492 pbuf->end = sc->base_addr + sc->size; in sc_buffer_alloc()
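The allocator keeps two fill counters: sc->fill free-runs forever (the release path compares it against pbuf->sent_at), while sc->fill_wrap is the same count folded into [0, credits) and gives the buffer's block offset in PIO memory. A sketch of just that arithmetic, with the locking, availability checks, and retry logic stripped out (this helper is illustrative, not a driver function):

/* Advance both fill counters by 'blocks' credits; return the block
 * offset at which this buffer starts.
 */
static u32 advance_fill(struct send_context *sc, u32 blocks)
{
        u32 fill_wrap = sc->fill_wrap;

        sc->fill += blocks;             /* free-running, never wraps */
        sc->fill_wrap += blocks;
        if (sc->fill_wrap >= sc->credits)
                sc->fill_wrap -= sc->credits;   /* fold back into range */

        return fill_wrap;
}

/* usage, per lines 1491-1492 above:
 *   pbuf->start = sc->base_addr + fill_wrap * PIO_BLOCK_SIZE;
 *   pbuf->end   = sc->base_addr + sc->size;
 */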
1511 void sc_add_credit_return_intr(struct send_context *sc) in sc_add_credit_return_intr() argument
1516 spin_lock_irqsave(&sc->credit_ctrl_lock, flags); in sc_add_credit_return_intr()
1517 if (sc->credit_intr_count == 0) { in sc_add_credit_return_intr()
1518 sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK); in sc_add_credit_return_intr()
1519 write_kctxt_csr(sc->dd, sc->hw_context, in sc_add_credit_return_intr()
1520 SC(CREDIT_CTRL), sc->credit_ctrl); in sc_add_credit_return_intr()
1522 sc->credit_intr_count++; in sc_add_credit_return_intr()
1523 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags); in sc_add_credit_return_intr()
1530 void sc_del_credit_return_intr(struct send_context *sc) in sc_del_credit_return_intr() argument
1534 WARN_ON(sc->credit_intr_count == 0); in sc_del_credit_return_intr()
1537 spin_lock_irqsave(&sc->credit_ctrl_lock, flags); in sc_del_credit_return_intr()
1538 sc->credit_intr_count--; in sc_del_credit_return_intr()
1539 if (sc->credit_intr_count == 0) { in sc_del_credit_return_intr()
1540 sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK); in sc_del_credit_return_intr()
1541 write_kctxt_csr(sc->dd, sc->hw_context, in sc_del_credit_return_intr()
1542 SC(CREDIT_CTRL), sc->credit_ctrl); in sc_del_credit_return_intr()
1544 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags); in sc_del_credit_return_intr()
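The add/del pair refcounts interrupt requests so the CREDIT_CTRL CSR is only rewritten on 0 -> 1 and 1 -> 0 transitions of credit_intr_count. Hypothetical usage; the wait queue and condition here are placeholders, not driver code:

/* Hypothetical caller: hold an interrupt request while waiting. */
static void wait_for_credits(struct send_context *sc)
{
        sc_add_credit_return_intr(sc);  /* 0 -> 1 enables the interrupt */
        sc_return_credits(sc);          /* nudge hardware to return now */
        wait_event(my_waitqueue, my_buffers_available(sc)); /* hypothetical */
        sc_del_credit_return_intr(sc);  /* 1 -> 0 disables it again */
}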
1551 void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint) in hfi1_sc_wantpiobuf_intr() argument
1554 sc_add_credit_return_intr(sc); in hfi1_sc_wantpiobuf_intr()
1556 sc_del_credit_return_intr(sc); in hfi1_sc_wantpiobuf_intr()
1557 trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl); in hfi1_sc_wantpiobuf_intr()
1559 sc_return_credits(sc); in hfi1_sc_wantpiobuf_intr()
1570 static void sc_piobufavail(struct send_context *sc) in sc_piobufavail() argument
1572 struct hfi1_devdata *dd = sc->dd; in sc_piobufavail()
1580 if (dd->send_contexts[sc->sw_index].type != SC_KERNEL && in sc_piobufavail()
1581 dd->send_contexts[sc->sw_index].type != SC_VL15) in sc_piobufavail()
1583 list = &sc->piowait; in sc_piobufavail()
1590 write_seqlock_irqsave(&sc->waitlock, flags); in sc_piobufavail()
1617 hfi1_sc_wantpiobuf_intr(sc, 0); in sc_piobufavail()
1619 hfi1_sc_wantpiobuf_intr(sc, 1); in sc_piobufavail()
1621 write_sequnlock_irqrestore(&sc->waitlock, flags); in sc_piobufavail()
1657 void sc_release_update(struct send_context *sc) in sc_release_update() argument
1668 if (!sc) in sc_release_update()
1671 spin_lock_irqsave(&sc->release_lock, flags); in sc_release_update()
1673 hw_free = le64_to_cpu(*sc->hw_free); /* volatile read */ in sc_release_update()
1674 old_free = sc->free; in sc_release_update()
1679 trace_hfi1_piofree(sc, extra); in sc_release_update()
1683 head = READ_ONCE(sc->sr_head); /* snapshot the head */ in sc_release_update()
1684 tail = sc->sr_tail; in sc_release_update()
1686 pbuf = &sc->sr[tail].pbuf; in sc_release_update()
1699 if (tail >= sc->sr_size) in sc_release_update()
1702 sc->sr_tail = tail; in sc_release_update()
1705 sc->free = free; in sc_release_update()
1706 spin_unlock_irqrestore(&sc->release_lock, flags); in sc_release_update()
1707 sc_piobufavail(sc); in sc_release_update()
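The heart of sc_release_update() is the delta computation: the chip DMA-writes a narrow wrapping counter into *sc->hw_free, and modular arithmetic against the low bits of the free-running software counter recovers how many credits arrived since the last update, regardless of counter width. A sketch of that step as an illustrative helper; the CR_COUNTER_* macros match the usage at line 2140 below and must be called under release_lock:

static unsigned long sc_new_free(struct send_context *sc)
{
        u64 hw_free = le64_to_cpu(*sc->hw_free); /* volatile DMA'd value */
        unsigned long old_free = sc->free;       /* free-running SW count */
        unsigned long extra;

        /* modular difference: correct for any counter width */
        extra = (((hw_free & CR_COUNTER_SMASK) >> CR_COUNTER_SHIFT)
                 - (old_free & CR_COUNTER_MASK)) & CR_COUNTER_MASK;
        return old_free + extra;                 /* new free-running total */
}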
1721 struct send_context *sc; in sc_group_release_update() local
1732 sc = dd->send_contexts[sw_index].sc; in sc_group_release_update()
1733 if (unlikely(!sc)) in sc_group_release_update()
1736 gc = group_context(hw_context, sc->group); in sc_group_release_update()
1737 gc_end = gc + group_size(sc->group); in sc_group_release_update()
1746 sc_release_update(dd->send_contexts[sw_index].sc); in sc_group_release_update()
1782 return dd->vld[0].sc; in pio_select_send_context_vl()
1789 rval = !rval ? dd->vld[0].sc : rval; in pio_select_send_context_vl()
1983 dd->vld[15].sc = sc_alloc(dd, SC_VL15, in init_pervl_scs()
1985 if (!dd->vld[15].sc) in init_pervl_scs()
1988 hfi1_init_ctxt(dd->vld[15].sc); in init_pervl_scs()
1997 dd->kernel_send_context[0] = dd->vld[15].sc; in init_pervl_scs()
2007 dd->vld[i].sc = sc_alloc(dd, SC_KERNEL, in init_pervl_scs()
2009 if (!dd->vld[i].sc) in init_pervl_scs()
2011 dd->kernel_send_context[i + 1] = dd->vld[i].sc; in init_pervl_scs()
2012 hfi1_init_ctxt(dd->vld[i].sc); in init_pervl_scs()
2024 sc_enable(dd->vld[15].sc); in init_pervl_scs()
2025 ctxt = dd->vld[15].sc->hw_context; in init_pervl_scs()
2030 dd->vld[15].sc->sw_index, ctxt); in init_pervl_scs()
2033 sc_enable(dd->vld[i].sc); in init_pervl_scs()
2034 ctxt = dd->vld[i].sc->hw_context; in init_pervl_scs()
2051 sc_free(dd->vld[i].sc); in init_pervl_scs()
2052 dd->vld[i].sc = NULL; in init_pervl_scs()
2062 sc_free(dd->vld[15].sc); in init_pervl_scs()
2125 struct send_context *sc = sci->sc; in seqfile_dump_sci() local
2131 sc->flags, sc->sw_index, sc->hw_context, sc->group); in seqfile_dump_sci()
2133 sc->sr_size, sc->credits, sc->sr_head, sc->sr_tail); in seqfile_dump_sci()
2135 sc->fill, sc->free, sc->fill_wrap, sc->alloc_free); in seqfile_dump_sci()
2137 sc->credit_intr_count, sc->credit_ctrl); in seqfile_dump_sci()
2138 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_STATUS)); in seqfile_dump_sci()
2140 (le64_to_cpu(*sc->hw_free) & CR_COUNTER_SMASK) >> in seqfile_dump_sci()