Lines Matching refs: uctxt

42 static void init_subctxts(struct hfi1_ctxtdata *uctxt,
45 struct hfi1_ctxtdata *uctxt);
46 static void user_init(struct hfi1_ctxtdata *uctxt);
56 struct hfi1_ctxtdata *uctxt);
57 static int setup_subctxt(struct hfi1_ctxtdata *uctxt);
64 static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt);
67 static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
69 static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg);
70 static int ctxt_reset(struct hfi1_ctxtdata *uctxt);
71 static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
184 struct hfi1_ctxtdata *uctxt = fd->uctxt; in hfi1_file_ioctl() local
191 !uctxt) in hfi1_file_ioctl()
208 if (uctxt) in hfi1_file_ioctl()
209 sc_return_credits(uctxt->sc); in hfi1_file_ioctl()
225 ret = manage_rcvq(uctxt, fd->subctxt, arg); in hfi1_file_ioctl()
231 uctxt->poll_type = (typeof(uctxt->poll_type))uval; in hfi1_file_ioctl()
235 ret = user_event_ack(uctxt, fd->subctxt, arg); in hfi1_file_ioctl()
239 ret = set_ctxt_pkey(uctxt, arg); in hfi1_file_ioctl()
243 ret = ctxt_reset(uctxt); in hfi1_file_ioctl()
282 trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim); in hfi1_write_iter()
323 struct hfi1_ctxtdata *uctxt = fd->uctxt; in hfi1_file_mmap() local
335 if (!is_valid_mmap(token) || !uctxt || in hfi1_file_mmap()
340 dd = uctxt->dd; in hfi1_file_mmap()
344 if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) { in hfi1_file_mmap()
361 (uctxt->sc->hw_context * BIT(16))) + in hfi1_file_mmap()
369 memlen = PAGE_ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE); in hfi1_file_mmap()
386 cr_page_offset = ((u64)uctxt->sc->hw_free - in hfi1_file_mmap()
387 (u64)dd->cr_base[uctxt->numa_id].va) & in hfi1_file_mmap()
389 memvirt = dd->cr_base[uctxt->numa_id].va + cr_page_offset; in hfi1_file_mmap()
390 memdma = dd->cr_base[uctxt->numa_id].dma + cr_page_offset; in hfi1_file_mmap()
403 memlen = rcvhdrq_size(uctxt); in hfi1_file_mmap()
404 memvirt = uctxt->rcvhdrq; in hfi1_file_mmap()
405 memdma = uctxt->rcvhdrq_dma; in hfi1_file_mmap()
416 memlen = uctxt->egrbufs.size; in hfi1_file_mmap()
438 for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) { in hfi1_file_mmap()
439 memlen = uctxt->egrbufs.buffers[i].len; in hfi1_file_mmap()
440 memvirt = uctxt->egrbufs.buffers[i].addr; in hfi1_file_mmap()
441 memdma = uctxt->egrbufs.buffers[i].dma; in hfi1_file_mmap()
466 + (uctxt->ctxt * RXE_PER_CONTEXT_SIZE); in hfi1_file_mmap()
482 (dd->events + uctxt_offset(uctxt)) & PAGE_MASK; in hfi1_file_mmap()
509 if ((flags & VM_WRITE) || !hfi1_rcvhdrtail_kvaddr(uctxt)) { in hfi1_file_mmap()
514 memvirt = (void *)hfi1_rcvhdrtail_kvaddr(uctxt); in hfi1_file_mmap()
515 memdma = uctxt->rcvhdrqtailaddr_dma; in hfi1_file_mmap()
519 memaddr = (u64)uctxt->subctxt_uregbase; in hfi1_file_mmap()
525 memaddr = (u64)uctxt->subctxt_rcvhdr_base; in hfi1_file_mmap()
526 memlen = rcvhdrq_size(uctxt) * uctxt->subctxt_cnt; in hfi1_file_mmap()
531 memaddr = (u64)uctxt->subctxt_rcvegrbuf; in hfi1_file_mmap()
532 memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt; in hfi1_file_mmap()
557 uctxt->ctxt, fd->subctxt, in hfi1_file_mmap()
613 struct hfi1_ctxtdata *uctxt; in hfi1_poll() local
616 uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt; in hfi1_poll()
617 if (!uctxt) in hfi1_poll()
619 else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT) in hfi1_poll()
621 else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV) in hfi1_poll()
632 struct hfi1_ctxtdata *uctxt = fdata->uctxt; in hfi1_file_close() local
640 if (!uctxt) in hfi1_file_close()
643 hfi1_cdbg(PROC, "closing ctxt %u:%u", uctxt->ctxt, fdata->subctxt); in hfi1_file_close()
647 hfi1_user_sdma_free_queues(fdata, uctxt); in hfi1_file_close()
659 fdata->uctxt = NULL; in hfi1_file_close()
660 hfi1_rcd_put(uctxt); in hfi1_file_close()
666 ev = dd->events + uctxt_offset(uctxt) + fdata->subctxt; in hfi1_file_close()
670 __clear_bit(fdata->subctxt, uctxt->in_use_ctxts); in hfi1_file_close()
671 if (!bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) { in hfi1_file_close()
688 HFI1_RCVCTRL_URGENT_DIS, uctxt); in hfi1_file_close()
690 hfi1_clear_ctxt_jkey(dd, uctxt); in hfi1_file_close()
695 if (uctxt->sc) { in hfi1_file_close()
696 sc_disable(uctxt->sc); in hfi1_file_close()
697 set_pio_integrity(uctxt->sc); in hfi1_file_close()
700 hfi1_free_ctxt_rcv_groups(uctxt); in hfi1_file_close()
701 hfi1_clear_ctxt_pkey(dd, uctxt); in hfi1_file_close()
703 uctxt->event_flags = 0; in hfi1_file_close()
705 deallocate_ctxt(uctxt); in hfi1_file_close()
754 fd->uctxt->wait, in complete_subctxt()
755 !test_bit(HFI1_CTXT_BASE_UNINIT, &fd->uctxt->event_flags)); in complete_subctxt()
757 if (test_bit(HFI1_CTXT_BASE_FAILED, &fd->uctxt->event_flags)) in complete_subctxt()
762 fd->rec_cpu_num = hfi1_get_proc_affinity(fd->uctxt->numa_id); in complete_subctxt()
763 ret = init_user_ctxt(fd, fd->uctxt); in complete_subctxt()
768 __clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts); in complete_subctxt()
770 hfi1_rcd_put(fd->uctxt); in complete_subctxt()
771 fd->uctxt = NULL; in complete_subctxt()
781 struct hfi1_ctxtdata *uctxt = NULL; in assign_ctxt() local
784 if (fd->uctxt) in assign_ctxt()
816 ret = allocate_ctxt(fd, fd->dd, &uinfo, &uctxt); in assign_ctxt()
823 ret = setup_base_ctxt(fd, uctxt); in assign_ctxt()
825 deallocate_ctxt(uctxt); in assign_ctxt()
848 struct hfi1_ctxtdata *uctxt) in match_ctxt() argument
855 if (uctxt->sc && (uctxt->sc->type == SC_KERNEL)) in match_ctxt()
859 if (memcmp(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid)) || in match_ctxt()
860 uctxt->jkey != generate_jkey(current_uid()) || in match_ctxt()
861 uctxt->subctxt_id != uinfo->subctxt_id || in match_ctxt()
862 uctxt->subctxt_cnt != uinfo->subctxt_cnt) in match_ctxt()
866 if (uctxt->userversion != uinfo->userversion) in match_ctxt()
871 if (bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) { in match_ctxt()
877 subctxt = find_first_zero_bit(uctxt->in_use_ctxts, in match_ctxt()
879 if (subctxt >= uctxt->subctxt_cnt) { in match_ctxt()
885 __set_bit(fd->subctxt, uctxt->in_use_ctxts); in match_ctxt()
888 fd->uctxt = uctxt; in match_ctxt()
889 hfi1_rcd_get(uctxt); in match_ctxt()
911 struct hfi1_ctxtdata *uctxt; in find_sub_ctxt() local
920 uctxt = hfi1_rcd_get_by_index(dd, i); in find_sub_ctxt()
921 if (uctxt) { in find_sub_ctxt()
922 ret = match_ctxt(fd, uinfo, uctxt); in find_sub_ctxt()
923 hfi1_rcd_put(uctxt); in find_sub_ctxt()
937 struct hfi1_ctxtdata *uctxt; in allocate_ctxt() local
963 ret = hfi1_create_ctxtdata(dd->pport, numa, &uctxt); in allocate_ctxt()
969 uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num, in allocate_ctxt()
970 uctxt->numa_id); in allocate_ctxt()
975 uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize, dd->node); in allocate_ctxt()
976 if (!uctxt->sc) { in allocate_ctxt()
980 hfi1_cdbg(PROC, "allocated send context %u(%u)\n", uctxt->sc->sw_index, in allocate_ctxt()
981 uctxt->sc->hw_context); in allocate_ctxt()
982 ret = sc_enable(uctxt->sc); in allocate_ctxt()
995 __set_bit(0, uctxt->in_use_ctxts); in allocate_ctxt()
997 init_subctxts(uctxt, uinfo); in allocate_ctxt()
998 uctxt->userversion = uinfo->userversion; in allocate_ctxt()
999 uctxt->flags = hfi1_cap_mask; /* save current flag state */ in allocate_ctxt()
1000 init_waitqueue_head(&uctxt->wait); in allocate_ctxt()
1001 strscpy(uctxt->comm, current->comm, sizeof(uctxt->comm)); in allocate_ctxt()
1002 memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid)); in allocate_ctxt()
1003 uctxt->jkey = generate_jkey(current_uid()); in allocate_ctxt()
1012 *rcd = uctxt; in allocate_ctxt()
1017 hfi1_free_ctxt(uctxt); in allocate_ctxt()
1021 static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt) in deallocate_ctxt() argument
1025 if (++uctxt->dd->freectxts == uctxt->dd->num_user_contexts) in deallocate_ctxt()
1026 aspm_enable_all(uctxt->dd); in deallocate_ctxt()
1029 hfi1_free_ctxt(uctxt); in deallocate_ctxt()
1032 static void init_subctxts(struct hfi1_ctxtdata *uctxt, in init_subctxts() argument
1035 uctxt->subctxt_cnt = uinfo->subctxt_cnt; in init_subctxts()
1036 uctxt->subctxt_id = uinfo->subctxt_id; in init_subctxts()
1037 set_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags); in init_subctxts()
1040 static int setup_subctxt(struct hfi1_ctxtdata *uctxt) in setup_subctxt() argument
1043 u16 num_subctxts = uctxt->subctxt_cnt; in setup_subctxt()
1045 uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE); in setup_subctxt()
1046 if (!uctxt->subctxt_uregbase) in setup_subctxt()
1050 uctxt->subctxt_rcvhdr_base = vmalloc_user(rcvhdrq_size(uctxt) * in setup_subctxt()
1052 if (!uctxt->subctxt_rcvhdr_base) { in setup_subctxt()
1057 uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size * in setup_subctxt()
1059 if (!uctxt->subctxt_rcvegrbuf) { in setup_subctxt()
1067 vfree(uctxt->subctxt_rcvhdr_base); in setup_subctxt()
1068 uctxt->subctxt_rcvhdr_base = NULL; in setup_subctxt()
1070 vfree(uctxt->subctxt_uregbase); in setup_subctxt()
1071 uctxt->subctxt_uregbase = NULL; in setup_subctxt()
1076 static void user_init(struct hfi1_ctxtdata *uctxt) in user_init() argument
1081 uctxt->urgent = 0; in user_init()
1082 uctxt->urgent_poll = 0; in user_init()
1095 if (hfi1_rcvhdrtail_kvaddr(uctxt)) in user_init()
1096 clear_rcvhdrtail(uctxt); in user_init()
1099 hfi1_set_ctxt_jkey(uctxt->dd, uctxt, uctxt->jkey); in user_init()
1103 if (HFI1_CAP_UGET_MASK(uctxt->flags, HDRSUPP)) in user_init()
1110 if (!HFI1_CAP_UGET_MASK(uctxt->flags, MULTI_PKT_EGR)) in user_init()
1112 if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_EGR_FULL)) in user_init()
1114 if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_RHQ_FULL)) in user_init()
1122 if (HFI1_CAP_UGET_MASK(uctxt->flags, DMA_RTAIL)) in user_init()
1126 hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt); in user_init()
1132 struct hfi1_ctxtdata *uctxt = fd->uctxt; in get_ctxt_info() local
1138 cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) & in get_ctxt_info()
1140 HFI1_CAP_UGET_MASK(uctxt->flags, MASK) | in get_ctxt_info()
1141 HFI1_CAP_KGET_MASK(uctxt->flags, K2U); in get_ctxt_info()
1147 cinfo.unit = uctxt->dd->unit; in get_ctxt_info()
1148 cinfo.ctxt = uctxt->ctxt; in get_ctxt_info()
1150 cinfo.rcvtids = roundup(uctxt->egrbufs.alloced, in get_ctxt_info()
1151 uctxt->dd->rcv_entries.group_size) + in get_ctxt_info()
1152 uctxt->expected_count; in get_ctxt_info()
1153 cinfo.credits = uctxt->sc->credits; in get_ctxt_info()
1154 cinfo.numa_node = uctxt->numa_id; in get_ctxt_info()
1156 cinfo.send_ctxt = uctxt->sc->hw_context; in get_ctxt_info()
1158 cinfo.egrtids = uctxt->egrbufs.alloced; in get_ctxt_info()
1159 cinfo.rcvhdrq_cnt = get_hdrq_cnt(uctxt); in get_ctxt_info()
1160 cinfo.rcvhdrq_entsize = get_hdrqentsize(uctxt) << 2; in get_ctxt_info()
1162 cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size; in get_ctxt_info()
1164 trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, &cinfo); in get_ctxt_info()
1172 struct hfi1_ctxtdata *uctxt) in init_user_ctxt() argument
1176 ret = hfi1_user_sdma_alloc_queues(uctxt, fd); in init_user_ctxt()
1180 ret = hfi1_user_exp_rcv_init(fd, uctxt); in init_user_ctxt()
1182 hfi1_user_sdma_free_queues(fd, uctxt); in init_user_ctxt()
1188 struct hfi1_ctxtdata *uctxt) in setup_base_ctxt() argument
1190 struct hfi1_devdata *dd = uctxt->dd; in setup_base_ctxt()
1193 hfi1_init_ctxt(uctxt->sc); in setup_base_ctxt()
1196 ret = hfi1_create_rcvhdrq(dd, uctxt); in setup_base_ctxt()
1200 ret = hfi1_setup_eagerbufs(uctxt); in setup_base_ctxt()
1205 if (uctxt->subctxt_cnt) in setup_base_ctxt()
1206 ret = setup_subctxt(uctxt); in setup_base_ctxt()
1210 ret = hfi1_alloc_ctxt_rcv_groups(uctxt); in setup_base_ctxt()
1214 ret = init_user_ctxt(fd, uctxt); in setup_base_ctxt()
1216 hfi1_free_ctxt_rcv_groups(uctxt); in setup_base_ctxt()
1220 user_init(uctxt); in setup_base_ctxt()
1223 fd->uctxt = uctxt; in setup_base_ctxt()
1224 hfi1_rcd_get(uctxt); in setup_base_ctxt()
1227 if (uctxt->subctxt_cnt) { in setup_base_ctxt()
1233 set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags); in setup_base_ctxt()
1239 clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags); in setup_base_ctxt()
1240 wake_up(&uctxt->wait); in setup_base_ctxt()
1249 struct hfi1_ctxtdata *uctxt = fd->uctxt; in get_base_info() local
1250 struct hfi1_devdata *dd = uctxt->dd; in get_base_info()
1253 trace_hfi1_uctxtdata(uctxt->dd, uctxt, fd->subctxt); in get_base_info()
1262 binfo.jkey = uctxt->jkey; in get_base_info()
1269 offset = ((u64)uctxt->sc->hw_free - in get_base_info()
1270 (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE; in get_base_info()
1271 binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt, in get_base_info()
1273 binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt, in get_base_info()
1275 uctxt->sc->base_addr); in get_base_info()
1277 uctxt->ctxt, in get_base_info()
1279 uctxt->sc->base_addr); in get_base_info()
1280 binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt, in get_base_info()
1282 uctxt->rcvhdrq); in get_base_info()
1283 binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt, in get_base_info()
1285 uctxt->egrbufs.rcvtids[0].dma); in get_base_info()
1286 binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt, in get_base_info()
1292 binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt, in get_base_info()
1294 offset = offset_in_page((uctxt_offset(uctxt) + fd->subctxt) * in get_base_info()
1296 binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt, in get_base_info()
1299 binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt, in get_base_info()
1303 binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt, in get_base_info()
1305 if (uctxt->subctxt_cnt) { in get_base_info()
1307 uctxt->ctxt, in get_base_info()
1310 uctxt->ctxt, in get_base_info()
1313 uctxt->ctxt, in get_base_info()
1443 struct hfi1_ctxtdata *uctxt = fd->uctxt; in poll_urgent() local
1444 struct hfi1_devdata *dd = uctxt->dd; in poll_urgent()
1447 poll_wait(fp, &uctxt->wait, pt); in poll_urgent()
1450 if (uctxt->urgent != uctxt->urgent_poll) { in poll_urgent()
1452 uctxt->urgent_poll = uctxt->urgent; in poll_urgent()
1455 set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags); in poll_urgent()
1466 struct hfi1_ctxtdata *uctxt = fd->uctxt; in poll_next() local
1467 struct hfi1_devdata *dd = uctxt->dd; in poll_next()
1470 poll_wait(fp, &uctxt->wait, pt); in poll_next()
1473 if (hdrqempty(uctxt)) { in poll_next()
1474 set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags); in poll_next()
1475 hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt); in poll_next()
1492 struct hfi1_ctxtdata *uctxt; in hfi1_set_uevent_bits() local
1501 uctxt = hfi1_rcd_get_by_index(dd, ctxt); in hfi1_set_uevent_bits()
1502 if (uctxt) { in hfi1_set_uevent_bits()
1509 evs = dd->events + uctxt_offset(uctxt); in hfi1_set_uevent_bits()
1511 for (i = 1; i < uctxt->subctxt_cnt; i++) in hfi1_set_uevent_bits()
1513 hfi1_rcd_put(uctxt); in hfi1_set_uevent_bits()
1530 static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt, in manage_rcvq() argument
1533 struct hfi1_devdata *dd = uctxt->dd; in manage_rcvq()
1553 if (hfi1_rcvhdrtail_kvaddr(uctxt)) in manage_rcvq()
1554 clear_rcvhdrtail(uctxt); in manage_rcvq()
1559 hfi1_rcvctrl(dd, rcvctrl_op, uctxt); in manage_rcvq()
1570 static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt, in user_event_ack() argument
1574 struct hfi1_devdata *dd = uctxt->dd; in user_event_ack()
1584 evs = dd->events + uctxt_offset(uctxt) + subctxt; in user_event_ack()
1594 static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg) in set_ctxt_pkey() argument
1597 struct hfi1_pportdata *ppd = uctxt->ppd; in set_ctxt_pkey()
1598 struct hfi1_devdata *dd = uctxt->dd; in set_ctxt_pkey()
1612 return hfi1_set_ctxt_pkey(dd, uctxt, pkey); in set_ctxt_pkey()
1621 static int ctxt_reset(struct hfi1_ctxtdata *uctxt) in ctxt_reset() argument
1627 if (!uctxt || !uctxt->dd || !uctxt->sc) in ctxt_reset()
1636 dd = uctxt->dd; in ctxt_reset()
1637 sc = uctxt->sc; in ctxt_reset()
1670 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt); in ctxt_reset()