Lines matching refs:clt. Each entry shows the source line number, the matching line, and the enclosing function, with argument/local/member qualifiers where applicable.
50 static inline bool rtrs_clt_is_connected(const struct rtrs_clt_sess *clt) in rtrs_clt_is_connected() argument
56 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) in rtrs_clt_is_connected()
67 __rtrs_get_permit(struct rtrs_clt_sess *clt, enum rtrs_clt_con_type con_type) in __rtrs_get_permit() argument
69 size_t max_depth = clt->queue_depth; in __rtrs_get_permit()
81 bit = find_first_zero_bit(clt->permits_map, max_depth); in __rtrs_get_permit()
84 } while (test_and_set_bit_lock(bit, clt->permits_map)); in __rtrs_get_permit()
86 permit = get_permit(clt, bit); in __rtrs_get_permit()
94 static inline void __rtrs_put_permit(struct rtrs_clt_sess *clt, in __rtrs_put_permit() argument
97 clear_bit_unlock(permit->mem_id, clt->permits_map); in __rtrs_put_permit()
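The permit fast path above is lock-free: find_first_zero_bit() proposes a free slot, test_and_set_bit_lock() claims it atomically (looping if another CPU raced in first), and clear_bit_unlock() is the release side. A minimal userspace sketch of the same acquire/release pattern using C11 atomics, with a single 64-slot word standing in for the kernel bitmap (function names here are hypothetical):

    #include <stdatomic.h>

    static atomic_ulong permits_map;          /* one bit per slot, 64 slots */

    /* Claim the first free slot, or return -1 if all slots are busy. */
    static int get_permit_bit(void)
    {
        unsigned long map, bit;

        do {
            map = atomic_load_explicit(&permits_map, memory_order_relaxed);
            if (map == ~0UL)
                return -1;                              /* bitmap full */
            bit = (unsigned long)__builtin_ctzl(~map);  /* first zero bit */
            /* Set the bit atomically; retry if another thread won the race. */
        } while (atomic_fetch_or_explicit(&permits_map, 1UL << bit,
                                          memory_order_acquire) & (1UL << bit));
        return (int)bit;
    }

    /* Release pairing, mirroring clear_bit_unlock(). */
    static void put_permit_bit(int bit)
    {
        atomic_fetch_and_explicit(&permits_map, ~(1UL << bit),
                                  memory_order_release);
    }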
114 struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt_sess *clt, in rtrs_clt_get_permit() argument
121 permit = __rtrs_get_permit(clt, con_type); in rtrs_clt_get_permit()
126 prepare_to_wait(&clt->permits_wait, &wait, in rtrs_clt_get_permit()
128 permit = __rtrs_get_permit(clt, con_type); in rtrs_clt_get_permit()
135 finish_wait(&clt->permits_wait, &wait); in rtrs_clt_get_permit()
149 void rtrs_clt_put_permit(struct rtrs_clt_sess *clt, in rtrs_clt_put_permit() argument
152 if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map))) in rtrs_clt_put_permit()
155 __rtrs_put_permit(clt, permit); in rtrs_clt_put_permit()
164 if (waitqueue_active(&clt->permits_wait)) in rtrs_clt_put_permit()
165 wake_up(&clt->permits_wait); in rtrs_clt_put_permit()
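rtrs_clt_get_permit() wraps that fast path in a prepare_to_wait()/finish_wait() sleep loop for callers allowed to block, and rtrs_clt_put_permit() calls wake_up() only after an unlocked waitqueue_active() peek, so uncontended puts skip the waitqueue entirely. A rough pthread analogue of the sleep-until-free handshake (a sketch with simplified counter bookkeeping, not the kernel implementation):

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  permits_wait = PTHREAD_COND_INITIALIZER;
    static int free_permits = 64;             /* stand-in for the bitmap */

    static int get_permit(int can_wait)
    {
        pthread_mutex_lock(&lock);
        while (free_permits == 0) {
            if (!can_wait) {                  /* caller that must not sleep */
                pthread_mutex_unlock(&lock);
                return -1;
            }
            /* Like prepare_to_wait() + schedule() on clt->permits_wait. */
            pthread_cond_wait(&permits_wait, &lock);
        }
        free_permits--;
        pthread_mutex_unlock(&lock);
        return 0;
    }

    static void put_permit(void)
    {
        pthread_mutex_lock(&lock);
        free_permits++;
        pthread_mutex_unlock(&lock);
        pthread_cond_signal(&permits_wait);   /* like wake_up(&clt->permits_wait) */
    }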
545 rtrs_err(clt_path->clt, in rtrs_clt_rkey_rsp_done()
611 rtrs_err(clt_path->clt, "RDMA failed: %s\n", in rtrs_clt_rdma_done()
689 rtrs_wrn(clt_path->clt, "Unexpected WC type: %d\n", wc->opcode); in rtrs_clt_rdma_done()
733 rtrs_err(clt_path->clt, "post_recv_io(), err: %d\n", in post_recv_path()
745 struct rtrs_clt_sess *clt; member
782 struct rtrs_clt_sess *clt; in get_next_path_rr() local
784 clt = it->clt; in get_next_path_rr()
792 ppcpu_path = this_cpu_ptr(clt->pcpu_path); in get_next_path_rr()
795 path = list_first_or_null_rcu(&clt->paths_list, in get_next_path_rr()
798 path = rtrs_clt_get_next_path_or_null(&clt->paths_list, path); in get_next_path_rr()
817 struct rtrs_clt_sess *clt = it->clt; in get_next_path_min_inflight() local
822 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) { in get_next_path_min_inflight()
868 struct rtrs_clt_sess *clt = it->clt; in get_next_path_min_latency() local
873 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) { in get_next_path_min_latency()
898 static inline void path_it_init(struct path_it *it, struct rtrs_clt_sess *clt) in path_it_init() argument
901 it->clt = clt; in path_it_init()
904 if (clt->mp_policy == MP_POLICY_RR) in path_it_init()
906 else if (clt->mp_policy == MP_POLICY_MIN_INFLIGHT) in path_it_init()
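path_it_init() binds the iterator's next_path callback once, according to the session's multipath policy (round-robin, min-inflight, or min-latency), so each path pick costs a single indirect call. Callers such as rtrs_clt_request() and rtrs_clt_failover_req() then loop while (path = it.next_path(&it)) holds and it.i < paths_num, bounding the search to one pass over the path set. A condensed userspace sketch of the dispatch plus a min-inflight scan (the struct layout and inflight counter are illustrative; the kernel version walks an RCU list and reads per-path stats):

    #include <stddef.h>
    #include <limits.h>

    struct path { int inflight; struct path *next; };

    struct path_it {
        struct path *head;
        int i;                                    /* loop bound used by callers */
        struct path *(*next_path)(struct path_it *it);
    };

    /* MIN_INFLIGHT policy: pick the path with the fewest outstanding requests. */
    static struct path *get_next_path_min_inflight(struct path_it *it)
    {
        struct path *p, *best = NULL;
        int min = INT_MAX;

        for (p = it->head; p; p = p->next) {
            if (p->inflight < min) {
                min = p->inflight;
                best = p;
            }
        }
        return best;
    }

    static void path_it_init(struct path_it *it, struct path *head)
    {
        it->head = head;
        it->i = 0;
        it->next_path = get_next_path_min_inflight;  /* or an RR/min-latency variant */
    }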
967 req->mp_policy = clt_path->clt->mp_policy; in rtrs_clt_init_req()
1280 static int rtrs_clt_failover_req(struct rtrs_clt_sess *clt, in rtrs_clt_failover_req() argument
1289 for (path_it_init(&it, clt); in rtrs_clt_failover_req()
1290 (alive_path = it.next_path(&it)) && it.i < it.clt->paths_num; in rtrs_clt_failover_req()
1315 struct rtrs_clt_sess *clt = clt_path->clt; in fail_all_outstanding_reqs() local
1333 err = rtrs_clt_failover_req(clt, req); in fail_all_outstanding_reqs()
1404 static int alloc_permits(struct rtrs_clt_sess *clt) in alloc_permits() argument
1409 clt->permits_map = bitmap_zalloc(clt->queue_depth, GFP_KERNEL); in alloc_permits()
1410 if (!clt->permits_map) { in alloc_permits()
1414 clt->permits = kcalloc(clt->queue_depth, permit_size(clt), GFP_KERNEL); in alloc_permits()
1415 if (!clt->permits) { in alloc_permits()
1419 chunk_bits = ilog2(clt->queue_depth - 1) + 1; in alloc_permits()
1420 for (i = 0; i < clt->queue_depth; i++) { in alloc_permits()
1423 permit = get_permit(clt, i); in alloc_permits()
1431 bitmap_free(clt->permits_map); in alloc_permits()
1432 clt->permits_map = NULL; in alloc_permits()
1437 static void free_permits(struct rtrs_clt_sess *clt) in free_permits() argument
1439 if (clt->permits_map) in free_permits()
1440 wait_event(clt->permits_wait, in free_permits()
1441 bitmap_empty(clt->permits_map, clt->queue_depth)); in free_permits()
1443 bitmap_free(clt->permits_map); in free_permits()
1444 clt->permits_map = NULL; in free_permits()
1445 kfree(clt->permits); in free_permits()
1446 clt->permits = NULL; in free_permits()
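In alloc_permits(), chunk_bits = ilog2(queue_depth - 1) + 1 is the bit width needed to represent any chunk index below queue_depth, used when deriving each permit's offset from its index. A quick userspace check of the identity (assuming queue_depth >= 2, which the kernel expression also requires):

    #include <stdio.h>

    /* Bit width of (queue_depth - 1): highest set bit position, plus one. */
    static unsigned int bits_needed(unsigned int queue_depth)
    {
        return 32 - __builtin_clz(queue_depth - 1);   /* == ilog2(qd - 1) + 1 */
    }

    int main(void)
    {
        unsigned int qd;

        for (qd = 2; qd <= 1024; qd *= 2)
            printf("queue_depth=%4u -> chunk_bits=%u\n", qd, bits_needed(qd));
        return 0;
    }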
1468 clt_path->clt->max_segments = in query_fast_reg_mode()
1469 min(clt_path->max_pages_per_mr, clt_path->clt->max_segments); in query_fast_reg_mode()
1509 struct rtrs_clt_sess *clt; in rtrs_clt_err_recovery_work() local
1513 clt = clt_path->clt; in rtrs_clt_err_recovery_work()
1514 delay_ms = clt->reconnect_delay_sec * 1000; in rtrs_clt_err_recovery_work()
1521 static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt, in alloc_path() argument
1564 strscpy(clt_path->s.sessname, clt->sessname, in alloc_path()
1566 clt_path->clt = clt; in alloc_path()
1662 rtrs_wrn(clt_path->clt, in create_con_cq_qp()
1787 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_rdma_route_resolved() local
1809 uuid_copy(&msg.paths_uuid, &clt->paths_uuid); in rtrs_rdma_route_resolved()
1813 rtrs_err(clt, "rdma_connect_locked(): %d\n", err); in rtrs_rdma_route_resolved()
1822 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_rdma_conn_established() local
1831 rtrs_err(clt, "Invalid RTRS connection response\n"); in rtrs_rdma_conn_established()
1835 rtrs_err(clt, "Invalid RTRS magic\n"); in rtrs_rdma_conn_established()
1840 rtrs_err(clt, "Unsupported major RTRS version: %d, expected %d\n", in rtrs_rdma_conn_established()
1846 rtrs_err(clt, "Invalid RTRS message: errno %d\n", in rtrs_rdma_conn_established()
1854 rtrs_err(clt, "Error: queue depth changed\n"); in rtrs_rdma_conn_established()
1860 rtrs_err(clt, in rtrs_rdma_conn_established()
1888 mutex_lock(&clt->paths_mutex); in rtrs_rdma_conn_established()
1889 clt->queue_depth = clt_path->queue_depth; in rtrs_rdma_conn_established()
1890 clt->max_io_size = min_not_zero(clt_path->max_io_size, in rtrs_rdma_conn_established()
1891 clt->max_io_size); in rtrs_rdma_conn_established()
1892 mutex_unlock(&clt->paths_mutex); in rtrs_rdma_conn_established()
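rtrs_rdma_conn_established() accepts the connection only after the reply passes a chain of checks: RTRS magic, major protocol version, server errno, and an unchanged queue depth on reconnect; only then are the negotiated limits folded into the session under paths_mutex. A compact userspace sketch of that validate-then-merge shape (the message layout and constants below are illustrative, not the RTRS wire format):

    #include <stdint.h>
    #include <errno.h>

    #define MAGIC     0x1BBD
    #define VER_MAJOR 2

    struct conn_rsp {
        uint16_t magic;
        uint8_t  ver_major;
        int16_t  errno_code;
        uint16_t queue_depth;
    };

    static int validate_rsp(const struct conn_rsp *msg, uint16_t prev_depth)
    {
        if (msg->magic != MAGIC)
            return -ECONNRESET;              /* not a matching peer */
        if (msg->ver_major != VER_MAJOR)
            return -ECONNRESET;              /* incompatible protocol version */
        if (msg->errno_code)
            return msg->errno_code;          /* server-side failure */
        if (prev_depth && prev_depth != msg->queue_depth)
            return -ECONNRESET;              /* depth must not change on reconnect */
        return 0;
    }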
2114 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_clt_path_up() local
2124 mutex_lock(&clt->paths_ev_mutex); in rtrs_clt_path_up()
2125 up = ++clt->paths_up; in rtrs_clt_path_up()
2131 if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + clt->paths_num) in rtrs_clt_path_up()
2132 clt->paths_up = clt->paths_num; in rtrs_clt_path_up()
2134 clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_RECONNECTED); in rtrs_clt_path_up()
2135 mutex_unlock(&clt->paths_ev_mutex); in rtrs_clt_path_up()
2145 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_clt_path_down() local
2151 mutex_lock(&clt->paths_ev_mutex); in rtrs_clt_path_down()
2152 WARN_ON(!clt->paths_up); in rtrs_clt_path_down()
2153 if (--clt->paths_up == 0) in rtrs_clt_path_down()
2154 clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_DISCONNECTED); in rtrs_clt_path_down()
2155 mutex_unlock(&clt->paths_ev_mutex); in rtrs_clt_path_down()
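The paths_up accounting relies on a sentinel: alloc_clt() starts paths_up at MAX_PATHS_NUM (line 2716 below), so increments during initial bring-up can never look like a 0-to-1 reconnect; once every initially configured path is up (up == MAX_PATHS_NUM + paths_num) the counter is clamped back to paths_num. From then on, in the upstream source the RECONNECTED event fires on the 0-to-1 edge and DISCONNECTED on the 1-to-0 edge. A tiny demo of that state machine, with printf standing in for clt->link_ev and the locking omitted:

    #include <stdio.h>

    #define MAX_PATHS_NUM 128   /* sentinel start value, as in alloc_clt() */

    struct sess { int paths_up, paths_num; };

    static void path_up(struct sess *s)
    {
        int up = ++s->paths_up;

        /* End of initial bring-up: clamp the sentinel back to reality. */
        if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + s->paths_num)
            s->paths_up = s->paths_num;
        else if (up == 1)
            printf("RECONNECTED\n");   /* first path back after a full outage */
    }

    static void path_down(struct sess *s)
    {
        if (--s->paths_up == 0)
            printf("DISCONNECTED\n");  /* last path just went away */
    }

    int main(void)
    {
        struct sess s = { .paths_up = MAX_PATHS_NUM, .paths_num = 2 };

        path_up(&s); path_up(&s);      /* initial bring-up, no event */
        path_down(&s); path_down(&s);  /* -> DISCONNECTED */
        path_up(&s);                   /* -> RECONNECTED */
        return 0;
    }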
2223 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_clt_remove_path_from_arr() local
2228 mutex_lock(&clt->paths_mutex); in rtrs_clt_remove_path_from_arr()
2263 clt->paths_num--; in rtrs_clt_remove_path_from_arr()
2270 next = rtrs_clt_get_next_path_or_null(&clt->paths_list, clt_path); in rtrs_clt_remove_path_from_arr()
2280 ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu); in rtrs_clt_remove_path_from_arr()
2282 lockdep_is_held(&clt->paths_mutex)) != clt_path) in rtrs_clt_remove_path_from_arr()
2308 mutex_unlock(&clt->paths_mutex); in rtrs_clt_remove_path_from_arr()
2313 struct rtrs_clt_sess *clt = clt_path->clt; in rtrs_clt_add_path_to_arr() local
2315 mutex_lock(&clt->paths_mutex); in rtrs_clt_add_path_to_arr()
2316 clt->paths_num++; in rtrs_clt_add_path_to_arr()
2318 list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list); in rtrs_clt_add_path_to_arr()
2319 mutex_unlock(&clt->paths_mutex); in rtrs_clt_add_path_to_arr()
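Removing a path must also scrub the round-robin cache: rtrs_clt_remove_path_from_arr() walks every CPU's pcpu_path slot under paths_mutex and, wherever the dying path is cached, publishes its successor (or NULL) so the RR picker never dereferences a freed path. A simplified userspace sketch of that invalidation, with an array of per-worker slots standing in for the per-CPU pointers and the RCU machinery omitted:

    #include <stddef.h>

    #define NR_WORKERS 4                      /* stand-in for per-CPU slots */

    struct path { struct path *next; };

    static struct path *cached[NR_WORKERS];   /* like clt->pcpu_path */

    /*
     * On removal, any worker still caching the dying path is repointed
     * at its successor (or NULL), mirroring rtrs_clt_remove_path_from_arr().
     */
    static void forget_path(struct path *dying, struct path *next)
    {
        for (size_t i = 0; i < NR_WORKERS; i++) {
            if (cached[i] == dying)
                cached[i] = next;
        }
    }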
2398 rtrs_err(clt_path->clt, "Path info request send failed: %s\n", in rtrs_clt_info_req_done()
2415 rtrs_err(clt_path->clt, in process_info_rsp()
2427 rtrs_err(clt_path->clt, in process_info_rsp()
2445 rtrs_err(clt_path->clt, "Incorrect [%d].len %d\n", in process_info_rsp()
2460 rtrs_err(clt_path->clt, in process_info_rsp()
2465 rtrs_err(clt_path->clt, "Incorrect total_len %d\n", total_len); in process_info_rsp()
2487 rtrs_err(clt_path->clt, "Path info response recv failed: %s\n", in rtrs_clt_info_rsp_done()
2494 rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n", in rtrs_clt_info_rsp_done()
2502 rtrs_err(clt_path->clt, "Path info response is malformed: type %d\n", in rtrs_clt_info_rsp_done()
2509 rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n", in rtrs_clt_info_rsp_done()
2552 rtrs_err(clt_path->clt, "rtrs_iu_post_recv(), err: %d\n", err); in rtrs_send_path_info()
2568 rtrs_err(clt_path->clt, "rtrs_iu_post_send(), err: %d\n", err); in rtrs_send_path_info()
2618 rtrs_err(clt_path->clt, in init_path()
2625 rtrs_err(clt_path->clt, in init_path()
2640 struct rtrs_clt_sess *clt; in rtrs_clt_reconnect_work() local
2645 clt = clt_path->clt; in rtrs_clt_reconnect_work()
2652 if (clt_path->reconnect_attempts >= clt->max_reconnect_attempts) { in rtrs_clt_reconnect_work()
2677 struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess, in rtrs_clt_dev_release() local
2680 mutex_destroy(&clt->paths_ev_mutex); in rtrs_clt_dev_release()
2681 mutex_destroy(&clt->paths_mutex); in rtrs_clt_dev_release()
2682 kfree(clt); in rtrs_clt_dev_release()
2692 struct rtrs_clt_sess *clt; in alloc_clt() local
2698 if (strlen(sessname) >= sizeof(clt->sessname)) in alloc_clt()
2701 clt = kzalloc(sizeof(*clt), GFP_KERNEL); in alloc_clt()
2702 if (!clt) in alloc_clt()
2705 clt->pcpu_path = alloc_percpu(typeof(*clt->pcpu_path)); in alloc_clt()
2706 if (!clt->pcpu_path) { in alloc_clt()
2707 kfree(clt); in alloc_clt()
2711 clt->dev.class = rtrs_clt_dev_class; in alloc_clt()
2712 clt->dev.release = rtrs_clt_dev_release; in alloc_clt()
2713 uuid_gen(&clt->paths_uuid); in alloc_clt()
2714 INIT_LIST_HEAD_RCU(&clt->paths_list); in alloc_clt()
2715 clt->paths_num = paths_num; in alloc_clt()
2716 clt->paths_up = MAX_PATHS_NUM; in alloc_clt()
2717 clt->port = port; in alloc_clt()
2718 clt->pdu_sz = pdu_sz; in alloc_clt()
2719 clt->max_segments = RTRS_MAX_SEGMENTS; in alloc_clt()
2720 clt->reconnect_delay_sec = reconnect_delay_sec; in alloc_clt()
2721 clt->max_reconnect_attempts = max_reconnect_attempts; in alloc_clt()
2722 clt->priv = priv; in alloc_clt()
2723 clt->link_ev = link_ev; in alloc_clt()
2724 clt->mp_policy = MP_POLICY_MIN_INFLIGHT; in alloc_clt()
2725 strscpy(clt->sessname, sessname, sizeof(clt->sessname)); in alloc_clt()
2726 init_waitqueue_head(&clt->permits_wait); in alloc_clt()
2727 mutex_init(&clt->paths_ev_mutex); in alloc_clt()
2728 mutex_init(&clt->paths_mutex); in alloc_clt()
2729 device_initialize(&clt->dev); in alloc_clt()
2731 err = dev_set_name(&clt->dev, "%s", sessname); in alloc_clt()
2739 dev_set_uevent_suppress(&clt->dev, true); in alloc_clt()
2740 err = device_add(&clt->dev); in alloc_clt()
2744 clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj); in alloc_clt()
2745 if (!clt->kobj_paths) { in alloc_clt()
2749 err = rtrs_clt_create_sysfs_root_files(clt); in alloc_clt()
2751 kobject_del(clt->kobj_paths); in alloc_clt()
2752 kobject_put(clt->kobj_paths); in alloc_clt()
2755 dev_set_uevent_suppress(&clt->dev, false); in alloc_clt()
2756 kobject_uevent(&clt->dev.kobj, KOBJ_ADD); in alloc_clt()
2758 return clt; in alloc_clt()
2760 device_del(&clt->dev); in alloc_clt()
2762 free_percpu(clt->pcpu_path); in alloc_clt()
2763 put_device(&clt->dev); in alloc_clt()
2767 static void free_clt(struct rtrs_clt_sess *clt) in free_clt() argument
2769 free_percpu(clt->pcpu_path); in free_clt()
2774 device_unregister(&clt->dev); in free_clt()
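alloc_clt() follows the kernel's construct-then-unwind discipline: each failing step jumps to a label that releases exactly what was acquired before it, and because device_initialize() has already run, the last resort is put_device(), so the release callback (rtrs_clt_dev_release(), which destroys the mutexes and frees clt) performs the final cleanup. A generic userspace sketch of the same goto-unwind shape (the resources are placeholders):

    #include <stdlib.h>
    #include <stdio.h>

    struct clt { int *a; int *b; FILE *f; };

    static struct clt *alloc_clt_like(void)
    {
        struct clt *c = calloc(1, sizeof(*c));

        if (!c)
            return NULL;
        c->a = malloc(64);
        if (!c->a)
            goto err_free_c;
        c->b = malloc(64);
        if (!c->b)
            goto err_free_a;
        c->f = fopen("/dev/null", "r");
        if (!c->f)
            goto err_free_b;
        return c;

    err_free_b:                /* unwind in reverse order of acquisition */
        free(c->b);
    err_free_a:
        free(c->a);
    err_free_c:
        free(c);
        return NULL;
    }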
2803 struct rtrs_clt_sess *clt; in rtrs_clt_open() local
2812 clt = alloc_clt(pathname, paths_num, port, pdu_sz, ops->priv, in rtrs_clt_open()
2816 if (IS_ERR(clt)) { in rtrs_clt_open()
2817 err = PTR_ERR(clt); in rtrs_clt_open()
2823 clt_path = alloc_path(clt, &paths[i], nr_cpu_ids, in rtrs_clt_open()
2831 list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list); in rtrs_clt_open()
2853 err = alloc_permits(clt); in rtrs_clt_open()
2857 return clt; in rtrs_clt_open()
2860 list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) { in rtrs_clt_open()
2865 rtrs_clt_destroy_sysfs_root(clt); in rtrs_clt_open()
2866 free_clt(clt); in rtrs_clt_open()
2877 void rtrs_clt_close(struct rtrs_clt_sess *clt) in rtrs_clt_close() argument
2882 rtrs_clt_destroy_sysfs_root(clt); in rtrs_clt_close()
2885 list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) { in rtrs_clt_close()
2890 free_permits(clt); in rtrs_clt_close()
2891 free_clt(clt); in rtrs_clt_close()
2954 void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value) in rtrs_clt_set_max_reconnect_attempts() argument
2956 clt->max_reconnect_attempts = (unsigned int)value; in rtrs_clt_set_max_reconnect_attempts()
2959 int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt) in rtrs_clt_get_max_reconnect_attempts() argument
2961 return (int)clt->max_reconnect_attempts; in rtrs_clt_get_max_reconnect_attempts()
2989 struct rtrs_clt_sess *clt, struct rtrs_permit *permit, in rtrs_clt_request() argument
3015 for (path_it_init(&it, clt); in rtrs_clt_request()
3016 (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { in rtrs_clt_request()
3021 rtrs_wrn_rl(clt_path->clt, in rtrs_clt_request()
3049 int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index) in rtrs_clt_rdma_cq_direct() argument
3058 for (path_it_init(&it, clt); in rtrs_clt_rdma_cq_direct()
3059 (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { in rtrs_clt_rdma_cq_direct()
3083 int rtrs_clt_query(struct rtrs_clt_sess *clt, struct rtrs_attrs *attr) in rtrs_clt_query() argument
3085 if (!rtrs_clt_is_connected(clt)) in rtrs_clt_query()
3088 attr->queue_depth = clt->queue_depth; in rtrs_clt_query()
3089 attr->max_segments = clt->max_segments; in rtrs_clt_query()
3091 attr->max_io_size = min_t(int, clt->max_io_size, in rtrs_clt_query()
3092 clt->max_segments * SZ_4K); in rtrs_clt_query()
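rtrs_clt_query() reports an effective max_io_size capped by what max_segments can cover at 4 KiB per segment, so callers never build an I/O the memory region cannot map. The same clamp in isolation, with SZ_4K expanded (the values are made-up examples):

    #include <stdio.h>

    #define SZ_4K 4096

    int main(void)
    {
        unsigned int max_io_size = 131072;   /* negotiated with the server */
        unsigned int max_segments = 16;      /* per-MR segment limit */
        unsigned int eff;

        eff = max_io_size < max_segments * SZ_4K ?
              max_io_size : max_segments * SZ_4K;
        printf("effective max_io_size = %u\n", eff);   /* 65536 here */
        return 0;
    }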
3098 int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt, in rtrs_clt_create_path_from_sysfs() argument
3104 clt_path = alloc_path(clt, addr, nr_cpu_ids, 0); in rtrs_clt_create_path_from_sysfs()
3108 mutex_lock(&clt->paths_mutex); in rtrs_clt_create_path_from_sysfs()
3109 if (clt->paths_num == 0) { in rtrs_clt_create_path_from_sysfs()
3118 mutex_unlock(&clt->paths_mutex); in rtrs_clt_create_path_from_sysfs()