Lines matching refs:srv (RTRS server, drivers/infiniband/ulp/rtrs/rtrs-srv.c)

102 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_free_ops_ids() local
106 for (i = 0; i < srv->queue_depth; i++) in rtrs_srv_free_ops_ids()
131 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_alloc_ops_ids() local
135 srv_path->ops_ids = kcalloc(srv->queue_depth, in rtrs_srv_alloc_ops_ids()
141 for (i = 0; i < srv->queue_depth; ++i) { in rtrs_srv_alloc_ops_ids()
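
The pair above allocates one ops id per slot of the session's queue depth and
tears them all down again, unwinding on partial failure. A minimal userspace
sketch of that allocate-then-unwind pattern (names hypothetical, not the
kernel API):

    #include <stdlib.h>

    struct op_id { int msg_id; };

    /* Allocate queue_depth id slots; roll everything back on failure. */
    static struct op_id **alloc_ops_ids(size_t queue_depth)
    {
        struct op_id **ids = calloc(queue_depth, sizeof(*ids));
        size_t i;

        if (!ids)
            return NULL;
        for (i = 0; i < queue_depth; i++) {
            ids[i] = calloc(1, sizeof(**ids));
            if (!ids[i])
                goto undo;
        }
        return ids;
    undo:
        while (i--)     /* free only the slots that were filled */
            free(ids[i]);
        free(ids);
        return NULL;
    }

    static void free_ops_ids(struct op_id **ids, size_t queue_depth)
    {
        for (size_t i = 0; i < queue_depth; i++)
            free(ids[i]);
        free(ids);
    }
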
537 void rtrs_srv_set_sess_priv(struct rtrs_srv_sess *srv, void *priv) in rtrs_srv_set_sess_priv() argument
539 srv->priv = priv; in rtrs_srv_set_sess_priv()
562 struct rtrs_srv_sess *srv = srv_path->srv; in map_cont_bufs() local
579 mrs_num = srv->queue_depth; in map_cont_bufs()
583 mrs_num = DIV_ROUND_UP(srv->queue_depth, chunks_per_mr); in map_cont_bufs()
584 chunks_per_mr = DIV_ROUND_UP(srv->queue_depth, mrs_num); in map_cont_bufs()
601 srv->queue_depth - chunks); in map_cont_bufs()
608 sg_set_page(s, srv->chunks[chunks + i], in map_cont_bufs()
649 chunk_bits = ilog2(srv->queue_depth - 1) + 1; in map_cont_bufs()
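
map_cont_bufs() spreads the queue_depth chunks over several MRs and then
derives chunk_bits, the bit width needed to address any chunk index. A worked
example with an assumed depth of 512 and a hypothetical per-MR limit of 128
chunks:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Smallest bit width holding 0..n-1, i.e. ilog2(n - 1) + 1 above. */
    static unsigned int bits_for(unsigned int n)
    {
        unsigned int bits = 0;

        for (n--; n; n >>= 1)
            bits++;
        return bits;
    }

    int main(void)
    {
        unsigned int queue_depth = 512, chunks_per_mr = 128;
        unsigned int mrs_num = DIV_ROUND_UP(queue_depth, chunks_per_mr);

        /* Rebalance so chunks spread evenly across the MRs. */
        chunks_per_mr = DIV_ROUND_UP(queue_depth, mrs_num);
        printf("mrs_num=%u chunks_per_mr=%u chunk_bits=%u\n",
               mrs_num, chunks_per_mr, bits_for(queue_depth));
        /* prints: mrs_num=4 chunks_per_mr=128 chunk_bits=9 */
        return 0;
    }
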
712 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_path_up() local
713 struct rtrs_srv_ctx *ctx = srv->ctx; in rtrs_srv_path_up()
716 mutex_lock(&srv->paths_ev_mutex); in rtrs_srv_path_up()
717 up = ++srv->paths_up; in rtrs_srv_path_up()
719 ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL); in rtrs_srv_path_up()
720 mutex_unlock(&srv->paths_ev_mutex); in rtrs_srv_path_up()
728 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_path_down() local
729 struct rtrs_srv_ctx *ctx = srv->ctx; in rtrs_srv_path_down()
735 mutex_lock(&srv->paths_ev_mutex); in rtrs_srv_path_down()
736 WARN_ON(!srv->paths_up); in rtrs_srv_path_down()
737 if (--srv->paths_up == 0) in rtrs_srv_path_down()
738 ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_DISCONNECTED, srv->priv); in rtrs_srv_path_down()
739 mutex_unlock(&srv->paths_ev_mutex); in rtrs_srv_path_down()
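
rtrs_srv_path_up() and rtrs_srv_path_down() keep the paths_up counter under
paths_ev_mutex so the user's link_ev callback fires exactly once per session:
CONNECTED when the first path comes up, DISCONNECTED when the last one goes
down. A userspace sketch of that first-up/last-down pattern (a pthread mutex
standing in for the kernel mutex):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t ev_lock = PTHREAD_MUTEX_INITIALIZER;
    static int paths_up;

    static void path_up(void)
    {
        pthread_mutex_lock(&ev_lock);
        if (++paths_up == 1)    /* first path: session becomes live */
            printf("link_ev: CONNECTED\n");
        pthread_mutex_unlock(&ev_lock);
    }

    static void path_down(void)
    {
        pthread_mutex_lock(&ev_lock);
        if (--paths_up == 0)    /* last path: session is gone */
            printf("link_ev: DISCONNECTED\n");
        pthread_mutex_unlock(&ev_lock);
    }
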
745 struct rtrs_srv_sess *srv; in exist_pathname() local
750 list_for_each_entry(srv, &ctx->srv_list, ctx_list) { in exist_pathname()
751 mutex_lock(&srv->paths_mutex); in exist_pathname()
754 if (uuid_equal(&srv->paths_uuid, path_uuid)) { in exist_pathname()
755 mutex_unlock(&srv->paths_mutex); in exist_pathname()
759 list_for_each_entry(srv_path, &srv->paths_list, s.entry) { in exist_pathname()
766 mutex_unlock(&srv->paths_mutex); in exist_pathname()
800 if (exist_pathname(srv_path->srv->ctx, in process_info_req()
801 msg->pathname, &srv_path->srv->paths_uuid)) { in process_info_req()
851 get_device(&srv_path->srv->dev); in process_info_req()
962 struct rtrs_srv_sess *srv = srv_path->srv; in post_recv_path() local
971 q_size = srv->queue_depth; in post_recv_path()
989 struct rtrs_srv_sess *srv = srv_path->srv; in process_read() local
990 struct rtrs_srv_ctx *ctx = srv->ctx; in process_read()
1017 data = page_address(srv->chunks[buf_id]); in process_read()
1018 ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len, in process_read()
1047 struct rtrs_srv_sess *srv = srv_path->srv; in process_write() local
1048 struct rtrs_srv_ctx *ctx = srv->ctx; in process_write()
1070 data = page_address(srv->chunks[buf_id]); in process_write()
1071 ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len, in process_write()
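
Both I/O paths resolve the preallocated chunk page for buf_id and hand the
payload to the user's rdma_ev callback along with the priv pointer stored by
rtrs_srv_set_sess_priv() at line 539. A runnable mock of that dispatch, with
a deliberately simplified callback signature (the real rdma_ev also carries
an op id and the user-message part):

    #include <stdio.h>
    #include <stddef.h>

    struct ops {
        int (*rdma_ev)(void *priv, void *data, size_t data_len);
    };

    struct sess {
        struct ops *ops;
        void *priv;           /* set by the user, echoed back on each I/O */
        char (*chunks)[4096]; /* queue_depth preallocated buffers */
    };

    static int my_rdma_ev(void *priv, void *data, size_t data_len)
    {
        printf("got %zu bytes for session %p\n", data_len, priv);
        return 0;
    }

    /* page_address(srv->chunks[buf_id]) analogue plus the dispatch */
    static int process_io(struct sess *s, unsigned int buf_id, size_t len)
    {
        return s->ops->rdma_ev(s->priv, s->chunks[buf_id], len);
    }

    int main(void)
    {
        static char bufs[4][4096];
        struct ops ops = { .rdma_ev = my_rdma_ev };
        struct sess s = { .ops = &ops, .priv = &s, .chunks = bufs };

        return process_io(&s, 2, 100);
    }
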
1134 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_inv_rkey_done() local
1145 data = page_address(srv->chunks[msg_id]) + off; in rtrs_srv_inv_rkey_done()
1191 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_rdma_done() local
1228 if (msg_id >= srv->queue_depth || off >= max_chunk_size) { in rtrs_srv_rdma_done()
1247 data = page_address(srv->chunks[msg_id]) + off; in rtrs_srv_rdma_done()
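
rtrs_srv_rdma_done() recovers msg_id and off from the 32-bit RDMA immediate
payload and bounds-checks both against queue_depth and max_chunk_size (line
1228). The listing doesn't show the exact rtrs field layout, so the sketch
below only demonstrates the generic shift-and-mask packing such a scheme
uses; CHUNK_BITS and the high/low split are illustrative assumptions:

    #include <assert.h>
    #include <stdint.h>

    #define CHUNK_BITS 9    /* ilog2(512 - 1) + 1, per map_cont_bufs() */
    #define OFF_MASK   ((1u << CHUNK_BITS) - 1)

    /* Assumed layout: msg_id in the high bits, offset in the low bits. */
    static uint32_t pack_imm(uint32_t msg_id, uint32_t off)
    {
        return (msg_id << CHUNK_BITS) | off;
    }

    static void unpack_imm(uint32_t imm, uint32_t *msg_id, uint32_t *off)
    {
        *msg_id = imm >> CHUNK_BITS;
        *off = imm & OFF_MASK;
    }

    int main(void)
    {
        uint32_t msg_id, off;

        unpack_imm(pack_imm(42, 100), &msg_id, &off);
        assert(msg_id == 42 && off == 100);
        return 0;
    }
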
1284 int rtrs_srv_get_path_name(struct rtrs_srv_sess *srv, char *pathname, in rtrs_srv_get_path_name() argument
1290 mutex_lock(&srv->paths_mutex); in rtrs_srv_get_path_name()
1291 list_for_each_entry(srv_path, &srv->paths_list, s.entry) { in rtrs_srv_get_path_name()
1299 mutex_unlock(&srv->paths_mutex); in rtrs_srv_get_path_name()
1309 int rtrs_srv_get_queue_depth(struct rtrs_srv_sess *srv) in rtrs_srv_get_queue_depth() argument
1311 return srv->queue_depth; in rtrs_srv_get_queue_depth()
1335 struct rtrs_srv_sess *srv = container_of(dev, struct rtrs_srv_sess, in rtrs_srv_dev_release() local
1338 kfree(srv); in rtrs_srv_dev_release()
1341 static void free_srv(struct rtrs_srv_sess *srv) in free_srv() argument
1345 WARN_ON(refcount_read(&srv->refcount)); in free_srv()
1346 for (i = 0; i < srv->queue_depth; i++) in free_srv()
1347 __free_pages(srv->chunks[i], get_order(max_chunk_size)); in free_srv()
1348 kfree(srv->chunks); in free_srv()
1349 mutex_destroy(&srv->paths_mutex); in free_srv()
1350 mutex_destroy(&srv->paths_ev_mutex); in free_srv()
1352 put_device(&srv->dev); in free_srv()
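
Note the split in free_srv(): it releases the chunks and mutexes itself, but
the final kfree(srv) lives in rtrs_srv_dev_release(), which only runs when
put_device() drops the embedded device's last reference. A userspace
analogue of that release-callback pattern (single-threaded counter; the
kernel uses an atomic kref inside struct device):

    #include <stddef.h>
    #include <stdlib.h>

    struct dev {
        int refcount;
        void (*release)(struct dev *);
    };

    struct sess {
        struct dev dev;     /* embedded, as struct device is in rtrs */
        /* ... chunks, mutexes, ... */
    };

    /* container_of() analogue: recover the outer sess from its dev. */
    static void sess_release(struct dev *d)
    {
        struct sess *s = (struct sess *)((char *)d - offsetof(struct sess, dev));

        free(s);            /* the kfree(srv) equivalent lives only here */
    }

    static void put_dev(struct dev *d)
    {
        if (--d->refcount == 0)
            d->release(d);
    }

    int main(void)
    {
        struct sess *s = calloc(1, sizeof(*s));

        if (!s)
            return 1;
        s->dev.refcount = 1;
        s->dev.release = sess_release;
        put_dev(&s->dev);   /* last reference: sess_release() frees */
        return 0;
    }
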
1359 struct rtrs_srv_sess *srv; in get_or_create_srv() local
1363 list_for_each_entry(srv, &ctx->srv_list, ctx_list) { in get_or_create_srv()
1364 if (uuid_equal(&srv->paths_uuid, paths_uuid) && in get_or_create_srv()
1365 refcount_inc_not_zero(&srv->refcount)) { in get_or_create_srv()
1367 return srv; in get_or_create_srv()
1381 srv = kzalloc(sizeof(*srv), GFP_KERNEL); in get_or_create_srv()
1382 if (!srv) in get_or_create_srv()
1385 INIT_LIST_HEAD(&srv->paths_list); in get_or_create_srv()
1386 mutex_init(&srv->paths_mutex); in get_or_create_srv()
1387 mutex_init(&srv->paths_ev_mutex); in get_or_create_srv()
1388 uuid_copy(&srv->paths_uuid, paths_uuid); in get_or_create_srv()
1389 srv->queue_depth = sess_queue_depth; in get_or_create_srv()
1390 srv->ctx = ctx; in get_or_create_srv()
1391 device_initialize(&srv->dev); in get_or_create_srv()
1392 srv->dev.release = rtrs_srv_dev_release; in get_or_create_srv()
1394 srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks), in get_or_create_srv()
1396 if (!srv->chunks) in get_or_create_srv()
1399 for (i = 0; i < srv->queue_depth; i++) { in get_or_create_srv()
1400 srv->chunks[i] = alloc_pages(GFP_KERNEL, in get_or_create_srv()
1402 if (!srv->chunks[i]) in get_or_create_srv()
1405 refcount_set(&srv->refcount, 1); in get_or_create_srv()
1407 list_add(&srv->ctx_list, &ctx->srv_list); in get_or_create_srv()
1410 return srv; in get_or_create_srv()
1414 __free_pages(srv->chunks[i], get_order(max_chunk_size)); in get_or_create_srv()
1415 kfree(srv->chunks); in get_or_create_srv()
1418 kfree(srv); in get_or_create_srv()
1422 static void put_srv(struct rtrs_srv_sess *srv) in put_srv() argument
1424 if (refcount_dec_and_test(&srv->refcount)) { in put_srv()
1425 struct rtrs_srv_ctx *ctx = srv->ctx; in put_srv()
1427 WARN_ON(srv->dev.kobj.state_in_sysfs); in put_srv()
1430 list_del(&srv->ctx_list); in put_srv()
1432 free_srv(srv); in put_srv()
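
get_or_create_srv() and put_srv() bracket the session lifetime: look the
uuid up under the context lock and take a reference, or allocate a fresh
object with refcount 1 and link it into ctx->srv_list; the last put_srv()
unlinks and frees. A compact userspace sketch of the pattern (one global
lock and a plain counter stand in for refcount_inc_not_zero() and friends):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct srv {
        char uuid[16];
        int refcount;
        struct srv *next;   /* ctx->srv_list analogue */
    };

    static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct srv *srv_list;

    static struct srv *get_or_create_srv(const char uuid[16])
    {
        struct srv *s;

        pthread_mutex_lock(&ctx_lock);
        for (s = srv_list; s; s = s->next) {
            if (!memcmp(s->uuid, uuid, 16)) {
                s->refcount++;          /* found: just take a reference */
                goto out;
            }
        }
        s = calloc(1, sizeof(*s));      /* miss: create with one ref */
        if (s) {
            memcpy(s->uuid, uuid, 16);
            s->refcount = 1;
            s->next = srv_list;
            srv_list = s;
        }
    out:
        pthread_mutex_unlock(&ctx_lock);
        return s;
    }

    static void put_srv(struct srv *s)
    {
        pthread_mutex_lock(&ctx_lock);
        if (--s->refcount == 0) {
            struct srv **p;

            for (p = &srv_list; *p != s; p = &(*p)->next)
                ;
            *p = s->next;               /* list_del analogue */
            free(s);
        }
        pthread_mutex_unlock(&ctx_lock);
    }

    int main(void)
    {
        char uuid[16] = "path-group-0";
        struct srv *a = get_or_create_srv(uuid);
        struct srv *b = get_or_create_srv(uuid);  /* same object, ref 2 */

        if (!a || a != b)
            return 1;
        put_srv(b);
        put_srv(a);                     /* freed on the last put */
        return 0;
    }
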
1436 static void __add_path_to_srv(struct rtrs_srv_sess *srv, in __add_path_to_srv() argument
1439 list_add_tail(&srv_path->s.entry, &srv->paths_list); in __add_path_to_srv()
1440 srv->paths_num++; in __add_path_to_srv()
1441 WARN_ON(srv->paths_num >= MAX_PATHS_NUM); in __add_path_to_srv()
1446 struct rtrs_srv_sess *srv = srv_path->srv; in del_path_from_srv() local
1448 if (WARN_ON(!srv)) in del_path_from_srv()
1451 mutex_lock(&srv->paths_mutex); in del_path_from_srv()
1453 WARN_ON(!srv->paths_num); in del_path_from_srv()
1454 srv->paths_num--; in del_path_from_srv()
1455 mutex_unlock(&srv->paths_mutex); in del_path_from_srv()
1482 static bool __is_path_w_addr_exists(struct rtrs_srv_sess *srv, in __is_path_w_addr_exists() argument
1487 list_for_each_entry(srv_path, &srv->paths_list, s.entry) in __is_path_w_addr_exists()
1554 put_srv(srv_path->srv); in rtrs_srv_close_work()
1555 srv_path->srv = NULL; in rtrs_srv_close_work()
1566 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_rdma_do_accept() local
1580 .queue_depth = cpu_to_le16(srv->queue_depth), in rtrs_rdma_do_accept()
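
The cpu_to_le16() at line 1580 pins the handshake's wire format to
little-endian, so queue_depth is byte-swapped on big-endian hosts before it
goes out. A tiny portable demo of that conversion:

    #include <stdint.h>
    #include <stdio.h>

    /* Write v as little-endian regardless of host byte order. */
    static void put_le16(uint8_t out[2], uint16_t v)
    {
        out[0] = v & 0xff;  /* low byte travels first */
        out[1] = v >> 8;
    }

    int main(void)
    {
        uint8_t wire[2];

        put_le16(wire, 512);                      /* example queue_depth */
        printf("%02x %02x\n", wire[0], wire[1]);  /* prints: 00 02 */
        return 0;
    }
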
1615 __find_path(struct rtrs_srv_sess *srv, const uuid_t *sess_uuid) in __find_path() argument
1619 list_for_each_entry(srv_path, &srv->paths_list, s.entry) { in __find_path()
1631 struct rtrs_srv_sess *srv = srv_path->srv; in create_con() local
1660 s->signal_interval = min_not_zero(srv->queue_depth, in create_con()
1667 srv->queue_depth * (1 + 4) + 1); in create_con()
1671 srv->queue_depth * (1 + 2) + 1); in create_con()
1673 max_recv_wr = srv->queue_depth + 1; in create_con()
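
create_con() budgets the QP's work requests from the queue depth: each
request may consume several send WRs (the 1 + 4 and 1 + 2 factors above,
depending on whether always_invalidate is on), plus one spare, while
receives need one WR per chunk plus one. Worked numbers for an assumed
depth of 512:

    #include <stdio.h>

    int main(void)
    {
        unsigned int queue_depth = 512;   /* illustrative value */

        /* Factors copied from create_con(); the +1 is a spare WR. */
        printf("send WRs, always_invalidate: %u\n",
               queue_depth * (1 + 4) + 1); /* 2561 */
        printf("send WRs, otherwise:        %u\n",
               queue_depth * (1 + 2) + 1); /* 1537 */
        printf("recv WRs:                   %u\n",
               queue_depth + 1);           /* 513 */
        return 0;
    }
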
1712 static struct rtrs_srv_path *__alloc_path(struct rtrs_srv_sess *srv, in __alloc_path() argument
1723 if (srv->paths_num >= MAX_PATHS_NUM) { in __alloc_path()
1727 if (__is_path_w_addr_exists(srv, &cm_id->route.addr)) { in __alloc_path()
1746 srv_path->dma_addr = kcalloc(srv->queue_depth, in __alloc_path()
1758 srv_path->srv = srv; in __alloc_path()
1790 __add_path_to_srv(srv, srv_path); in __alloc_path()
1818 struct rtrs_srv_sess *srv; in rtrs_rdma_connect() local
1851 srv = get_or_create_srv(ctx, &msg->paths_uuid, msg->first_conn); in rtrs_rdma_connect()
1852 if (IS_ERR(srv)) { in rtrs_rdma_connect()
1853 err = PTR_ERR(srv); in rtrs_rdma_connect()
1857 mutex_lock(&srv->paths_mutex); in rtrs_rdma_connect()
1858 srv_path = __find_path(srv, &msg->sess_uuid); in rtrs_rdma_connect()
1863 put_srv(srv); in rtrs_rdma_connect()
1868 mutex_unlock(&srv->paths_mutex); in rtrs_rdma_connect()
1877 mutex_unlock(&srv->paths_mutex); in rtrs_rdma_connect()
1883 mutex_unlock(&srv->paths_mutex); in rtrs_rdma_connect()
1887 srv_path = __alloc_path(srv, cm_id, con_num, recon_cnt, in rtrs_rdma_connect()
1890 mutex_unlock(&srv->paths_mutex); in rtrs_rdma_connect()
1891 put_srv(srv); in rtrs_rdma_connect()
1921 mutex_unlock(&srv->paths_mutex); in rtrs_rdma_connect()
1929 mutex_unlock(&srv->paths_mutex); in rtrs_rdma_connect()
2176 static void close_paths(struct rtrs_srv_sess *srv) in close_paths() argument
2180 mutex_lock(&srv->paths_mutex); in close_paths()
2181 list_for_each_entry(srv_path, &srv->paths_list, s.entry) in close_paths()
2183 mutex_unlock(&srv->paths_mutex); in close_paths()
2188 struct rtrs_srv_sess *srv; in close_ctx() local
2191 list_for_each_entry(srv, &ctx->srv_list, ctx_list) in close_ctx()
2192 close_paths(srv); in close_ctx()