Lines Matching refs:osd

49 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
50 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
51 static void link_linger(struct ceph_osd *osd,
53 static void unlink_linger(struct ceph_osd *osd,
55 static void clear_backoffs(struct ceph_osd *osd);
77 static inline void verify_osd_locked(struct ceph_osd *osd) in verify_osd_locked() argument
79 struct ceph_osd_client *osdc = osd->o_osdc; in verify_osd_locked()
81 WARN_ON(!(mutex_is_locked(&osd->lock) && in verify_osd_locked()
92 static inline void verify_osd_locked(struct ceph_osd *osd) { } in verify_osd_locked() argument
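
Because this listing only shows lines that match "osd", the WARN_ON condition at line 81 is cut off mid-expression. For context, the lockdep-enabled helper presumably reads roughly as below; this is a sketch reconstructed from the matched lines, assuming the file-local rwsem_is_wrlocked() helper, and the exact condition may differ between kernel versions.

    static inline void verify_osd_locked(struct ceph_osd *osd)
    {
            struct ceph_osd_client *osdc = osd->o_osdc;

            /* fine if osd->lock plus osdc->lock (any mode) are held,
             * or if osdc->lock is held for write */
            WARN_ON(!(mutex_is_locked(&osd->lock) &&
                      rwsem_is_locked(&osdc->lock)) &&
                    !rwsem_is_wrlocked(&osdc->lock));
    }
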
428 t->osd = CEPH_HOMELESS_OSD; in target_init()
457 dest->osd = src->osd; in target_copy()
1139 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); in DEFINE_RB_FUNCS() local
1141 for (p = rb_first(&osd->o_requests); p; ) { in DEFINE_RB_FUNCS()
1161 static bool osd_homeless(struct ceph_osd *osd) in osd_homeless() argument
1163 return osd->o_osd == CEPH_HOMELESS_OSD; in osd_homeless()
1166 static bool osd_registered(struct ceph_osd *osd) in osd_registered() argument
1168 verify_osdc_locked(osd->o_osdc); in osd_registered()
1170 return !RB_EMPTY_NODE(&osd->o_node); in osd_registered()
1176 static void osd_init(struct ceph_osd *osd) in osd_init() argument
1178 refcount_set(&osd->o_ref, 1); in osd_init()
1179 RB_CLEAR_NODE(&osd->o_node); in osd_init()
1180 osd->o_requests = RB_ROOT; in osd_init()
1181 osd->o_linger_requests = RB_ROOT; in osd_init()
1182 osd->o_backoff_mappings = RB_ROOT; in osd_init()
1183 osd->o_backoffs_by_id = RB_ROOT; in osd_init()
1184 INIT_LIST_HEAD(&osd->o_osd_lru); in osd_init()
1185 INIT_LIST_HEAD(&osd->o_keepalive_item); in osd_init()
1186 osd->o_incarnation = 1; in osd_init()
1187 mutex_init(&osd->lock); in osd_init()
1190 static void osd_cleanup(struct ceph_osd *osd) in osd_cleanup() argument
1192 WARN_ON(!RB_EMPTY_NODE(&osd->o_node)); in osd_cleanup()
1193 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests)); in osd_cleanup()
1194 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests)); in osd_cleanup()
1195 WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings)); in osd_cleanup()
1196 WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id)); in osd_cleanup()
1197 WARN_ON(!list_empty(&osd->o_osd_lru)); in osd_cleanup()
1198 WARN_ON(!list_empty(&osd->o_keepalive_item)); in osd_cleanup()
1200 if (osd->o_auth.authorizer) { in osd_cleanup()
1201 WARN_ON(osd_homeless(osd)); in osd_cleanup()
1202 ceph_auth_destroy_authorizer(osd->o_auth.authorizer); in osd_cleanup()
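
Taken together, osd_init() and osd_cleanup() touch every per-OSD field referenced in this listing. A rough reconstruction of the struct ceph_osd layout they imply is shown below; it is a sketch only, the authoritative definition lives in include/linux/ceph/osd_client.h and field order, types, and extra members may differ.

    struct ceph_osd {
            refcount_t o_ref;                  /* lifetime, see get_osd()/put_osd() */
            struct ceph_osd_client *o_osdc;
            int o_osd;                         /* osd id, CEPH_HOMELESS_OSD if unmapped */
            int o_incarnation;                 /* bumped on reconnect, see reopen_osd() */
            struct rb_node o_node;             /* entry in osdc->osds */
            struct ceph_connection o_con;
            struct rb_root o_requests;         /* in-flight requests on this session */
            struct rb_root o_linger_requests;  /* watch/notify lingers */
            struct rb_root o_backoff_mappings;
            struct rb_root o_backoffs_by_id;
            struct list_head o_osd_lru;        /* idle-session LRU, see __move_osd_to_lru() */
            struct ceph_auth_handshake o_auth;
            unsigned long lru_ttl;
            struct list_head o_keepalive_item;
            struct mutex lock;                 /* protects the rbtrees above */
    };
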
1211 struct ceph_osd *osd; in create_osd() local
1215 osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL); in create_osd()
1216 osd_init(osd); in create_osd()
1217 osd->o_osdc = osdc; in create_osd()
1218 osd->o_osd = onum; in create_osd()
1220 ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr); in create_osd()
1222 return osd; in create_osd()
1225 static struct ceph_osd *get_osd(struct ceph_osd *osd) in get_osd() argument
1227 if (refcount_inc_not_zero(&osd->o_ref)) { in get_osd()
1228 dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1, in get_osd()
1229 refcount_read(&osd->o_ref)); in get_osd()
1230 return osd; in get_osd()
1232 dout("get_osd %p FAIL\n", osd); in get_osd()
1237 static void put_osd(struct ceph_osd *osd) in put_osd() argument
1239 dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref), in put_osd()
1240 refcount_read(&osd->o_ref) - 1); in put_osd()
1241 if (refcount_dec_and_test(&osd->o_ref)) { in put_osd()
1242 osd_cleanup(osd); in put_osd()
1243 kfree(osd); in put_osd()
1247 DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node) in DEFINE_RB_FUNCS() argument
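
The DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node) invocation above is what generates the insert_osd(), erase_osd() and lookup_osd() helpers used later in this listing (e.g. lines 1320, 1374, 1382). A simplified sketch of the lookup case that such a macro expands to, keyed on o_osd, is shown below; the real macro in the ceph headers also generates the insert/erase variants and handles RB_CLEAR_NODE and duplicate keys.

    /* simplified expansion of the lookup helper; relies on <linux/rbtree.h> */
    static struct ceph_osd *lookup_osd(struct rb_root *root, int key)
    {
            struct rb_node *n = root->rb_node;

            while (n) {
                    struct ceph_osd *cur = rb_entry(n, struct ceph_osd, o_node);

                    if (key < cur->o_osd)
                            n = n->rb_left;
                    else if (key > cur->o_osd)
                            n = n->rb_right;
                    else
                            return cur;
            }

            return NULL;
    }
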
1249 static void __move_osd_to_lru(struct ceph_osd *osd) in __move_osd_to_lru()
1251 struct ceph_osd_client *osdc = osd->o_osdc; in __move_osd_to_lru()
1253 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); in __move_osd_to_lru()
1254 BUG_ON(!list_empty(&osd->o_osd_lru)); in __move_osd_to_lru()
1257 list_add_tail(&osd->o_osd_lru, &osdc->osd_lru); in __move_osd_to_lru()
1260 osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl; in __move_osd_to_lru()
1263 static void maybe_move_osd_to_lru(struct ceph_osd *osd) in maybe_move_osd_to_lru() argument
1265 if (RB_EMPTY_ROOT(&osd->o_requests) && in maybe_move_osd_to_lru()
1266 RB_EMPTY_ROOT(&osd->o_linger_requests)) in maybe_move_osd_to_lru()
1267 __move_osd_to_lru(osd); in maybe_move_osd_to_lru()
1270 static void __remove_osd_from_lru(struct ceph_osd *osd) in __remove_osd_from_lru() argument
1272 struct ceph_osd_client *osdc = osd->o_osdc; in __remove_osd_from_lru()
1274 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); in __remove_osd_from_lru()
1277 if (!list_empty(&osd->o_osd_lru)) in __remove_osd_from_lru()
1278 list_del_init(&osd->o_osd_lru); in __remove_osd_from_lru()
1286 static void close_osd(struct ceph_osd *osd) in close_osd() argument
1288 struct ceph_osd_client *osdc = osd->o_osdc; in close_osd()
1292 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); in close_osd()
1294 ceph_con_close(&osd->o_con); in close_osd()
1296 for (n = rb_first(&osd->o_requests); n; ) { in close_osd()
1303 unlink_request(osd, req); in close_osd()
1306 for (n = rb_first(&osd->o_linger_requests); n; ) { in close_osd()
1314 unlink_linger(osd, lreq); in close_osd()
1317 clear_backoffs(osd); in close_osd()
1319 __remove_osd_from_lru(osd); in close_osd()
1320 erase_osd(&osdc->osds, osd); in close_osd()
1321 put_osd(osd); in close_osd()
1327 static int reopen_osd(struct ceph_osd *osd) in reopen_osd() argument
1331 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); in reopen_osd()
1333 if (RB_EMPTY_ROOT(&osd->o_requests) && in reopen_osd()
1334 RB_EMPTY_ROOT(&osd->o_linger_requests)) { in reopen_osd()
1335 close_osd(osd); in reopen_osd()
1339 peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd]; in reopen_osd()
1340 if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) && in reopen_osd()
1341 !ceph_con_opened(&osd->o_con)) { in reopen_osd()
1347 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) { in reopen_osd()
1356 ceph_con_close(&osd->o_con); in reopen_osd()
1357 ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr); in reopen_osd()
1358 osd->o_incarnation++; in reopen_osd()
1366 struct ceph_osd *osd; in lookup_create_osd() local
1374 osd = lookup_osd(&osdc->osds, o); in lookup_create_osd()
1376 osd = &osdc->homeless_osd; in lookup_create_osd()
1377 if (!osd) { in lookup_create_osd()
1381 osd = create_osd(osdc, o); in lookup_create_osd()
1382 insert_osd(&osdc->osds, osd); in lookup_create_osd()
1383 ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, in lookup_create_osd()
1384 &osdc->osdmap->osd_addr[osd->o_osd]); in lookup_create_osd()
1387 dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd); in lookup_create_osd()
1388 return osd; in lookup_create_osd()
1396 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req) in link_request() argument
1398 verify_osd_locked(osd); in link_request()
1400 dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd, in link_request()
1403 if (!osd_homeless(osd)) in link_request()
1404 __remove_osd_from_lru(osd); in link_request()
1406 atomic_inc(&osd->o_osdc->num_homeless); in link_request()
1408 get_osd(osd); in link_request()
1409 insert_request(&osd->o_requests, req); in link_request()
1410 req->r_osd = osd; in link_request()
1413 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req) in unlink_request() argument
1415 verify_osd_locked(osd); in unlink_request()
1416 WARN_ON(req->r_osd != osd); in unlink_request()
1417 dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd, in unlink_request()
1421 erase_request(&osd->o_requests, req); in unlink_request()
1422 put_osd(osd); in unlink_request()
1424 if (!osd_homeless(osd)) in unlink_request()
1425 maybe_move_osd_to_lru(osd); in unlink_request()
1427 atomic_dec(&osd->o_osdc->num_homeless); in unlink_request()
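
The matching above hides the else branches of link_request()/unlink_request(), which is where the num_homeless accounting happens. Reconstructed from the lines shown, link_request() reads roughly as follows; this is a sketch and the exact WARN_ONs and dout text may vary by kernel version.

    static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
    {
            verify_osd_locked(osd);
            WARN_ON(!req->r_tid || req->r_osd);
            dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
                 req, req->r_tid);

            if (!osd_homeless(osd))
                    __remove_osd_from_lru(osd);     /* session is busy again */
            else
                    atomic_inc(&osd->o_osdc->num_homeless);

            get_osd(osd);                           /* request holds a session ref */
            insert_request(&osd->o_requests, req);
            req->r_osd = osd;
    }

unlink_request() is its mirror: it drops the session reference and either moves a now-idle session back onto the LRU or decrements num_homeless.
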
1546 t->osd = CEPH_HOMELESS_OSD; in calc_target()
1571 t->osd = CEPH_HOMELESS_OSD; in calc_target()
1634 t->osd = acting.osds[pos]; in calc_target()
1637 t->osd = acting.primary; in calc_target()
1649 legacy_change, force_resend, split, ct_res, t->osd); in calc_target()
1908 static void clear_backoffs(struct ceph_osd *osd) in clear_backoffs()
1910 while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) { in clear_backoffs()
1912 rb_entry(rb_first(&osd->o_backoff_mappings), in clear_backoffs()
1921 erase_backoff_by_id(&osd->o_backoffs_by_id, backoff); in clear_backoffs()
1924 erase_spg_mapping(&osd->o_backoff_mappings, spg); in clear_backoffs()
1955 struct ceph_osd *osd = req->r_osd; in should_plug_request() local
1960 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid); in should_plug_request()
1970 __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool, in should_plug_request()
2254 struct ceph_osd *osd = req->r_osd; in send_request() local
2256 verify_osd_locked(osd); in send_request()
2257 WARN_ON(osd->o_osd != req->r_t.osd); in send_request()
2281 req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags, in send_request()
2288 req->r_sent = osd->o_incarnation; in send_request()
2290 ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request)); in send_request()
2320 struct ceph_osd *osd; in __submit_request() local
2334 osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked); in __submit_request()
2335 if (IS_ERR(osd)) { in __submit_request()
2336 WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked); in __submit_request()
2375 } else if (!osd_homeless(osd)) { in __submit_request()
2381 mutex_lock(&osd->lock); in __submit_request()
2388 link_request(osd, req); in __submit_request()
2393 mutex_unlock(&osd->lock); in __submit_request()
2711 WARN_ON(lreq->osd); in linger_release()
2772 static void link_linger(struct ceph_osd *osd, in link_linger()
2775 verify_osd_locked(osd); in link_linger()
2776 WARN_ON(!lreq->linger_id || lreq->osd); in link_linger()
2777 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd, in link_linger()
2778 osd->o_osd, lreq, lreq->linger_id); in link_linger()
2780 if (!osd_homeless(osd)) in link_linger()
2781 __remove_osd_from_lru(osd); in link_linger()
2783 atomic_inc(&osd->o_osdc->num_homeless); in link_linger()
2785 get_osd(osd); in link_linger()
2786 insert_linger(&osd->o_linger_requests, lreq); in link_linger()
2787 lreq->osd = osd; in link_linger()
2790 static void unlink_linger(struct ceph_osd *osd, in unlink_linger() argument
2793 verify_osd_locked(osd); in unlink_linger()
2794 WARN_ON(lreq->osd != osd); in unlink_linger()
2795 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd, in unlink_linger()
2796 osd->o_osd, lreq, lreq->linger_id); in unlink_linger()
2798 lreq->osd = NULL; in unlink_linger()
2799 erase_linger(&osd->o_linger_requests, lreq); in unlink_linger()
2800 put_osd(osd); in unlink_linger()
2802 if (!osd_homeless(osd)) in unlink_linger()
2803 maybe_move_osd_to_lru(osd); in unlink_linger()
2805 atomic_dec(&osd->o_osdc->num_homeless); in unlink_linger()
3188 link_request(lreq->osd, req); in send_linger_ping()
3195 struct ceph_osd *osd; in linger_submit() local
3201 osd = lookup_create_osd(osdc, lreq->t.osd, true); in linger_submit()
3202 link_linger(osd, lreq); in linger_submit()
3235 unlink_linger(lreq->osd, lreq); in __linger_cancel()
3375 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); in handle_timeout() local
3378 for (p = rb_first(&osd->o_requests); p; ) { in handle_timeout()
3386 req, req->r_tid, osd->o_osd); in handle_timeout()
3392 req->r_tid, osd->o_osd); in handle_timeout()
3396 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) { in handle_timeout()
3401 lreq, lreq->linger_id, osd->o_osd); in handle_timeout()
3411 list_move_tail(&osd->o_keepalive_item, &slow_osds); in handle_timeout()
3433 struct ceph_osd *osd = list_first_entry(&slow_osds, in handle_timeout() local
3436 list_del_init(&osd->o_keepalive_item); in handle_timeout()
3437 ceph_con_keepalive(&osd->o_con); in handle_timeout()
3451 struct ceph_osd *osd, *nosd; in handle_osds_timeout() local
3455 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { in handle_osds_timeout()
3456 if (time_before(jiffies, osd->lru_ttl)) in handle_osds_timeout()
3459 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests)); in handle_osds_timeout()
3460 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests)); in handle_osds_timeout()
3461 close_osd(osd); in handle_osds_timeout()
3676 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg) in handle_reply() argument
3678 struct ceph_osd_client *osdc = osd->o_osdc; in handle_reply()
3689 if (!osd_registered(osd)) { in handle_reply()
3690 dout("%s osd%d unknown\n", __func__, osd->o_osd); in handle_reply()
3693 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num)); in handle_reply()
3695 mutex_lock(&osd->lock); in handle_reply()
3696 req = lookup_request(&osd->o_requests, tid); in handle_reply()
3698 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid); in handle_reply()
3730 unlink_request(osd, req); in handle_reply()
3731 mutex_unlock(&osd->lock); in handle_reply()
3748 unlink_request(osd, req); in handle_reply()
3749 mutex_unlock(&osd->lock); in handle_reply()
3793 mutex_unlock(&osd->lock); in handle_reply()
3802 mutex_unlock(&osd->lock); in handle_reply()
3838 struct ceph_osd *osd; in recalc_linger_target() local
3840 osd = lookup_create_osd(osdc, lreq->t.osd, true); in recalc_linger_target()
3841 if (osd != lreq->osd) { in recalc_linger_target()
3842 unlink_linger(lreq->osd, lreq); in recalc_linger_target()
3843 link_linger(osd, lreq); in recalc_linger_target()
3853 static void scan_requests(struct ceph_osd *osd, in scan_requests() argument
3860 struct ceph_osd_client *osdc = osd->o_osdc; in scan_requests()
3864 for (n = rb_first(&osd->o_linger_requests); n; ) { in scan_requests()
3900 for (n = rb_first(&osd->o_requests); n; ) { in scan_requests()
3922 unlink_request(osd, req); in scan_requests()
3987 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); in handle_one_map() local
3991 scan_requests(osd, skipped_map, was_full, true, need_resend, in handle_one_map()
3993 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) || in handle_one_map()
3994 memcmp(&osd->o_con.peer_addr, in handle_one_map()
3995 ceph_osd_addr(osdc->osdmap, osd->o_osd), in handle_one_map()
3997 close_osd(osd); in handle_one_map()
4030 struct ceph_osd *osd; in kick_requests() local
4035 osd = lookup_create_osd(osdc, req->r_t.osd, true); in kick_requests()
4036 link_request(osd, req); in kick_requests()
4038 if (!osd_homeless(osd) && !req->r_t.paused) in kick_requests()
4046 if (!osd_homeless(lreq->osd)) in kick_requests()
4173 static void kick_osd_requests(struct ceph_osd *osd) in kick_osd_requests() argument
4177 clear_backoffs(osd); in kick_osd_requests()
4179 for (n = rb_first(&osd->o_requests); n; ) { in kick_osd_requests()
4192 for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) { in kick_osd_requests()
4205 struct ceph_osd *osd = con->private; in osd_fault() local
4206 struct ceph_osd_client *osdc = osd->o_osdc; in osd_fault()
4208 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd); in osd_fault()
4211 if (!osd_registered(osd)) { in osd_fault()
4212 dout("%s osd%d unknown\n", __func__, osd->o_osd); in osd_fault()
4216 if (!reopen_osd(osd)) in osd_fault()
4217 kick_osd_requests(osd); in osd_fault()
4321 static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m) in handle_backoff_block() argument
4327 dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd, in handle_backoff_block()
4330 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid); in handle_backoff_block()
4338 insert_spg_mapping(&osd->o_backoff_mappings, spg); in handle_backoff_block()
4354 insert_backoff_by_id(&osd->o_backoffs_by_id, backoff); in handle_backoff_block()
4365 ceph_con_send(&osd->o_con, msg); in handle_backoff_block()
4380 static void handle_backoff_unblock(struct ceph_osd *osd, in handle_backoff_unblock() argument
4387 dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd, in handle_backoff_unblock()
4390 backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id); in handle_backoff_unblock()
4393 __func__, osd->o_osd, m->spgid.pgid.pool, in handle_backoff_unblock()
4401 __func__, osd->o_osd, m->spgid.pgid.pool, in handle_backoff_unblock()
4406 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid); in handle_backoff_unblock()
4410 erase_backoff_by_id(&osd->o_backoffs_by_id, backoff); in handle_backoff_unblock()
4414 erase_spg_mapping(&osd->o_backoff_mappings, spg); in handle_backoff_unblock()
4418 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) { in handle_backoff_unblock()
4438 static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg) in handle_backoff() argument
4440 struct ceph_osd_client *osdc = osd->o_osdc; in handle_backoff()
4445 if (!osd_registered(osd)) { in handle_backoff()
4446 dout("%s osd%d unknown\n", __func__, osd->o_osd); in handle_backoff()
4450 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num)); in handle_backoff()
4452 mutex_lock(&osd->lock); in handle_backoff()
4462 handle_backoff_block(osd, &m); in handle_backoff()
4465 handle_backoff_unblock(osd, &m); in handle_backoff()
4468 pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op); in handle_backoff()
4475 mutex_unlock(&osd->lock); in handle_backoff()
4652 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); in ceph_osdc_sync() local
4654 mutex_lock(&osd->lock); in ceph_osdc_sync()
4655 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) { in ceph_osdc_sync()
4666 mutex_unlock(&osd->lock); in ceph_osdc_sync()
4675 mutex_unlock(&osd->lock); in ceph_osdc_sync()
5143 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); in ceph_osdc_reopen_osds() local
5146 if (!reopen_osd(osd)) in ceph_osdc_reopen_osds()
5147 kick_osd_requests(osd); in ceph_osdc_reopen_osds()
5234 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds), in ceph_osdc_stop() local
5236 close_osd(osd); in ceph_osdc_stop()
5317 struct ceph_osd *osd = con->private; in osd_dispatch() local
5318 struct ceph_osd_client *osdc = osd->o_osdc; in osd_dispatch()
5326 handle_reply(osd, msg); in osd_dispatch()
5329 handle_backoff(osd, msg); in osd_dispatch()
5352 struct ceph_osd *osd = con->private; in get_reply() local
5353 struct ceph_osd_client *osdc = osd->o_osdc; in get_reply()
5361 if (!osd_registered(osd)) { in get_reply()
5362 dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd); in get_reply()
5366 WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num)); in get_reply()
5368 mutex_lock(&osd->lock); in get_reply()
5369 req = lookup_request(&osd->o_requests, tid); in get_reply()
5372 osd->o_osd, tid); in get_reply()
5381 __func__, osd->o_osd, req->r_tid, front_len, in get_reply()
5393 __func__, osd->o_osd, req->r_tid, data_len, in get_reply()
5404 mutex_unlock(&osd->lock); in get_reply()
5441 struct ceph_osd *osd = con->private; in osd_alloc_msg() local
5454 osd->o_osd, type); in osd_alloc_msg()
5465 struct ceph_osd *osd = con->private; in osd_get_con() local
5466 if (get_osd(osd)) in osd_get_con()
5473 struct ceph_osd *osd = con->private; in osd_put_con() local
5474 put_osd(osd); in osd_put_con()