1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
5 */
6
7 #include "core.h"
8 #include "peer.h"
9 #include "debug.h"
10
ath11k_peer_find_list_by_id(struct ath11k_base * ab,int peer_id)11 static struct ath11k_peer *ath11k_peer_find_list_by_id(struct ath11k_base *ab,
12 int peer_id)
13 {
14 struct ath11k_peer *peer;
15
16 lockdep_assert_held(&ab->base_lock);
17
18 list_for_each_entry(peer, &ab->peers, list) {
19 if (peer->peer_id != peer_id)
20 continue;
21
22 return peer;
23 }
24
25 return NULL;
26 }
27
ath11k_peer_find(struct ath11k_base * ab,int vdev_id,const u8 * addr)28 struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
29 const u8 *addr)
30 {
31 struct ath11k_peer *peer;
32
33 lockdep_assert_held(&ab->base_lock);
34
35 list_for_each_entry(peer, &ab->peers, list) {
36 if (peer->vdev_id != vdev_id)
37 continue;
38 if (!ether_addr_equal(peer->addr, addr))
39 continue;
40
41 return peer;
42 }
43
44 return NULL;
45 }
46
ath11k_peer_find_by_addr(struct ath11k_base * ab,const u8 * addr)47 struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
48 const u8 *addr)
49 {
50 struct ath11k_peer *peer;
51
52 lockdep_assert_held(&ab->base_lock);
53
54 if (!ab->rhead_peer_addr)
55 return NULL;
56
57 peer = rhashtable_lookup_fast(ab->rhead_peer_addr, addr,
58 ab->rhash_peer_addr_param);
59
60 return peer;
61 }
62
ath11k_peer_find_by_id(struct ath11k_base * ab,int peer_id)63 struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
64 int peer_id)
65 {
66 struct ath11k_peer *peer;
67
68 lockdep_assert_held(&ab->base_lock);
69
70 if (!ab->rhead_peer_id)
71 return NULL;
72
73 peer = rhashtable_lookup_fast(ab->rhead_peer_id, &peer_id,
74 ab->rhash_peer_id_param);
75
76 return peer;
77 }
78
ath11k_peer_find_by_vdev_id(struct ath11k_base * ab,int vdev_id)79 struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
80 int vdev_id)
81 {
82 struct ath11k_peer *peer;
83
84 spin_lock_bh(&ab->base_lock);
85
86 list_for_each_entry(peer, &ab->peers, list) {
87 if (vdev_id == peer->vdev_id) {
88 spin_unlock_bh(&ab->base_lock);
89 return peer;
90 }
91 }
92 spin_unlock_bh(&ab->base_lock);
93 return NULL;
94 }
95
/* Handle an HTT peer-unmap event: firmware has released the given peer
 * id, so remove and free the matching entry from the base peer list.
 */
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
{
	struct ath11k_peer *peer;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find_list_by_id(ab, peer_id);
	if (!peer) {
		ath11k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
			    peer_id);
		goto exit;
	}

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
		   peer->vdev_id, peer->addr, peer_id);

	list_del(&peer->list);
	kfree(peer);
	/* Wake ath11k_wait_for_peer_common() so the "peer unmapped"
	 * condition is re-evaluated.
	 */
	wake_up(&ab->peer_mapping_wq);

exit:
	spin_unlock_bh(&ab->base_lock);
}
119
/* Handle an HTT peer-map event: firmware has associated @mac_addr on
 * @vdev_id with @peer_id. Allocate and enlist a new peer entry if one
 * does not already exist for this (vdev_id, addr) pair.
 */
void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
			   u8 *mac_addr, u16 ast_hash, u16 hw_peer_id)
{
	struct ath11k_peer *peer;

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find(ab, vdev_id, mac_addr);
	if (!peer) {
		/* GFP_ATOMIC: allocation happens under a BH spinlock */
		peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
		if (!peer)
			goto exit;

		peer->vdev_id = vdev_id;
		peer->peer_id = peer_id;
		peer->ast_hash = ast_hash;
		peer->hw_peer_id = hw_peer_id;
		ether_addr_copy(peer->addr, mac_addr);
		list_add(&peer->list, &ab->peers);
		/* Wake ath11k_wait_for_peer_common() waiting for creation */
		wake_up(&ab->peer_mapping_wq);
	}

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt peer map vdev %d peer %pM id %d\n",
		   vdev_id, mac_addr, peer_id);

exit:
	spin_unlock_bh(&ab->base_lock);
}
147
/* Block (up to 3 s) until the peer identified by (vdev_id, addr) reaches
 * the expected mapping state: mapped when @expect_mapped is true,
 * unmapped when false. A firmware crash flush also terminates the wait.
 *
 * Returns 0 on success, -ETIMEDOUT if the condition was not met in time.
 */
static int ath11k_wait_for_peer_common(struct ath11k_base *ab, int vdev_id,
				       const u8 *addr, bool expect_mapped)
{
	int ret;

	/* The condition is a GCC statement expression; it is re-evaluated
	 * each time peer_mapping_wq is woken (peer map/unmap events).
	 */
	ret = wait_event_timeout(ab->peer_mapping_wq, ({
				bool mapped;

				spin_lock_bh(&ab->base_lock);
				mapped = !!ath11k_peer_find(ab, vdev_id, addr);
				spin_unlock_bh(&ab->base_lock);

				(mapped == expect_mapped ||
				 test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags));
				}), 3 * HZ);

	/* wait_event_timeout() returns 0 on timeout, >0 otherwise */
	if (ret <= 0)
		return -ETIMEDOUT;

	return 0;
}
169
ath11k_peer_rhash_insert(struct ath11k_base * ab,struct rhashtable * rtbl,struct rhash_head * rhead,struct rhashtable_params * params,void * key)170 static inline int ath11k_peer_rhash_insert(struct ath11k_base *ab,
171 struct rhashtable *rtbl,
172 struct rhash_head *rhead,
173 struct rhashtable_params *params,
174 void *key)
175 {
176 struct ath11k_peer *tmp;
177
178 lockdep_assert_held(&ab->tbl_mtx_lock);
179
180 tmp = rhashtable_lookup_get_insert_fast(rtbl, rhead, *params);
181
182 if (!tmp)
183 return 0;
184 else if (IS_ERR(tmp))
185 return PTR_ERR(tmp);
186 else
187 return -EEXIST;
188 }
189
ath11k_peer_rhash_remove(struct ath11k_base * ab,struct rhashtable * rtbl,struct rhash_head * rhead,struct rhashtable_params * params)190 static inline int ath11k_peer_rhash_remove(struct ath11k_base *ab,
191 struct rhashtable *rtbl,
192 struct rhash_head *rhead,
193 struct rhashtable_params *params)
194 {
195 int ret;
196
197 lockdep_assert_held(&ab->tbl_mtx_lock);
198
199 ret = rhashtable_remove_fast(rtbl, rhead, *params);
200 if (ret && ret != -ENOENT)
201 return ret;
202
203 return 0;
204 }
205
ath11k_peer_rhash_add(struct ath11k_base * ab,struct ath11k_peer * peer)206 static int ath11k_peer_rhash_add(struct ath11k_base *ab, struct ath11k_peer *peer)
207 {
208 int ret;
209
210 lockdep_assert_held(&ab->base_lock);
211 lockdep_assert_held(&ab->tbl_mtx_lock);
212
213 if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
214 return -EPERM;
215
216 ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_id, &peer->rhash_id,
217 &ab->rhash_peer_id_param, &peer->peer_id);
218 if (ret) {
219 ath11k_warn(ab, "failed to add peer %pM with id %d in rhash_id ret %d\n",
220 peer->addr, peer->peer_id, ret);
221 return ret;
222 }
223
224 ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_addr, &peer->rhash_addr,
225 &ab->rhash_peer_addr_param, &peer->addr);
226 if (ret) {
227 ath11k_warn(ab, "failed to add peer %pM with id %d in rhash_addr ret %d\n",
228 peer->addr, peer->peer_id, ret);
229 goto err_clean;
230 }
231
232 return 0;
233
234 err_clean:
235 ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
236 &ab->rhash_peer_id_param);
237 return ret;
238 }
239
/* Remove every stale peer still attached to @vdev_id: drop it from the
 * rhash tables and the base peer list and free it. Called with
 * ar->conf_mutex held; takes tbl_mtx_lock before base_lock, matching
 * the lock order used elsewhere in this file.
 */
void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
{
	struct ath11k_peer *peer, *tmp;
	struct ath11k_base *ab = ar->ab;

	lockdep_assert_held(&ar->conf_mutex);

	mutex_lock(&ab->tbl_mtx_lock);
	spin_lock_bh(&ab->base_lock);
	/* _safe variant: entries are deleted while iterating */
	list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
		if (peer->vdev_id != vdev_id)
			continue;

		ath11k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
			    peer->addr, vdev_id);

		ath11k_peer_rhash_delete(ab, peer);
		list_del(&peer->list);
		kfree(peer);
		ar->num_peers--;
	}

	spin_unlock_bh(&ab->base_lock);
	mutex_unlock(&ab->tbl_mtx_lock);
}
265
/* Wait until the (vdev_id, addr) peer disappears from the peer list */
static int ath11k_wait_for_peer_deleted(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, false);
}
270
/* Wait for a peer deletion to fully complete: first for the peer to
 * vanish from the host peer list, then for the firmware's delete
 * response (peer_delete_done completion). Returns 0 on success or a
 * negative error (-ETIMEDOUT if the firmware response never arrives).
 */
int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
				     const u8 *addr)
{
	unsigned long time_left;
	int ret;

	ret = ath11k_wait_for_peer_deleted(ar, vdev_id, addr);
	if (ret) {
		ath11k_warn(ar->ab, "failed wait for peer deleted");
		return ret;
	}

	time_left = wait_for_completion_timeout(&ar->peer_delete_done, 3 * HZ);
	if (!time_left) {
		ath11k_warn(ar->ab, "Timeout in receiving peer delete response\n");
		return -ETIMEDOUT;
	}

	return 0;
}
292
/* Core peer deletion: unhook the peer from the rhash tables, ask the
 * firmware to delete it, and wait for completion. Called with
 * ar->conf_mutex held. Returns 0 on success or a negative error.
 */
static int __ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, const u8 *addr)
{
	int ret;
	struct ath11k_peer *peer;
	struct ath11k_base *ab = ar->ab;

	lockdep_assert_held(&ar->conf_mutex);

	mutex_lock(&ab->tbl_mtx_lock);
	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find_by_addr(ab, addr);
	/* Check if the found peer is what we want to remove.
	 * While the sta is transitioning to another band we may
	 * have 2 peer with the same addr assigned to different
	 * vdev_id. Make sure we are deleting the correct peer.
	 */
	if (peer && peer->vdev_id == vdev_id)
		ath11k_peer_rhash_delete(ab, peer);

	/* Fallback to peer list search if the correct peer can't be found.
	 * Skip the deletion of the peer from the rhash since it has already
	 * been deleted in peer add.
	 */
	if (!peer)
		peer = ath11k_peer_find(ab, vdev_id, addr);

	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		mutex_unlock(&ab->tbl_mtx_lock);

		ath11k_warn(ab,
			    "failed to find peer vdev_id %d addr %pM in delete\n",
			    vdev_id, addr);
		return -EINVAL;
	}

	spin_unlock_bh(&ab->base_lock);
	mutex_unlock(&ab->tbl_mtx_lock);

	/* Re-arm before issuing the WMI command so the firmware's delete
	 * response cannot be missed.
	 */
	reinit_completion(&ar->peer_delete_done);

	ret = ath11k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
	if (ret) {
		ath11k_warn(ab,
			    "failed to delete peer vdev_id %d addr %pM ret %d\n",
			    vdev_id, addr, ret);
		return ret;
	}

	ret = ath11k_wait_for_peer_delete_done(ar, vdev_id, addr);
	if (ret)
		return ret;

	return 0;
}
349
/* Delete a peer and, on success, account for it in ar->num_peers.
 * Caller must hold ar->conf_mutex.
 */
int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	ret = __ath11k_peer_delete(ar, vdev_id, addr);
	if (!ret)
		ar->num_peers--;

	return ret;
}
364
/* Wait until the (vdev_id, addr) peer appears in the peer list */
static int ath11k_wait_for_peer_created(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
}
369
/* Create a firmware peer for @sta (or a self/bss peer when @sta is NULL)
 * on the vdev described by @param, then register it in the host lookup
 * tables and initialize its datapath fields.
 *
 * Caller must hold ar->conf_mutex. Returns 0 on success; on a failure
 * after firmware creation, the peer is deleted again as a fallback.
 */
int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
		       struct ieee80211_sta *sta, struct peer_create_params *param)
{
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta;
	int ret, fbret;

	lockdep_assert_held(&ar->conf_mutex);

	/* Respect the firmware's peer-entry budget */
	if (ar->num_peers > (ar->max_num_peers - 1)) {
		ath11k_warn(ar->ab,
			    "failed to create peer due to insufficient peer entry resource in firmware\n");
		return -ENOBUFS;
	}

	spin_lock_bh(&ar->ab->base_lock);
	peer = ath11k_peer_find_by_addr(ar->ab, param->peer_addr);
	if (peer) {
		/* Same address on the same vdev: duplicate create */
		if (peer->vdev_id == param->vdev_id) {
			spin_unlock_bh(&ar->ab->base_lock);
			return -EINVAL;
		}

		/* Assume sta is transitioning to another band.
		 * Remove here the peer from rhash.
		 */
		mutex_lock(&ar->ab->tbl_mtx_lock);
		ath11k_peer_rhash_delete(ar->ab, peer);
		mutex_unlock(&ar->ab->tbl_mtx_lock);
	}
	spin_unlock_bh(&ar->ab->base_lock);

	ret = ath11k_wmi_send_peer_create_cmd(ar, param);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send peer create vdev_id %d ret %d\n",
			    param->vdev_id, ret);
		return ret;
	}

	/* Wait for the HTT peer-map event to enlist the new peer */
	ret = ath11k_wait_for_peer_created(ar, param->vdev_id,
					   param->peer_addr);
	if (ret)
		return ret;

	mutex_lock(&ar->ab->tbl_mtx_lock);
	spin_lock_bh(&ar->ab->base_lock);

	peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr);
	if (!peer) {
		spin_unlock_bh(&ar->ab->base_lock);
		mutex_unlock(&ar->ab->tbl_mtx_lock);
		ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
			    param->peer_addr, param->vdev_id);

		ret = -ENOENT;
		goto cleanup;
	}

	ret = ath11k_peer_rhash_add(ar->ab, peer);
	if (ret) {
		spin_unlock_bh(&ar->ab->base_lock);
		mutex_unlock(&ar->ab->tbl_mtx_lock);
		goto cleanup;
	}

	peer->pdev_idx = ar->pdev_idx;
	peer->sta = sta;

	/* Cache AST details on the vif for station interfaces */
	if (arvif->vif->type == NL80211_IFTYPE_STATION) {
		arvif->ast_hash = peer->ast_hash;
		arvif->ast_idx = peer->hw_peer_id;
	}

	peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
	peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;

	if (sta) {
		arsta = (struct ath11k_sta *)sta->drv_priv;
		arsta->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 0) |
				       FIELD_PREP(HTT_TCL_META_DATA_PEER_ID,
						  peer->peer_id);

		/* set HTT extension valid bit to 0 by default */
		arsta->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
	}

	ar->num_peers++;

	spin_unlock_bh(&ar->ab->base_lock);
	mutex_unlock(&ar->ab->tbl_mtx_lock);

	return 0;

cleanup:
	/* Firmware already created the peer; undo that to avoid leaking
	 * a firmware peer entry. Preserve the original error code.
	 */
	fbret = __ath11k_peer_delete(ar, param->vdev_id, param->peer_addr);
	if (fbret)
		ath11k_warn(ar->ab, "failed peer %pM delete vdev_id %d fallback ret %d\n",
			    param->peer_addr, param->vdev_id, fbret);

	return ret;
}
472
ath11k_peer_rhash_delete(struct ath11k_base * ab,struct ath11k_peer * peer)473 int ath11k_peer_rhash_delete(struct ath11k_base *ab, struct ath11k_peer *peer)
474 {
475 int ret;
476
477 lockdep_assert_held(&ab->base_lock);
478 lockdep_assert_held(&ab->tbl_mtx_lock);
479
480 if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
481 return -EPERM;
482
483 ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_addr, &peer->rhash_addr,
484 &ab->rhash_peer_addr_param);
485 if (ret) {
486 ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_addr ret %d\n",
487 peer->addr, peer->peer_id, ret);
488 return ret;
489 }
490
491 ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
492 &ab->rhash_peer_id_param);
493 if (ret) {
494 ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_id ret %d\n",
495 peer->addr, peer->peer_id, ret);
496 return ret;
497 }
498
499 return 0;
500 }
501
ath11k_peer_rhash_id_tbl_init(struct ath11k_base * ab)502 static int ath11k_peer_rhash_id_tbl_init(struct ath11k_base *ab)
503 {
504 struct rhashtable_params *param;
505 struct rhashtable *rhash_id_tbl;
506 int ret;
507 size_t size;
508
509 lockdep_assert_held(&ab->tbl_mtx_lock);
510
511 if (ab->rhead_peer_id)
512 return 0;
513
514 size = sizeof(*ab->rhead_peer_id);
515 rhash_id_tbl = kzalloc(size, GFP_KERNEL);
516 if (!rhash_id_tbl) {
517 ath11k_warn(ab, "failed to init rhash id table due to no mem (size %zu)\n",
518 size);
519 return -ENOMEM;
520 }
521
522 param = &ab->rhash_peer_id_param;
523
524 param->key_offset = offsetof(struct ath11k_peer, peer_id);
525 param->head_offset = offsetof(struct ath11k_peer, rhash_id);
526 param->key_len = sizeof_field(struct ath11k_peer, peer_id);
527 param->automatic_shrinking = true;
528 param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab);
529
530 ret = rhashtable_init(rhash_id_tbl, param);
531 if (ret) {
532 ath11k_warn(ab, "failed to init peer id rhash table %d\n", ret);
533 goto err_free;
534 }
535
536 spin_lock_bh(&ab->base_lock);
537
538 if (!ab->rhead_peer_id) {
539 ab->rhead_peer_id = rhash_id_tbl;
540 } else {
541 spin_unlock_bh(&ab->base_lock);
542 goto cleanup_tbl;
543 }
544
545 spin_unlock_bh(&ab->base_lock);
546
547 return 0;
548
549 cleanup_tbl:
550 rhashtable_destroy(rhash_id_tbl);
551 err_free:
552 kfree(rhash_id_tbl);
553
554 return ret;
555 }
556
ath11k_peer_rhash_addr_tbl_init(struct ath11k_base * ab)557 static int ath11k_peer_rhash_addr_tbl_init(struct ath11k_base *ab)
558 {
559 struct rhashtable_params *param;
560 struct rhashtable *rhash_addr_tbl;
561 int ret;
562 size_t size;
563
564 lockdep_assert_held(&ab->tbl_mtx_lock);
565
566 if (ab->rhead_peer_addr)
567 return 0;
568
569 size = sizeof(*ab->rhead_peer_addr);
570 rhash_addr_tbl = kzalloc(size, GFP_KERNEL);
571 if (!rhash_addr_tbl) {
572 ath11k_warn(ab, "failed to init rhash addr table due to no mem (size %zu)\n",
573 size);
574 return -ENOMEM;
575 }
576
577 param = &ab->rhash_peer_addr_param;
578
579 param->key_offset = offsetof(struct ath11k_peer, addr);
580 param->head_offset = offsetof(struct ath11k_peer, rhash_addr);
581 param->key_len = sizeof_field(struct ath11k_peer, addr);
582 param->automatic_shrinking = true;
583 param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab);
584
585 ret = rhashtable_init(rhash_addr_tbl, param);
586 if (ret) {
587 ath11k_warn(ab, "failed to init peer addr rhash table %d\n", ret);
588 goto err_free;
589 }
590
591 spin_lock_bh(&ab->base_lock);
592
593 if (!ab->rhead_peer_addr) {
594 ab->rhead_peer_addr = rhash_addr_tbl;
595 } else {
596 spin_unlock_bh(&ab->base_lock);
597 goto cleanup_tbl;
598 }
599
600 spin_unlock_bh(&ab->base_lock);
601
602 return 0;
603
604 cleanup_tbl:
605 rhashtable_destroy(rhash_addr_tbl);
606 err_free:
607 kfree(rhash_addr_tbl);
608
609 return ret;
610 }
611
ath11k_peer_rhash_id_tbl_destroy(struct ath11k_base * ab)612 static inline void ath11k_peer_rhash_id_tbl_destroy(struct ath11k_base *ab)
613 {
614 lockdep_assert_held(&ab->tbl_mtx_lock);
615
616 if (!ab->rhead_peer_id)
617 return;
618
619 rhashtable_destroy(ab->rhead_peer_id);
620 kfree(ab->rhead_peer_id);
621 ab->rhead_peer_id = NULL;
622 }
623
ath11k_peer_rhash_addr_tbl_destroy(struct ath11k_base * ab)624 static inline void ath11k_peer_rhash_addr_tbl_destroy(struct ath11k_base *ab)
625 {
626 lockdep_assert_held(&ab->tbl_mtx_lock);
627
628 if (!ab->rhead_peer_addr)
629 return;
630
631 rhashtable_destroy(ab->rhead_peer_addr);
632 kfree(ab->rhead_peer_addr);
633 ab->rhead_peer_addr = NULL;
634 }
635
ath11k_peer_rhash_tbl_init(struct ath11k_base * ab)636 int ath11k_peer_rhash_tbl_init(struct ath11k_base *ab)
637 {
638 int ret;
639
640 mutex_lock(&ab->tbl_mtx_lock);
641
642 ret = ath11k_peer_rhash_id_tbl_init(ab);
643 if (ret)
644 goto out;
645
646 ret = ath11k_peer_rhash_addr_tbl_init(ab);
647 if (ret)
648 goto cleanup_tbl;
649
650 mutex_unlock(&ab->tbl_mtx_lock);
651
652 return 0;
653
654 cleanup_tbl:
655 ath11k_peer_rhash_id_tbl_destroy(ab);
656 out:
657 mutex_unlock(&ab->tbl_mtx_lock);
658 return ret;
659 }
660
ath11k_peer_rhash_tbl_destroy(struct ath11k_base * ab)661 void ath11k_peer_rhash_tbl_destroy(struct ath11k_base *ab)
662 {
663 mutex_lock(&ab->tbl_mtx_lock);
664
665 ath11k_peer_rhash_addr_tbl_destroy(ab);
666 ath11k_peer_rhash_id_tbl_destroy(ab);
667
668 mutex_unlock(&ab->tbl_mtx_lock);
669 }
670