// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "core.h"
#include "peer.h"
#include "debug.h"

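/* Linear search of the peer list by peer_id. Caller must hold ab->base_lock. */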
static struct ath11k_peer *ath11k_peer_find_list_by_id(struct ath11k_base *ab,
							int peer_id)
{
	struct ath11k_peer *peer;

	lockdep_assert_held(&ab->base_lock);

	list_for_each_entry(peer, &ab->peers, list) {
		if (peer->peer_id != peer_id)
			continue;

		return peer;
	}

	return NULL;
}

struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
				     const u8 *addr)
{
	struct ath11k_peer *peer;

	lockdep_assert_held(&ab->base_lock);

	list_for_each_entry(peer, &ab->peers, list) {
		if (peer->vdev_id != vdev_id)
			continue;
		if (!ether_addr_equal(peer->addr, addr))
			continue;

		return peer;
	}

	return NULL;
}

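/* Look up a peer by MAC address in the address-keyed rhashtable. Caller must
 * hold ab->base_lock; returns NULL if the table is not initialized or no
 * entry matches.
 */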
struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
					     const u8 *addr)
{
	struct ath11k_peer *peer;

	lockdep_assert_held(&ab->base_lock);

	if (!ab->rhead_peer_addr)
		return NULL;

	peer = rhashtable_lookup_fast(ab->rhead_peer_addr, addr,
				      ab->rhash_peer_addr_param);

	return peer;
}

struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
					   int peer_id)
{
	struct ath11k_peer *peer;

	lockdep_assert_held(&ab->base_lock);

	if (!ab->rhead_peer_id)
		return NULL;

	peer = rhashtable_lookup_fast(ab->rhead_peer_id, &peer_id,
				      ab->rhash_peer_id_param);

	return peer;
}

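/* Return the first peer found on the given vdev. base_lock is taken and
 * released internally, so the returned pointer is only safe to use while the
 * caller prevents concurrent peer deletion.
 */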
struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
						int vdev_id)
{
	struct ath11k_peer *peer;

	spin_lock_bh(&ab->base_lock);

	list_for_each_entry(peer, &ab->peers, list) {
		if (vdev_id == peer->vdev_id) {
			spin_unlock_bh(&ab->base_lock);
			return peer;
		}
	}
	spin_unlock_bh(&ab->base_lock);
	return NULL;
}

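/* HTT peer unmap event: drop the peer from the peer list and wake any
 * waiters blocked in ath11k_wait_for_peer_common().
 */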
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
{
	struct ath11k_peer *peer;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find_list_by_id(ab, peer_id);
	if (!peer) {
		ath11k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
			    peer_id);
		goto exit;
	}

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "peer unmap vdev %d peer %pM id %d\n",
		   peer->vdev_id, peer->addr, peer_id);

	list_del(&peer->list);
	kfree(peer);
	wake_up(&ab->peer_mapping_wq);

exit:
	spin_unlock_bh(&ab->base_lock);
}

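/* HTT peer map event: create the host peer entry if it does not exist yet.
 * Allocation uses GFP_ATOMIC because base_lock is held.
 */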
void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
			   u8 *mac_addr, u16 ast_hash, u16 hw_peer_id)
{
	struct ath11k_peer *peer;

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find(ab, vdev_id, mac_addr);
	if (!peer) {
		peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
		if (!peer)
			goto exit;

		peer->vdev_id = vdev_id;
		peer->peer_id = peer_id;
		peer->ast_hash = ast_hash;
		peer->hw_peer_id = hw_peer_id;
		ether_addr_copy(peer->addr, mac_addr);
		list_add(&peer->list, &ab->peers);
		wake_up(&ab->peer_mapping_wq);
	}

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "peer map vdev %d peer %pM id %d\n",
		   vdev_id, mac_addr, peer_id);

exit:
	spin_unlock_bh(&ab->base_lock);
}

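/* Wait up to 3 seconds for the peer to reach the expected map state, or bail
 * out early if the device is in crash flush (recovery).
 */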
static int ath11k_wait_for_peer_common(struct ath11k_base *ab, int vdev_id,
				       const u8 *addr, bool expect_mapped)
{
	int ret;

	ret = wait_event_timeout(ab->peer_mapping_wq, ({
				bool mapped;

				spin_lock_bh(&ab->base_lock);
				mapped = !!ath11k_peer_find(ab, vdev_id, addr);
				spin_unlock_bh(&ab->base_lock);

				(mapped == expect_mapped ||
				 test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags));
				}), 3 * HZ);

	if (ret <= 0)
		return -ETIMEDOUT;

	return 0;
}

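/* rhashtable insert helper: returns -EEXIST if an entry with the same key is
 * already present. Caller must hold ab->tbl_mtx_lock.
 */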
static inline int ath11k_peer_rhash_insert(struct ath11k_base *ab,
					   struct rhashtable *rtbl,
					   struct rhash_head *rhead,
					   struct rhashtable_params *params,
					   void *key)
{
	struct ath11k_peer *tmp;

	lockdep_assert_held(&ab->tbl_mtx_lock);

	tmp = rhashtable_lookup_get_insert_fast(rtbl, rhead, *params);

	if (!tmp)
		return 0;
	else if (IS_ERR(tmp))
		return PTR_ERR(tmp);
	else
		return -EEXIST;
}

static inline int ath11k_peer_rhash_remove(struct ath11k_base *ab,
					   struct rhashtable *rtbl,
					   struct rhash_head *rhead,
					   struct rhashtable_params *params)
{
	int ret;

	lockdep_assert_held(&ab->tbl_mtx_lock);

	ret = rhashtable_remove_fast(rtbl, rhead, *params);
	if (ret && ret != -ENOENT)
		return ret;

	return 0;
}

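/* Add a peer to both the id- and address-keyed rhashtables. If the address
 * insert fails, the id entry is removed again so the two tables stay
 * consistent.
 */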
static int ath11k_peer_rhash_add(struct ath11k_base *ab, struct ath11k_peer *peer)
{
	int ret;

	lockdep_assert_held(&ab->base_lock);
	lockdep_assert_held(&ab->tbl_mtx_lock);

	if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
		return -EPERM;

	ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_id, &peer->rhash_id,
				       &ab->rhash_peer_id_param, &peer->peer_id);
	if (ret) {
		ath11k_warn(ab, "failed to add peer %pM with id %d in rhash_id ret %d\n",
			    peer->addr, peer->peer_id, ret);
		return ret;
	}

	ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_addr, &peer->rhash_addr,
				       &ab->rhash_peer_addr_param, &peer->addr);
	if (ret) {
		ath11k_warn(ab, "failed to add peer %pM with id %d in rhash_addr ret %d\n",
			    peer->addr, peer->peer_id, ret);
		goto err_clean;
	}

	return 0;

err_clean:
	ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
				 &ab->rhash_peer_id_param);
	return ret;
}

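/* Drop any peers still associated with the given vdev, removing them from
 * the rhashtables and freeing their list entries.
 */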
void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
{
	struct ath11k_peer *peer, *tmp;
	struct ath11k_base *ab = ar->ab;

	lockdep_assert_held(&ar->conf_mutex);

	mutex_lock(&ab->tbl_mtx_lock);
	spin_lock_bh(&ab->base_lock);
	list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
		if (peer->vdev_id != vdev_id)
			continue;

		ath11k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
			    peer->addr, vdev_id);

		ath11k_peer_rhash_delete(ab, peer);
		list_del(&peer->list);
		kfree(peer);
		ar->num_peers--;
	}

	spin_unlock_bh(&ab->base_lock);
	mutex_unlock(&ab->tbl_mtx_lock);
}

static int ath11k_wait_for_peer_deleted(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, false);
}

int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
				     const u8 *addr)
{
	int ret;
	unsigned long time_left;

	ret = ath11k_wait_for_peer_deleted(ar, vdev_id, addr);
	if (ret) {
		ath11k_warn(ar->ab, "failed wait for peer deleted\n");
		return ret;
	}

	time_left = wait_for_completion_timeout(&ar->peer_delete_done,
						3 * HZ);
	if (time_left == 0) {
		ath11k_warn(ar->ab, "Timeout in receiving peer delete response\n");
		return -ETIMEDOUT;
	}

	return 0;
}

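/* Remove the peer from the rhashtables when it belongs to this vdev, then
 * send the WMI peer delete command and wait for the unmap event and the
 * firmware delete response.
 */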
static int __ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, const u8 *addr)
{
	int ret;
	struct ath11k_peer *peer;
	struct ath11k_base *ab = ar->ab;

	lockdep_assert_held(&ar->conf_mutex);

	mutex_lock(&ab->tbl_mtx_lock);
	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find_by_addr(ab, addr);
	/* Check if the found peer is the one we want to remove.
	 * While the sta is transitioning to another band we may
	 * have two peers with the same addr assigned to different
	 * vdev_ids. Make sure we are deleting the correct peer.
	 */
	if (peer && peer->vdev_id == vdev_id)
		ath11k_peer_rhash_delete(ab, peer);

	/* Fallback to peer list search if the correct peer can't be found.
	 * Skip the deletion of the peer from the rhash since it has already
	 * been deleted in peer add.
	 */
	if (!peer)
		peer = ath11k_peer_find(ab, vdev_id, addr);

	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		mutex_unlock(&ab->tbl_mtx_lock);

		ath11k_warn(ab,
			    "failed to find peer vdev_id %d addr %pM in delete\n",
			    vdev_id, addr);
		return -EINVAL;
	}

	spin_unlock_bh(&ab->base_lock);
	mutex_unlock(&ab->tbl_mtx_lock);

	reinit_completion(&ar->peer_delete_done);

	ret = ath11k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
	if (ret) {
		ath11k_warn(ab,
			    "failed to delete peer vdev_id %d addr %pM ret %d\n",
			    vdev_id, addr, ret);
		return ret;
	}

	ret = ath11k_wait_for_peer_delete_done(ar, vdev_id, addr);
	if (ret)
		return ret;

	return 0;
}

int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	ret = __ath11k_peer_delete(ar, vdev_id, addr);
	if (ret)
		return ret;

	ar->num_peers--;

	return 0;
}

static int ath11k_wait_for_peer_created(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
}

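/* Create a firmware peer for @sta on @arvif. A peer already present on
 * another vdev with the same address is treated as a band transition and is
 * only dropped from the rhashtables; if anything fails after the WMI create
 * command, the peer is deleted again as a fallback.
 */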
int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
		       struct ieee80211_sta *sta, struct peer_create_params *param)
{
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta;
	int ret, fbret;

	lockdep_assert_held(&ar->conf_mutex);

	if (ar->num_peers > (ar->max_num_peers - 1)) {
		ath11k_warn(ar->ab,
			    "failed to create peer due to insufficient peer entry resource in firmware\n");
		return -ENOBUFS;
	}

	mutex_lock(&ar->ab->tbl_mtx_lock);
	spin_lock_bh(&ar->ab->base_lock);
	peer = ath11k_peer_find_by_addr(ar->ab, param->peer_addr);
	if (peer) {
		if (peer->vdev_id == param->vdev_id) {
			spin_unlock_bh(&ar->ab->base_lock);
			mutex_unlock(&ar->ab->tbl_mtx_lock);
			return -EINVAL;
		}

		/* Assume the sta is transitioning to another band.
		 * Remove the peer from the rhash here.
		 */
		ath11k_peer_rhash_delete(ar->ab, peer);
	}
	spin_unlock_bh(&ar->ab->base_lock);
	mutex_unlock(&ar->ab->tbl_mtx_lock);

	ret = ath11k_wmi_send_peer_create_cmd(ar, param);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send peer create vdev_id %d ret %d\n",
			    param->vdev_id, ret);
		return ret;
	}

	ret = ath11k_wait_for_peer_created(ar, param->vdev_id,
					   param->peer_addr);
	if (ret)
		return ret;

	mutex_lock(&ar->ab->tbl_mtx_lock);
	spin_lock_bh(&ar->ab->base_lock);

	peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr);
	if (!peer) {
		spin_unlock_bh(&ar->ab->base_lock);
		mutex_unlock(&ar->ab->tbl_mtx_lock);
		ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
			    param->peer_addr, param->vdev_id);

		ret = -ENOENT;
		goto cleanup;
	}

	ret = ath11k_peer_rhash_add(ar->ab, peer);
	if (ret) {
		spin_unlock_bh(&ar->ab->base_lock);
		mutex_unlock(&ar->ab->tbl_mtx_lock);
		goto cleanup;
	}

	peer->pdev_idx = ar->pdev_idx;
	peer->sta = sta;

	if (arvif->vif->type == NL80211_IFTYPE_STATION) {
		arvif->ast_hash = peer->ast_hash;
		arvif->ast_idx = peer->hw_peer_id;
	}

	peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
	peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;

	if (sta) {
		arsta = ath11k_sta_to_arsta(sta);
		arsta->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 0) |
				       FIELD_PREP(HTT_TCL_META_DATA_PEER_ID,
						  peer->peer_id);

		/* set HTT extension valid bit to 0 by default */
		arsta->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
	}

	ar->num_peers++;

	spin_unlock_bh(&ar->ab->base_lock);
	mutex_unlock(&ar->ab->tbl_mtx_lock);

	return 0;

cleanup:
	fbret = __ath11k_peer_delete(ar, param->vdev_id, param->peer_addr);
	if (fbret)
		ath11k_warn(ar->ab, "failed peer %pM delete vdev_id %d fallback ret %d\n",
			    param->peer_addr, param->vdev_id, fbret);

	return ret;
}

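/* Remove a peer from both rhashtables. Caller must hold base_lock and
 * tbl_mtx_lock; returns -EPERM if the tables are not initialized.
 */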
int ath11k_peer_rhash_delete(struct ath11k_base *ab, struct ath11k_peer *peer)
{
	int ret;

	lockdep_assert_held(&ab->base_lock);
	lockdep_assert_held(&ab->tbl_mtx_lock);

	if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
		return -EPERM;

	ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_addr, &peer->rhash_addr,
				       &ab->rhash_peer_addr_param);
	if (ret) {
		ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_addr ret %d\n",
			    peer->addr, peer->peer_id, ret);
		return ret;
	}

	ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
				       &ab->rhash_peer_id_param);
	if (ret) {
		ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_id ret %d\n",
			    peer->addr, peer->peer_id, ret);
		return ret;
	}

	return 0;
}

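/* Lazily allocate and initialize the peer-id keyed rhashtable. The new table
 * is only published under base_lock if no table has been installed yet;
 * otherwise it is destroyed again.
 */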
static int ath11k_peer_rhash_id_tbl_init(struct ath11k_base *ab)
{
	struct rhashtable_params *param;
	struct rhashtable *rhash_id_tbl;
	int ret;
	size_t size;

	lockdep_assert_held(&ab->tbl_mtx_lock);

	if (ab->rhead_peer_id)
		return 0;

	size = sizeof(*ab->rhead_peer_id);
	rhash_id_tbl = kzalloc(size, GFP_KERNEL);
	if (!rhash_id_tbl) {
		ath11k_warn(ab, "failed to init rhash id table due to no mem (size %zu)\n",
			    size);
		return -ENOMEM;
	}

	param = &ab->rhash_peer_id_param;

	param->key_offset = offsetof(struct ath11k_peer, peer_id);
	param->head_offset = offsetof(struct ath11k_peer, rhash_id);
	param->key_len = sizeof_field(struct ath11k_peer, peer_id);
	param->automatic_shrinking = true;
	param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab);

	ret = rhashtable_init(rhash_id_tbl, param);
	if (ret) {
		ath11k_warn(ab, "failed to init peer id rhash table %d\n", ret);
		goto err_free;
	}

	spin_lock_bh(&ab->base_lock);

	if (!ab->rhead_peer_id) {
		ab->rhead_peer_id = rhash_id_tbl;
	} else {
		spin_unlock_bh(&ab->base_lock);
		goto cleanup_tbl;
	}

	spin_unlock_bh(&ab->base_lock);

	return 0;

cleanup_tbl:
	rhashtable_destroy(rhash_id_tbl);
err_free:
	kfree(rhash_id_tbl);

	return ret;
}

static int ath11k_peer_rhash_addr_tbl_init(struct ath11k_base *ab)
{
	struct rhashtable_params *param;
	struct rhashtable *rhash_addr_tbl;
	int ret;
	size_t size;

	lockdep_assert_held(&ab->tbl_mtx_lock);

	if (ab->rhead_peer_addr)
		return 0;

	size = sizeof(*ab->rhead_peer_addr);
	rhash_addr_tbl = kzalloc(size, GFP_KERNEL);
	if (!rhash_addr_tbl) {
		ath11k_warn(ab, "failed to init rhash addr table due to no mem (size %zu)\n",
			    size);
		return -ENOMEM;
	}

	param = &ab->rhash_peer_addr_param;

	param->key_offset = offsetof(struct ath11k_peer, addr);
	param->head_offset = offsetof(struct ath11k_peer, rhash_addr);
	param->key_len = sizeof_field(struct ath11k_peer, addr);
	param->automatic_shrinking = true;
	param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab);

	ret = rhashtable_init(rhash_addr_tbl, param);
	if (ret) {
		ath11k_warn(ab, "failed to init peer addr rhash table %d\n", ret);
		goto err_free;
	}

	spin_lock_bh(&ab->base_lock);

	if (!ab->rhead_peer_addr) {
		ab->rhead_peer_addr = rhash_addr_tbl;
	} else {
		spin_unlock_bh(&ab->base_lock);
		goto cleanup_tbl;
	}

	spin_unlock_bh(&ab->base_lock);

	return 0;

cleanup_tbl:
	rhashtable_destroy(rhash_addr_tbl);
err_free:
	kfree(rhash_addr_tbl);

	return ret;
}

static inline void ath11k_peer_rhash_id_tbl_destroy(struct ath11k_base *ab)
{
	lockdep_assert_held(&ab->tbl_mtx_lock);

	if (!ab->rhead_peer_id)
		return;

	rhashtable_destroy(ab->rhead_peer_id);
	kfree(ab->rhead_peer_id);
	ab->rhead_peer_id = NULL;
}

static inline void ath11k_peer_rhash_addr_tbl_destroy(struct ath11k_base *ab)
{
	lockdep_assert_held(&ab->tbl_mtx_lock);

	if (!ab->rhead_peer_addr)
		return;

	rhashtable_destroy(ab->rhead_peer_addr);
	kfree(ab->rhead_peer_addr);
	ab->rhead_peer_addr = NULL;
}

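/* Initialize both peer rhashtables; if the addr table fails to initialize,
 * the already created id table is torn down again.
 */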
int ath11k_peer_rhash_tbl_init(struct ath11k_base *ab)
{
	int ret;

	mutex_lock(&ab->tbl_mtx_lock);

	ret = ath11k_peer_rhash_id_tbl_init(ab);
	if (ret)
		goto out;

	ret = ath11k_peer_rhash_addr_tbl_init(ab);
	if (ret)
		goto cleanup_tbl;

	mutex_unlock(&ab->tbl_mtx_lock);

	return 0;

cleanup_tbl:
	ath11k_peer_rhash_id_tbl_destroy(ab);
out:
	mutex_unlock(&ab->tbl_mtx_lock);
	return ret;
}

void ath11k_peer_rhash_tbl_destroy(struct ath11k_base *ab)
{
	mutex_lock(&ab->tbl_mtx_lock);

	ath11k_peer_rhash_addr_tbl_destroy(ab);
	ath11k_peer_rhash_id_tbl_destroy(ab);

	mutex_unlock(&ab->tbl_mtx_lock);
}