// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2015-2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2023 Intel Corporation
 */
#include <linux/etherdevice.h>
#include <linux/math64.h>
#include <net/cfg80211.h>
#include "mvm.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "constants.h"

struct iwl_mvm_loc_entry {
	struct list_head list;
	u8 addr[ETH_ALEN];
	u8 lci_len, civic_len;
	u8 buf[];
};

struct iwl_mvm_smooth_entry {
	struct list_head list;
	u8 addr[ETH_ALEN];
	s64 rtt_avg;
	u64 host_time;
};

enum iwl_mvm_pasn_flags {
	IWL_MVM_PASN_FLAG_HAS_HLTK = BIT(0),
};

struct iwl_mvm_ftm_pasn_entry {
	struct list_head list;
	u8 addr[ETH_ALEN];
	u8 hltk[HLTK_11AZ_LEN];
	u8 tk[TK_11AZ_LEN];
	u8 cipher;
	u8 tx_pn[IEEE80211_CCMP_PN_LEN];
	u8 rx_pn[IEEE80211_CCMP_PN_LEN];
	u32 flags;
};

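/*
 * Store the keys needed for secure (802.11az PASN) ranging with the
 * responder at @addr.  The entry is kept on the initiator's pasn_list
 * and is later used to fill the secured-ranging fields of a range
 * request targeting this responder.
 */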
int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
			     u8 *hltk, u32 hltk_len)
{
	struct iwl_mvm_ftm_pasn_entry *pasn = kzalloc(sizeof(*pasn),
						      GFP_KERNEL);
	u32 expected_tk_len;

	lockdep_assert_held(&mvm->mutex);

	if (!pasn)
		return -ENOBUFS;

	iwl_mvm_ftm_remove_pasn_sta(mvm, addr);

	pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);

	switch (pasn->cipher) {
	case IWL_LOCATION_CIPHER_CCMP_128:
	case IWL_LOCATION_CIPHER_GCMP_128:
		expected_tk_len = WLAN_KEY_LEN_CCMP;
		break;
	case IWL_LOCATION_CIPHER_GCMP_256:
		expected_tk_len = WLAN_KEY_LEN_GCMP_256;
		break;
	default:
		goto out;
	}

	/*
	 * If we are associated with this AP and a security context already
	 * exists, the TK is already configured for this station, so it
	 * should not be set again here.
	 */
	if (vif->cfg.assoc) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
		struct ieee80211_bss_conf *link_conf;
		unsigned int link_id;
		struct ieee80211_sta *sta;
		u8 sta_id;

		rcu_read_lock();
		for_each_vif_active_link(vif, link_conf, link_id) {
			if (memcmp(addr, link_conf->bssid, ETH_ALEN))
				continue;

			sta_id = mvmvif->link[link_id]->ap_sta_id;
			sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
			if (!IS_ERR_OR_NULL(sta) && sta->mfp)
				expected_tk_len = 0;
			break;
		}
		rcu_read_unlock();
	}

	if (tk_len != expected_tk_len ||
	    (hltk_len && hltk_len != sizeof(pasn->hltk))) {
		IWL_ERR(mvm, "Invalid key length: tk_len=%u hltk_len=%u\n",
			tk_len, hltk_len);
		goto out;
	}

	if (!expected_tk_len && !hltk_len) {
		IWL_ERR(mvm, "TK and HLTK not set\n");
		goto out;
	}

	memcpy(pasn->addr, addr, sizeof(pasn->addr));

	if (hltk_len) {
		memcpy(pasn->hltk, hltk, sizeof(pasn->hltk));
		pasn->flags |= IWL_MVM_PASN_FLAG_HAS_HLTK;
	}

	if (tk && tk_len)
		memcpy(pasn->tk, tk, sizeof(pasn->tk));

	list_add_tail(&pasn->list, &mvm->ftm_initiator.pasn_list);
	return 0;
out:
	kfree(pasn);
	return -EINVAL;
}

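/* Remove and free the PASN entry matching @addr, if one exists. */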
void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr)
{
	struct iwl_mvm_ftm_pasn_entry *entry, *prev;

	lockdep_assert_held(&mvm->mutex);

	list_for_each_entry_safe(entry, prev, &mvm->ftm_initiator.pasn_list,
				 list) {
		if (memcmp(entry->addr, addr, sizeof(entry->addr)))
			continue;

		list_del(&entry->list);
		kfree(entry);
		return;
	}
}

static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm)
{
	struct iwl_mvm_loc_entry *e, *t;

	mvm->ftm_initiator.req = NULL;
	mvm->ftm_initiator.req_wdev = NULL;
	memset(mvm->ftm_initiator.responses, 0,
	       sizeof(mvm->ftm_initiator.responses));

	list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) {
		list_del(&e->list);
		kfree(e);
	}
}

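/*
 * Called when the firmware is restarted: report a failure for every
 * peer of the pending request and complete it, since the firmware's
 * measurement state is lost.
 */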
void iwl_mvm_ftm_restart(struct iwl_mvm *mvm)
{
	struct cfg80211_pmsr_result result = {
		.status = NL80211_PMSR_STATUS_FAILURE,
		.final = 1,
		.host_time = ktime_get_boottime_ns(),
		.type = NL80211_PMSR_TYPE_FTM,
	};
	int i;

	lockdep_assert_held(&mvm->mutex);

	if (!mvm->ftm_initiator.req)
		return;

	for (i = 0; i < mvm->ftm_initiator.req->n_peers; i++) {
		memcpy(result.addr, mvm->ftm_initiator.req->peers[i].addr,
		       ETH_ALEN);
		result.ftm.burst_index = mvm->ftm_initiator.responses[i];

		cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
				     mvm->ftm_initiator.req,
				     &result, GFP_KERNEL);
	}

	cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
			       mvm->ftm_initiator.req, GFP_KERNEL);
	iwl_mvm_ftm_reset(mvm);
}

void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm)
{
	INIT_LIST_HEAD(&mvm->ftm_initiator.smooth.resp);

	IWL_DEBUG_INFO(mvm,
		       "enable=%u, alpha=%u, age_jiffies=%u, thresh=(%u:%u)\n",
		       IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * HZ,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT);
}

void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm)
{
	struct iwl_mvm_smooth_entry *se, *st;

	list_for_each_entry_safe(se, st, &mvm->ftm_initiator.smooth.resp,
				 list) {
		list_del(&se->list);
		kfree(se);
	}
}

static int
iwl_ftm_range_request_status_to_err(enum iwl_tof_range_request_status s)
{
	switch (s) {
	case IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS:
		return 0;
	case IWL_TOF_RANGE_REQUEST_STATUS_BUSY:
		return -EBUSY;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       struct iwl_tof_range_req_cmd_v5 *cmd,
			       struct cfg80211_pmsr_request *req)
{
	int i;

	cmd->request_id = req->cookie;
	cmd->num_of_ap = req->n_peers;

	/* use the maximum if there is no timeout or it exceeds what we can do */
	if (!req->timeout || req->timeout > 255 * 100)
		cmd->req_timeout = 255;
	else
		cmd->req_timeout = DIV_ROUND_UP(req->timeout, 100);

	/*
	 * Always treat the address as random; if it were not, our local
	 * address would have been filled in as the template instead.
	 */
	cmd->macaddr_random = 1;
	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
	for (i = 0; i < ETH_ALEN; i++)
		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];

	if (vif->cfg.assoc)
		memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
	else
		eth_broadcast_addr(cmd->range_req_bssid);
}

static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   struct iwl_tof_range_req_cmd_v9 *cmd,
				   struct cfg80211_pmsr_request *req)
{
	int i;

	cmd->initiator_flags =
		cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM |
			    IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT);
	cmd->request_id = req->cookie;
	cmd->num_of_ap = req->n_peers;

	/*
	 * Use a large value for "no timeout". Don't use the maximum value
	 * because of fw limitations.
	 */
	if (req->timeout)
		cmd->req_timeout_ms = cpu_to_le32(req->timeout);
	else
		cmd->req_timeout_ms = cpu_to_le32(0xfffff);

	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
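	/*
	 * cfg80211 sets mask bits for address bits that are kept from the
	 * template; the firmware uses the inverse convention, hence the
	 * bitwise NOT.
	 */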
	for (i = 0; i < ETH_ALEN; i++)
		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];

	if (vif->cfg.assoc) {
		memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);

		/* AP's TSF is only relevant if associated */
		for (i = 0; i < req->n_peers; i++) {
			if (req->peers[i].report_ap_tsf) {
				struct iwl_mvm_vif *mvmvif =
					iwl_mvm_vif_from_mac80211(vif);

				cmd->tsf_mac_id = cpu_to_le32(mvmvif->id);
				return;
			}
		}
	} else {
		eth_broadcast_addr(cmd->range_req_bssid);
	}

	/* Don't report AP's TSF */
	cmd->tsf_mac_id = cpu_to_le32(0xff);
}

static void iwl_mvm_ftm_cmd_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       struct iwl_tof_range_req_cmd_v8 *cmd,
			       struct cfg80211_pmsr_request *req)
{
	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)cmd, req);
}

static int
iwl_mvm_ftm_target_chandef_v1(struct iwl_mvm *mvm,
			      struct cfg80211_pmsr_request_peer *peer,
			      u8 *channel, u8 *bandwidth,
			      u8 *ctrl_ch_position)
{
	u32 freq = peer->chandef.chan->center_freq;

	*channel = ieee80211_frequency_to_channel(freq);

	switch (peer->chandef.width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
		*bandwidth = IWL_TOF_BW_20_LEGACY;
		break;
	case NL80211_CHAN_WIDTH_20:
		*bandwidth = IWL_TOF_BW_20_HT;
		break;
	case NL80211_CHAN_WIDTH_40:
		*bandwidth = IWL_TOF_BW_40;
		break;
	case NL80211_CHAN_WIDTH_80:
		*bandwidth = IWL_TOF_BW_80;
		break;
	default:
		IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
			peer->chandef.width);
		return -EINVAL;
	}

	*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
		iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;

	return 0;
}

static int
iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
			      struct cfg80211_pmsr_request_peer *peer,
			      u8 *channel, u8 *format_bw,
			      u8 *ctrl_ch_position)
{
	u32 freq = peer->chandef.chan->center_freq;
	u8 cmd_ver;

	*channel = ieee80211_frequency_to_channel(freq);

	switch (peer->chandef.width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
		*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_20:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
		*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_40:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
		*format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_80:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
		*format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_160:
		cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
						WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
						IWL_FW_CMD_VER_UNKNOWN);

		if (cmd_ver >= 13) {
			*format_bw = IWL_LOCATION_FRAME_FORMAT_HE;
			*format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS;
			break;
		}
		fallthrough;
	default:
		IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
			peer->chandef.width);
		return -EINVAL;
	}

	/* non-EDCA based measurements must use the HE preamble */
	if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
		*format_bw |= IWL_LOCATION_FRAME_FORMAT_HE;

	*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
		iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;

	return 0;
}

static int
iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v2 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
					    &target->bandwidth,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	memcpy(target->bssid, peer->addr, ETH_ALEN);
	target->burst_period =
		cpu_to_le16(peer->ftm.burst_period);
	target->samples_per_burst = peer->ftm.ftms_per_burst;
	target->num_of_bursts = peer->ftm.num_bursts_exp;
	target->measure_type = 0; /* regular two-sided FTM */
	target->retries_per_sample = peer->ftm.ftmr_retries;
	target->asap_mode = peer->ftm.asap;
	target->enable_dyn_ack = IWL_MVM_FTM_INITIATOR_DYNACK;

	if (peer->ftm.request_lci)
		target->location_req |= IWL_TOF_LOC_LCI;
	if (peer->ftm.request_civicloc)
		target->location_req |= IWL_TOF_LOC_CIVIC;

	target->algo_type = IWL_MVM_FTM_INITIATOR_ALGO;

	return 0;
}

#define FTM_PUT_FLAG(flag)	(target->initiator_ap_flags |= \
				 cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))

static void
iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
			      struct cfg80211_pmsr_request_peer *peer,
			      struct iwl_tof_range_req_ap_entry_v6 *target)
{
	memcpy(target->bssid, peer->addr, ETH_ALEN);
	target->burst_period =
		cpu_to_le16(peer->ftm.burst_period);
	target->samples_per_burst = peer->ftm.ftms_per_burst;
	target->num_of_bursts = peer->ftm.num_bursts_exp;
	target->ftmr_max_retries = peer->ftm.ftmr_retries;
	target->initiator_ap_flags = cpu_to_le32(0);

	if (peer->ftm.asap)
		FTM_PUT_FLAG(ASAP);

	if (peer->ftm.request_lci)
		FTM_PUT_FLAG(LCI_REQUEST);

	if (peer->ftm.request_civicloc)
		FTM_PUT_FLAG(CIVIC_REQUEST);

	if (IWL_MVM_FTM_INITIATOR_DYNACK)
		FTM_PUT_FLAG(DYN_ACK);

	if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG)
		FTM_PUT_FLAG(ALGO_LR);
	else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
		FTM_PUT_FLAG(ALGO_FFT);

	if (peer->ftm.trigger_based)
		FTM_PUT_FLAG(TB);
	else if (peer->ftm.non_trigger_based)
		FTM_PUT_FLAG(NON_TB);

	if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
	    peer->ftm.lmr_feedback)
		FTM_PUT_FLAG(LMR_FEEDBACK);
}

static int
iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v3 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
					    &target->bandwidth,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	/*
	 * Versions 3 and 4 share the relevant fields with version 6, so
	 * iwl_mvm_ftm_put_target_common() can be used for version 3 too.
	 */
	iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);

	return 0;
}

static int
iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v4 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
					    &target->format_bw,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);

	return 0;
}

static int
iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		       struct cfg80211_pmsr_request_peer *peer,
		       struct iwl_tof_range_req_ap_entry_v6 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
					    &target->format_bw,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	iwl_mvm_ftm_put_target_common(mvm, peer, target);

	if (vif->cfg.assoc) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
		struct ieee80211_sta *sta;
		struct ieee80211_bss_conf *link_conf;
		unsigned int link_id;

		rcu_read_lock();
		for_each_vif_active_link(vif, link_conf, link_id) {
			if (memcmp(peer->addr, link_conf->bssid, ETH_ALEN))
				continue;

			target->sta_id = mvmvif->link[link_id]->ap_sta_id;
			sta = rcu_dereference(mvm->fw_id_to_mac_id[target->sta_id]);
			if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
				rcu_read_unlock();
				return PTR_ERR_OR_ZERO(sta);
			}

			if (sta->mfp && (peer->ftm.trigger_based ||
					 peer->ftm.non_trigger_based))
				FTM_PUT_FLAG(PMF);
			break;
		}
		rcu_read_unlock();
	} else {
		target->sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * TODO: Beacon interval is currently unknown, so use the common value
	 * of 100 TUs.
	 */
	target->beacon_interval = cpu_to_le16(100);
	return 0;
}

static int iwl_mvm_ftm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *hcmd)
{
	u32 status;
	int err = iwl_mvm_send_cmd_status(mvm, hcmd, &status);

	if (!err && status) {
		IWL_ERR(mvm, "FTM range request command failure, status: %u\n",
			status);
		err = iwl_ftm_range_request_status_to_err(status);
	}

	return err;
}

static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v5 cmd_v5;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd_v5,
		.len[0] = sizeof(cmd_v5),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req);

	for (i = 0; i < cmd_v5.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		err = iwl_mvm_ftm_put_target_v2(mvm, peer, &cmd_v5.ap[i]);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v7 cmd_v7;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd_v7,
		.len[0] = sizeof(cmd_v7),
	};
	u8 i;
	int err;

	/*
	 * Versions 7 and 8 have the same structure except for the responders
	 * list, so iwl_mvm_ftm_cmd_v8() can be used for version 7 too.
	 */
	iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd_v7, req);

	for (i = 0; i < cmd_v7.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		err = iwl_mvm_ftm_put_target_v3(mvm, peer, &cmd_v7.ap[i]);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v8 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		err = iwl_mvm_ftm_put_target_v4(mvm, peer, &cmd.ap[i]);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v9 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, &cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v6 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target(mvm, vif, peer, target);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

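/* Key iterator: copy the associated AP's TK into the target entry. */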
static void iter(struct ieee80211_hw *hw,
		 struct ieee80211_vif *vif,
		 struct ieee80211_sta *sta,
		 struct ieee80211_key_conf *key,
		 void *data)
{
	struct iwl_tof_range_req_ap_entry_v6 *target = data;

	if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN))
		return;

	WARN_ON(!sta->mfp);

	if (WARN_ON(key->keylen > sizeof(target->tk)))
		return;

	memcpy(target->tk, key->key, key->keylen);
	target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher);
	WARN_ON(target->cipher == IWL_LOCATION_CIPHER_INVALID);
}

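/*
 * For trigger-based and non-trigger-based (802.11az) targets, look up
 * the PASN entry matching the BSSID and fill in the cipher, keys and
 * packet numbers needed for secured ranging.
 */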
static void
iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct iwl_tof_range_req_ap_entry_v7 *target)
{
	struct iwl_mvm_ftm_pasn_entry *entry;
	u32 flags = le32_to_cpu(target->initiator_ap_flags);

	if (!(flags & (IWL_INITIATOR_AP_FLAGS_NON_TB |
		       IWL_INITIATOR_AP_FLAGS_TB)))
		return;

	lockdep_assert_held(&mvm->mutex);

	list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
		if (memcmp(entry->addr, target->bssid, sizeof(entry->addr)))
			continue;

		target->cipher = entry->cipher;

		if (entry->flags & IWL_MVM_PASN_FLAG_HAS_HLTK)
			memcpy(target->hltk, entry->hltk, sizeof(target->hltk));
		else
			memset(target->hltk, 0, sizeof(target->hltk));

		if (vif->cfg.assoc &&
		    !memcmp(vif->bss_conf.bssid, target->bssid,
			    sizeof(target->bssid)))
			ieee80211_iter_keys(mvm->hw, vif, iter, target);
		else
			memcpy(target->tk, entry->tk, sizeof(target->tk));

		memcpy(target->rx_pn, entry->rx_pn, sizeof(target->rx_pn));
		memcpy(target->tx_pn, entry->tx_pn, sizeof(target->tx_pn));

		target->initiator_ap_flags |=
			cpu_to_le32(IWL_INITIATOR_AP_FLAGS_SECURED);
		return;
	}
}

static int
iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v7 *target)
{
	int err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target);
	if (err)
		return err;

	iwl_mvm_ftm_set_secured_ranging(mvm, vif, target);
	return err;
}

static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v11 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v7 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, target);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

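/* Fill in the NDP ranging parameters (max LTF repetitions, spatial
 * streams and total LTFs) from the driver's default configuration.
 */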
static void
iwl_mvm_ftm_set_ndp_params(struct iwl_mvm *mvm,
			   struct iwl_tof_range_req_ap_entry_v8 *target)
{
	/* Only 2 STS are supported on Tx */
	u32 i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 :
		IWL_MVM_FTM_I2R_MAX_STS;

	target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
		(IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS);
	target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
		(i2r_max_sts << IWL_LOCATION_MAX_STS_POS);
	target->r2i_max_total_ltf = IWL_MVM_FTM_R2I_MAX_TOTAL_LTF;
	target->i2r_max_total_ltf = IWL_MVM_FTM_I2R_MAX_TOTAL_LTF;
}

static int
iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v8 *target)
{
	u32 flags;
	int ret = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target);

	if (ret)
		return ret;

	iwl_mvm_ftm_set_ndp_params(mvm, target);

	/*
	 * If secure LTF is turned off, replace the flag with PMF only
	 */
	flags = le32_to_cpu(target->initiator_ap_flags);
	if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) {
		if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF)
			flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;

		flags |= IWL_INITIATOR_AP_FLAGS_PMF;
		target->initiator_ap_flags = cpu_to_le32(flags);
	}

	return 0;
}

static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v12 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v8 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, target);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v13 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v9 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, (void *)target);
		if (err)
			return err;

		if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
			target->bss_color = peer->ftm.bss_color;

		if (peer->ftm.non_trigger_based) {
			target->min_time_between_msr =
				cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
			target->burst_period =
				cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
		} else {
			target->min_time_between_msr = cpu_to_le16(0);
		}

		target->band =
			iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band);
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

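/*
 * Start an FTM (fine timing measurement) request.  The layout of
 * TOF_RANGE_REQ_CMD depends on the firmware, so dispatch to the builder
 * matching the command version it advertises.
 */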
int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		      struct cfg80211_pmsr_request *req)
{
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
	int err;

	lockdep_assert_held(&mvm->mutex);

	if (mvm->ftm_initiator.req)
		return -EBUSY;

	if (new_api) {
		u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
						   WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
						   IWL_FW_CMD_VER_UNKNOWN);

		switch (cmd_ver) {
		case 13:
			err = iwl_mvm_ftm_start_v13(mvm, vif, req);
			break;
		case 12:
			err = iwl_mvm_ftm_start_v12(mvm, vif, req);
			break;
		case 11:
			err = iwl_mvm_ftm_start_v11(mvm, vif, req);
			break;
		case 9:
		case 10:
			err = iwl_mvm_ftm_start_v9(mvm, vif, req);
			break;
		case 8:
			err = iwl_mvm_ftm_start_v8(mvm, vif, req);
			break;
		default:
			err = iwl_mvm_ftm_start_v7(mvm, vif, req);
			break;
		}
	} else {
		err = iwl_mvm_ftm_start_v5(mvm, vif, req);
	}

	if (!err) {
		mvm->ftm_initiator.req = req;
		mvm->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif);
	}

	return err;
}

void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_abort_cmd cmd = {
		.request_id = req->cookie,
	};

	lockdep_assert_held(&mvm->mutex);

	if (req != mvm->ftm_initiator.req)
		return;

	iwl_mvm_ftm_reset(mvm);

	if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LOCATION_GROUP, TOF_RANGE_ABORT_CMD),
				 0, sizeof(cmd), &cmd))
		IWL_ERR(mvm, "failed to abort FTM process\n");
}

static int iwl_mvm_ftm_find_peer(struct cfg80211_pmsr_request *req,
				 const u8 *addr)
{
	int i;

	for (i = 0; i < req->n_peers; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		if (ether_addr_equal_unaligned(peer->addr, addr))
			return i;
	}

	return -ENOENT;
}

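/*
 * Convert the firmware's GP2 timestamp of the measurement (a 32-bit
 * microsecond counter that may wrap) to host boottime nanoseconds.
 */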
static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts)
{
	u32 gp2_ts = le32_to_cpu(fw_gp2_ts);
	u32 curr_gp2, diff;
	u64 now_from_boot_ns;

	iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2,
			      &now_from_boot_ns, NULL);

	if (curr_gp2 >= gp2_ts)
		diff = curr_gp2 - gp2_ts;
	else
		diff = curr_gp2 + (U32_MAX - gp2_ts + 1);

	return now_from_boot_ns - (u64)diff * 1000;
}

static void iwl_mvm_ftm_get_lci_civic(struct iwl_mvm *mvm,
				      struct cfg80211_pmsr_result *res)
{
	struct iwl_mvm_loc_entry *entry;

	list_for_each_entry(entry, &mvm->ftm_initiator.loc_list, list) {
		if (!ether_addr_equal_unaligned(res->addr, entry->addr))
			continue;

		if (entry->lci_len) {
			res->ftm.lci_len = entry->lci_len;
			res->ftm.lci = entry->buf;
		}

		if (entry->civic_len) {
			res->ftm.civicloc_len = entry->civic_len;
			res->ftm.civicloc = entry->buf + entry->lci_len;
		}

		/* we found the entry we needed */
		break;
	}
}

static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
					u8 num_of_aps)
{
	lockdep_assert_held(&mvm->mutex);

	if (request_id != (u8)mvm->ftm_initiator.req->cookie) {
		IWL_ERR(mvm, "Request ID mismatch, got %u, active %u\n",
			request_id, (u8)mvm->ftm_initiator.req->cookie);
		return -EINVAL;
	}

	if (num_of_aps > mvm->ftm_initiator.req->n_peers) {
		IWL_ERR(mvm, "FTM range response invalid\n");
		return -EINVAL;
	}

	return 0;
}

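/*
 * Optionally smooth the reported RTT with a per-responder exponential
 * moving average; the reported value is replaced by the average only
 * when it deviates from it by more than the configured under/overshoot
 * thresholds.
 */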
static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm,
				      struct cfg80211_pmsr_result *res)
{
	struct iwl_mvm_smooth_entry *resp = NULL, *iter;
	s64 rtt_avg, rtt = res->ftm.rtt_avg;
	u32 undershoot, overshoot;
	u8 alpha;

	if (!IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH)
		return;

	WARN_ON(rtt < 0);

	if (res->status != NL80211_PMSR_STATUS_SUCCESS) {
		IWL_DEBUG_INFO(mvm,
			       ": %pM: ignore failed measurement. Status=%u\n",
			       res->addr, res->status);
		return;
	}

	list_for_each_entry(iter, &mvm->ftm_initiator.smooth.resp, list) {
		if (!memcmp(res->addr, iter->addr, ETH_ALEN)) {
			resp = iter;
			break;
		}
	}

	if (!resp) {
		resp = kzalloc(sizeof(*resp), GFP_KERNEL);
		if (!resp)
			return;

		memcpy(resp->addr, res->addr, ETH_ALEN);
		list_add_tail(&resp->list, &mvm->ftm_initiator.smooth.resp);

		resp->rtt_avg = rtt;

		IWL_DEBUG_INFO(mvm, "new: %pM: rtt_avg=%lld\n",
			       resp->addr, resp->rtt_avg);
		goto update_time;
	}

	if (res->host_time - resp->host_time >
	    IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * 1000000000) {
		resp->rtt_avg = rtt;

		IWL_DEBUG_INFO(mvm, "expired: %pM: rtt_avg=%lld\n",
			       resp->addr, resp->rtt_avg);
		goto update_time;
	}

	/* Smooth the results based on the tracked RTT average */
	undershoot = IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT;
	overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT;
	alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA;

	rtt_avg = div_s64(alpha * rtt + (100 - alpha) * resp->rtt_avg, 100);

	IWL_DEBUG_INFO(mvm,
		       "%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n",
		       resp->addr, resp->rtt_avg, rtt_avg, rtt);

	/*
	 * update the responder's average RTT results regardless of
	 * the under/over shoot logic below
	 */
	resp->rtt_avg = rtt_avg;

	/* smooth the results */
	if (rtt_avg > rtt && (rtt_avg - rtt) > undershoot) {
		res->ftm.rtt_avg = rtt_avg;

		IWL_DEBUG_INFO(mvm,
			       "undershoot: val=%lld\n",
			       (rtt_avg - rtt));
	} else if (rtt_avg < rtt && (rtt - rtt_avg) >
		   overshoot) {
		res->ftm.rtt_avg = rtt_avg;
		IWL_DEBUG_INFO(mvm,
			       "overshoot: val=%lld\n",
			       (rtt - rtt_avg));
	}

update_time:
	resp->host_time = res->host_time;
}

static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
				     struct cfg80211_pmsr_result *res)
{
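	/* rtt_avg is in picoseconds; RTT/2 * c gives the distance in cm */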
	s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);

	IWL_DEBUG_INFO(mvm, "entry %d\n", index);
	IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
	IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr);
	IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time);
	IWL_DEBUG_INFO(mvm, "\tburst index: %d\n", res->ftm.burst_index);
	IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes);
	IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg);
	IWL_DEBUG_INFO(mvm, "\trssi spread: %d\n", res->ftm.rssi_spread);
	IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg);
	IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance);
	IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread);
	IWL_DEBUG_INFO(mvm, "\tdistance: %lld\n", rtt_avg);
}

static void
iwl_mvm_ftm_pasn_update_pn(struct iwl_mvm *mvm,
			   struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap)
{
	struct iwl_mvm_ftm_pasn_entry *entry;

	lockdep_assert_held(&mvm->mutex);

	list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
		if (memcmp(fw_ap->bssid, entry->addr, sizeof(entry->addr)))
			continue;

		memcpy(entry->rx_pn, fw_ap->rx_pn, sizeof(entry->rx_pn));
		memcpy(entry->tx_pn, fw_ap->tx_pn, sizeof(entry->tx_pn));
		return;
	}
}

static u8 iwl_mvm_ftm_get_range_resp_ver(struct iwl_mvm *mvm)
{
	if (!fw_has_api(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ))
		return 5;

	/* Starting from version 8, the FW advertises the version */
	if (mvm->cmd_ver.range_resp >= 8)
		return mvm->cmd_ver.range_resp;
	else if (fw_has_api(&mvm->fw->ucode_capa,
			    IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
		return 7;

	/* The first version of the new range request API */
	return 6;
}

static bool iwl_mvm_ftm_resp_size_validation(u8 ver, unsigned int pkt_len)
{
	switch (ver) {
	case 9:
	case 8:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v8);
	case 7:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v7);
	case 6:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v6);
	case 5:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v5);
	default:
		WARN_ONCE(1, "FTM: unsupported range response version %u", ver);
		return false;
	}
}

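/*
 * Handle the range response notification: validate it against the
 * pending request, translate each AP entry into a cfg80211 PMSR result
 * and report it, completing the request on the last notification of
 * the batch.
 */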
void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data;
	struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data;
	struct iwl_tof_range_rsp_ntfy_v7 *fw_resp_v7 = (void *)pkt->data;
	struct iwl_tof_range_rsp_ntfy_v8 *fw_resp_v8 = (void *)pkt->data;
	int i;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
	u8 num_of_aps, last_in_batch;
	u8 notif_ver = iwl_mvm_ftm_get_range_resp_ver(mvm);

	lockdep_assert_held(&mvm->mutex);

	if (!mvm->ftm_initiator.req)
		return;

	if (unlikely(!iwl_mvm_ftm_resp_size_validation(notif_ver, pkt_len)))
		return;

	if (new_api) {
		if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v8->request_id,
						 fw_resp_v8->num_of_aps))
			return;

		num_of_aps = fw_resp_v8->num_of_aps;
		last_in_batch = fw_resp_v8->last_report;
	} else {
		if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v5->request_id,
						 fw_resp_v5->num_of_aps))
			return;

		num_of_aps = fw_resp_v5->num_of_aps;
		last_in_batch = fw_resp_v5->last_in_batch;
	}

	IWL_DEBUG_INFO(mvm, "Range response received\n");
	IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %u\n",
		       mvm->ftm_initiator.req->cookie, num_of_aps);

	for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) {
		struct cfg80211_pmsr_result result = {};
		struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap;
		int peer_idx;

		if (new_api) {
			if (notif_ver >= 8) {
				fw_ap = &fw_resp_v8->ap[i];
				iwl_mvm_ftm_pasn_update_pn(mvm, fw_ap);
			} else if (notif_ver == 7) {
				fw_ap = (void *)&fw_resp_v7->ap[i];
			} else {
				fw_ap = (void *)&fw_resp_v6->ap[i];
			}

			result.final = fw_ap->last_burst;
			result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
			result.ap_tsf_valid = 1;
		} else {
			/* the first part is the same for old and new APIs */
			fw_ap = (void *)&fw_resp_v5->ap[i];
			/*
			 * FIXME: the firmware needs to report this, we don't
			 * even know the number of bursts the responder picked
			 * (if we asked it to)
			 */
			result.final = 0;
		}

		peer_idx = iwl_mvm_ftm_find_peer(mvm->ftm_initiator.req,
						 fw_ap->bssid);
		if (peer_idx < 0) {
			IWL_WARN(mvm,
				 "Unknown address (%pM, target #%d) in FTM response\n",
				 fw_ap->bssid, i);
			continue;
		}

		switch (fw_ap->measure_status) {
		case IWL_TOF_ENTRY_SUCCESS:
			result.status = NL80211_PMSR_STATUS_SUCCESS;
			break;
		case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT:
			result.status = NL80211_PMSR_STATUS_TIMEOUT;
			break;
		case IWL_TOF_ENTRY_NO_RESPONSE:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_NO_RESPONSE;
			break;
		case IWL_TOF_ENTRY_REQUEST_REJECTED:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_PEER_BUSY;
			result.ftm.busy_retry_time = fw_ap->refusal_period;
			break;
		default:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_UNSPECIFIED;
			break;
		}
		memcpy(result.addr, fw_ap->bssid, ETH_ALEN);
		result.host_time = iwl_mvm_ftm_get_host_time(mvm,
							     fw_ap->timestamp);
		result.type = NL80211_PMSR_TYPE_FTM;
		result.ftm.burst_index = mvm->ftm_initiator.responses[peer_idx];
		mvm->ftm_initiator.responses[peer_idx]++;
		result.ftm.rssi_avg = fw_ap->rssi;
		result.ftm.rssi_avg_valid = 1;
		result.ftm.rssi_spread = fw_ap->rssi_spread;
		result.ftm.rssi_spread_valid = 1;
		result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt);
		result.ftm.rtt_avg_valid = 1;
		result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance);
		result.ftm.rtt_variance_valid = 1;
		result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread);
		result.ftm.rtt_spread_valid = 1;

		iwl_mvm_ftm_get_lci_civic(mvm, &result);

		iwl_mvm_ftm_rtt_smoothing(mvm, &result);

		cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
				     mvm->ftm_initiator.req,
				     &result, GFP_KERNEL);

		if (fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
			IWL_DEBUG_INFO(mvm, "RTT confidence: %u\n",
				       fw_ap->rttConfidence);

		iwl_mvm_debug_range_resp(mvm, i, &result);
	}

	if (last_in_batch) {
		cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
				       mvm->ftm_initiator.req,
				       GFP_KERNEL);
		iwl_mvm_ftm_reset(mvm);
	}
}

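/*
 * Handle an FTM LCI/civic report frame: find the LCI and civic location
 * measurement report elements and cache them per BSSID so they can be
 * attached to the corresponding range response results.
 */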
void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	const struct ieee80211_mgmt *mgmt = (void *)pkt->data;
	size_t len = iwl_rx_packet_payload_len(pkt);
	struct iwl_mvm_loc_entry *entry;
	const u8 *ies, *lci, *civic, *msr_ie;
	size_t ies_len, lci_len = 0, civic_len = 0;
	size_t baselen = IEEE80211_MIN_ACTION_SIZE +
			 sizeof(mgmt->u.action.u.ftm);
	static const u8 rprt_type_lci = IEEE80211_SPCT_MSR_RPRT_TYPE_LCI;
	static const u8 rprt_type_civic = IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC;

	if (len <= baselen)
		return;

	lockdep_assert_held(&mvm->mutex);

	ies = mgmt->u.action.u.ftm.variable;
	ies_len = len - baselen;

	msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
					&rprt_type_lci, 1, 4);
	if (msr_ie) {
		lci = msr_ie + 2;
		lci_len = msr_ie[1];
	}

	msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
					&rprt_type_civic, 1, 4);
	if (msr_ie) {
		civic = msr_ie + 2;
		civic_len = msr_ie[1];
	}

	entry = kmalloc(sizeof(*entry) + lci_len + civic_len, GFP_KERNEL);
	if (!entry)
		return;

	memcpy(entry->addr, mgmt->bssid, ETH_ALEN);

	entry->lci_len = lci_len;
	if (lci_len)
		memcpy(entry->buf, lci, lci_len);

	entry->civic_len = civic_len;
	if (civic_len)
		memcpy(entry->buf + lci_len, civic, civic_len);

	list_add_tail(&entry->list, &mvm->ftm_initiator.loc_list);
}