// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
 */

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/platform_device.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/soc/qcom/smem.h>
#include <linux/string.h>
#include <net/sock.h>

#include "debug.h"
#include "snoc.h"

#define ATH10K_QMI_CLIENT_ID		0x4b4e454c
#define ATH10K_QMI_TIMEOUT		30
#define SMEM_IMAGE_VERSION_TABLE	469
#define SMEM_IMAGE_TABLE_CNSS_INDEX	13
#define SMEM_IMAGE_VERSION_ENTRY_SIZE	128
#define SMEM_IMAGE_VERSION_NAME_SIZE	75

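/* Hand an MSA sub-region over to the Q6/WLAN VMIDs via an SCM assign call
 * so the firmware can access it; the WLAN_CE VMID is only included for
 * non-secure regions.
 */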
static int ath10k_qmi_map_msa_permission(struct ath10k_qmi *qmi,
					 struct ath10k_msa_mem_info *mem_info)
{
	struct qcom_scm_vmperm dst_perms[3];
	struct ath10k *ar = qmi->ar;
	u64 src_perms;
	u32 perm_count;
	int ret;

	src_perms = BIT(QCOM_SCM_VMID_HLOS);

	dst_perms[0].vmid = QCOM_SCM_VMID_MSS_MSA;
	dst_perms[0].perm = QCOM_SCM_PERM_RW;
	dst_perms[1].vmid = QCOM_SCM_VMID_WLAN;
	dst_perms[1].perm = QCOM_SCM_PERM_RW;

	if (mem_info->secure) {
		perm_count = 2;
	} else {
		dst_perms[2].vmid = QCOM_SCM_VMID_WLAN_CE;
		dst_perms[2].perm = QCOM_SCM_PERM_RW;
		perm_count = 3;
	}

	ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size,
				  &src_perms, dst_perms, perm_count);
	if (ret < 0)
		ath10k_err(ar, "failed to assign msa map permissions: %d\n", ret);

	return ret;
}

static int ath10k_qmi_unmap_msa_permission(struct ath10k_qmi *qmi,
					   struct ath10k_msa_mem_info *mem_info)
{
	struct qcom_scm_vmperm dst_perms;
	struct ath10k *ar = qmi->ar;
	u64 src_perms;
	int ret;

	src_perms = BIT(QCOM_SCM_VMID_MSS_MSA) | BIT(QCOM_SCM_VMID_WLAN);

	if (!mem_info->secure)
		src_perms |= BIT(QCOM_SCM_VMID_WLAN_CE);

	dst_perms.vmid = QCOM_SCM_VMID_HLOS;
	dst_perms.perm = QCOM_SCM_PERM_RW;

	ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size,
				  &src_perms, &dst_perms, 1);
	if (ret < 0)
		ath10k_err(ar, "failed to unmap msa permissions: %d\n", ret);

	return ret;
}

static int ath10k_qmi_setup_msa_permissions(struct ath10k_qmi *qmi)
{
	int ret;
	int i;

	if (qmi->msa_fixed_perm)
		return 0;

	for (i = 0; i < qmi->nr_mem_region; i++) {
		ret = ath10k_qmi_map_msa_permission(qmi, &qmi->mem_region[i]);
		if (ret)
			goto err_unmap;
	}

	return 0;

err_unmap:
	for (i--; i >= 0; i--)
		ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
	return ret;
}

static void ath10k_qmi_remove_msa_permission(struct ath10k_qmi *qmi)
{
	int i;

	if (qmi->msa_fixed_perm)
		return;

	for (i = 0; i < qmi->nr_mem_region; i++)
		ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
}

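/* Ask the firmware how it wants the MSA carved up and check that every
 * reported sub-region lies within the window allocated by the host.
 */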
static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
{
	struct wlfw_msa_info_resp_msg_v01 resp = {};
	struct wlfw_msa_info_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	phys_addr_t max_mapped_addr;
	struct qmi_txn txn;
	int ret;
	int i;

	req.msa_addr = ar->msa.paddr;
	req.size = ar->msa.mem_size;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_msa_info_resp_msg_v01_ei, &resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_MSA_INFO_REQ_V01,
			       WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_msa_info_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send msa mem info req: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "msa info req rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	if (resp.mem_region_info_len > QMI_WLFW_MAX_MEM_REG_V01) {
		ath10k_err(ar, "invalid memory region length received: %d\n",
			   resp.mem_region_info_len);
		ret = -EINVAL;
		goto out;
	}

	max_mapped_addr = ar->msa.paddr + ar->msa.mem_size;
	qmi->nr_mem_region = resp.mem_region_info_len;
	for (i = 0; i < resp.mem_region_info_len; i++) {
		if (resp.mem_region_info[i].size > ar->msa.mem_size ||
		    resp.mem_region_info[i].region_addr > max_mapped_addr ||
		    resp.mem_region_info[i].region_addr < ar->msa.paddr ||
		    resp.mem_region_info[i].size +
		    resp.mem_region_info[i].region_addr > max_mapped_addr) {
			ath10k_err(ar, "received out of range memory region address 0x%llx with size 0x%x, aborting\n",
				   resp.mem_region_info[i].region_addr,
				   resp.mem_region_info[i].size);
			ret = -EINVAL;
			goto fail_unwind;
		}
		qmi->mem_region[i].addr = resp.mem_region_info[i].region_addr;
		qmi->mem_region[i].size = resp.mem_region_info[i].size;
		qmi->mem_region[i].secure = resp.mem_region_info[i].secure_flag;
		ath10k_dbg(ar, ATH10K_DBG_QMI,
			   "qmi msa mem region %d addr 0x%pa size 0x%x flag 0x%08x\n",
			   i, &qmi->mem_region[i].addr,
			   qmi->mem_region[i].size,
			   qmi->mem_region[i].secure);
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem info request completed\n");
	return 0;

fail_unwind:
	memset(&qmi->mem_region[0], 0, sizeof(qmi->mem_region[0]) * i);
out:
	return ret;
}

static int ath10k_qmi_msa_ready_send_sync_msg(struct ath10k_qmi *qmi)
{
	struct wlfw_msa_ready_resp_msg_v01 resp = {};
	struct wlfw_msa_ready_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	struct qmi_txn txn;
	int ret;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_msa_ready_resp_msg_v01_ei, &resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_MSA_READY_REQ_V01,
			       WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_msa_ready_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send msa mem ready request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "msa ready request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem ready request completed\n");
	return 0;

out:
	return ret;
}

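/* Push the board data file to the firmware in chunks of up to
 * QMI_WLFW_MAX_DATA_SIZE_V01 bytes; the final segment sets the end flag,
 * which triggers the firmware's CRC check.
 */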
static int ath10k_qmi_bdf_dnld_send_sync(struct ath10k_qmi *qmi)
{
	struct wlfw_bdf_download_resp_msg_v01 resp = {};
	struct wlfw_bdf_download_req_msg_v01 *req;
	struct ath10k *ar = qmi->ar;
	unsigned int remaining;
	struct qmi_txn txn;
	const u8 *temp;
	int ret;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	temp = ar->normal_mode_fw.board_data;
	remaining = ar->normal_mode_fw.board_len;

	while (remaining) {
		req->valid = 1;
		req->file_id_valid = 1;
		req->file_id = 0;
		req->total_size_valid = 1;
		req->total_size = ar->normal_mode_fw.board_len;
		req->seg_id_valid = 1;
		req->data_valid = 1;
		req->end_valid = 1;

		if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
			req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
		} else {
			req->data_len = remaining;
			req->end = 1;
		}

		memcpy(req->data, temp, req->data_len);

		ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
				   wlfw_bdf_download_resp_msg_v01_ei,
				   &resp);
		if (ret < 0)
			goto out;

		ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
				       QMI_WLFW_BDF_DOWNLOAD_REQ_V01,
				       WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
				       wlfw_bdf_download_req_msg_v01_ei, req);
		if (ret < 0) {
			qmi_txn_cancel(&txn);
			goto out;
		}

		ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);

		if (ret < 0)
			goto out;

		/* end = 1 triggers a CRC check on the BDF.  If this fails, we
		 * get a QMI_ERR_MALFORMED_MSG_V01 error, but the FW is still
		 * willing to use the BDF.  For some platforms, all the valid
		 * released BDFs fail this CRC check, so attempt to detect this
		 * scenario and treat it as non-fatal.
		 */
		if (resp.resp.result != QMI_RESULT_SUCCESS_V01 &&
		    !(req->end == 1 &&
		      resp.resp.result == QMI_ERR_MALFORMED_MSG_V01)) {
			ath10k_err(ar, "failed to download board data file: %d\n",
				   resp.resp.error);
			ret = -EINVAL;
			goto out;
		}

		remaining -= req->data_len;
		temp += req->data_len;
		req->seg_id++;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi bdf download request completed\n");

	kfree(req);
	return 0;

out:
	kfree(req);
	return ret;
}

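/* Tell the firmware which calibration blobs the host currently holds,
 * along with the XO calibration data when the platform supports it.
 */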
static int ath10k_qmi_send_cal_report_req(struct ath10k_qmi *qmi)
{
	struct wlfw_cal_report_resp_msg_v01 resp = {};
	struct wlfw_cal_report_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct qmi_txn txn;
	int i, j = 0;
	int ret;

	if (ar_snoc->xo_cal_supported) {
		req.xo_cal_data_valid = 1;
		req.xo_cal_data = ar_snoc->xo_cal_data;
	}

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cal_report_resp_msg_v01_ei,
			   &resp);
	if (ret < 0)
		goto out;

	for (i = 0; i < QMI_WLFW_MAX_NUM_CAL_V01; i++) {
		if (qmi->cal_data[i].total_size &&
		    qmi->cal_data[i].data) {
			req.meta_data[j] = qmi->cal_data[i].cal_id;
			j++;
		}
	}
	req.meta_data_len = j;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_CAL_REPORT_REQ_V01,
			       WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_cal_report_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send calibration request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "calibration request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi cal report request completed\n");
	return 0;

out:
	return ret;
}

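/* Set the firmware driver mode; ath10k_qmi_wlan_disable() uses this with
 * QMI_WLFW_OFF_V01. Hardware debug is always requested off.
 */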
static int
ath10k_qmi_mode_send_sync_msg(struct ath10k *ar, enum wlfw_driver_mode_enum_v01 mode)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_qmi *qmi = ar_snoc->qmi;
	struct wlfw_wlan_mode_resp_msg_v01 resp = {};
	struct wlfw_wlan_mode_req_msg_v01 req = {};
	struct qmi_txn txn;
	int ret;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_wlan_mode_resp_msg_v01_ei,
			   &resp);
	if (ret < 0)
		goto out;

	req.mode = mode;
	req.hw_debug_valid = 1;
	req.hw_debug = 0;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_WLAN_MODE_REQ_V01,
			       WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_wlan_mode_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send wlan mode %d request: %d\n", mode, ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "mode request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wlan mode req completed: %d\n", mode);
	return 0;

out:
	return ret;
}

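/* Send the WLAN config: the copy engine target and service pipe tables and
 * the shadow register list, each clamped to its QMI message limit.
 */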
static int
ath10k_qmi_cfg_send_sync_msg(struct ath10k *ar,
			     struct ath10k_qmi_wlan_enable_cfg *config,
			     const char *version)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_qmi *qmi = ar_snoc->qmi;
	struct wlfw_wlan_cfg_resp_msg_v01 resp = {};
	struct wlfw_wlan_cfg_req_msg_v01 *req;
	struct qmi_txn txn;
	int ret;
	u32 i;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_wlan_cfg_resp_msg_v01_ei,
			   &resp);
	if (ret < 0)
		goto out;

	req->host_version_valid = 0;

	req->tgt_cfg_valid = 1;
	if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01)
		req->tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
	else
		req->tgt_cfg_len = config->num_ce_tgt_cfg;
	for (i = 0; i < req->tgt_cfg_len; i++) {
		req->tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num;
		req->tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir;
		req->tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries;
		req->tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max;
		req->tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags;
	}

	req->svc_cfg_valid = 1;
	if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01)
		req->svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01;
	else
		req->svc_cfg_len = config->num_ce_svc_pipe_cfg;
	for (i = 0; i < req->svc_cfg_len; i++) {
		req->svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id;
		req->svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir;
		req->svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
	}

	req->shadow_reg_valid = 1;
	if (config->num_shadow_reg_cfg >
	    QMI_WLFW_MAX_NUM_SHADOW_REG_V01)
		req->shadow_reg_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01;
	else
		req->shadow_reg_len = config->num_shadow_reg_cfg;

	memcpy(req->shadow_reg, config->shadow_reg_cfg,
	       sizeof(struct wlfw_shadow_reg_cfg_s_v01) * req->shadow_reg_len);

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_WLAN_CFG_REQ_V01,
			       WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_wlan_cfg_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send config request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "config request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi config request completed\n");
	kfree(req);
	return 0;

out:
	kfree(req);
	return ret;
}

int ath10k_qmi_wlan_enable(struct ath10k *ar,
			   struct ath10k_qmi_wlan_enable_cfg *config,
			   enum wlfw_driver_mode_enum_v01 mode,
			   const char *version)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi mode %d config %p\n",
		   mode, config);

	ret = ath10k_qmi_cfg_send_sync_msg(ar, config, version);
	if (ret) {
		ath10k_err(ar, "failed to send qmi config: %d\n", ret);
		return ret;
	}

	ret = ath10k_qmi_mode_send_sync_msg(ar, mode);
	if (ret) {
		ath10k_err(ar, "failed to send qmi mode: %d\n", ret);
		return ret;
	}

	return 0;
}

int ath10k_qmi_wlan_disable(struct ath10k *ar)
{
	return ath10k_qmi_mode_send_sync_msg(ar, QMI_WLFW_OFF_V01);
}

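/* Copy the WLAN firmware build id into the CNSS slot of the SMEM image
 * version table so it can be read alongside the other image versions.
 */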
static void ath10k_qmi_add_wlan_ver_smem(struct ath10k *ar, const char *fw_build_id)
{
	u8 *table_ptr;
	size_t smem_item_size;
	const u32 smem_img_idx_wlan = SMEM_IMAGE_TABLE_CNSS_INDEX *
				      SMEM_IMAGE_VERSION_ENTRY_SIZE;

	table_ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY,
				  SMEM_IMAGE_VERSION_TABLE,
				  &smem_item_size);

	if (IS_ERR(table_ptr)) {
		ath10k_err(ar, "smem image version table not found\n");
		return;
	}

	if (smem_img_idx_wlan + SMEM_IMAGE_VERSION_ENTRY_SIZE >
	    smem_item_size) {
		ath10k_err(ar, "smem block size too small: %zu\n",
			   smem_item_size);
		return;
	}

	strscpy(table_ptr + smem_img_idx_wlan, fw_build_id,
		SMEM_IMAGE_VERSION_NAME_SIZE);
}

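/* Read chip, board, SoC and firmware version information from the firmware
 * and cache it; the ids are later used for board file selection and are
 * logged once before the device is registered.
 */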
static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi)
{
	struct wlfw_cap_resp_msg_v01 *resp;
	struct wlfw_cap_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct qmi_txn txn;
	int ret;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cap_resp_msg_v01_ei, resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_CAP_REQ_V01,
			       WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_cap_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send capability request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "capability req rejected: %d\n", resp->resp.error);
		ret = -EINVAL;
		goto out;
	}

	if (resp->chip_info_valid) {
		qmi->chip_info.chip_id = resp->chip_info.chip_id;
		qmi->chip_info.chip_family = resp->chip_info.chip_family;
	} else {
		qmi->chip_info.chip_id = 0xFF;
	}

	if (resp->board_info_valid)
		qmi->board_info.board_id = resp->board_info.board_id;
	else
		qmi->board_info.board_id = 0xFF;

	if (resp->soc_info_valid)
		qmi->soc_info.soc_id = resp->soc_info.soc_id;

	if (resp->fw_version_info_valid) {
		qmi->fw_version = resp->fw_version_info.fw_version;
		strscpy(qmi->fw_build_timestamp, resp->fw_version_info.fw_build_timestamp,
			sizeof(qmi->fw_build_timestamp));
	}

	if (resp->fw_build_id_valid)
		strscpy(qmi->fw_build_id, resp->fw_build_id,
			MAX_BUILD_ID_LEN + 1);

	if (!test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) {
		ath10k_info(ar, "qmi chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x",
			    qmi->chip_info.chip_id, qmi->chip_info.chip_family,
			    qmi->board_info.board_id, qmi->soc_info.soc_id);
		ath10k_info(ar, "qmi fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
			    qmi->fw_version, qmi->fw_build_timestamp, qmi->fw_build_id);
	}

	if (resp->fw_build_id_valid)
		ath10k_qmi_add_wlan_ver_smem(ar, qmi->fw_build_id);

	kfree(resp);
	return 0;

out:
	kfree(resp);
	return ret;
}

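/* Advertise host capabilities (no userspace daemon). Some boards need the
 * 8-bit encoded variant of this request, and older firmware that does not
 * understand the request at all is tolerated.
 */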
static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
{
	struct wlfw_host_cap_resp_msg_v01 resp = {};
	struct wlfw_host_cap_req_msg_v01 req = {};
	const struct qmi_elem_info *req_ei;
	struct ath10k *ar = qmi->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct qmi_txn txn;
	int ret;

	req.daemon_support_valid = 1;
	req.daemon_support = 0;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_host_cap_resp_msg_v01_ei,
			   &resp);
	if (ret < 0)
		goto out;

	if (test_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags))
		req_ei = wlfw_host_cap_8bit_req_msg_v01_ei;
	else
		req_ei = wlfw_host_cap_req_msg_v01_ei;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_HOST_CAP_REQ_V01,
			       WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN,
			       req_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send host capability request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	/* older FW didn't support this request, which is not fatal */
	if (resp.resp.result != QMI_RESULT_SUCCESS_V01 &&
	    resp.resp.error != QMI_ERR_NOT_SUPPORTED_V01) {
		ath10k_err(ar, "host capability request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi host capability request completed\n");
	return 0;

out:
	return ret;
}

int ath10k_qmi_set_fw_log_mode(struct ath10k *ar, u8 fw_log_mode)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct wlfw_ini_resp_msg_v01 resp = {};
	struct ath10k_qmi *qmi = ar_snoc->qmi;
	struct wlfw_ini_req_msg_v01 req = {};
	struct qmi_txn txn;
	int ret;

	req.enablefwlog_valid = 1;
	req.enablefwlog = fw_log_mode;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_ini_resp_msg_v01_ei,
			   &resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_INI_REQ_V01,
			       WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_ini_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send fw log request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "fw log request rejected: %d\n",
			   resp.resp.error);
		ret = -EINVAL;
		goto out;
	}
	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi fw log request completed, mode: %d\n",
		   fw_log_mode);
	return 0;

out:
	return ret;
}

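/* Register for the firmware indications used by this driver (fw_ready,
 * msa_ready and, when supported, xo_cal) and note whether the firmware is
 * already ready.
 */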
static int
ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi)
{
	struct wlfw_ind_register_resp_msg_v01 resp = {};
	struct wlfw_ind_register_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct qmi_txn txn;
	int ret;

	req.client_id_valid = 1;
	req.client_id = ATH10K_QMI_CLIENT_ID;
	req.fw_ready_enable_valid = 1;
	req.fw_ready_enable = 1;
	req.msa_ready_enable_valid = 1;
	req.msa_ready_enable = 1;

	if (ar_snoc->xo_cal_supported) {
		req.xo_cal_enable_valid = 1;
		req.xo_cal_enable = 1;
	}

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_ind_register_resp_msg_v01_ei, &resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_IND_REGISTER_REQ_V01,
			       WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_ind_register_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send indication register request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "indication request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	if (resp.fw_status_valid) {
		if (resp.fw_status & QMI_WLFW_FW_READY_V01)
			qmi->fw_ready = true;
	}
	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi indication register request completed\n");
	return 0;

out:
	return ret;
}

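/* Bring-up sequence once the QMI server appears: register for indications,
 * then (unless the firmware is already ready) exchange host capabilities,
 * query the MSA layout, hand the MSA over to the firmware and fetch the
 * device capabilities.
 */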
static void ath10k_qmi_event_server_arrive(struct ath10k_qmi *qmi)
{
	struct ath10k *ar = qmi->ar;
	int ret;

	ret = ath10k_qmi_ind_register_send_sync_msg(qmi);
	if (ret)
		return;

	if (qmi->fw_ready) {
		ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND);
		return;
	}

	ret = ath10k_qmi_host_cap_send_sync(qmi);
	if (ret)
		return;

	ret = ath10k_qmi_msa_mem_info_send_sync_msg(qmi);
	if (ret)
		return;

	/*
	 * HACK: sleep for a while between receiving the msa info response
	 * and the XPU update to prevent SDM845 from crashing due to a security
	 * violation, when running MPSS.AT.4.0.c2-01184-SDM845_GEN_PACK-1.
	 */
	msleep(20);

	ret = ath10k_qmi_setup_msa_permissions(qmi);
	if (ret)
		return;

	ret = ath10k_qmi_msa_ready_send_sync_msg(qmi);
	if (ret)
		goto err_setup_msa;

	ret = ath10k_qmi_cap_send_sync_msg(qmi);
	if (ret)
		goto err_setup_msa;

	return;

err_setup_msa:
	ath10k_qmi_remove_msa_permission(qmi);
}

static int ath10k_qmi_fetch_board_file(struct ath10k_qmi *qmi)
{
	struct ath10k *ar = qmi->ar;
	int ret;

	ar->hif.bus = ATH10K_BUS_SNOC;
	ar->id.qmi_ids_valid = true;
	ar->id.qmi_board_id = qmi->board_info.board_id;
	ar->id.qmi_chip_id = qmi->chip_info.chip_id;
	ar->hw_params.fw.dir = WCN3990_HW_1_0_FW_DIR;

	ret = ath10k_core_check_dt(ar);
	if (ret)
		ath10k_dbg(ar, ATH10K_DBG_QMI, "DT bdf variant name not set.\n");

	return ath10k_core_fetch_board_file(qmi->ar, ATH10K_BD_IE_BOARD);
}

static int
ath10k_qmi_driver_event_post(struct ath10k_qmi *qmi,
			     enum ath10k_qmi_driver_event_type type,
			     void *data)
{
	struct ath10k_qmi_driver_event *event;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return -ENOMEM;

	event->type = type;
	event->data = data;

	spin_lock(&qmi->event_lock);
	list_add_tail(&event->list, &qmi->event_list);
	spin_unlock(&qmi->event_lock);

	queue_work(qmi->event_wq, &qmi->event_work);

	return 0;
}

static void ath10k_qmi_event_server_exit(struct ath10k_qmi *qmi)
{
	struct ath10k *ar = qmi->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_qmi_remove_msa_permission(qmi);
	ath10k_core_free_board_files(ar);
	if (!test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags) &&
	    !test_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags))
		ath10k_snoc_fw_crashed_dump(ar);

	ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_DOWN_IND);
	ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service disconnected\n");
}

static void ath10k_qmi_event_msa_ready(struct ath10k_qmi *qmi)
{
	int ret;

	ret = ath10k_qmi_fetch_board_file(qmi);
	if (ret)
		goto out;

	ret = ath10k_qmi_bdf_dnld_send_sync(qmi);
	if (ret)
		goto out;

	ret = ath10k_qmi_send_cal_report_req(qmi);

out:
	return;
}

static int ath10k_qmi_event_fw_ready_ind(struct ath10k_qmi *qmi)
{
	struct ath10k *ar = qmi->ar;

	ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw ready event received\n");
	ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND);

	return 0;
}

static void ath10k_qmi_fw_ready_ind(struct qmi_handle *qmi_hdl,
				    struct sockaddr_qrtr *sq,
				    struct qmi_txn *txn, const void *data)
{
	struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);

	ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_FW_READY_IND, NULL);
}

static void ath10k_qmi_msa_ready_ind(struct qmi_handle *qmi_hdl,
				     struct sockaddr_qrtr *sq,
				     struct qmi_txn *txn, const void *data)
{
	struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);

	ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_MSA_READY_IND, NULL);
}

static const struct qmi_msg_handler qmi_msg_handler[] = {
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_FW_READY_IND_V01,
		.ei = wlfw_fw_ready_ind_msg_v01_ei,
		.decoded_size = sizeof(struct wlfw_fw_ready_ind_msg_v01),
		.fn = ath10k_qmi_fw_ready_ind,
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_MSA_READY_IND_V01,
		.ei = wlfw_msa_ready_ind_msg_v01_ei,
		.decoded_size = sizeof(struct wlfw_msa_ready_ind_msg_v01),
		.fn = ath10k_qmi_msa_ready_ind,
	},
	{}
};

static int ath10k_qmi_new_server(struct qmi_handle *qmi_hdl,
				 struct qmi_service *service)
{
	struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
	struct sockaddr_qrtr *sq = &qmi->sq;
	struct ath10k *ar = qmi->ar;
	int ret;

	sq->sq_family = AF_QIPCRTR;
	sq->sq_node = service->node;
	sq->sq_port = service->port;

	ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service found\n");

	ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)&qmi->sq,
			     sizeof(qmi->sq), 0);
	if (ret) {
		ath10k_err(ar, "failed to connect to a remote QMI service port\n");
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wifi fw qmi service connected\n");
	ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_ARRIVE, NULL);

	return ret;
}

static void ath10k_qmi_del_server(struct qmi_handle *qmi_hdl,
				  struct qmi_service *service)
{
	struct ath10k_qmi *qmi =
		container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);

	qmi->fw_ready = false;

	/*
	 * The del_server event is to be processed only if coming from
	 * the qmi server. The qmi infrastructure sends del_server, when
	 * any client releases the qmi handle. In this case do not process
	 * this del_server event.
	 */
	if (qmi->state == ATH10K_QMI_STATE_INIT_DONE)
		ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_EXIT,
					     NULL);
}

static const struct qmi_ops ath10k_qmi_ops = {
	.new_server = ath10k_qmi_new_server,
	.del_server = ath10k_qmi_del_server,
};

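/* Drain the pending event list, dropping the lock while each event is
 * handled so that new events can still be queued from the QMI callbacks.
 */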
static void ath10k_qmi_driver_event_work(struct work_struct *work)
{
	struct ath10k_qmi *qmi = container_of(work, struct ath10k_qmi,
					      event_work);
	struct ath10k_qmi_driver_event *event;
	struct ath10k *ar = qmi->ar;

	spin_lock(&qmi->event_lock);
	while (!list_empty(&qmi->event_list)) {
		event = list_first_entry(&qmi->event_list,
					 struct ath10k_qmi_driver_event, list);
		list_del(&event->list);
		spin_unlock(&qmi->event_lock);

		switch (event->type) {
		case ATH10K_QMI_EVENT_SERVER_ARRIVE:
			ath10k_qmi_event_server_arrive(qmi);
			break;
		case ATH10K_QMI_EVENT_SERVER_EXIT:
			ath10k_qmi_event_server_exit(qmi);
			break;
		case ATH10K_QMI_EVENT_FW_READY_IND:
			ath10k_qmi_event_fw_ready_ind(qmi);
			break;
		case ATH10K_QMI_EVENT_MSA_READY_IND:
			ath10k_qmi_event_msa_ready(qmi);
			break;
		default:
			ath10k_warn(ar, "invalid event type: %d", event->type);
			break;
		}
		kfree(event);
		spin_lock(&qmi->event_lock);
	}
	spin_unlock(&qmi->event_lock);
}

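/* Allocate the per-device QMI state, register the QMI handle and message
 * handlers, create the ordered event workqueue and start the lookup for
 * the WLAN firmware service.
 */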
int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct device *dev = ar->dev;
	struct ath10k_qmi *qmi;
	int ret;

	qmi = kzalloc(sizeof(*qmi), GFP_KERNEL);
	if (!qmi)
		return -ENOMEM;

	qmi->ar = ar;
	ar_snoc->qmi = qmi;

	if (of_property_read_bool(dev->of_node, "qcom,msa-fixed-perm"))
		qmi->msa_fixed_perm = true;

	ret = qmi_handle_init(&qmi->qmi_hdl,
			      WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
			      &ath10k_qmi_ops, qmi_msg_handler);
	if (ret)
		goto err;

	qmi->event_wq = alloc_ordered_workqueue("ath10k_qmi_driver_event", 0);
	if (!qmi->event_wq) {
		ath10k_err(ar, "failed to allocate workqueue\n");
		ret = -EFAULT;
		goto err_release_qmi_handle;
	}

	INIT_LIST_HEAD(&qmi->event_list);
	spin_lock_init(&qmi->event_lock);
	INIT_WORK(&qmi->event_work, ath10k_qmi_driver_event_work);

	ret = qmi_add_lookup(&qmi->qmi_hdl, WLFW_SERVICE_ID_V01,
			     WLFW_SERVICE_VERS_V01, 0);
	if (ret)
		goto err_qmi_lookup;

	qmi->state = ATH10K_QMI_STATE_INIT_DONE;
	return 0;

err_qmi_lookup:
	destroy_workqueue(qmi->event_wq);

err_release_qmi_handle:
	qmi_handle_release(&qmi->qmi_hdl);

err:
	kfree(qmi);
	return ret;
}

int ath10k_qmi_deinit(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_qmi *qmi = ar_snoc->qmi;

	qmi->state = ATH10K_QMI_STATE_DEINIT;
	qmi_handle_release(&qmi->qmi_hdl);
	cancel_work_sync(&qmi->event_work);
	destroy_workqueue(qmi->event_wq);
	kfree(qmi);
	ar_snoc->qmi = NULL;

	return 0;
}