// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of wl1271
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 */

#include "wlcore.h"
#include "debug.h"
#include "io.h"
#include "event.h"
#include "ps.h"
#include "scan.h"
#include "wl12xx_80211.h"
#include "hw_ops.h"

#define WL18XX_LOGGER_SDIO_BUFF_MAX	(0x1020)
#define WL18XX_DATA_RAM_BASE_ADDRESS	(0x20000000)
#define WL18XX_LOGGER_SDIO_BUFF_ADDR	(0x40159c)
#define WL18XX_LOGGER_BUFF_OFFSET	(sizeof(struct fw_logger_information))
#define WL18XX_LOGGER_READ_POINT_OFFSET		(12)

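/*
 * Drain the firmware logger ring buffer: read the logger area from the
 * chip, copy any new log data to the host (handling wrap-around at the end
 * of the ring) and write the updated read pointer back so the firmware can
 * reuse the space. Returns the number of log bytes that were available, or
 * 0 on failure.
 */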
int wlcore_event_fw_logger(struct wl1271 *wl)
{
	int ret;
	struct fw_logger_information fw_log;
	u8  *buffer;
	u32 internal_fw_addrbase = WL18XX_DATA_RAM_BASE_ADDRESS;
	u32 addr = WL18XX_LOGGER_SDIO_BUFF_ADDR;
	u32 addr_ptr;
	u32 buff_start_ptr;
	u32 buff_read_ptr;
	u32 buff_end_ptr;
	u32 available_len;
	u32 actual_len;
	u32 clear_ptr;
	size_t len;
	u32 start_loc;

	buffer = kzalloc(WL18XX_LOGGER_SDIO_BUFF_MAX, GFP_KERNEL);
	if (!buffer) {
		wl1271_error("Failed to allocate fw logger memory");
		actual_len = 0;
		goto out;
	}

	ret = wlcore_read(wl, addr, buffer, WL18XX_LOGGER_SDIO_BUFF_MAX,
			  false);
	if (ret < 0) {
		wl1271_error("Failed to read logger buffer, error_id = %d",
			     ret);
		actual_len = 0;
		goto free_out;
	}

	memcpy(&fw_log, buffer, sizeof(fw_log));

	actual_len = le32_to_cpu(fw_log.actual_buff_size);
	if (actual_len == 0)
		goto free_out;

	/* Calculate the internal pointer to the fwlog structure */
	addr_ptr = internal_fw_addrbase + addr;

	/* Calculate the internal pointers to the start and end of log buffer */
	buff_start_ptr = addr_ptr + WL18XX_LOGGER_BUFF_OFFSET;
	buff_end_ptr = buff_start_ptr + le32_to_cpu(fw_log.max_buff_size);

	/* Read the read pointer and validate it */
	buff_read_ptr = le32_to_cpu(fw_log.buff_read_ptr);
	if (buff_read_ptr < buff_start_ptr ||
	    buff_read_ptr >= buff_end_ptr) {
		wl1271_error("buffer read pointer out of bounds: %x not in (%x-%x)",
			     buff_read_ptr, buff_start_ptr, buff_end_ptr);
		goto free_out;
	}

	start_loc = buff_read_ptr - addr_ptr;
	available_len = buff_end_ptr - buff_read_ptr;

	/* Copy initial part up to the end of ring buffer */
	len = min(actual_len, available_len);
	wl12xx_copy_fwlog(wl, &buffer[start_loc], len);
	clear_ptr = addr_ptr + start_loc + actual_len;
	if (clear_ptr == buff_end_ptr)
		clear_ptr = buff_start_ptr;

	/* Copy any remaining part from the beginning of the ring buffer */
	len = actual_len - len;
	if (len) {
		wl12xx_copy_fwlog(wl,
				  &buffer[WL18XX_LOGGER_BUFF_OFFSET],
				  len);
		clear_ptr = addr_ptr + WL18XX_LOGGER_BUFF_OFFSET + len;
	}

	/* Update the read pointer */
	ret = wlcore_write32(wl, addr + WL18XX_LOGGER_READ_POINT_OFFSET,
			     clear_ptr);
free_out:
	kfree(buffer);
out:
	return actual_len;
}
EXPORT_SYMBOL_GPL(wlcore_event_fw_logger);

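/*
 * RSSI trigger event: translate the reported metric into a CQM
 * (connection quality monitoring) high/low event and notify mac80211,
 * but only when the state actually changes.
 */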
void wlcore_event_rssi_trigger(struct wl1271 *wl, s8 *metric_arr)
{
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;
	enum nl80211_cqm_rssi_threshold_event event;
	s8 metric = metric_arr[0];

	wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);

	/* TODO: check actual multi-role support */
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		if (metric <= wlvif->rssi_thold)
			event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
		else
			event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;

		vif = wl12xx_wlvif_to_vif(wlvif);
		if (event != wlvif->last_rssi_event)
			ieee80211_cqm_rssi_notify(vif, event, metric,
						  GFP_KERNEL);
		wlvif->last_rssi_event = event;
	}
}
EXPORT_SYMBOL_GPL(wlcore_event_rssi_trigger);

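/*
 * Tear down all RX block-ack sessions on the given vif: the single station
 * link in managed mode, or every associated station link in AP mode.
 */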
static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

	if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
		u8 hlid = wlvif->sta.hlid;

		if (!wl->links[hlid].ba_bitmap)
			return;

		ieee80211_stop_rx_ba_session(vif, wl->links[hlid].ba_bitmap,
					     vif->bss_conf.bssid);
	} else {
		u8 hlid;
		struct wl1271_link *lnk;

		for_each_set_bit(hlid, wlvif->ap.sta_hlid_map,
				 wl->num_links) {
			lnk = &wl->links[hlid];
			if (!lnk->ba_bitmap)
				continue;

			ieee80211_stop_rx_ba_session(vif,
						     lnk->ba_bitmap,
						     lnk->addr);
		}
	}
}

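/*
 * Soft Gemini (BT coexistence) sense event: track whether coexistence is
 * active and, when it is disabled, re-evaluate RX streaming on all station
 * vifs since the coexistence constraint no longer applies.
 */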
void wlcore_event_soft_gemini_sense(struct wl1271 *wl, u8 enable)
{
	struct wl12xx_vif *wlvif;

	if (enable) {
		set_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
	} else {
		clear_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
		wl12xx_for_each_wlvif_sta(wl, wlvif) {
			wl1271_recalc_rx_streaming(wl, wlvif);
		}
	}
}
EXPORT_SYMBOL_GPL(wlcore_event_soft_gemini_sense);

void wlcore_event_sched_scan_completed(struct wl1271 *wl,
				       u8 status)
{
	wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT (status 0x%0x)",
		     status);

	if (wl->sched_vif) {
		ieee80211_sched_scan_stopped(wl->hw);
		wl->sched_vif = NULL;
	}
}
EXPORT_SYMBOL_GPL(wlcore_event_sched_scan_completed);

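/*
 * The firmware reports, per role, whether RX block-ack sessions are still
 * allowed. Cache the result in the vif and stop any active sessions for
 * roles where block-ack is no longer permitted.
 */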
void wlcore_event_ba_rx_constraint(struct wl1271 *wl,
				   unsigned long roles_bitmap,
				   unsigned long allowed_bitmap)
{
	struct wl12xx_vif *wlvif;

	wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx allowed=0x%lx",
		     __func__, roles_bitmap, allowed_bitmap);

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
		    !test_bit(wlvif->role_id, &roles_bitmap))
			continue;

		wlvif->ba_allowed = !!test_bit(wlvif->role_id,
					       &allowed_bitmap);
		if (!wlvif->ba_allowed)
			wl1271_stop_ba_event(wl, wlvif);
	}
}
EXPORT_SYMBOL_GPL(wlcore_event_ba_rx_constraint);

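/*
 * Channel switch completion: for each role with a switch in progress,
 * report the result to mac80211. Station vifs get chswitch_done() and have
 * their channel switch timeout work cancelled; AP vifs mark the beacon as
 * disabled and complete the CSA.
 */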
void wlcore_event_channel_switch(struct wl1271 *wl,
				 unsigned long roles_bitmap,
				 bool success)
{
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;

	wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx success=%d",
		     __func__, roles_bitmap, success);

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
		    !test_bit(wlvif->role_id, &roles_bitmap))
			continue;

		if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
					&wlvif->flags))
			continue;

		vif = wl12xx_wlvif_to_vif(wlvif);

		if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
			ieee80211_chswitch_done(vif, success, 0);
			cancel_delayed_work(&wlvif->channel_switch_work);
		} else {
			set_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags);
			ieee80211_csa_finish(vif, 0);
		}
	}
}
EXPORT_SYMBOL_GPL(wlcore_event_channel_switch);

void wlcore_event_dummy_packet(struct wl1271 *wl)
{
	if (wl->plt) {
		wl1271_info("Got DUMMY_PACKET event in PLT mode. FW bug, ignoring.");
		return;
	}

	wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
	wl1271_tx_dummy_packet(wl);
}
EXPORT_SYMBOL_GPL(wlcore_event_dummy_packet);

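/*
 * For every station in the bitmap, find the AP vif it is associated with
 * and report a low-ACK condition to mac80211 so that upper layers can
 * disconnect it.
 */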
static void wlcore_disconnect_sta(struct wl1271 *wl, unsigned long sta_bitmap)
{
	u32 num_packets = wl->conf.tx.max_tx_retries;
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;
	struct ieee80211_sta *sta;
	const u8 *addr;
	int h;

	for_each_set_bit(h, &sta_bitmap, wl->num_links) {
		bool found = false;

		/* find the AP vif connected to this sta */
		wl12xx_for_each_wlvif_ap(wl, wlvif) {
			if (!test_bit(h, wlvif->ap.sta_hlid_map))
				continue;
			found = true;
			break;
		}
		if (!found)
			continue;

		vif = wl12xx_wlvif_to_vif(wlvif);
		addr = wl->links[h].addr;

		rcu_read_lock();
		sta = ieee80211_find_sta(vif, addr);
		if (sta) {
			wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
			ieee80211_report_low_ack(sta, num_packets);
		}
		rcu_read_unlock();
	}
}

void wlcore_event_max_tx_failure(struct wl1271 *wl, unsigned long sta_bitmap)
{
	wl1271_debug(DEBUG_EVENT, "MAX_TX_FAILURE_EVENT_ID");
	wlcore_disconnect_sta(wl, sta_bitmap);
}
EXPORT_SYMBOL_GPL(wlcore_event_max_tx_failure);

void wlcore_event_inactive_sta(struct wl1271 *wl, unsigned long sta_bitmap)
{
	wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
	wlcore_disconnect_sta(wl, sta_bitmap);
}
EXPORT_SYMBOL_GPL(wlcore_event_inactive_sta);

void wlcore_event_roc_complete(struct wl1271 *wl)
{
	wl1271_debug(DEBUG_EVENT, "REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID");
	if (wl->roc_vif)
		ieee80211_ready_on_channel(wl->hw);
}
EXPORT_SYMBOL_GPL(wlcore_event_roc_complete);

void wlcore_event_beacon_loss(struct wl1271 *wl, unsigned long roles_bitmap)
{
	/*
	 * We do hardware connection monitoring: on beacon loss, queue the
	 * connection loss work; it is cancelled on a REGAINED event.
	 */
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;
	int delay = wl->conf.conn.synch_fail_thold *
				wl->conf.conn.bss_lose_timeout;

	wl1271_info("Beacon loss detected. roles:0x%lx", roles_bitmap);

	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
		    !test_bit(wlvif->role_id, &roles_bitmap))
			continue;

		vif = wl12xx_wlvif_to_vif(wlvif);

		/* don't attempt roaming in case of p2p */
		if (wlvif->p2p) {
			ieee80211_connection_loss(vif);
			continue;
		}

		/*
		 * If the work is already queued, its original deadline
		 * stands; we don't want to delay the connection loss
		 * indication any further.
		 */
		ieee80211_queue_delayed_work(wl->hw,
					     &wlvif->connection_loss_work,
					     msecs_to_jiffies(delay));

		ieee80211_cqm_beacon_loss_notify(vif, GFP_KERNEL);
	}
}
EXPORT_SYMBOL_GPL(wlcore_event_beacon_loss);

int wl1271_event_unmask(struct wl1271 *wl)
{
	int ret;

	wl1271_debug(DEBUG_EVENT, "unmasking event_mask 0x%x", wl->event_mask);
	ret = wl1271_acx_event_mbox_mask(wl, ~(wl->event_mask));
	if (ret < 0)
		return ret;

	return 0;
}

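/*
 * Read the event mailbox indicated by mbox_num from the chip, let the
 * chip-specific code process the events it contains, and then ack the
 * mailbox back to the firmware.
 */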
int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
{
	int ret;

	wl1271_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num);

	if (mbox_num > 1)
		return -EINVAL;

	/* first we read the mbox descriptor */
	ret = wlcore_read(wl, wl->mbox_ptr[mbox_num], wl->mbox,
			  wl->mbox_size, false);
	if (ret < 0)
		return ret;

	/* process the descriptor */
	ret = wl->ops->process_mailbox_events(wl);
	if (ret < 0)
		return ret;

	/*
	 * TODO: we just need this because one bit is in a different
	 * place.  Is there any better way?
	 */
	ret = wl->ops->ack_event(wl);

	return ret;
}