#include "wlcore.h"
#include "debug.h"
#include "io.h"
#include "event.h"
#include "ps.h"
#include "scan.h"
#include "wl12xx_80211.h"
#include "hw_ops.h"

#define WL18XX_LOGGER_SDIO_BUFF_MAX	(0x1020)
#define WL18XX_DATA_RAM_BASE_ADDRESS	(0x20000000)
#define WL18XX_LOGGER_SDIO_BUFF_ADDR	(0x40159c)
#define WL18XX_LOGGER_BUFF_OFFSET	(sizeof(struct fw_logger_information))
#define WL18XX_LOGGER_READ_POINT_OFFSET	(12)
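/*
 * Read the FW logger area at WL18XX_LOGGER_SDIO_BUFF_ADDR, copy any pending
 * log data (handling wrap-around of the ring buffer) to the host via
 * wl12xx_copy_fwlog() and advance the FW read pointer up to the write
 * pointer. Returns the number of log bytes the FW reported as available.
 */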
int wlcore_event_fw_logger(struct wl1271 *wl)
{
	int ret;
	struct fw_logger_information fw_log;
	u8 *buffer;
	u32 internal_fw_addrbase = WL18XX_DATA_RAM_BASE_ADDRESS;
	u32 addr = WL18XX_LOGGER_SDIO_BUFF_ADDR;
	u32 end_buff_addr = WL18XX_LOGGER_SDIO_BUFF_ADDR +
			    WL18XX_LOGGER_BUFF_OFFSET;
	u32 available_len;
	u32 actual_len;
	u32 clear_addr;
	size_t len;
	u32 start_loc;

	buffer = kzalloc(WL18XX_LOGGER_SDIO_BUFF_MAX, GFP_KERNEL);
	if (!buffer) {
		wl1271_error("Failed to allocate fw logger memory");
		fw_log.actual_buff_size = cpu_to_le32(0);
		goto out;
	}

	ret = wlcore_read(wl, addr, buffer, WL18XX_LOGGER_SDIO_BUFF_MAX,
			  false);
	if (ret < 0) {
		wl1271_error("Failed to read logger buffer, error_id = %d",
			     ret);
		fw_log.actual_buff_size = cpu_to_le32(0);
		goto free_out;
	}

	memcpy(&fw_log, buffer, sizeof(fw_log));

	if (le32_to_cpu(fw_log.actual_buff_size) == 0)
		goto free_out;

	actual_len = le32_to_cpu(fw_log.actual_buff_size);
	start_loc = (le32_to_cpu(fw_log.buff_read_ptr) -
		     internal_fw_addrbase) - addr;
	end_buff_addr += le32_to_cpu(fw_log.max_buff_size);
	available_len = end_buff_addr -
			(le32_to_cpu(fw_log.buff_read_ptr) -
			 internal_fw_addrbase);
	actual_len = min(actual_len, available_len);
	len = actual_len;

	wl12xx_copy_fwlog(wl, &buffer[start_loc], len);
	clear_addr = addr + start_loc + le32_to_cpu(fw_log.actual_buff_size) +
		     internal_fw_addrbase;

	len = le32_to_cpu(fw_log.actual_buff_size) - len;
	if (len) {
		wl12xx_copy_fwlog(wl,
				  &buffer[WL18XX_LOGGER_BUFF_OFFSET],
				  len);
		clear_addr = addr + WL18XX_LOGGER_BUFF_OFFSET + len +
			     internal_fw_addrbase;
	}

	/* the computed clear address should match the FW write pointer */
	if (clear_addr != le32_to_cpu(fw_log.buff_write_ptr)) {
		wl1271_error("clear addr mismatch: clear = %x, write = %x",
			     clear_addr, le32_to_cpu(fw_log.buff_write_ptr));
	}

	/* tell the FW the data was consumed by advancing the read pointer */
	ret = wlcore_write32(wl, addr + WL18XX_LOGGER_READ_POINT_OFFSET,
			     fw_log.buff_write_ptr);
free_out:
	kfree(buffer);
out:
	return le32_to_cpu(fw_log.actual_buff_size);
}
EXPORT_SYMBOL_GPL(wlcore_event_fw_logger);

void wlcore_event_rssi_trigger(struct wl1271 *wl, s8 *metric_arr)
{
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;
	enum nl80211_cqm_rssi_threshold_event event;
	s8 metric = metric_arr[0];

	wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);

	/* notify mac80211 only when a vif's low/high CQM state changes */
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		if (metric <= wlvif->rssi_thold)
			event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
		else
			event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;

		vif = wl12xx_wlvif_to_vif(wlvif);
		if (event != wlvif->last_rssi_event)
			ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL);
		wlvif->last_rssi_event = event;
	}
}
EXPORT_SYMBOL_GPL(wlcore_event_rssi_trigger);

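/*
 * Stop all active RX BA sessions for the given vif: the single STA link in
 * station mode, or every station link with a non-empty BA bitmap in AP mode.
 */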
static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

	if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
		u8 hlid = wlvif->sta.hlid;

		if (!wl->links[hlid].ba_bitmap)
			return;

		ieee80211_stop_rx_ba_session(vif, wl->links[hlid].ba_bitmap,
					     vif->bss_conf.bssid);
	} else {
		u8 hlid;
		struct wl1271_link *lnk;

		for_each_set_bit(hlid, wlvif->ap.sta_hlid_map,
				 wl->num_links) {
			lnk = &wl->links[hlid];
			if (!lnk->ba_bitmap)
				continue;

			ieee80211_stop_rx_ba_session(vif,
						     lnk->ba_bitmap,
						     lnk->addr);
		}
	}
}

void wlcore_event_soft_gemini_sense(struct wl1271 *wl, u8 enable)
{
	struct wl12xx_vif *wlvif;

	if (enable) {
		set_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
	} else {
		clear_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
		wl12xx_for_each_wlvif_sta(wl, wlvif) {
			wl1271_recalc_rx_streaming(wl, wlvif);
		}
	}
}
EXPORT_SYMBOL_GPL(wlcore_event_soft_gemini_sense);

void wlcore_event_sched_scan_completed(struct wl1271 *wl,
				       u8 status)
{
	wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT (status 0x%0x)",
		     status);

	if (wl->sched_vif) {
		ieee80211_sched_scan_stopped(wl->hw);
		wl->sched_vif = NULL;
	}
}
EXPORT_SYMBOL_GPL(wlcore_event_sched_scan_completed);

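/*
 * The FW indicates per role whether RX BA sessions are currently allowed;
 * tear down existing sessions for roles where they are no longer allowed.
 */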
void wlcore_event_ba_rx_constraint(struct wl1271 *wl,
				   unsigned long roles_bitmap,
				   unsigned long allowed_bitmap)
{
	struct wl12xx_vif *wlvif;

	wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx allowed=0x%lx",
		     __func__, roles_bitmap, allowed_bitmap);

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
		    !test_bit(wlvif->role_id, &roles_bitmap))
			continue;

		wlvif->ba_allowed = !!test_bit(wlvif->role_id,
					       &allowed_bitmap);
		if (!wlvif->ba_allowed)
			wl1271_stop_ba_event(wl, wlvif);
	}
}
EXPORT_SYMBOL_GPL(wlcore_event_ba_rx_constraint);

void wlcore_event_channel_switch(struct wl1271 *wl,
				 unsigned long roles_bitmap,
				 bool success)
{
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;

	wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx success=%d",
		     __func__, roles_bitmap, success);

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
		    !test_bit(wlvif->role_id, &roles_bitmap))
			continue;

		if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
					&wlvif->flags))
			continue;

		vif = wl12xx_wlvif_to_vif(wlvif);

		if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
			ieee80211_chswitch_done(vif, success);
			cancel_delayed_work(&wlvif->channel_switch_work);
		} else {
			set_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags);
			ieee80211_csa_finish(vif);
		}
	}
}
EXPORT_SYMBOL_GPL(wlcore_event_channel_switch);

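/* The FW requested a dummy packet; send one (not expected in PLT mode). */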
void wlcore_event_dummy_packet(struct wl1271 *wl)
{
	if (wl->plt) {
		wl1271_info("Got DUMMY_PACKET event in PLT mode. FW bug, ignoring.");
		return;
	}

	wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
	wl1271_tx_dummy_packet(wl);
}
EXPORT_SYMBOL_GPL(wlcore_event_dummy_packet);

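/*
 * For each station in the bitmap, report a low-ack condition to mac80211 so
 * the station can be disconnected (used for the max-tx-failure and
 * inactive-sta events).
 */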
static void wlcore_disconnect_sta(struct wl1271 *wl, unsigned long sta_bitmap)
{
	u32 num_packets = wl->conf.tx.max_tx_retries;
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;
	struct ieee80211_sta *sta;
	const u8 *addr;
	int h;

	for_each_set_bit(h, &sta_bitmap, wl->num_links) {
		bool found = false;

		wl12xx_for_each_wlvif_ap(wl, wlvif) {
			if (!test_bit(h, wlvif->ap.sta_hlid_map))
				continue;
			found = true;
			break;
		}
		if (!found)
			continue;

		vif = wl12xx_wlvif_to_vif(wlvif);
		addr = wl->links[h].addr;

		rcu_read_lock();
		sta = ieee80211_find_sta(vif, addr);
		if (sta) {
			wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
			ieee80211_report_low_ack(sta, num_packets);
		}
		rcu_read_unlock();
	}
}

void wlcore_event_max_tx_failure(struct wl1271 *wl, unsigned long sta_bitmap)
{
	wl1271_debug(DEBUG_EVENT, "MAX_TX_FAILURE_EVENT_ID");
	wlcore_disconnect_sta(wl, sta_bitmap);
}
EXPORT_SYMBOL_GPL(wlcore_event_max_tx_failure);

void wlcore_event_inactive_sta(struct wl1271 *wl, unsigned long sta_bitmap)
{
	wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
	wlcore_disconnect_sta(wl, sta_bitmap);
}
EXPORT_SYMBOL_GPL(wlcore_event_inactive_sta);

void wlcore_event_roc_complete(struct wl1271 *wl)
{
	wl1271_debug(DEBUG_EVENT, "REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID");
	if (wl->roc_vif)
		ieee80211_ready_on_channel(wl->hw);
}
EXPORT_SYMBOL_GPL(wlcore_event_roc_complete);

void wlcore_event_beacon_loss(struct wl1271 *wl, unsigned long roles_bitmap)
{
	/*
	 * The FW reported beacon loss. For non-p2p STA vifs, queue the
	 * delayed connection-loss work and send a CQM beacon-loss
	 * notification; p2p vifs report the connection loss immediately.
	 */
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;
	int delay = wl->conf.conn.synch_fail_thold *
		    wl->conf.conn.bss_lose_timeout;

	wl1271_info("Beacon loss detected. roles:0x%lx", roles_bitmap);

	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
		    !test_bit(wlvif->role_id, &roles_bitmap))
			continue;

		vif = wl12xx_wlvif_to_vif(wlvif);

		/* don't attempt to roam on p2p vifs; report the loss now */
		if (wlvif->p2p) {
			ieee80211_connection_loss(vif);
			continue;
		}

		/*
		 * If the work is already queued, queueing it again does not
		 * postpone it, so the connection-loss indication is not
		 * delayed any further.
		 */
		ieee80211_queue_delayed_work(wl->hw,
					     &wlvif->connection_loss_work,
					     msecs_to_jiffies(delay));

		ieee80211_cqm_beacon_loss_notify(vif, GFP_KERNEL);
	}
}
EXPORT_SYMBOL_GPL(wlcore_event_beacon_loss);

int wl1271_event_unmask(struct wl1271 *wl)
{
	int ret;

	wl1271_debug(DEBUG_EVENT, "unmasking event_mask 0x%x", wl->event_mask);
	ret = wl1271_acx_event_mbox_mask(wl, ~(wl->event_mask));
	if (ret < 0)
		return ret;

	return 0;
}

int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
{
	int ret;

	wl1271_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num);

	if (mbox_num > 1)
		return -EINVAL;

	/* first read the mbox descriptor from the chip */
	ret = wlcore_read(wl, wl->mbox_ptr[mbox_num], wl->mbox,
			  wl->mbox_size, false);
	if (ret < 0)
		return ret;

	/* process the descriptor */
	ret = wl->ops->process_mailbox_events(wl);
	if (ret < 0)
		return ret;

	/* finally, let the chip-specific code ack the event to the FW */
	ret = wl->ops->ack_event(wl);

	return ret;
}