#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/delay.h>

#include "mt76x2.h"
#include "mt76x2_mcu.h"
#include "mt76x2_dma.h"
#include "mt76x2_eeprom.h"

struct mt76x2_fw_header {
	__le32 ilm_len;
	__le32 dlm_len;
	__le16 build_ver;
	__le16 fw_ver;
	u8 pad[4];
	char build_time[16];
};

struct mt76x2_patch_header {
	char build_time[16];
	char platform[4];
	char hw_version[4];
	char patch_version[4];
	u8 pad[2];
};

static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return NULL;
	memcpy(skb_put(skb, len), data, len);

	return skb;
}

static struct sk_buff *
mt76x2_mcu_get_response(struct mt76x2_dev *dev, unsigned long expires)
{
	unsigned long timeout;

	if (!time_is_after_jiffies(expires))
		return NULL;

	timeout = expires - jiffies;
	wait_event_timeout(dev->mcu.wait, !skb_queue_empty(&dev->mcu.res_q),
			   timeout);
	return skb_dequeue(&dev->mcu.res_q);
}

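/*
 * Send a command to the MCU through the dedicated TX queue and wait up to
 * one second for a response. Responses are matched against the 4-bit
 * sequence number carried in the RX FCE info word; non-matching responses
 * are dropped and the wait continues until the matching one arrives or the
 * timeout expires.
 */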
static int
mt76x2_mcu_msg_send(struct mt76x2_dev *dev, struct sk_buff *skb,
		    enum mcu_cmd cmd)
{
	unsigned long expires = jiffies + HZ;
	int ret;
	u8 seq;

	if (!skb)
		return -EINVAL;

	mutex_lock(&dev->mcu.mutex);

	seq = ++dev->mcu.msg_seq & 0xf;
	if (!seq)
		seq = ++dev->mcu.msg_seq & 0xf;

	ret = mt76x2_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
	if (ret)
		goto out;

	while (1) {
		u32 *rxfce;
		bool check_seq = false;

		skb = mt76x2_mcu_get_response(dev, expires);
		if (!skb) {
			dev_err(dev->mt76.dev,
				"MCU message %d (seq %d) timed out\n", cmd,
				seq);
			ret = -ETIMEDOUT;
			break;
		}

		rxfce = (u32 *) skb->cb;

		if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
			check_seq = true;

		dev_kfree_skb(skb);
		if (check_seq)
			break;
	}

out:
	mutex_unlock(&dev->mcu.mutex);

	return ret;
}

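/*
 * Upload the MT7662 ROM patch: take the hardware semaphore (not needed on
 * MT7612), skip the upload if the patch is already applied, otherwise copy
 * the patch body through the PCIe remap window and trigger the MCU to
 * apply it.
 */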
static int
mt76pci_load_rom_patch(struct mt76x2_dev *dev)
{
	const struct firmware *fw = NULL;
	struct mt76x2_patch_header *hdr;
	bool rom_protect = !is_mt7612(dev);
	int len, ret = 0;
	__le32 *cur;
	u32 patch_mask, patch_reg;

	if (rom_protect && !mt76_poll(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
		dev_err(dev->mt76.dev,
			"Could not get hardware semaphore for ROM PATCH\n");
		return -ETIMEDOUT;
	}

	if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
		patch_mask = BIT(0);
		patch_reg = MT_MCU_CLOCK_CTL;
	} else {
		patch_mask = BIT(1);
		patch_reg = MT_MCU_COM_REG0;
	}

	if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
		dev_info(dev->mt76.dev, "ROM patch already applied\n");
		goto out;
	}

	ret = request_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev);
	if (ret)
		goto out;

	if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
		ret = -EIO;
		dev_err(dev->mt76.dev, "Failed to load firmware\n");
		goto out;
	}

	hdr = (struct mt76x2_patch_header *) fw->data;
	dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);

	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ROM_PATCH_OFFSET);

	cur = (__le32 *) (fw->data + sizeof(*hdr));
	len = fw->size - sizeof(*hdr);
	mt76_wr_copy(dev, MT_MCU_ROM_PATCH_ADDR, cur, len);

	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);

	/* Trigger ROM */
	mt76_wr(dev, MT_MCU_INT_LEVEL, 4);

	if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 2000)) {
		dev_err(dev->mt76.dev, "Failed to load ROM patch\n");
		ret = -ETIMEDOUT;
	}

out:
	/* release semaphore */
	if (rom_protect)
		mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
	release_firmware(fw);
	return ret;
}

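/*
 * Load the main MT7662 firmware: validate the header against the file size,
 * upload the ILM and DLM images through the PCIe remap window, then start
 * the MCU and wait for it to report that it is running.
 */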
static int
mt76pci_load_firmware(struct mt76x2_dev *dev)
{
	const struct firmware *fw;
	const struct mt76x2_fw_header *hdr;
	int len, ret;
	__le32 *cur;
	u32 offset, val;

	ret = request_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev);
	if (ret)
		return ret;

	if (!fw || !fw->data || fw->size < sizeof(*hdr))
		goto error;

	hdr = (const struct mt76x2_fw_header *) fw->data;

	len = sizeof(*hdr);
	len += le32_to_cpu(hdr->ilm_len);
	len += le32_to_cpu(hdr->dlm_len);

	if (fw->size != len)
		goto error;

	val = le16_to_cpu(hdr->fw_ver);
	dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
		 (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);

	val = le16_to_cpu(hdr->build_ver);
	dev_info(dev->mt76.dev, "Build: %x\n", val);
	dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);

	cur = (__le32 *) (fw->data + sizeof(*hdr));
	len = le32_to_cpu(hdr->ilm_len);

	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ILM_OFFSET);
	mt76_wr_copy(dev, MT_MCU_ILM_ADDR, cur, len);

	cur += len / sizeof(*cur);
	len = le32_to_cpu(hdr->dlm_len);

	if (mt76xx_rev(dev) >= MT76XX_REV_E3)
		offset = MT_MCU_DLM_ADDR_E3;
	else
		offset = MT_MCU_DLM_ADDR;

	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET);
	mt76_wr_copy(dev, offset, cur, len);

	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);

	val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
	if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1)
		mt76_set(dev, MT_MCU_COM_REG0, BIT(30));

	/* trigger firmware */
	mt76_wr(dev, MT_MCU_INT_LEVEL, 2);
	if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 200)) {
		dev_err(dev->mt76.dev, "Firmware failed to start\n");
		release_firmware(fw);
		return -ETIMEDOUT;
	}

	dev_info(dev->mt76.dev, "Firmware running!\n");

	release_firmware(fw);

	return ret;

error:
	dev_err(dev->mt76.dev, "Invalid firmware\n");
	release_firmware(fw);
	return -ENOENT;
}

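/* Set an MCU function to the given value via a CMD_FUN_SET_OP message. */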
static int
mt76x2_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
			   u32 val)
{
	struct sk_buff *skb;
	struct {
		__le32 id;
		__le32 value;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(func),
		.value = cpu_to_le32(val),
	};

	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
	return mt76x2_mcu_msg_send(dev, skb, CMD_FUN_SET_OP);
}

int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
		       u8 channel)
{
	struct sk_buff *skb;
	struct {
		u8 cr_mode;
		u8 temp;
		u8 ch;
		u8 _pad0;

		__le32 cfg;
	} __packed __aligned(4) msg = {
		.cr_mode = type,
		.temp = temp_level,
		.ch = channel,
	};
	u32 val;

	val = BIT(31);
	val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
	val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
	msg.cfg = cpu_to_le32(val);

	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
	return mt76x2_mcu_msg_send(dev, skb, CMD_LOAD_CR);
}

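/*
 * Switch channel in two steps: first send the switch request without the
 * extension channel information, then resend it with the extension channel
 * index filled in after a short settling delay.
 */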
int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
			   u8 bw_index, bool scan)
{
	struct sk_buff *skb;
	struct {
		u8 idx;
		u8 scan;
		u8 bw;
		u8 _pad0;

		__le16 chainmask;
		u8 ext_chan;
		u8 _pad1;
	} __packed __aligned(4) msg = {
		.idx = channel,
		.scan = scan,
		.bw = bw,
		.chainmask = cpu_to_le16(dev->chainmask),
	};

	/* first set the channel without the extension channel info */
	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
	mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);

	usleep_range(5000, 10000);

	msg.ext_chan = 0xe0 + bw_index;
	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
	return mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);
}

int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on)
{
	struct sk_buff *skb;
	struct {
		__le32 mode;
		__le32 level;
	} __packed __aligned(4) msg = {
		.mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF),
		.level = cpu_to_le32(0),
	};

	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
	return mt76x2_mcu_msg_send(dev, skb, CMD_POWER_SAVING_OP);
}

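/*
 * Run an MCU calibration: clear the completion bit in MT_MCU_COM_REG0,
 * send the CMD_CALIBRATION_OP request and poll for the MCU to set the
 * bit again within 100 ms.
 */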
int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
			 u32 param)
{
	struct sk_buff *skb;
	struct {
		__le32 id;
		__le32 value;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(type),
		.value = cpu_to_le32(param),
	};
	int ret;

	mt76_clear(dev, MT_MCU_COM_REG0, BIT(31));

	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
	ret = mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
	if (ret)
		return ret;

	if (WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0,
				    BIT(31), BIT(31), 100)))
		return -ETIMEDOUT;

	return 0;
}

int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev,
			 struct mt76x2_tssi_comp *tssi_data)
{
	struct sk_buff *skb;
	struct {
		__le32 id;
		struct mt76x2_tssi_comp data;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(MCU_CAL_TSSI_COMP),
		.data = *tssi_data,
	};

	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
	return mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
}

int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
			 bool force)
{
	struct sk_buff *skb;
	struct {
		__le32 channel;
		__le32 gain_val;
	} __packed __aligned(4) msg = {
		.channel = cpu_to_le32(channel),
		.gain_val = cpu_to_le32(gain),
	};

	if (force)
		msg.channel |= cpu_to_le32(BIT(31));

	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
	return mt76x2_mcu_msg_send(dev, skb, CMD_INIT_GAIN_OP);
}

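/*
 * Bring up the MCU: load the ROM patch and the main firmware, then issue
 * the Q_SELECT function select command.
 */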
int mt76x2_mcu_init(struct mt76x2_dev *dev)
{
	int ret;

	mutex_init(&dev->mcu.mutex);

	ret = mt76pci_load_rom_patch(dev);
	if (ret)
		return ret;

	ret = mt76pci_load_firmware(dev);
	if (ret)
		return ret;

	mt76x2_mcu_function_select(dev, Q_SELECT, 1);
	return 0;
}

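/*
 * Clean up MCU state: signal the MCU through MT_MCU_INT_LEVEL, wait briefly,
 * then free any response buffers that are still queued.
 */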
int mt76x2_mcu_cleanup(struct mt76x2_dev *dev)
{
	struct sk_buff *skb;

	mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
	usleep_range(20000, 30000);

	while ((skb = skb_dequeue(&dev->mcu.res_q)) != NULL)
		dev_kfree_skb(skb);

	return 0;
}