1
2
3
4
5#include <linux/bcd.h>
6
7#include "main.h"
8#include "reg.h"
9#include "fw.h"
10#include "phy.h"
11#include "debug.h"
12#include "regd.h"
13
/* One register write from a PHY parameter table: address and value. */
struct phy_cfg_pair {
	u32 addr;
	u32 data;
};
18
/*
 * A PHY table entry is either a condition word (branch marker matched
 * against the driver's rtw_phy_cond) or a plain address/data write;
 * both views share the same layout (see rtw_parse_tbl_phy_cond()).
 */
union phy_table_tile {
	struct rtw_phy_cond cond;
	struct phy_cfg_pair cfg;
};
23
24static const u32 db_invert_table[12][8] = {
25 {10, 13, 16, 20,
26 25, 32, 40, 50},
27 {64, 80, 101, 128,
28 160, 201, 256, 318},
29 {401, 505, 635, 800,
30 1007, 1268, 1596, 2010},
31 {316, 398, 501, 631,
32 794, 1000, 1259, 1585},
33 {1995, 2512, 3162, 3981,
34 5012, 6310, 7943, 10000},
35 {12589, 15849, 19953, 25119,
36 31623, 39811, 50119, 63098},
37 {79433, 100000, 125893, 158489,
38 199526, 251189, 316228, 398107},
39 {501187, 630957, 794328, 1000000,
40 1258925, 1584893, 1995262, 2511886},
41 {3162278, 3981072, 5011872, 6309573,
42 7943282, 1000000, 12589254, 15848932},
43 {19952623, 25118864, 31622777, 39810717,
44 50118723, 63095734, 79432823, 100000000},
45 {125892541, 158489319, 199526232, 251188643,
46 316227766, 398107171, 501187234, 630957345},
47 {794328235, 1000000000, 1258925412, 1584893192,
48 1995262315, 2511886432U, 3162277660U, 3981071706U}
49};
50
/* Descriptor-rate indices grouped by rate section (CCK, OFDM, HT 1SS/2SS,
 * VHT 1SS/2SS).  Exported so per-chip code can walk TX power tables. */
u8 rtw_cck_rates[] = { DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M };
u8 rtw_ofdm_rates[] = {
	DESC_RATE6M,  DESC_RATE9M,  DESC_RATE12M,
	DESC_RATE18M, DESC_RATE24M, DESC_RATE36M,
	DESC_RATE48M, DESC_RATE54M
};
u8 rtw_ht_1s_rates[] = {
	DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2,
	DESC_RATEMCS3, DESC_RATEMCS4, DESC_RATEMCS5,
	DESC_RATEMCS6, DESC_RATEMCS7
};
u8 rtw_ht_2s_rates[] = {
	DESC_RATEMCS8,  DESC_RATEMCS9,  DESC_RATEMCS10,
	DESC_RATEMCS11, DESC_RATEMCS12, DESC_RATEMCS13,
	DESC_RATEMCS14, DESC_RATEMCS15
};
u8 rtw_vht_1s_rates[] = {
	DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1,
	DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3,
	DESC_RATEVHT1SS_MCS4, DESC_RATEVHT1SS_MCS5,
	DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7,
	DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9
};
u8 rtw_vht_2s_rates[] = {
	DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1,
	DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3,
	DESC_RATEVHT2SS_MCS4, DESC_RATEVHT2SS_MCS5,
	DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7,
	DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9
};
/* Section index -> rate table, indexed by enum rtw_rate_section. */
u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = {
	rtw_cck_rates, rtw_ofdm_rates,
	rtw_ht_1s_rates, rtw_ht_2s_rates,
	rtw_vht_1s_rates, rtw_vht_2s_rates
};
EXPORT_SYMBOL(rtw_rate_section);

/* Section index -> number of rates in the matching table above. */
u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = {
	ARRAY_SIZE(rtw_cck_rates),
	ARRAY_SIZE(rtw_ofdm_rates),
	ARRAY_SIZE(rtw_ht_1s_rates),
	ARRAY_SIZE(rtw_ht_2s_rates),
	ARRAY_SIZE(rtw_vht_1s_rates),
	ARRAY_SIZE(rtw_vht_2s_rates)
};
EXPORT_SYMBOL(rtw_rate_size);
97
/* Per-section rate counts kept as file-local constants for loops here. */
static const u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates);
static const u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates);
static const u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates);
static const u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates);
static const u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates);
static const u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates);
104
/* Band selector used when indexing per-band PHY/TX-power tables. */
enum rtw_phy_band_type {
	PHY_BAND_2G = 0,
	PHY_BAND_5G = 1,
};
109
110static void rtw_phy_cck_pd_init(struct rtw_dev *rtwdev)
111{
112 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
113 u8 i, j;
114
115 for (i = 0; i <= RTW_CHANNEL_WIDTH_40; i++) {
116 for (j = 0; j < RTW_RF_PATH_MAX; j++)
117 dm_info->cck_pd_lv[i][j] = CCK_PD_LV0;
118 }
119
120 dm_info->cck_fa_avg = CCK_FA_AVG_RESET;
121}
122
123void rtw_phy_set_edcca_th(struct rtw_dev *rtwdev, u8 l2h, u8 h2l)
124{
125 struct rtw_hw_reg_offset *edcca_th = rtwdev->chip->edcca_th;
126
127 rtw_write32_mask(rtwdev,
128 edcca_th[EDCCA_TH_L2H_IDX].hw_reg.addr,
129 edcca_th[EDCCA_TH_L2H_IDX].hw_reg.mask,
130 l2h + edcca_th[EDCCA_TH_L2H_IDX].offset);
131 rtw_write32_mask(rtwdev,
132 edcca_th[EDCCA_TH_H2L_IDX].hw_reg.addr,
133 edcca_th[EDCCA_TH_H2L_IDX].hw_reg.mask,
134 h2l + edcca_th[EDCCA_TH_H2L_IDX].offset);
135}
136EXPORT_SYMBOL(rtw_phy_set_edcca_th);
137
138void rtw_phy_adaptivity_set_mode(struct rtw_dev *rtwdev)
139{
140 struct rtw_chip_info *chip = rtwdev->chip;
141 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
142
143
144 if (!rtw_edcca_enabled) {
145 dm_info->edcca_mode = RTW_EDCCA_NORMAL;
146 rtw_dbg(rtwdev, RTW_DBG_PHY, "EDCCA disabled, cannot be set\n");
147 return;
148 }
149
150 switch (rtwdev->regd.dfs_region) {
151 case NL80211_DFS_ETSI:
152 dm_info->edcca_mode = RTW_EDCCA_ADAPTIVITY;
153 dm_info->l2h_th_ini = chip->l2h_th_ini_ad;
154 break;
155 case NL80211_DFS_JP:
156 dm_info->edcca_mode = RTW_EDCCA_ADAPTIVITY;
157 dm_info->l2h_th_ini = chip->l2h_th_ini_cs;
158 break;
159 default:
160 dm_info->edcca_mode = RTW_EDCCA_NORMAL;
161 break;
162 }
163}
164
165static void rtw_phy_adaptivity_init(struct rtw_dev *rtwdev)
166{
167 struct rtw_chip_info *chip = rtwdev->chip;
168
169 rtw_phy_adaptivity_set_mode(rtwdev);
170 if (chip->ops->adaptivity_init)
171 chip->ops->adaptivity_init(rtwdev);
172}
173
174static void rtw_phy_adaptivity(struct rtw_dev *rtwdev)
175{
176 if (rtwdev->chip->ops->adaptivity)
177 rtwdev->chip->ops->adaptivity(rtwdev);
178}
179
180static void rtw_phy_cfo_init(struct rtw_dev *rtwdev)
181{
182 struct rtw_chip_info *chip = rtwdev->chip;
183
184 if (chip->ops->cfo_init)
185 chip->ops->cfo_init(rtwdev);
186}
187
188static void rtw_phy_tx_path_div_init(struct rtw_dev *rtwdev)
189{
190 struct rtw_path_div *path_div = &rtwdev->dm_path_div;
191
192 path_div->current_tx_path = rtwdev->chip->default_1ss_tx_path;
193 path_div->path_a_cnt = 0;
194 path_div->path_a_sum = 0;
195 path_div->path_b_cnt = 0;
196 path_div->path_b_sum = 0;
197}
198
199void rtw_phy_init(struct rtw_dev *rtwdev)
200{
201 struct rtw_chip_info *chip = rtwdev->chip;
202 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
203 u32 addr, mask;
204
205 dm_info->fa_history[3] = 0;
206 dm_info->fa_history[2] = 0;
207 dm_info->fa_history[1] = 0;
208 dm_info->fa_history[0] = 0;
209 dm_info->igi_bitmap = 0;
210 dm_info->igi_history[3] = 0;
211 dm_info->igi_history[2] = 0;
212 dm_info->igi_history[1] = 0;
213
214 addr = chip->dig[0].addr;
215 mask = chip->dig[0].mask;
216 dm_info->igi_history[0] = rtw_read32_mask(rtwdev, addr, mask);
217 rtw_phy_cck_pd_init(rtwdev);
218
219 dm_info->iqk.done = false;
220 rtw_phy_adaptivity_init(rtwdev);
221 rtw_phy_cfo_init(rtwdev);
222 rtw_phy_tx_path_div_init(rtwdev);
223}
224EXPORT_SYMBOL(rtw_phy_init);
225
226void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
227{
228 struct rtw_chip_info *chip = rtwdev->chip;
229 struct rtw_hal *hal = &rtwdev->hal;
230 u32 addr, mask;
231 u8 path;
232
233 if (chip->dig_cck) {
234 const struct rtw_hw_reg *dig_cck = &chip->dig_cck[0];
235 rtw_write32_mask(rtwdev, dig_cck->addr, dig_cck->mask, igi >> 1);
236 }
237
238 for (path = 0; path < hal->rf_path_num; path++) {
239 addr = chip->dig[path].addr;
240 mask = chip->dig[path].mask;
241 rtw_write32_mask(rtwdev, addr, mask, igi);
242 }
243}
244
245static void rtw_phy_stat_false_alarm(struct rtw_dev *rtwdev)
246{
247 struct rtw_chip_info *chip = rtwdev->chip;
248
249 chip->ops->false_alarm_statistics(rtwdev);
250}
251
252#define RA_FLOOR_TABLE_SIZE 7
253#define RA_FLOOR_UP_GAP 3
254
255static u8 rtw_phy_get_rssi_level(u8 old_level, u8 rssi)
256{
257 u8 table[RA_FLOOR_TABLE_SIZE] = {20, 34, 38, 42, 46, 50, 100};
258 u8 new_level = 0;
259 int i;
260
261 for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++)
262 if (i >= old_level)
263 table[i] += RA_FLOOR_UP_GAP;
264
265 for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
266 if (rssi < table[i]) {
267 new_level = i;
268 break;
269 }
270 }
271
272 return new_level;
273}
274
/* Context passed to rtw_phy_stat_rssi_iter(); min_rssi accumulates the
 * lowest RSSI seen across all stations. */
struct rtw_phy_stat_iter_data {
	struct rtw_dev *rtwdev;
	u8 min_rssi;
};
279
/* Per-station iterator: refresh the station's RA floor level from its
 * averaged RSSI, push the RSSI to firmware, and track the minimum RSSI
 * across all stations. */
static void rtw_phy_stat_rssi_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw_phy_stat_iter_data *iter_data = data;
	struct rtw_dev *rtwdev = iter_data->rtwdev;
	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
	u8 rssi;

	rssi = ewma_rssi_read(&si->avg_rssi);
	si->rssi_level = rtw_phy_get_rssi_level(si->rssi_level, rssi);

	rtw_fw_send_rssi_info(rtwdev, si);

	iter_data->min_rssi = min_t(u8, rssi, iter_data->min_rssi);
}
294
295static void rtw_phy_stat_rssi(struct rtw_dev *rtwdev)
296{
297 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
298 struct rtw_phy_stat_iter_data data = {};
299
300 data.rtwdev = rtwdev;
301 data.min_rssi = U8_MAX;
302 rtw_iterate_stas_atomic(rtwdev, rtw_phy_stat_rssi_iter, &data);
303
304 dm_info->pre_min_rssi = dm_info->min_rssi;
305 dm_info->min_rssi = data.min_rssi;
306}
307
/* Roll the current per-rate packet counters into "last" and restart
 * counting for the next watchdog period. */
static void rtw_phy_stat_rate_cnt(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;

	dm_info->last_pkt_count = dm_info->cur_pkt_count;
	memset(&dm_info->cur_pkt_count, 0, sizeof(dm_info->cur_pkt_count));
}
315
/* Gather the statistics (RSSI, false alarms, rate counters) consumed
 * by the dynamic mechanisms run afterwards. */
static void rtw_phy_statistics(struct rtw_dev *rtwdev)
{
	rtw_phy_stat_rssi(rtwdev);
	rtw_phy_stat_false_alarm(rtwdev);
	rtw_phy_stat_rate_cnt(rtwdev);
}
322
/* DIG tuning constants: PERF_* apply when at least one station is
 * connected, CVRG_* ("coverage") when idle.  FA_TH_* are false-alarm
 * thresholds; MAX/MID/MIN bound the IGI value. */
#define DIG_PERF_FA_TH_LOW			250
#define DIG_PERF_FA_TH_HIGH			500
#define DIG_PERF_FA_TH_EXTRA_HIGH		750
#define DIG_PERF_MAX				0x5a
#define DIG_PERF_MID				0x40
#define DIG_CVRG_FA_TH_LOW			2000
#define DIG_CVRG_FA_TH_HIGH			4000
#define DIG_CVRG_FA_TH_EXTRA_HIGH		5000
#define DIG_CVRG_MAX				0x2a
#define DIG_CVRG_MID				0x26
#define DIG_CVRG_MIN				0x1c
#define DIG_RSSI_GAIN_OFFSET			15
335
/*
 * Detect IGI "damping": the DIG loop oscillating up and down while the
 * false-alarm count swings high/low in step with it.  Once detected,
 * DIG updates are suppressed until the minimum RSSI moves by more than
 * 3 or roughly 20 invocations elapse.
 *
 * Returns true when DIG should skip this round.
 */
static bool
rtw_phy_dig_check_damping(struct rtw_dm_info *dm_info)
{
	u16 fa_lo = DIG_PERF_FA_TH_LOW;
	u16 fa_hi = DIG_PERF_FA_TH_HIGH;
	u16 *fa_history;
	u8 *igi_history;
	u8 damping_rssi;
	u8 min_rssi;
	u8 diff;
	u8 igi_bitmap;
	bool damping = false;

	min_rssi = dm_info->min_rssi;
	if (dm_info->damping) {
		/* Already damping: hold until RSSI shifts or we time out. */
		damping_rssi = dm_info->damping_rssi;
		diff = min_rssi > damping_rssi ? min_rssi - damping_rssi :
						 damping_rssi - min_rssi;
		if (diff > 3 || dm_info->damping_cnt++ > 20) {
			dm_info->damping = false;
			return false;
		}

		return true;
	}

	igi_history = dm_info->igi_history;
	fa_history = dm_info->fa_history;
	/* Bit i of the bitmap records whether update i moved the IGI up
	 * (bit 0 is the most recent update). */
	igi_bitmap = dm_info->igi_bitmap & 0xf;
	switch (igi_bitmap) {
	case 5:
		/* down -> up -> down -> up */
		if (igi_history[0] > igi_history[1] &&
		    igi_history[2] > igi_history[3] &&
		    igi_history[0] - igi_history[1] >= 2 &&
		    igi_history[2] - igi_history[3] >= 2 &&
		    fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
		    fa_history[2] > fa_hi && fa_history[3] < fa_lo)
			damping = true;
		break;
	case 9:
		/* up -> down -> down -> up */
		if (igi_history[0] > igi_history[1] &&
		    igi_history[3] > igi_history[2] &&
		    igi_history[0] - igi_history[1] >= 4 &&
		    igi_history[3] - igi_history[2] >= 2 &&
		    fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
		    fa_history[2] < fa_lo && fa_history[3] > fa_hi)
			damping = true;
		break;
	default:
		return false;
	}

	if (damping) {
		dm_info->damping = true;
		dm_info->damping_cnt = 0;
		dm_info->damping_rssi = min_rssi;
	}

	return damping;
}
398
/*
 * Compute the allowed IGI window.  When linked it follows the weakest
 * station's RSSI (the gain should stay below the signal level);
 * otherwise fixed coverage bounds are used.
 */
static void rtw_phy_dig_get_boundary(struct rtw_dev *rtwdev,
				     struct rtw_dm_info *dm_info,
				     u8 *upper, u8 *lower, bool linked)
{
	u8 dig_max, dig_min, dig_mid;
	u8 min_rssi;

	if (linked) {
		dig_max = DIG_PERF_MAX;
		dig_mid = DIG_PERF_MID;
		dig_min = rtwdev->chip->dig_min;
		min_rssi = max_t(u8, dm_info->min_rssi, dig_min);
	} else {
		dig_max = DIG_CVRG_MAX;
		dig_mid = DIG_CVRG_MID;
		dig_min = DIG_CVRG_MIN;
		min_rssi = dig_min;
	}

	/* Cap the ceiling at min_rssi + 15 so the gain tracks the signal. */
	dig_max = min_t(u8, dig_max, min_rssi + DIG_RSSI_GAIN_OFFSET);

	*lower = clamp_t(u8, min_rssi, dig_min, dig_mid);
	*upper = clamp_t(u8, *lower + DIG_RSSI_GAIN_OFFSET, dig_min, dig_max);
}
424
/*
 * Select the three false-alarm thresholds (descending) and the IGI
 * step to take when each is exceeded.  Steps are made larger when the
 * minimum RSSI just dropped, to react faster to a weakening link.
 */
static void rtw_phy_dig_get_threshold(struct rtw_dm_info *dm_info,
				      u16 *fa_th, u8 *step, bool linked)
{
	u8 min_rssi, pre_min_rssi;

	min_rssi = dm_info->min_rssi;
	pre_min_rssi = dm_info->pre_min_rssi;
	step[0] = 4;
	step[1] = 3;
	step[2] = 2;

	if (linked) {
		fa_th[0] = DIG_PERF_FA_TH_EXTRA_HIGH;
		fa_th[1] = DIG_PERF_FA_TH_HIGH;
		fa_th[2] = DIG_PERF_FA_TH_LOW;
		if (pre_min_rssi > min_rssi) {
			/* Signal got weaker: climb faster. */
			step[0] = 6;
			step[1] = 4;
			step[2] = 2;
		}
	} else {
		fa_th[0] = DIG_CVRG_FA_TH_EXTRA_HIGH;
		fa_th[1] = DIG_CVRG_FA_TH_HIGH;
		fa_th[2] = DIG_CVRG_FA_TH_LOW;
	}
}
451
/*
 * Push the new IGI and FA count into the 4-entry histories used by the
 * damping detector.  Bit 0 of igi_bitmap records whether this update
 * raised the IGI; older bits are shifted up.
 */
static void rtw_phy_dig_recorder(struct rtw_dm_info *dm_info, u8 igi, u16 fa)
{
	u8 *igi_history;
	u16 *fa_history;
	u8 igi_bitmap;
	bool up;

	igi_bitmap = dm_info->igi_bitmap << 1 & 0xfe;
	igi_history = dm_info->igi_history;
	fa_history = dm_info->fa_history;

	up = igi > igi_history[0];
	igi_bitmap |= up;

	igi_history[3] = igi_history[2];
	igi_history[2] = igi_history[1];
	igi_history[1] = igi_history[0];
	igi_history[0] = igi;

	fa_history[3] = fa_history[2];
	fa_history[2] = fa_history[1];
	fa_history[1] = fa_history[0];
	fa_history[0] = fa;

	dm_info->igi_bitmap = igi_bitmap;
}
478
/*
 * Dynamic Initial Gain control: raise the IGI by a step scaled to the
 * false-alarm count, let it decay by 2 otherwise, clamp it to the
 * RSSI-derived window, and write it to hardware when it changed.
 */
static void rtw_phy_dig(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 upper_bound, lower_bound;
	u8 pre_igi, cur_igi;
	u16 fa_th[3], fa_cnt;
	u8 level;
	u8 step[3];
	bool linked;

	if (test_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags))
		return;

	/* Skip while the oscillation damper is active. */
	if (rtw_phy_dig_check_damping(dm_info))
		return;

	linked = !!rtwdev->sta_cnt;

	fa_cnt = dm_info->total_fa_cnt;
	pre_igi = dm_info->igi_history[0];

	rtw_phy_dig_get_threshold(dm_info, fa_th, step, linked);

	/*
	 * Add the step matching the highest FA threshold exceeded; the
	 * unconditional -2 below makes the IGI drift back down when the
	 * false-alarm count stays low.
	 */
	cur_igi = pre_igi;
	for (level = 0; level < 3; level++) {
		if (fa_cnt > fa_th[level]) {
			cur_igi += step[level];
			break;
		}
	}
	cur_igi -= 2;

	/* Keep the IGI inside the link-state dependent window. */
	rtw_phy_dig_get_boundary(rtwdev, dm_info, &upper_bound, &lower_bound,
				 linked);
	cur_igi = clamp_t(u8, cur_igi, lower_bound, upper_bound);

	/* Record even when unchanged so the damping history stays fresh. */
	rtw_phy_dig_recorder(dm_info, cur_igi, fa_cnt);

	if (cur_igi != pre_igi)
		rtw_phy_dig_write(rtwdev, cur_igi);
}
532
533static void rtw_phy_ra_info_update_iter(void *data, struct ieee80211_sta *sta)
534{
535 struct rtw_dev *rtwdev = data;
536 struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
537
538 rtw_update_sta_info(rtwdev, si);
539}
540
541static void rtw_phy_ra_info_update(struct rtw_dev *rtwdev)
542{
543 if (rtwdev->watch_dog_cnt & 0x3)
544 return;
545
546 rtw_iterate_stas_atomic(rtwdev, rtw_phy_ra_info_update_iter, rtwdev);
547}
548
/*
 * Build an RRSR bitmask covering rates up to the given descriptor
 * rate: reduce the rate to its order within its own group (legacy,
 * MCS block, or VHT per-SS block), then widen by the CCK rate count
 * that leads the RRSR register layout.
 */
static u32 rtw_phy_get_rrsr_mask(struct rtw_dev *rtwdev, u8 rate_idx)
{
	u8 rate_order;

	rate_order = rate_idx;

	/* Subtract the base of whichever rate group rate_idx falls in. */
	if (rate_idx >= DESC_RATEVHT4SS_MCS0)
		rate_order -= DESC_RATEVHT4SS_MCS0;
	else if (rate_idx >= DESC_RATEVHT3SS_MCS0)
		rate_order -= DESC_RATEVHT3SS_MCS0;
	else if (rate_idx >= DESC_RATEVHT2SS_MCS0)
		rate_order -= DESC_RATEVHT2SS_MCS0;
	else if (rate_idx >= DESC_RATEVHT1SS_MCS0)
		rate_order -= DESC_RATEVHT1SS_MCS0;
	else if (rate_idx >= DESC_RATEMCS24)
		rate_order -= DESC_RATEMCS24;
	else if (rate_idx >= DESC_RATEMCS16)
		rate_order -= DESC_RATEMCS16;
	else if (rate_idx >= DESC_RATEMCS8)
		rate_order -= DESC_RATEMCS8;
	else if (rate_idx >= DESC_RATEMCS0)
		rate_order -= DESC_RATEMCS0;
	else if (rate_idx >= DESC_RATE6M)
		rate_order -= DESC_RATE6M;
	else
		rate_order -= DESC_RATE1M;

	if (rate_idx >= DESC_RATEMCS0 || rate_order == 0)
		rate_order++;

	return GENMASK(rate_order + RRSR_RATE_ORDER_CCK_LEN - 1, 0);
}
581
582static void rtw_phy_rrsr_mask_min_iter(void *data, struct ieee80211_sta *sta)
583{
584 struct rtw_dev *rtwdev = (struct rtw_dev *)data;
585 struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
586 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
587 u32 mask = 0;
588
589 mask = rtw_phy_get_rrsr_mask(rtwdev, si->ra_report.desc_rate);
590 if (mask < dm_info->rrsr_mask_min)
591 dm_info->rrsr_mask_min = mask;
592}
593
594static void rtw_phy_rrsr_update(struct rtw_dev *rtwdev)
595{
596 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
597
598 dm_info->rrsr_mask_min = RRSR_RATE_ORDER_MAX;
599 rtw_iterate_stas_atomic(rtwdev, rtw_phy_rrsr_mask_min_iter, rtwdev);
600 rtw_write32(rtwdev, REG_RRSR, dm_info->rrsr_val_init & dm_info->rrsr_mask_min);
601}
602
603static void rtw_phy_dpk_track(struct rtw_dev *rtwdev)
604{
605 struct rtw_chip_info *chip = rtwdev->chip;
606
607 if (chip->ops->dpk_track)
608 chip->ops->dpk_track(rtwdev);
609}
610
/* Context for the per-vif CFO iterator: the received frame's stats and
 * the BSSID extracted from its header. */
struct rtw_rx_addr_match_data {
	struct rtw_dev *rtwdev;
	struct ieee80211_hdr *hdr;
	struct rtw_rx_pkt_stat *pkt_stat;
	u8 *bssid;
};
617
/* Per-vif iterator: accumulate per-path CFO samples from frames whose
 * BSSID matches this vif, for the CFO tracking loop to consume. */
static void rtw_phy_parsing_cfo_iter(void *data, u8 *mac,
				     struct ieee80211_vif *vif)
{
	struct rtw_rx_addr_match_data *iter_data = data;
	struct rtw_dev *rtwdev = iter_data->rtwdev;
	struct rtw_rx_pkt_stat *pkt_stat = iter_data->pkt_stat;
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	struct rtw_cfo_track *cfo = &dm_info->cfo_track;
	u8 *bssid = iter_data->bssid;
	u8 i;

	if (!ether_addr_equal(vif->bss_conf.bssid, bssid))
		return;

	for (i = 0; i < rtwdev->hal.rf_path_num; i++) {
		cfo->cfo_tail[i] += pkt_stat->cfo_tail[i];
		cfo->cfo_cnt[i]++;
	}

	cfo->packet_count++;
}
639
640void rtw_phy_parsing_cfo(struct rtw_dev *rtwdev,
641 struct rtw_rx_pkt_stat *pkt_stat)
642{
643 struct ieee80211_hdr *hdr = pkt_stat->hdr;
644 struct rtw_rx_addr_match_data data = {};
645
646 if (pkt_stat->crc_err || pkt_stat->icv_err || !pkt_stat->phy_status ||
647 ieee80211_is_ctl(hdr->frame_control))
648 return;
649
650 data.rtwdev = rtwdev;
651 data.hdr = hdr;
652 data.pkt_stat = pkt_stat;
653 data.bssid = get_hdr_bssid(hdr);
654
655 rtw_iterate_vifs_atomic(rtwdev, rtw_phy_parsing_cfo_iter, &data);
656}
657EXPORT_SYMBOL(rtw_phy_parsing_cfo);
658
659static void rtw_phy_cfo_track(struct rtw_dev *rtwdev)
660{
661 struct rtw_chip_info *chip = rtwdev->chip;
662
663 if (chip->ops->cfo_track)
664 chip->ops->cfo_track(rtwdev);
665}
666
#define CCK_PD_FA_LV1_MIN	1000
#define CCK_PD_FA_LV0_MAX	500

/*
 * Pick the CCK PD level when not associated, from the averaged CCK
 * false-alarm count alone.  CCK_PD_LV_MAX means "no change" — the
 * caller skips programming when the average sits between the two
 * thresholds (hysteresis band).
 */
static u8 rtw_phy_cck_pd_lv_unlink(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u32 cck_fa_avg = dm_info->cck_fa_avg;

	if (cck_fa_avg > CCK_PD_FA_LV1_MIN)
		return CCK_PD_LV1;

	if (cck_fa_avg < CCK_PD_FA_LV0_MAX)
		return CCK_PD_LV0;

	return CCK_PD_LV_MAX;
}
683
/* IGI and RSSI thresholds gating the higher PD levels when linked. */
#define CCK_PD_IGI_LV4_VAL 0x38
#define CCK_PD_IGI_LV3_VAL 0x2a
#define CCK_PD_IGI_LV2_VAL 0x24
#define CCK_PD_RSSI_LV4_VAL 32
#define CCK_PD_RSSI_LV3_VAL 32
#define CCK_PD_RSSI_LV2_VAL 24

/*
 * Pick the CCK PD level when associated: strong signal (high IGI and
 * RSSI) allows aggressive power-detection levels; otherwise fall back
 * to the same FA-average thresholds as the unlinked case, with
 * CCK_PD_LV_MAX meaning "no change".
 */
static u8 rtw_phy_cck_pd_lv_link(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 igi = dm_info->igi_history[0];
	u8 rssi = dm_info->min_rssi;
	u32 cck_fa_avg = dm_info->cck_fa_avg;

	if (igi > CCK_PD_IGI_LV4_VAL && rssi > CCK_PD_RSSI_LV4_VAL)
		return CCK_PD_LV4;
	if (igi > CCK_PD_IGI_LV3_VAL && rssi > CCK_PD_RSSI_LV3_VAL)
		return CCK_PD_LV3;
	if (igi > CCK_PD_IGI_LV2_VAL || rssi > CCK_PD_RSSI_LV2_VAL)
		return CCK_PD_LV2;
	if (cck_fa_avg > CCK_PD_FA_LV1_MIN)
		return CCK_PD_LV1;
	if (cck_fa_avg < CCK_PD_FA_LV0_MAX)
		return CCK_PD_LV0;

	return CCK_PD_LV_MAX;
}
711
712static u8 rtw_phy_cck_pd_lv(struct rtw_dev *rtwdev)
713{
714 if (!rtw_is_assoc(rtwdev))
715 return rtw_phy_cck_pd_lv_unlink(rtwdev);
716 else
717 return rtw_phy_cck_pd_lv_link(rtwdev);
718}
719
/*
 * CCK power-detection tuning, 2.4 GHz only.  Maintains an EWMA of the
 * CCK false-alarm count (3/4 old + 1/4 new), derives a PD level, and
 * hands it to the chip op unless the level says "no change".
 */
static void rtw_phy_cck_pd(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	struct rtw_chip_info *chip = rtwdev->chip;
	u32 cck_fa = dm_info->cck_fa_cnt;
	u8 level;

	if (rtwdev->hal.current_band_type != RTW_BAND_2G)
		return;

	/* First sample after reset seeds the average directly. */
	if (dm_info->cck_fa_avg == CCK_FA_AVG_RESET)
		dm_info->cck_fa_avg = cck_fa;
	else
		dm_info->cck_fa_avg = (dm_info->cck_fa_avg * 3 + cck_fa) >> 2;

	/* NOTE(review): the value printed under the "cck_fa" label is
	 * fa_history[0], not cck_fa_cnt/cck_fa_avg — confirm intended. */
	rtw_dbg(rtwdev, RTW_DBG_PHY, "IGI=0x%x, rssi_min=%d, cck_fa=%d\n",
		dm_info->igi_history[0], dm_info->min_rssi,
		dm_info->fa_history[0]);
	rtw_dbg(rtwdev, RTW_DBG_PHY, "cck_fa_avg=%d, cck_pd_default=%d\n",
		dm_info->cck_fa_avg, dm_info->cck_pd_default);

	level = rtw_phy_cck_pd_lv(rtwdev);

	/* LV_MAX is the "stay where we are" sentinel. */
	if (level >= CCK_PD_LV_MAX)
		return;

	if (chip->ops->cck_pd_set)
		chip->ops->cck_pd_set(rtwdev, level);
}
749
750static void rtw_phy_pwr_track(struct rtw_dev *rtwdev)
751{
752 rtwdev->chip->ops->pwr_track(rtwdev);
753}
754
/* Rate-adaptation maintenance: push PHY info to firmware, refresh
 * per-station RA info, and update the RRSR register. */
static void rtw_phy_ra_track(struct rtw_dev *rtwdev)
{
	rtw_fw_update_wl_phy_info(rtwdev);
	rtw_phy_ra_info_update(rtwdev);
	rtw_phy_rrsr_update(rtwdev);
}
761
/*
 * Watchdog entry point for all dynamic PHY mechanisms: gather the
 * statistics first, then run each tracking loop that consumes them.
 */
void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
{
	/* Statistics feed DIG (min RSSI, FA count) and CCK PD below. */
	rtw_phy_statistics(rtwdev);
	rtw_phy_dig(rtwdev);
	rtw_phy_cck_pd(rtwdev);
	rtw_phy_ra_track(rtwdev);
	rtw_phy_tx_path_diversity(rtwdev);
	rtw_phy_cfo_track(rtwdev);
	rtw_phy_dpk_track(rtwdev);
	rtw_phy_pwr_track(rtwdev);

	/* Adaptivity runs in firmware when the feature is offloaded. */
	if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_ADAPTIVITY))
		rtw_fw_adaptivity(rtwdev);
	else
		rtw_phy_adaptivity(rtwdev);
}
779
/* Fractional bits of precision carried through the dB<->linear math. */
#define FRAC_BITS 3

/*
 * Convert an RF power reading (dBm) to a positive dB index in
 * [0, 100]: values outside (-100, 20) map to 0, non-negative values
 * saturate at 100, and negative values become 100 + power.
 */
static u8 rtw_phy_power_2_db(s8 power)
{
	if (power <= -100 || power >= 20)
		return 0;
	else if (power >= 0)
		return 100;
	else
		return 100 + power;
}
791
/*
 * Convert a dB value (clamped to 1..96) to linear power via
 * db_invert_table.  Rows 0-2 of the table are already pre-scaled by
 * 2^FRAC_BITS; the other rows gain the fractional bits here.
 */
static u64 rtw_phy_db_2_linear(u8 power_db)
{
	u8 i, j;
	u64 linear;

	if (power_db > 96)
		power_db = 96;
	else if (power_db < 1)
		return 1;

	/* The table is 1-based in dB: entry (i, j) is (i * 8 + j + 1) dB. */
	i = (power_db - 1) >> 3;
	j = (power_db - 1) - (i << 3);

	linear = db_invert_table[i][j];
	linear = i > 2 ? linear << FRAC_BITS : linear;

	return linear;
}
811
/*
 * Inverse of rtw_phy_db_2_linear(): locate the table entry nearest to
 * the given linear value and return its dB index (1..96).  Rows 0-2
 * are compared with the value shifted up by FRAC_BITS to undo their
 * pre-scaling.
 */
static u8 rtw_phy_linear_2_db(u64 linear)
{
	u8 i;
	u8 j;
	u32 dB;

	if (linear >= db_invert_table[11][7])
		return 96;

	/* First row whose last entry is >= linear... */
	for (i = 0; i < 12; i++) {
		if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][7])
			break;
		else if (i > 2 && linear <= db_invert_table[i][7])
			break;
	}

	/* ...then the first entry within that row that is >= linear. */
	for (j = 0; j < 8; j++) {
		if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][j])
			break;
		else if (i > 2 && linear <= db_invert_table[i][j])
			break;
	}

	if (j == 0 && i == 0)
		goto end;

	/* Round toward whichever neighbouring entry is closer. */
	if (j == 0) {
		if (i != 3) {
			if (db_invert_table[i][0] - linear >
			    linear - db_invert_table[i - 1][7]) {
				i = i - 1;
				j = 7;
			}
		} else {
			/* Row 2/3 boundary: the table scaling changes here. */
			if (db_invert_table[3][0] - linear >
			    linear - db_invert_table[2][7]) {
				i = 2;
				j = 7;
			}
		}
	} else {
		if (db_invert_table[i][j] - linear >
		    linear - db_invert_table[i][j - 1]) {
			j = j - 1;
		}
	}
end:
	dB = (i << 3) + j + 1;

	return dB;
}
863
/*
 * Combine per-path RF power readings into one RSSI value: convert each
 * reading to linear, average over the paths, and convert back to dB.
 */
u8 rtw_phy_rf_power_2_rssi(s8 *rf_power, u8 path_num)
{
	s8 power;
	u8 power_db;
	u64 linear;
	u64 sum = 0;
	u8 path;

	for (path = 0; path < path_num; path++) {
		power = rf_power[path];
		power_db = rtw_phy_power_2_db(power);
		linear = rtw_phy_db_2_linear(power_db);
		sum += linear;
	}

	/* Drop the fractional bits with rounding, then divide by the path
	 * count; the 3-path case approximates /3 as *11/32. */
	sum = (sum + (1 << (FRAC_BITS - 1))) >> FRAC_BITS;
	switch (path_num) {
	case 2:
		sum >>= 1;
		break;
	case 3:
		sum = ((sum) + ((sum) << 1) + ((sum) << 3)) >> 5;
		break;
	case 4:
		sum >>= 2;
		break;
	default:
		break;
	}

	return rtw_phy_linear_2_db(sum);
}
EXPORT_SYMBOL(rtw_phy_rf_power_2_rssi);
897
898u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
899 u32 addr, u32 mask)
900{
901 struct rtw_hal *hal = &rtwdev->hal;
902 struct rtw_chip_info *chip = rtwdev->chip;
903 const u32 *base_addr = chip->rf_base_addr;
904 u32 val, direct_addr;
905
906 if (rf_path >= hal->rf_phy_num) {
907 rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
908 return INV_RF_DATA;
909 }
910
911 addr &= 0xff;
912 direct_addr = base_addr[rf_path] + (addr << 2);
913 mask &= RFREG_MASK;
914
915 val = rtw_read32_mask(rtwdev, direct_addr, mask);
916
917 return val;
918}
919EXPORT_SYMBOL(rtw_phy_read_rf);
920
/*
 * Read an RF register indirectly over the HSSI/LSSI serial interface.
 * Returns the masked field, or INV_RF_DATA for an invalid path or a
 * chip without SIPI read addresses.
 */
u32 rtw_phy_read_rf_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
			 u32 addr, u32 mask)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_rf_sipi_addr *rf_sipi_addr;
	const struct rtw_rf_sipi_addr *rf_sipi_addr_a;
	u32 val32;
	u32 en_pi;
	u32 r_addr;
	u32 shift;

	if (rf_path >= hal->rf_phy_num) {
		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	if (!chip->rf_sipi_read_addr) {
		rtw_err(rtwdev, "rf_sipi_read_addr isn't defined\n");
		return INV_RF_DATA;
	}

	rf_sipi_addr = &chip->rf_sipi_read_addr[rf_path];
	rf_sipi_addr_a = &chip->rf_sipi_read_addr[RF_PATH_A];

	addr &= 0xff;

	/* Latch the register address to read. */
	val32 = rtw_read32(rtwdev, rf_sipi_addr->hssi_2);
	val32 = (val32 & ~LSSI_READ_ADDR_MASK) | (addr << 23);
	rtw_write32(rtwdev, rf_sipi_addr->hssi_2, val32);

	/* Toggle the read edge on path A to trigger the transaction. */
	val32 = rtw_read32(rtwdev, rf_sipi_addr_a->hssi_2);
	rtw_write32(rtwdev, rf_sipi_addr_a->hssi_2, val32 & ~LSSI_READ_EDGE_MASK);
	rtw_write32(rtwdev, rf_sipi_addr_a->hssi_2, val32 | LSSI_READ_EDGE_MASK);

	udelay(120);

	/* The result lands in the PI or SI read-back register, depending
	 * on the HSSI mode bit. */
	en_pi = rtw_read32_mask(rtwdev, rf_sipi_addr->hssi_1, BIT(8));
	r_addr = en_pi ? rf_sipi_addr->lssi_read_pi : rf_sipi_addr->lssi_read;

	val32 = rtw_read32_mask(rtwdev, r_addr, LSSI_READ_DATA_MASK);

	shift = __ffs(mask);

	return (val32 & mask) >> shift;
}
EXPORT_SYMBOL(rtw_phy_read_rf_sipi);
969
/*
 * Write an RF register through the SIPI interface.  A partial-mask
 * write reads the register back first so untouched bits keep their
 * value.  Returns false on an invalid path or a failed read-back.
 */
bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
			       u32 addr, u32 mask, u32 data)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_chip_info *chip = rtwdev->chip;
	u32 *sipi_addr = chip->rf_sipi_addr;
	u32 data_and_addr;
	u32 old_data = 0;
	u32 shift;

	if (rf_path >= hal->rf_phy_num) {
		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	addr &= 0xff;
	mask &= RFREG_MASK;

	if (mask != RFREG_MASK) {
		/* Read-modify-write: merge the new field into old bits. */
		old_data = chip->ops->read_rf(rtwdev, rf_path, addr, RFREG_MASK);

		if (old_data == INV_RF_DATA) {
			rtw_err(rtwdev, "Write fail, rf is disabled\n");
			return false;
		}

		shift = __ffs(mask);
		data = ((old_data) & (~mask)) | (data << shift);
	}

	/* 8-bit address in bits [27:20], 20-bit data in bits [19:0]. */
	data_and_addr = ((addr << 20) | (data & 0x000fffff)) & 0x0fffffff;

	rtw_write32(rtwdev, sipi_addr[rf_path], data_and_addr);

	udelay(13);

	return true;
}
EXPORT_SYMBOL(rtw_phy_write_rf_reg_sipi);
1009
1010bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
1011 u32 addr, u32 mask, u32 data)
1012{
1013 struct rtw_hal *hal = &rtwdev->hal;
1014 struct rtw_chip_info *chip = rtwdev->chip;
1015 const u32 *base_addr = chip->rf_base_addr;
1016 u32 direct_addr;
1017
1018 if (rf_path >= hal->rf_phy_num) {
1019 rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
1020 return false;
1021 }
1022
1023 addr &= 0xff;
1024 direct_addr = base_addr[rf_path] + (addr << 2);
1025 mask &= RFREG_MASK;
1026
1027 rtw_write32_mask(rtwdev, direct_addr, mask, data);
1028
1029 udelay(1);
1030
1031 return true;
1032}
1033
/*
 * RF write dispatcher: register 0x00 goes through the SIPI serial
 * path; every other register uses the direct mirror write.
 */
bool rtw_phy_write_rf_reg_mix(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
			      u32 addr, u32 mask, u32 data)
{
	if (addr != 0x00)
		return rtw_phy_write_rf_reg(rtwdev, rf_path, addr, mask, data);

	return rtw_phy_write_rf_reg_sipi(rtwdev, rf_path, addr, mask, data);
}
EXPORT_SYMBOL(rtw_phy_write_rf_reg_mix);
1043
/*
 * Record the driver-side condition word (cut, package, platform, bus
 * interface, RFE option) that conditional parameter tables are matched
 * against in check_positive().  Unknown cut/package fall back to 15.
 */
void rtw_phy_setup_phy_cond(struct rtw_dev *rtwdev, u32 pkg)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_efuse *efuse = &rtwdev->efuse;
	struct rtw_phy_cond cond = {0};

	cond.cut = hal->cut_version ? hal->cut_version : 15;
	cond.pkg = pkg ? pkg : 15;
	cond.plat = 0x04;
	cond.rfe = efuse->rfe_option;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_USB:
		cond.intf = INTF_USB;
		break;
	case RTW_HCI_TYPE_SDIO:
		cond.intf = INTF_SDIO;
		break;
	case RTW_HCI_TYPE_PCIE:
	default:
		cond.intf = INTF_PCIE;
		break;
	}

	hal->phy_cond = cond;

	rtw_dbg(rtwdev, RTW_DBG_PHY, "phy cond=0x%08x\n", *((u32 *)&hal->phy_cond));
}
1072
1073static bool check_positive(struct rtw_dev *rtwdev, struct rtw_phy_cond cond)
1074{
1075 struct rtw_hal *hal = &rtwdev->hal;
1076 struct rtw_phy_cond drv_cond = hal->phy_cond;
1077
1078 if (cond.cut && cond.cut != drv_cond.cut)
1079 return false;
1080
1081 if (cond.pkg && cond.pkg != drv_cond.pkg)
1082 return false;
1083
1084 if (cond.intf && cond.intf != drv_cond.intf)
1085 return false;
1086
1087 if (cond.rfe != drv_cond.rfe)
1088 return false;
1089
1090 return true;
1091}
1092
/*
 * Walk a conditional PHY parameter table.  Entries are either branch
 * markers (if/elif/else/endif) or register writes; writes are applied
 * only while the active branch matches the driver's phy_cond.
 */
void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
{
	const union phy_table_tile *p = tbl->data;
	const union phy_table_tile *end = p + tbl->size / 2;
	struct rtw_phy_cond pos_cond = {0};
	bool is_matched = true, is_skipped = false;

	BUILD_BUG_ON(sizeof(union phy_table_tile) != sizeof(struct phy_cfg_pair));

	for (; p < end; p++) {
		if (p->cond.pos) {
			switch (p->cond.branch) {
			case BRANCH_ENDIF:
				/* Leave the conditional block entirely. */
				is_matched = true;
				is_skipped = false;
				break;
			case BRANCH_ELSE:
				/* Taken only if no earlier branch matched. */
				is_matched = is_skipped ? false : true;
				break;
			case BRANCH_IF:
			case BRANCH_ELIF:
			default:
				/* Remember the condition; evaluated at the
				 * matching "neg" marker below. */
				pos_cond = p->cond;
				break;
			}
		} else if (p->cond.neg) {
			/* Only the first matching branch is applied. */
			if (!is_skipped) {
				if (check_positive(rtwdev, pos_cond)) {
					is_matched = true;
					is_skipped = true;
				} else {
					is_matched = false;
					is_skipped = false;
				}
			} else {
				is_matched = false;
			}
		} else if (is_matched) {
			(*tbl->do_cfg)(rtwdev, tbl, p->cfg.addr, p->cfg.data);
		}
	}
}
EXPORT_SYMBOL(rtw_parse_tbl_phy_cond);
1136
/* Extract byte i of val and decode it from BCD (e.g. 0x43 -> 43). */
#define bcd_to_dec_pwr_by_rate(val, i) bcd2bin(val >> (i * 8))

/*
 * Extract the i-th per-rate power byte from a table word; the byte is
 * BCD-coded on chips that set is_pwr_by_rate_dec, plain binary
 * otherwise.
 */
static u8 tbl_to_dec_pwr_by_rate(struct rtw_dev *rtwdev, u32 hex, u8 i)
{
	if (rtwdev->chip->is_pwr_by_rate_dec)
		return bcd_to_dec_pwr_by_rate(hex, i);

	return (hex >> (i * 8)) & 0xFF;
}
1146
/* Decode one BB "power by rate" register write into the DESC_RATE*
 * indices it covers and their per-rate power values.
 *
 * @addr: register address from the BB PG table.
 * @mask: register bitmask (only consulted for address 0x86C).
 * @val: register value; each byte normally carries one rate's power.
 * @rate: out - DESC_RATE* indices covered by this register.
 * @pwr_by_rate: out - decoded power value for each entry in @rate.
 * @rate_num: out - number of valid entries in @rate/@pwr_by_rate.
 *
 * Most registers pack four one-byte entries; a few special cases
 * (0xE08, 0x86C, 0x838, 0x?E8) carry fewer.
 */
static void
rtw_phy_get_rate_values_of_txpwr_by_rate(struct rtw_dev *rtwdev,
					 u32 addr, u32 mask, u32 val, u8 *rate,
					 u8 *pwr_by_rate, u8 *rate_num)
{
	int i;

	switch (addr) {
	/* legacy OFDM 6M-18M */
	case 0xE00:
	case 0x830:
		rate[0] = DESC_RATE6M;
		rate[1] = DESC_RATE9M;
		rate[2] = DESC_RATE12M;
		rate[3] = DESC_RATE18M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	/* legacy OFDM 24M-54M */
	case 0xE04:
	case 0x834:
		rate[0] = DESC_RATE24M;
		rate[1] = DESC_RATE36M;
		rate[2] = DESC_RATE48M;
		rate[3] = DESC_RATE54M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	/* CCK 1M lives in byte 1 and is decoded as BCD unconditionally */
	case 0xE08:
		rate[0] = DESC_RATE1M;
		pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 1);
		*rate_num = 1;
		break;
	/* the mask selects which CCK rates this write carries */
	case 0x86C:
		if (mask == 0xffffff00) {
			rate[0] = DESC_RATE2M;
			rate[1] = DESC_RATE5_5M;
			rate[2] = DESC_RATE11M;
			for (i = 1; i < 4; ++i)
				pwr_by_rate[i - 1] =
					tbl_to_dec_pwr_by_rate(rtwdev, val, i);
			*rate_num = 3;
		} else if (mask == 0x000000ff) {
			rate[0] = DESC_RATE11M;
			pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 0);
			*rate_num = 1;
		}
		break;
	/* HT MCS0-3 */
	case 0xE10:
	case 0x83C:
		rate[0] = DESC_RATEMCS0;
		rate[1] = DESC_RATEMCS1;
		rate[2] = DESC_RATEMCS2;
		rate[3] = DESC_RATEMCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	/* HT MCS4-7 */
	case 0xE14:
	case 0x848:
		rate[0] = DESC_RATEMCS4;
		rate[1] = DESC_RATEMCS5;
		rate[2] = DESC_RATEMCS6;
		rate[3] = DESC_RATEMCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	/* HT MCS8-11 */
	case 0xE18:
	case 0x84C:
		rate[0] = DESC_RATEMCS8;
		rate[1] = DESC_RATEMCS9;
		rate[2] = DESC_RATEMCS10;
		rate[3] = DESC_RATEMCS11;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	/* HT MCS12-15 */
	case 0xE1C:
	case 0x868:
		rate[0] = DESC_RATEMCS12;
		rate[1] = DESC_RATEMCS13;
		rate[2] = DESC_RATEMCS14;
		rate[3] = DESC_RATEMCS15;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	/* CCK 1M/2M/5.5M packed in bytes 1-3 */
	case 0x838:
		rate[0] = DESC_RATE1M;
		rate[1] = DESC_RATE2M;
		rate[2] = DESC_RATE5_5M;
		for (i = 1; i < 4; ++i)
			pwr_by_rate[i - 1] = tbl_to_dec_pwr_by_rate(rtwdev,
								    val, i);
		*rate_num = 3;
		break;
	/* per-path register banks (0xC??/0xE??/0x18??/0x1A??) follow */
	case 0xC20:
	case 0xE20:
	case 0x1820:
	case 0x1A20:
		rate[0] = DESC_RATE1M;
		rate[1] = DESC_RATE2M;
		rate[2] = DESC_RATE5_5M;
		rate[3] = DESC_RATE11M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC24:
	case 0xE24:
	case 0x1824:
	case 0x1A24:
		rate[0] = DESC_RATE6M;
		rate[1] = DESC_RATE9M;
		rate[2] = DESC_RATE12M;
		rate[3] = DESC_RATE18M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC28:
	case 0xE28:
	case 0x1828:
	case 0x1A28:
		rate[0] = DESC_RATE24M;
		rate[1] = DESC_RATE36M;
		rate[2] = DESC_RATE48M;
		rate[3] = DESC_RATE54M;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC2C:
	case 0xE2C:
	case 0x182C:
	case 0x1A2C:
		rate[0] = DESC_RATEMCS0;
		rate[1] = DESC_RATEMCS1;
		rate[2] = DESC_RATEMCS2;
		rate[3] = DESC_RATEMCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC30:
	case 0xE30:
	case 0x1830:
	case 0x1A30:
		rate[0] = DESC_RATEMCS4;
		rate[1] = DESC_RATEMCS5;
		rate[2] = DESC_RATEMCS6;
		rate[3] = DESC_RATEMCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC34:
	case 0xE34:
	case 0x1834:
	case 0x1A34:
		rate[0] = DESC_RATEMCS8;
		rate[1] = DESC_RATEMCS9;
		rate[2] = DESC_RATEMCS10;
		rate[3] = DESC_RATEMCS11;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC38:
	case 0xE38:
	case 0x1838:
	case 0x1A38:
		rate[0] = DESC_RATEMCS12;
		rate[1] = DESC_RATEMCS13;
		rate[2] = DESC_RATEMCS14;
		rate[3] = DESC_RATEMCS15;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC3C:
	case 0xE3C:
	case 0x183C:
	case 0x1A3C:
		rate[0] = DESC_RATEVHT1SS_MCS0;
		rate[1] = DESC_RATEVHT1SS_MCS1;
		rate[2] = DESC_RATEVHT1SS_MCS2;
		rate[3] = DESC_RATEVHT1SS_MCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC40:
	case 0xE40:
	case 0x1840:
	case 0x1A40:
		rate[0] = DESC_RATEVHT1SS_MCS4;
		rate[1] = DESC_RATEVHT1SS_MCS5;
		rate[2] = DESC_RATEVHT1SS_MCS6;
		rate[3] = DESC_RATEVHT1SS_MCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	/* this register straddles the 1SS/2SS VHT boundary */
	case 0xC44:
	case 0xE44:
	case 0x1844:
	case 0x1A44:
		rate[0] = DESC_RATEVHT1SS_MCS8;
		rate[1] = DESC_RATEVHT1SS_MCS9;
		rate[2] = DESC_RATEVHT2SS_MCS0;
		rate[3] = DESC_RATEVHT2SS_MCS1;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC48:
	case 0xE48:
	case 0x1848:
	case 0x1A48:
		rate[0] = DESC_RATEVHT2SS_MCS2;
		rate[1] = DESC_RATEVHT2SS_MCS3;
		rate[2] = DESC_RATEVHT2SS_MCS4;
		rate[3] = DESC_RATEVHT2SS_MCS5;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xC4C:
	case 0xE4C:
	case 0x184C:
	case 0x1A4C:
		rate[0] = DESC_RATEVHT2SS_MCS6;
		rate[1] = DESC_RATEVHT2SS_MCS7;
		rate[2] = DESC_RATEVHT2SS_MCS8;
		rate[3] = DESC_RATEVHT2SS_MCS9;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	/* 3SS HT rates */
	case 0xCD8:
	case 0xED8:
	case 0x18D8:
	case 0x1AD8:
		rate[0] = DESC_RATEMCS16;
		rate[1] = DESC_RATEMCS17;
		rate[2] = DESC_RATEMCS18;
		rate[3] = DESC_RATEMCS19;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCDC:
	case 0xEDC:
	case 0x18DC:
	case 0x1ADC:
		rate[0] = DESC_RATEMCS20;
		rate[1] = DESC_RATEMCS21;
		rate[2] = DESC_RATEMCS22;
		rate[3] = DESC_RATEMCS23;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	/* 3SS VHT rates */
	case 0xCE0:
	case 0xEE0:
	case 0x18E0:
	case 0x1AE0:
		rate[0] = DESC_RATEVHT3SS_MCS0;
		rate[1] = DESC_RATEVHT3SS_MCS1;
		rate[2] = DESC_RATEVHT3SS_MCS2;
		rate[3] = DESC_RATEVHT3SS_MCS3;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	case 0xCE4:
	case 0xEE4:
	case 0x18E4:
	case 0x1AE4:
		rate[0] = DESC_RATEVHT3SS_MCS4;
		rate[1] = DESC_RATEVHT3SS_MCS5;
		rate[2] = DESC_RATEVHT3SS_MCS6;
		rate[3] = DESC_RATEVHT3SS_MCS7;
		for (i = 0; i < 4; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 4;
		break;
	/* only two rates remain in the last 3SS VHT register */
	case 0xCE8:
	case 0xEE8:
	case 0x18E8:
	case 0x1AE8:
		rate[0] = DESC_RATEVHT3SS_MCS8;
		rate[1] = DESC_RATEVHT3SS_MCS9;
		for (i = 0; i < 2; ++i)
			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
		*rate_num = 2;
		break;
	default:
		/* unknown address: leave *rate_num untouched and complain */
		rtw_warn(rtwdev, "invalid tx power index addr 0x%08x\n", addr);
		break;
	}
}
1451
1452static void rtw_phy_store_tx_power_by_rate(struct rtw_dev *rtwdev,
1453 u32 band, u32 rfpath, u32 txnum,
1454 u32 regaddr, u32 bitmask, u32 data)
1455{
1456 struct rtw_hal *hal = &rtwdev->hal;
1457 u8 rate_num = 0;
1458 u8 rate;
1459 u8 rates[RTW_RF_PATH_MAX] = {0};
1460 s8 offset;
1461 s8 pwr_by_rate[RTW_RF_PATH_MAX] = {0};
1462 int i;
1463
1464 rtw_phy_get_rate_values_of_txpwr_by_rate(rtwdev, regaddr, bitmask, data,
1465 rates, pwr_by_rate, &rate_num);
1466
1467 if (WARN_ON(rfpath >= RTW_RF_PATH_MAX ||
1468 (band != PHY_BAND_2G && band != PHY_BAND_5G) ||
1469 rate_num > RTW_RF_PATH_MAX))
1470 return;
1471
1472 for (i = 0; i < rate_num; i++) {
1473 offset = pwr_by_rate[i];
1474 rate = rates[i];
1475 if (band == PHY_BAND_2G)
1476 hal->tx_pwr_by_rate_offset_2g[rfpath][rate] = offset;
1477 else if (band == PHY_BAND_5G)
1478 hal->tx_pwr_by_rate_offset_5g[rfpath][rate] = offset;
1479 else
1480 continue;
1481 }
1482}
1483
1484void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
1485{
1486 const struct rtw_phy_pg_cfg_pair *p = tbl->data;
1487 const struct rtw_phy_pg_cfg_pair *end = p + tbl->size;
1488
1489 for (; p < end; p++) {
1490 if (p->addr == 0xfe || p->addr == 0xffe) {
1491 msleep(50);
1492 continue;
1493 }
1494 rtw_phy_store_tx_power_by_rate(rtwdev, p->band, p->rf_path,
1495 p->tx_num, p->addr, p->bitmask,
1496 p->data);
1497 }
1498}
1499EXPORT_SYMBOL(rtw_parse_tbl_bb_pg);
1500
/* Supported 5 GHz channel numbers; a channel's position in this array is
 * the index used by the tx_pwr_limit_5g tables (see rtw_channel_to_idx).
 */
static const u8 rtw_channel_idx_5g[RTW_MAX_CHANNEL_NUM_5G] = {
	36, 38, 40, 42, 44, 46, 48,
	52, 54, 56, 58, 60, 62, 64,
	100, 102, 104, 106, 108, 110, 112,
	116, 118, 120, 122, 124, 126, 128,
	132, 134, 136, 138, 140, 142, 144,
	149, 151, 153, 155, 157, 159, 161,
	165, 167, 169, 171, 173, 175, 177};
1509
1510static int rtw_channel_to_idx(u8 band, u8 channel)
1511{
1512 int ch_idx;
1513 u8 n_channel;
1514
1515 if (band == PHY_BAND_2G) {
1516 ch_idx = channel - 1;
1517 n_channel = RTW_MAX_CHANNEL_NUM_2G;
1518 } else if (band == PHY_BAND_5G) {
1519 n_channel = RTW_MAX_CHANNEL_NUM_5G;
1520 for (ch_idx = 0; ch_idx < n_channel; ch_idx++)
1521 if (rtw_channel_idx_5g[ch_idx] == channel)
1522 break;
1523 } else {
1524 return -1;
1525 }
1526
1527 if (ch_idx >= n_channel)
1528 return -1;
1529
1530 return ch_idx;
1531}
1532
1533static void rtw_phy_set_tx_power_limit(struct rtw_dev *rtwdev, u8 regd, u8 band,
1534 u8 bw, u8 rs, u8 ch, s8 pwr_limit)
1535{
1536 struct rtw_hal *hal = &rtwdev->hal;
1537 u8 max_power_index = rtwdev->chip->max_power_index;
1538 s8 ww;
1539 int ch_idx;
1540
1541 pwr_limit = clamp_t(s8, pwr_limit,
1542 -max_power_index, max_power_index);
1543 ch_idx = rtw_channel_to_idx(band, ch);
1544
1545 if (regd >= RTW_REGD_MAX || bw >= RTW_CHANNEL_WIDTH_MAX ||
1546 rs >= RTW_RATE_SECTION_MAX || ch_idx < 0) {
1547 WARN(1,
1548 "wrong txpwr_lmt regd=%u, band=%u bw=%u, rs=%u, ch_idx=%u, pwr_limit=%d\n",
1549 regd, band, bw, rs, ch_idx, pwr_limit);
1550 return;
1551 }
1552
1553 if (band == PHY_BAND_2G) {
1554 hal->tx_pwr_limit_2g[regd][bw][rs][ch_idx] = pwr_limit;
1555 ww = hal->tx_pwr_limit_2g[RTW_REGD_WW][bw][rs][ch_idx];
1556 ww = min_t(s8, ww, pwr_limit);
1557 hal->tx_pwr_limit_2g[RTW_REGD_WW][bw][rs][ch_idx] = ww;
1558 } else if (band == PHY_BAND_5G) {
1559 hal->tx_pwr_limit_5g[regd][bw][rs][ch_idx] = pwr_limit;
1560 ww = hal->tx_pwr_limit_5g[RTW_REGD_WW][bw][rs][ch_idx];
1561 ww = min_t(s8, ww, pwr_limit);
1562 hal->tx_pwr_limit_5g[RTW_REGD_WW][bw][rs][ch_idx] = ww;
1563 }
1564}
1565
1566
1567static void
1568rtw_xref_5g_txpwr_lmt(struct rtw_dev *rtwdev, u8 regd,
1569 u8 bw, u8 ch_idx, u8 rs_ht, u8 rs_vht)
1570{
1571 struct rtw_hal *hal = &rtwdev->hal;
1572 u8 max_power_index = rtwdev->chip->max_power_index;
1573 s8 lmt_ht = hal->tx_pwr_limit_5g[regd][bw][rs_ht][ch_idx];
1574 s8 lmt_vht = hal->tx_pwr_limit_5g[regd][bw][rs_vht][ch_idx];
1575
1576 if (lmt_ht == lmt_vht)
1577 return;
1578
1579 if (lmt_ht == max_power_index)
1580 hal->tx_pwr_limit_5g[regd][bw][rs_ht][ch_idx] = lmt_vht;
1581
1582 else if (lmt_vht == max_power_index)
1583 hal->tx_pwr_limit_5g[regd][bw][rs_vht][ch_idx] = lmt_ht;
1584}
1585
1586
1587static void
1588rtw_xref_txpwr_lmt_by_rs(struct rtw_dev *rtwdev, u8 regd, u8 bw, u8 ch_idx)
1589{
1590 u8 rs_idx, rs_ht, rs_vht;
1591 u8 rs_cmp[2][2] = {{RTW_RATE_SECTION_HT_1S, RTW_RATE_SECTION_VHT_1S},
1592 {RTW_RATE_SECTION_HT_2S, RTW_RATE_SECTION_VHT_2S} };
1593
1594 for (rs_idx = 0; rs_idx < 2; rs_idx++) {
1595 rs_ht = rs_cmp[rs_idx][0];
1596 rs_vht = rs_cmp[rs_idx][1];
1597
1598 rtw_xref_5g_txpwr_lmt(rtwdev, regd, bw, ch_idx, rs_ht, rs_vht);
1599 }
1600}
1601
1602
1603static void
1604rtw_xref_5g_txpwr_lmt_by_ch(struct rtw_dev *rtwdev, u8 regd, u8 bw)
1605{
1606 u8 ch_idx;
1607
1608 for (ch_idx = 0; ch_idx < RTW_MAX_CHANNEL_NUM_5G; ch_idx++)
1609 rtw_xref_txpwr_lmt_by_rs(rtwdev, regd, bw, ch_idx);
1610}
1611
1612
1613static void
1614rtw_xref_txpwr_lmt_by_bw(struct rtw_dev *rtwdev, u8 regd)
1615{
1616 u8 bw;
1617
1618 for (bw = RTW_CHANNEL_WIDTH_20; bw <= RTW_CHANNEL_WIDTH_40; bw++)
1619 rtw_xref_5g_txpwr_lmt_by_ch(rtwdev, regd, bw);
1620}
1621
1622
/* Run the HT<->VHT limit cross-reference for every regulatory domain */
static void rtw_xref_txpwr_lmt(struct rtw_dev *rtwdev)
{
	u8 regd;

	for (regd = 0; regd < RTW_REGD_MAX; regd++)
		rtw_xref_txpwr_lmt_by_bw(rtwdev, regd);
}
1630
1631static void
1632__cfg_txpwr_lmt_by_alt(struct rtw_hal *hal, u8 regd, u8 regd_alt, u8 bw, u8 rs)
1633{
1634 u8 ch;
1635
1636 for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++)
1637 hal->tx_pwr_limit_2g[regd][bw][rs][ch] =
1638 hal->tx_pwr_limit_2g[regd_alt][bw][rs][ch];
1639
1640 for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++)
1641 hal->tx_pwr_limit_5g[regd][bw][rs][ch] =
1642 hal->tx_pwr_limit_5g[regd_alt][bw][rs][ch];
1643}
1644
1645static void
1646rtw_cfg_txpwr_lmt_by_alt(struct rtw_dev *rtwdev, u8 regd, u8 regd_alt)
1647{
1648 u8 bw, rs;
1649
1650 for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
1651 for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
1652 __cfg_txpwr_lmt_by_alt(&rtwdev->hal, regd, regd_alt,
1653 bw, rs);
1654}
1655
1656void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev,
1657 const struct rtw_table *tbl)
1658{
1659 const struct rtw_txpwr_lmt_cfg_pair *p = tbl->data;
1660 const struct rtw_txpwr_lmt_cfg_pair *end = p + tbl->size;
1661 u32 regd_cfg_flag = 0;
1662 u8 regd_alt;
1663 u8 i;
1664
1665 for (; p < end; p++) {
1666 regd_cfg_flag |= BIT(p->regd);
1667 rtw_phy_set_tx_power_limit(rtwdev, p->regd, p->band,
1668 p->bw, p->rs, p->ch, p->txpwr_lmt);
1669 }
1670
1671 for (i = 0; i < RTW_REGD_MAX; i++) {
1672 if (i == RTW_REGD_WW)
1673 continue;
1674
1675 if (regd_cfg_flag & BIT(i))
1676 continue;
1677
1678 rtw_dbg(rtwdev, RTW_DBG_REGD,
1679 "txpwr regd %d does not be configured\n", i);
1680
1681 if (rtw_regd_has_alt(i, ®d_alt) &&
1682 regd_cfg_flag & BIT(regd_alt)) {
1683 rtw_dbg(rtwdev, RTW_DBG_REGD,
1684 "cfg txpwr regd %d by regd %d as alternative\n",
1685 i, regd_alt);
1686
1687 rtw_cfg_txpwr_lmt_by_alt(rtwdev, i, regd_alt);
1688 continue;
1689 }
1690
1691 rtw_dbg(rtwdev, RTW_DBG_REGD, "cfg txpwr regd %d by WW\n", i);
1692 rtw_cfg_txpwr_lmt_by_alt(rtwdev, i, RTW_REGD_WW);
1693 }
1694
1695 rtw_xref_txpwr_lmt(rtwdev);
1696}
1697EXPORT_SYMBOL(rtw_parse_tbl_txpwr_lmt);
1698
/* Table op: write one byte of @data to MAC register @addr */
void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		     u32 addr, u32 data)
{
	rtw_write8(rtwdev, addr, data);
}
EXPORT_SYMBOL(rtw_phy_cfg_mac);
1705
/* Table op: write one 32-bit AGC register */
void rtw_phy_cfg_agc(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
		     u32 addr, u32 data)
{
	rtw_write32(rtwdev, addr, data);
}
EXPORT_SYMBOL(rtw_phy_cfg_agc);
1712
1713void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
1714 u32 addr, u32 data)
1715{
1716 if (addr == 0xfe)
1717 msleep(50);
1718 else if (addr == 0xfd)
1719 mdelay(5);
1720 else if (addr == 0xfc)
1721 mdelay(1);
1722 else if (addr == 0xfb)
1723 usleep_range(50, 60);
1724 else if (addr == 0xfa)
1725 udelay(5);
1726 else if (addr == 0xf9)
1727 udelay(1);
1728 else
1729 rtw_write32(rtwdev, addr, data);
1730}
1731EXPORT_SYMBOL(rtw_phy_cfg_bb);
1732
1733void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
1734 u32 addr, u32 data)
1735{
1736 if (addr == 0xffe) {
1737 msleep(50);
1738 } else if (addr == 0xfe) {
1739 usleep_range(100, 110);
1740 } else {
1741 rtw_write_rf(rtwdev, tbl->rf_path, addr, RFREG_MASK, data);
1742 udelay(1);
1743 }
1744}
1745EXPORT_SYMBOL(rtw_phy_cfg_rf);
1746
/* Load the chip's RF calibration (RFK) init table, if it has one, and
 * mark the DPK power state as on.
 */
static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;

	if (!chip->rfk_init_tbl)
		return;

	/* NOTE(review): register pokes required before loading the RFK
	 * table; presumably they enable the calibration blocks - the
	 * meaning of the 0x1e24/0x1cd0 bits is not visible here.
	 */
	rtw_write32_mask(rtwdev, 0x1e24, BIT(17), 0x1);
	rtw_write32_mask(rtwdev, 0x1cd0, BIT(28), 0x1);
	rtw_write32_mask(rtwdev, 0x1cd0, BIT(29), 0x1);
	rtw_write32_mask(rtwdev, 0x1cd0, BIT(30), 0x1);
	rtw_write32_mask(rtwdev, 0x1cd0, BIT(31), 0x0);

	rtw_load_table(rtwdev, chip->rfk_init_tbl);

	dpk_info->is_dpk_pwr_on = true;
}
1765
1766void rtw_phy_load_tables(struct rtw_dev *rtwdev)
1767{
1768 struct rtw_chip_info *chip = rtwdev->chip;
1769 u8 rf_path;
1770
1771 rtw_load_table(rtwdev, chip->mac_tbl);
1772 rtw_load_table(rtwdev, chip->bb_tbl);
1773 rtw_load_table(rtwdev, chip->agc_tbl);
1774 rtw_load_rfk_table(rtwdev);
1775
1776 for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) {
1777 const struct rtw_table *tbl;
1778
1779 tbl = chip->rf_tbl[rf_path];
1780 rtw_load_table(rtwdev, tbl);
1781 }
1782}
1783EXPORT_SYMBOL(rtw_phy_load_tables);
1784
/* Map a (channel, rate) pair to the base-power group index used by the
 * cck_base/bw40_base efuse arrays.  2.4 GHz and 5 GHz channels share the
 * group space.  Channel 14 is special: CCK rates get their own group 5,
 * while OFDM on channel 14 shares group 4 (only there does @rate matter).
 */
static u8 rtw_get_channel_group(u8 channel, u8 rate)
{
	switch (channel) {
	default:
		/* unknown channel: warn and fall back to group 0 */
		WARN_ON(1);
		fallthrough;
	case 1:
	case 2:
	case 36:
	case 38:
	case 40:
	case 42:
		return 0;
	case 3:
	case 4:
	case 5:
	case 44:
	case 46:
	case 48:
	case 50:
		return 1;
	case 6:
	case 7:
	case 8:
	case 52:
	case 54:
	case 56:
	case 58:
		return 2;
	case 9:
	case 10:
	case 11:
	case 60:
	case 62:
	case 64:
		return 3;
	case 12:
	case 13:
	case 100:
	case 102:
	case 104:
	case 106:
		return 4;
	case 14:
		/* CCK-only group 5 vs OFDM sharing group 4 */
		return rate <= DESC_RATE11M ? 5 : 4;
	case 108:
	case 110:
	case 112:
	case 114:
		return 5;
	case 116:
	case 118:
	case 120:
	case 122:
		return 6;
	case 124:
	case 126:
	case 128:
	case 130:
		return 7;
	case 132:
	case 134:
	case 136:
	case 138:
		return 8;
	case 140:
	case 142:
	case 144:
		return 9;
	case 149:
	case 151:
	case 153:
	case 155:
		return 10;
	case 157:
	case 159:
	case 161:
		return 11;
	case 165:
	case 167:
	case 169:
	case 171:
		return 12;
	case 173:
	case 175:
	case 177:
		return 13;
	}
}
1874
/* Return the TX power adjustment applied when DPD (digital
 * pre-distortion) is disabled for @rate: -6 (scaled by the chip's TX
 * gain index factor) for rates flagged in chip->dpd_ratemask, else 0.
 */
static s8 rtw_phy_get_dis_dpd_by_rate_diff(struct rtw_dev *rtwdev, u16 rate)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	s8 dpd_diff = 0;

	if (!chip->en_dis_dpd)
		return 0;

/* Expands to a case label that checks the matching DIS_DPD_RATE* bit */
#define RTW_DPD_RATE_CHECK(_rate) \
	case DESC_RATE ## _rate: \
		if (DIS_DPD_RATE ## _rate & chip->dpd_ratemask) \
			dpd_diff = -6 * chip->txgi_factor; \
		break

	switch (rate) {
	RTW_DPD_RATE_CHECK(6M);
	RTW_DPD_RATE_CHECK(9M);
	RTW_DPD_RATE_CHECK(MCS0);
	RTW_DPD_RATE_CHECK(MCS1);
	RTW_DPD_RATE_CHECK(MCS8);
	RTW_DPD_RATE_CHECK(MCS9);
	RTW_DPD_RATE_CHECK(VHT1SS_MCS0);
	RTW_DPD_RATE_CHECK(VHT1SS_MCS1);
	RTW_DPD_RATE_CHECK(VHT2SS_MCS0);
	RTW_DPD_RATE_CHECK(VHT2SS_MCS1);
	}
#undef RTW_DPD_RATE_CHECK

	return dpd_diff;
}
1905
1906static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
1907 struct rtw_2g_txpwr_idx *pwr_idx_2g,
1908 enum rtw_bandwidth bandwidth,
1909 u8 rate, u8 group)
1910{
1911 struct rtw_chip_info *chip = rtwdev->chip;
1912 u8 tx_power;
1913 bool mcs_rate;
1914 bool above_2ss;
1915 u8 factor = chip->txgi_factor;
1916
1917 if (rate <= DESC_RATE11M)
1918 tx_power = pwr_idx_2g->cck_base[group];
1919 else
1920 tx_power = pwr_idx_2g->bw40_base[group];
1921
1922 if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
1923 tx_power += pwr_idx_2g->ht_1s_diff.ofdm * factor;
1924
1925 mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
1926 (rate >= DESC_RATEVHT1SS_MCS0 &&
1927 rate <= DESC_RATEVHT2SS_MCS9);
1928 above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
1929 (rate >= DESC_RATEVHT2SS_MCS0);
1930
1931 if (!mcs_rate)
1932 return tx_power;
1933
1934 switch (bandwidth) {
1935 default:
1936 WARN_ON(1);
1937 fallthrough;
1938 case RTW_CHANNEL_WIDTH_20:
1939 tx_power += pwr_idx_2g->ht_1s_diff.bw20 * factor;
1940 if (above_2ss)
1941 tx_power += pwr_idx_2g->ht_2s_diff.bw20 * factor;
1942 break;
1943 case RTW_CHANNEL_WIDTH_40:
1944
1945 if (above_2ss)
1946 tx_power += pwr_idx_2g->ht_2s_diff.bw40 * factor;
1947 break;
1948 }
1949
1950 return tx_power;
1951}
1952
1953static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
1954 struct rtw_5g_txpwr_idx *pwr_idx_5g,
1955 enum rtw_bandwidth bandwidth,
1956 u8 rate, u8 group)
1957{
1958 struct rtw_chip_info *chip = rtwdev->chip;
1959 u8 tx_power;
1960 u8 upper, lower;
1961 bool mcs_rate;
1962 bool above_2ss;
1963 u8 factor = chip->txgi_factor;
1964
1965 tx_power = pwr_idx_5g->bw40_base[group];
1966
1967 mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
1968 (rate >= DESC_RATEVHT1SS_MCS0 &&
1969 rate <= DESC_RATEVHT2SS_MCS9);
1970 above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
1971 (rate >= DESC_RATEVHT2SS_MCS0);
1972
1973 if (!mcs_rate) {
1974 tx_power += pwr_idx_5g->ht_1s_diff.ofdm * factor;
1975 return tx_power;
1976 }
1977
1978 switch (bandwidth) {
1979 default:
1980 WARN_ON(1);
1981 fallthrough;
1982 case RTW_CHANNEL_WIDTH_20:
1983 tx_power += pwr_idx_5g->ht_1s_diff.bw20 * factor;
1984 if (above_2ss)
1985 tx_power += pwr_idx_5g->ht_2s_diff.bw20 * factor;
1986 break;
1987 case RTW_CHANNEL_WIDTH_40:
1988
1989 if (above_2ss)
1990 tx_power += pwr_idx_5g->ht_2s_diff.bw40 * factor;
1991 break;
1992 case RTW_CHANNEL_WIDTH_80:
1993
1994 lower = pwr_idx_5g->bw40_base[group];
1995 upper = pwr_idx_5g->bw40_base[group + 1];
1996
1997 tx_power = (lower + upper) / 2;
1998 tx_power += pwr_idx_5g->vht_1s_diff.bw80 * factor;
1999 if (above_2ss)
2000 tx_power += pwr_idx_5g->vht_2s_diff.bw80 * factor;
2001 break;
2002 }
2003
2004 return tx_power;
2005}
2006
2007static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
2008 enum rtw_bandwidth bw, u8 rf_path,
2009 u8 rate, u8 channel, u8 regd)
2010{
2011 struct rtw_hal *hal = &rtwdev->hal;
2012 u8 *cch_by_bw = hal->cch_by_bw;
2013 s8 power_limit = (s8)rtwdev->chip->max_power_index;
2014 u8 rs;
2015 int ch_idx;
2016 u8 cur_bw, cur_ch;
2017 s8 cur_lmt;
2018
2019 if (regd > RTW_REGD_WW)
2020 return power_limit;
2021
2022 if (rate >= DESC_RATE1M && rate <= DESC_RATE11M)
2023 rs = RTW_RATE_SECTION_CCK;
2024 else if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
2025 rs = RTW_RATE_SECTION_OFDM;
2026 else if (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS7)
2027 rs = RTW_RATE_SECTION_HT_1S;
2028 else if (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15)
2029 rs = RTW_RATE_SECTION_HT_2S;
2030 else if (rate >= DESC_RATEVHT1SS_MCS0 && rate <= DESC_RATEVHT1SS_MCS9)
2031 rs = RTW_RATE_SECTION_VHT_1S;
2032 else if (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9)
2033 rs = RTW_RATE_SECTION_VHT_2S;
2034 else
2035 goto err;
2036
2037
2038 if (rs == RTW_RATE_SECTION_CCK || rs == RTW_RATE_SECTION_OFDM)
2039 bw = RTW_CHANNEL_WIDTH_20;
2040
2041
2042 if (rs == RTW_RATE_SECTION_HT_1S || rs == RTW_RATE_SECTION_HT_2S)
2043 bw = min_t(u8, bw, RTW_CHANNEL_WIDTH_40);
2044
2045
2046 for (cur_bw = RTW_CHANNEL_WIDTH_20; cur_bw <= bw; cur_bw++) {
2047 cur_ch = cch_by_bw[cur_bw];
2048
2049 ch_idx = rtw_channel_to_idx(band, cur_ch);
2050 if (ch_idx < 0)
2051 goto err;
2052
2053 cur_lmt = cur_ch <= RTW_MAX_CHANNEL_NUM_2G ?
2054 hal->tx_pwr_limit_2g[regd][cur_bw][rs][ch_idx] :
2055 hal->tx_pwr_limit_5g[regd][cur_bw][rs][ch_idx];
2056
2057 power_limit = min_t(s8, cur_lmt, power_limit);
2058 }
2059
2060 return power_limit;
2061
2062err:
2063 WARN(1, "invalid arguments, band=%d, bw=%d, path=%d, rate=%d, ch=%d\n",
2064 band, bw, rf_path, rate, channel);
2065 return (s8)rtwdev->chip->max_power_index;
2066}
2067
2068void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
2069 u8 ch, u8 regd, struct rtw_power_params *pwr_param)
2070{
2071 struct rtw_hal *hal = &rtwdev->hal;
2072 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
2073 struct rtw_txpwr_idx *pwr_idx;
2074 u8 group, band;
2075 u8 *base = &pwr_param->pwr_base;
2076 s8 *offset = &pwr_param->pwr_offset;
2077 s8 *limit = &pwr_param->pwr_limit;
2078 s8 *remnant = &pwr_param->pwr_remnant;
2079
2080 pwr_idx = &rtwdev->efuse.txpwr_idx_table[path];
2081 group = rtw_get_channel_group(ch, rate);
2082
2083
2084 if (IS_CH_2G_BAND(ch)) {
2085 band = PHY_BAND_2G;
2086 *base = rtw_phy_get_2g_tx_power_index(rtwdev,
2087 &pwr_idx->pwr_idx_2g,
2088 bw, rate, group);
2089 *offset = hal->tx_pwr_by_rate_offset_2g[path][rate];
2090 } else {
2091 band = PHY_BAND_5G;
2092 *base = rtw_phy_get_5g_tx_power_index(rtwdev,
2093 &pwr_idx->pwr_idx_5g,
2094 bw, rate, group);
2095 *offset = hal->tx_pwr_by_rate_offset_5g[path][rate];
2096 }
2097
2098 *limit = rtw_phy_get_tx_power_limit(rtwdev, band, bw, path,
2099 rate, ch, regd);
2100 *remnant = (rate <= DESC_RATE11M ? dm_info->txagc_remnant_cck :
2101 dm_info->txagc_remnant_ofdm);
2102}
2103
2104u8
2105rtw_phy_get_tx_power_index(struct rtw_dev *rtwdev, u8 rf_path, u8 rate,
2106 enum rtw_bandwidth bandwidth, u8 channel, u8 regd)
2107{
2108 struct rtw_power_params pwr_param = {0};
2109 u8 tx_power;
2110 s8 offset;
2111
2112 rtw_get_tx_power_params(rtwdev, rf_path, rate, bandwidth,
2113 channel, regd, &pwr_param);
2114
2115 tx_power = pwr_param.pwr_base;
2116 offset = min_t(s8, pwr_param.pwr_offset, pwr_param.pwr_limit);
2117
2118 if (rtwdev->chip->en_dis_dpd)
2119 offset += rtw_phy_get_dis_dpd_by_rate_diff(rtwdev, rate);
2120
2121 tx_power += offset + pwr_param.pwr_remnant;
2122
2123 if (tx_power > rtwdev->chip->max_power_index)
2124 tx_power = rtwdev->chip->max_power_index;
2125
2126 return tx_power;
2127}
2128EXPORT_SYMBOL(rtw_phy_get_tx_power_index);
2129
2130static void rtw_phy_set_tx_power_index_by_rs(struct rtw_dev *rtwdev,
2131 u8 ch, u8 path, u8 rs)
2132{
2133 struct rtw_hal *hal = &rtwdev->hal;
2134 u8 regd = rtw_regd_get(rtwdev);
2135 u8 *rates;
2136 u8 size;
2137 u8 rate;
2138 u8 pwr_idx;
2139 u8 bw;
2140 int i;
2141
2142 if (rs >= RTW_RATE_SECTION_MAX)
2143 return;
2144
2145 rates = rtw_rate_section[rs];
2146 size = rtw_rate_size[rs];
2147 bw = hal->current_band_width;
2148 for (i = 0; i < size; i++) {
2149 rate = rates[i];
2150 pwr_idx = rtw_phy_get_tx_power_index(rtwdev, path, rate,
2151 bw, ch, regd);
2152 hal->tx_pwr_tbl[path][rate] = pwr_idx;
2153 }
2154}
2155
2156
2157
2158
2159
2160
2161static void rtw_phy_set_tx_power_level_by_path(struct rtw_dev *rtwdev,
2162 u8 ch, u8 path)
2163{
2164 struct rtw_hal *hal = &rtwdev->hal;
2165 u8 rs;
2166
2167
2168 if (hal->current_band_type == RTW_BAND_2G)
2169 rs = RTW_RATE_SECTION_CCK;
2170 else
2171 rs = RTW_RATE_SECTION_OFDM;
2172
2173 for (; rs < RTW_RATE_SECTION_MAX; rs++)
2174 rtw_phy_set_tx_power_index_by_rs(rtwdev, ch, path, rs);
2175}
2176
/* Recompute the TX power index for every rate on every RF path for
 * @channel, then have the chip ops program them into hardware.
 * Serialized by hal->tx_power_mutex.
 */
void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_hal *hal = &rtwdev->hal;
	u8 path;

	mutex_lock(&hal->tx_power_mutex);

	/* fill hal->tx_pwr_tbl for each path... */
	for (path = 0; path < hal->rf_path_num; path++)
		rtw_phy_set_tx_power_level_by_path(rtwdev, channel, path);

	/* ...then push the whole table to the chip */
	chip->ops->set_tx_power_index(rtwdev);
	mutex_unlock(&hal->tx_power_mutex);
}
EXPORT_SYMBOL(rtw_phy_set_tx_power_level);
2192
2193static void
2194rtw_phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path,
2195 u8 rs, u8 size, u8 *rates)
2196{
2197 u8 rate;
2198 u8 base_idx, rate_idx;
2199 s8 base_2g, base_5g;
2200
2201 if (rs >= RTW_RATE_SECTION_VHT_1S)
2202 base_idx = rates[size - 3];
2203 else
2204 base_idx = rates[size - 1];
2205 base_2g = hal->tx_pwr_by_rate_offset_2g[path][base_idx];
2206 base_5g = hal->tx_pwr_by_rate_offset_5g[path][base_idx];
2207 hal->tx_pwr_by_rate_base_2g[path][rs] = base_2g;
2208 hal->tx_pwr_by_rate_base_5g[path][rs] = base_5g;
2209 for (rate = 0; rate < size; rate++) {
2210 rate_idx = rates[rate];
2211 hal->tx_pwr_by_rate_offset_2g[path][rate_idx] -= base_2g;
2212 hal->tx_pwr_by_rate_offset_5g[path][rate_idx] -= base_5g;
2213 }
2214}
2215
2216void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal)
2217{
2218 u8 path;
2219
2220 for (path = 0; path < RTW_RF_PATH_MAX; path++) {
2221 rtw_phy_tx_power_by_rate_config_by_path(hal, path,
2222 RTW_RATE_SECTION_CCK,
2223 rtw_cck_size, rtw_cck_rates);
2224 rtw_phy_tx_power_by_rate_config_by_path(hal, path,
2225 RTW_RATE_SECTION_OFDM,
2226 rtw_ofdm_size, rtw_ofdm_rates);
2227 rtw_phy_tx_power_by_rate_config_by_path(hal, path,
2228 RTW_RATE_SECTION_HT_1S,
2229 rtw_ht_1s_size, rtw_ht_1s_rates);
2230 rtw_phy_tx_power_by_rate_config_by_path(hal, path,
2231 RTW_RATE_SECTION_HT_2S,
2232 rtw_ht_2s_size, rtw_ht_2s_rates);
2233 rtw_phy_tx_power_by_rate_config_by_path(hal, path,
2234 RTW_RATE_SECTION_VHT_1S,
2235 rtw_vht_1s_size, rtw_vht_1s_rates);
2236 rtw_phy_tx_power_by_rate_config_by_path(hal, path,
2237 RTW_RATE_SECTION_VHT_2S,
2238 rtw_vht_2s_size, rtw_vht_2s_rates);
2239 }
2240}
2241
2242static void
2243__rtw_phy_tx_power_limit_config(struct rtw_hal *hal, u8 regd, u8 bw, u8 rs)
2244{
2245 s8 base;
2246 u8 ch;
2247
2248 for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++) {
2249 base = hal->tx_pwr_by_rate_base_2g[0][rs];
2250 hal->tx_pwr_limit_2g[regd][bw][rs][ch] -= base;
2251 }
2252
2253 for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++) {
2254 base = hal->tx_pwr_by_rate_base_5g[0][rs];
2255 hal->tx_pwr_limit_5g[regd][bw][rs][ch] -= base;
2256 }
2257}
2258
2259void rtw_phy_tx_power_limit_config(struct rtw_hal *hal)
2260{
2261 u8 regd, bw, rs;
2262
2263
2264 hal->cch_by_bw[RTW_CHANNEL_WIDTH_20] = 1;
2265
2266 for (regd = 0; regd < RTW_REGD_MAX; regd++)
2267 for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
2268 for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
2269 __rtw_phy_tx_power_limit_config(hal, regd, bw, rs);
2270}
2271
2272static void rtw_phy_init_tx_power_limit(struct rtw_dev *rtwdev,
2273 u8 regd, u8 bw, u8 rs)
2274{
2275 struct rtw_hal *hal = &rtwdev->hal;
2276 s8 max_power_index = (s8)rtwdev->chip->max_power_index;
2277 u8 ch;
2278
2279
2280 for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++)
2281 hal->tx_pwr_limit_2g[regd][bw][rs][ch] = max_power_index;
2282
2283
2284 for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++)
2285 hal->tx_pwr_limit_5g[regd][bw][rs][ch] = max_power_index;
2286}
2287
2288void rtw_phy_init_tx_power(struct rtw_dev *rtwdev)
2289{
2290 struct rtw_hal *hal = &rtwdev->hal;
2291 u8 regd, path, rate, rs, bw;
2292
2293
2294 for (path = 0; path < RTW_RF_PATH_MAX; path++) {
2295 for (rate = 0; rate < DESC_RATE_MAX; rate++) {
2296 hal->tx_pwr_by_rate_offset_2g[path][rate] = 0;
2297 hal->tx_pwr_by_rate_offset_5g[path][rate] = 0;
2298 }
2299 }
2300
2301
2302 for (regd = 0; regd < RTW_REGD_MAX; regd++)
2303 for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
2304 for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
2305 rtw_phy_init_tx_power_limit(rtwdev, regd, bw,
2306 rs);
2307}
2308
2309void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
2310 struct rtw_swing_table *swing_table)
2311{
2312 const struct rtw_pwr_track_tbl *tbl = rtwdev->chip->pwr_track_tbl;
2313 u8 channel = rtwdev->hal.current_channel;
2314
2315 if (IS_CH_2G_BAND(channel)) {
2316 if (rtwdev->dm_info.tx_rate <= DESC_RATE11M) {
2317 swing_table->p[RF_PATH_A] = tbl->pwrtrk_2g_ccka_p;
2318 swing_table->n[RF_PATH_A] = tbl->pwrtrk_2g_ccka_n;
2319 swing_table->p[RF_PATH_B] = tbl->pwrtrk_2g_cckb_p;
2320 swing_table->n[RF_PATH_B] = tbl->pwrtrk_2g_cckb_n;
2321 } else {
2322 swing_table->p[RF_PATH_A] = tbl->pwrtrk_2ga_p;
2323 swing_table->n[RF_PATH_A] = tbl->pwrtrk_2ga_n;
2324 swing_table->p[RF_PATH_B] = tbl->pwrtrk_2gb_p;
2325 swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n;
2326 }
2327 } else if (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel)) {
2328 swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_1];
2329 swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_1];
2330 swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_1];
2331 swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_1];
2332 } else if (IS_CH_5G_BAND_3(channel)) {
2333 swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_2];
2334 swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_2];
2335 swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_2];
2336 swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_2];
2337 } else if (IS_CH_5G_BAND_4(channel)) {
2338 swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_3];
2339 swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_3];
2340 swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_3];
2341 swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_3];
2342 } else {
2343 swing_table->p[RF_PATH_A] = tbl->pwrtrk_2ga_p;
2344 swing_table->n[RF_PATH_A] = tbl->pwrtrk_2ga_n;
2345 swing_table->p[RF_PATH_B] = tbl->pwrtrk_2gb_p;
2346 swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n;
2347 }
2348}
2349EXPORT_SYMBOL(rtw_phy_config_swing_table);
2350
2351void rtw_phy_pwrtrack_avg(struct rtw_dev *rtwdev, u8 thermal, u8 path)
2352{
2353 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
2354
2355 ewma_thermal_add(&dm_info->avg_thermal[path], thermal);
2356 dm_info->thermal_avg[path] =
2357 ewma_thermal_read(&dm_info->avg_thermal[path]);
2358}
2359EXPORT_SYMBOL(rtw_phy_pwrtrack_avg);
2360
2361bool rtw_phy_pwrtrack_thermal_changed(struct rtw_dev *rtwdev, u8 thermal,
2362 u8 path)
2363{
2364 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
2365 u8 avg = ewma_thermal_read(&dm_info->avg_thermal[path]);
2366
2367 if (avg == thermal)
2368 return false;
2369
2370 return true;
2371}
2372EXPORT_SYMBOL(rtw_phy_pwrtrack_thermal_changed);
2373
2374u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path)
2375{
2376 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
2377 u8 therm_avg, therm_efuse, therm_delta;
2378
2379 therm_avg = dm_info->thermal_avg[path];
2380 therm_efuse = rtwdev->efuse.thermal_meter[path];
2381 therm_delta = abs(therm_avg - therm_efuse);
2382
2383 return min_t(u8, therm_delta, RTW_PWR_TRK_TBL_SZ - 1);
2384}
2385EXPORT_SYMBOL(rtw_phy_pwrtrack_get_delta);
2386
2387s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
2388 struct rtw_swing_table *swing_table,
2389 u8 tbl_path, u8 therm_path, u8 delta)
2390{
2391 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
2392 const u8 *delta_swing_table_idx_pos;
2393 const u8 *delta_swing_table_idx_neg;
2394
2395 if (delta >= RTW_PWR_TRK_TBL_SZ) {
2396 rtw_warn(rtwdev, "power track table overflow\n");
2397 return 0;
2398 }
2399
2400 if (!swing_table) {
2401 rtw_warn(rtwdev, "swing table not configured\n");
2402 return 0;
2403 }
2404
2405 delta_swing_table_idx_pos = swing_table->p[tbl_path];
2406 delta_swing_table_idx_neg = swing_table->n[tbl_path];
2407
2408 if (!delta_swing_table_idx_pos || !delta_swing_table_idx_neg) {
2409 rtw_warn(rtwdev, "invalid swing table index\n");
2410 return 0;
2411 }
2412
2413 if (dm_info->thermal_avg[therm_path] >
2414 rtwdev->efuse.thermal_meter[therm_path])
2415 return delta_swing_table_idx_pos[delta];
2416 else
2417 return -delta_swing_table_idx_neg[delta];
2418}
2419EXPORT_SYMBOL(rtw_phy_pwrtrack_get_pwridx);
2420
2421bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev)
2422{
2423 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
2424 u8 delta_lck;
2425
2426 delta_lck = abs(dm_info->thermal_avg[0] - dm_info->thermal_meter_lck);
2427 if (delta_lck >= rtwdev->chip->lck_threshold) {
2428 dm_info->thermal_meter_lck = dm_info->thermal_avg[0];
2429 return true;
2430 }
2431 return false;
2432}
2433EXPORT_SYMBOL(rtw_phy_pwrtrack_need_lck);
2434
2435bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev)
2436{
2437 struct rtw_dm_info *dm_info = &rtwdev->dm_info;
2438 u8 delta_iqk;
2439
2440 delta_iqk = abs(dm_info->thermal_avg[0] - dm_info->thermal_meter_k);
2441 if (delta_iqk >= rtwdev->chip->iqk_threshold) {
2442 dm_info->thermal_meter_k = dm_info->thermal_avg[0];
2443 return true;
2444 }
2445 return false;
2446}
2447EXPORT_SYMBOL(rtw_phy_pwrtrack_need_iqk);
2448
2449static void rtw_phy_set_tx_path_by_reg(struct rtw_dev *rtwdev,
2450 enum rtw_bb_path tx_path_sel_1ss)
2451{
2452 struct rtw_path_div *path_div = &rtwdev->dm_path_div;
2453 enum rtw_bb_path tx_path_sel_cck = tx_path_sel_1ss;
2454 struct rtw_chip_info *chip = rtwdev->chip;
2455
2456 if (tx_path_sel_1ss == path_div->current_tx_path)
2457 return;
2458
2459 path_div->current_tx_path = tx_path_sel_1ss;
2460 rtw_dbg(rtwdev, RTW_DBG_PATH_DIV, "Switch TX path=%s\n",
2461 tx_path_sel_1ss == BB_PATH_A ? "A" : "B");
2462 chip->ops->config_tx_path(rtwdev, rtwdev->hal.antenna_tx,
2463 tx_path_sel_1ss, tx_path_sel_cck, false);
2464}
2465
2466static void rtw_phy_tx_path_div_select(struct rtw_dev *rtwdev)
2467{
2468 struct rtw_path_div *path_div = &rtwdev->dm_path_div;
2469 enum rtw_bb_path path = path_div->current_tx_path;
2470 s32 rssi_a = 0, rssi_b = 0;
2471
2472 if (path_div->path_a_cnt)
2473 rssi_a = path_div->path_a_sum / path_div->path_a_cnt;
2474 else
2475 rssi_a = 0;
2476 if (path_div->path_b_cnt)
2477 rssi_b = path_div->path_b_sum / path_div->path_b_cnt;
2478 else
2479 rssi_b = 0;
2480
2481 if (rssi_a != rssi_b)
2482 path = (rssi_a > rssi_b) ? BB_PATH_A : BB_PATH_B;
2483
2484 path_div->path_a_cnt = 0;
2485 path_div->path_a_sum = 0;
2486 path_div->path_b_cnt = 0;
2487 path_div->path_b_sum = 0;
2488 rtw_phy_set_tx_path_by_reg(rtwdev, path);
2489}
2490
2491static void rtw_phy_tx_path_diversity_2ss(struct rtw_dev *rtwdev)
2492{
2493 if (rtwdev->hal.antenna_rx != BB_PATH_AB) {
2494 rtw_dbg(rtwdev, RTW_DBG_PATH_DIV,
2495 "[Return] tx_Path_en=%d, rx_Path_en=%d\n",
2496 rtwdev->hal.antenna_tx, rtwdev->hal.antenna_rx);
2497 return;
2498 }
2499 if (rtwdev->sta_cnt == 0) {
2500 rtw_dbg(rtwdev, RTW_DBG_PATH_DIV, "No Link\n");
2501 return;
2502 }
2503
2504 rtw_phy_tx_path_div_select(rtwdev);
2505}
2506
2507void rtw_phy_tx_path_diversity(struct rtw_dev *rtwdev)
2508{
2509 struct rtw_chip_info *chip = rtwdev->chip;
2510
2511 if (!chip->path_div_supported)
2512 return;
2513
2514 rtw_phy_tx_path_diversity_2ss(rtwdev);
2515}
2516