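/* ich8lan - MAC/PHY/NVM support routines for the ICH8/ICH9/ICH10 and
 * PCH-family (82577/82578/82579 and I217/I218 PHY) parts of the Intel
 * e1000e driver.
 */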
41#include "e1000.h"
42
43
44
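/* ICH GbE Flash Hardware Sequencing Flash Status register bit breakdown */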
45union ich8_hws_flash_status {
46 struct ich8_hsfsts {
47 u16 flcdone:1;
48 u16 flcerr:1;
49 u16 dael:1;
50 u16 berasesz:2;
51 u16 flcinprog:1;
52 u16 reserved1:2;
53 u16 reserved2:6;
54 u16 fldesvalid:1;
55 u16 flockdn:1;
56 } hsf_status;
57 u16 regval;
58};
59
60
61
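/* ICH GbE Flash Hardware Sequencing Flash Control register bit breakdown */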
62union ich8_hws_flash_ctrl {
63 struct ich8_hsflctl {
64 u16 flcgo:1;
65 u16 flcycle:2;
66 u16 reserved:5;
67 u16 fldbcount:2;
68 u16 flockdn:6;
69 } hsf_ctrl;
70 u16 regval;
71};
72
73
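/* ICH Flash Region Access Permissions */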
74union ich8_hws_flash_regacc {
75 struct ich8_flracc {
76 u32 grra:8;
77 u32 grwa:8;
78 u32 gmrag:8;
79 u32 gmwag:8;
80 } hsf_flregacc;
81 u16 regval;
82};
83
84
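/* ICH Flash Protected Region */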
85union ich8_flash_protected_range {
86 struct ich8_pr {
87 u32 base:13;
88 u32 reserved1:2;
89 u32 rpe:1;
90 u32 limit:13;
91 u32 reserved2:2;
92 u32 wpe:1;
93 } range;
94 u32 regval;
95};
96
97static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
98static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
99static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
100static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
101 u32 offset, u8 byte);
102static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
103 u8 *data);
104static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
105 u16 *data);
106static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
107 u8 size, u16 *data);
108static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
109 u32 *data);
110static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
111 u32 offset, u32 *data);
112static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
113 u32 offset, u32 data);
114static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
115 u32 offset, u32 dword);
116static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
117static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
118static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
119static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
120static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
121static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
122static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
123static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
124static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
125static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
126static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
127static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
128static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
129static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
130static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
131static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
132static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
133static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
134static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw);
135static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
136static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
137static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
138static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
139static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
140
141static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
142{
143 return readw(hw->flash_address + reg);
144}
145
146static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg)
147{
148 return readl(hw->flash_address + reg);
149}
150
151static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val)
152{
153 writew(val, hw->flash_address + reg);
154}
155
156static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
157{
158 writel(val, hw->flash_address + reg);
159}
160
161#define er16flash(reg) __er16flash(hw, (reg))
162#define er32flash(reg) __er32flash(hw, (reg))
163#define ew16flash(reg, val) __ew16flash(hw, (reg), (val))
164#define ew32flash(reg, val) __ew32flash(hw, (reg), (val))
165
166
167
168
169
170
171
172
173
174
175
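/**
 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 *  @hw: pointer to the HW structure
 *
 *  Test access to the PHY registers by reading the PHY ID registers.  If
 *  the PHY ID is already known it only needs to match; otherwise the ID
 *  that was read is stored.  On LPT and newer parts, a successful access
 *  also clears the force-SMBus bits in the PHY and MAC when firmware is
 *  not valid.
 */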
176static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
177{
178 u16 phy_reg = 0;
179 u32 phy_id = 0;
180 s32 ret_val = 0;
181 u16 retry_count;
182 u32 mac_reg = 0;
183
184 for (retry_count = 0; retry_count < 2; retry_count++) {
185 ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg);
186 if (ret_val || (phy_reg == 0xFFFF))
187 continue;
188 phy_id = (u32)(phy_reg << 16);
189
190 ret_val = e1e_rphy_locked(hw, MII_PHYSID2, &phy_reg);
191 if (ret_val || (phy_reg == 0xFFFF)) {
192 phy_id = 0;
193 continue;
194 }
195 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
196 break;
197 }
198
199 if (hw->phy.id) {
200 if (hw->phy.id == phy_id)
201 goto out;
202 } else if (phy_id) {
203 hw->phy.id = phy_id;
204 hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
205 goto out;
206 }
207
208
209
210
211 if (hw->mac.type < e1000_pch_lpt) {
212 hw->phy.ops.release(hw);
213 ret_val = e1000_set_mdio_slow_mode_hv(hw);
214 if (!ret_val)
215 ret_val = e1000e_get_phy_id(hw);
216 hw->phy.ops.acquire(hw);
217 }
218
219 if (ret_val)
220 return false;
221out:
222 if (hw->mac.type >= e1000_pch_lpt) {
223
224 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
225
226 e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
227 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
228 e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
229
230
231 mac_reg = er32(CTRL_EXT);
232 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
233 ew32(CTRL_EXT, mac_reg);
234 }
235 }
236
237 return true;
238}
239
240
241
242
243
244
245
246
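/**
 *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 *  @hw: pointer to the HW structure
 *
 *  Toggle the LANPHYPC value bit while the override is asserted to force
 *  the MAC-PHY interconnect out of its current mode, then wait for the
 *  PHY: a fixed 50 ms on pre-LPT parts, or until LPCD is indicated in
 *  CTRL_EXT (polled for up to ~120 ms) plus 30 ms on LPT and newer.
 */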
247static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
248{
249 u32 mac_reg;
250
251
252 mac_reg = er32(FEXTNVM3);
253 mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
254 mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
255 ew32(FEXTNVM3, mac_reg);
256
257
258 mac_reg = er32(CTRL);
259 mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
260 mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
261 ew32(CTRL, mac_reg);
262 e1e_flush();
263 usleep_range(10, 20);
264 mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
265 ew32(CTRL, mac_reg);
266 e1e_flush();
267
268 if (hw->mac.type < e1000_pch_lpt) {
269 msleep(50);
270 } else {
271 u16 count = 20;
272
273 do {
274 usleep_range(5000, 6000);
275 } while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--);
276
277 msleep(30);
278 }
279}
280
281
282
283
284
285
286
287
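/**
 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 *  @hw: pointer to the HW structure
 *
 *  Workarounds needed to make sure the PHY is accessible before it is
 *  reset: gate automatic PHY configuration, take the device out of ULP,
 *  and, if the PHY still cannot be reached, force SMBus mode and/or
 *  toggle the LANPHYPC pin.  Finally reset the PHY unless the ME blocks
 *  the reset.
 */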
288static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
289{
290 struct e1000_adapter *adapter = hw->adapter;
291 u32 mac_reg, fwsm = er32(FWSM);
292 s32 ret_val;
293
294
295
296
297 e1000_gate_hw_phy_config_ich8lan(hw, true);
298
299
300
301
302 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
303 ret_val = e1000_disable_ulp_lpt_lp(hw, true);
304 if (ret_val)
305 e_warn("Failed to disable ULP\n");
306
307 ret_val = hw->phy.ops.acquire(hw);
308 if (ret_val) {
309 e_dbg("Failed to initialize PHY flow\n");
310 goto out;
311 }
312
313
314
315
316
317 switch (hw->mac.type) {
318 case e1000_pch_lpt:
319 case e1000_pch_spt:
320 case e1000_pch_cnp:
321 case e1000_pch_tgp:
322 case e1000_pch_adp:
323 case e1000_pch_mtp:
324 case e1000_pch_lnp:
325 if (e1000_phy_is_accessible_pchlan(hw))
326 break;
327
328
329
330
331 mac_reg = er32(CTRL_EXT);
332 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
333 ew32(CTRL_EXT, mac_reg);
334
335
336
337
338
339 msleep(50);
340
341 fallthrough;
342 case e1000_pch2lan:
343 if (e1000_phy_is_accessible_pchlan(hw))
344 break;
345
346 fallthrough;
347 case e1000_pchlan:
348 if ((hw->mac.type == e1000_pchlan) &&
349 (fwsm & E1000_ICH_FWSM_FW_VALID))
350 break;
351
352 if (hw->phy.ops.check_reset_block(hw)) {
353 e_dbg("Required LANPHYPC toggle blocked by ME\n");
354 ret_val = -E1000_ERR_PHY;
355 break;
356 }
357
358
359 e1000_toggle_lanphypc_pch_lpt(hw);
360 if (hw->mac.type >= e1000_pch_lpt) {
361 if (e1000_phy_is_accessible_pchlan(hw))
362 break;
363
364
365
366
367 mac_reg = er32(CTRL_EXT);
368 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
369 ew32(CTRL_EXT, mac_reg);
370
371 if (e1000_phy_is_accessible_pchlan(hw))
372 break;
373
374 ret_val = -E1000_ERR_PHY;
375 }
376 break;
377 default:
378 break;
379 }
380
381 hw->phy.ops.release(hw);
382 if (!ret_val) {
383
384
385 if (hw->phy.ops.check_reset_block(hw)) {
386 e_err("Reset blocked by ME\n");
387 goto out;
388 }
389
390
391
392
393
394
395 ret_val = e1000e_phy_hw_reset_generic(hw);
396 if (ret_val)
397 goto out;
398
399
400
401
402
403
404
405 ret_val = hw->phy.ops.check_reset_block(hw);
406 if (ret_val)
407 e_err("ME blocked access to PHY after reset\n");
408 }
409
410out:
411
412 if ((hw->mac.type == e1000_pch2lan) &&
413 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
414 usleep_range(10000, 11000);
415 e1000_gate_hw_phy_config_ich8lan(hw, false);
416 }
417
418 return ret_val;
419}
420
421
422
423
424
425
426
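/**
 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers for
 *  PCH-based parts, identify the attached PHY (82577/82578/82579/I217)
 *  and hook up the type-specific operations.
 */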
427static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
428{
429 struct e1000_phy_info *phy = &hw->phy;
430 s32 ret_val;
431
432 phy->addr = 1;
433 phy->reset_delay_us = 100;
434
435 phy->ops.set_page = e1000_set_page_igp;
436 phy->ops.read_reg = e1000_read_phy_reg_hv;
437 phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
438 phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
439 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
440 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
441 phy->ops.write_reg = e1000_write_phy_reg_hv;
442 phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
443 phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
444 phy->ops.power_up = e1000_power_up_phy_copper;
445 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
446 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
447
448 phy->id = e1000_phy_unknown;
449
450 ret_val = e1000_init_phy_workarounds_pchlan(hw);
451 if (ret_val)
452 return ret_val;
453
454 if (phy->id == e1000_phy_unknown)
455 switch (hw->mac.type) {
456 default:
457 ret_val = e1000e_get_phy_id(hw);
458 if (ret_val)
459 return ret_val;
460 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
461 break;
462 fallthrough;
463 case e1000_pch2lan:
464 case e1000_pch_lpt:
465 case e1000_pch_spt:
466 case e1000_pch_cnp:
467 case e1000_pch_tgp:
468 case e1000_pch_adp:
469 case e1000_pch_mtp:
470 case e1000_pch_lnp:
471
472
473
474 ret_val = e1000_set_mdio_slow_mode_hv(hw);
475 if (ret_val)
476 return ret_val;
477 ret_val = e1000e_get_phy_id(hw);
478 if (ret_val)
479 return ret_val;
480 break;
481 }
482 phy->type = e1000e_get_phy_type_from_id(phy->id);
483
484 switch (phy->type) {
485 case e1000_phy_82577:
486 case e1000_phy_82579:
487 case e1000_phy_i217:
488 phy->ops.check_polarity = e1000_check_polarity_82577;
489 phy->ops.force_speed_duplex =
490 e1000_phy_force_speed_duplex_82577;
491 phy->ops.get_cable_length = e1000_get_cable_length_82577;
492 phy->ops.get_info = e1000_get_phy_info_82577;
493 phy->ops.commit = e1000e_phy_sw_reset;
494 break;
495 case e1000_phy_82578:
496 phy->ops.check_polarity = e1000_check_polarity_m88;
497 phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
498 phy->ops.get_cable_length = e1000e_get_cable_length_m88;
499 phy->ops.get_info = e1000e_get_phy_info_m88;
500 break;
501 default:
502 ret_val = -E1000_ERR_PHY;
503 break;
504 }
505
506 return ret_val;
507}
508
509
510
511
512
513
514
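/**
 *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers for
 *  ICH8/ICH9/ICH10 parts (IGP03, IFE and BM PHYs).
 */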
515static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
516{
517 struct e1000_phy_info *phy = &hw->phy;
518 s32 ret_val;
519 u16 i = 0;
520
521 phy->addr = 1;
522 phy->reset_delay_us = 100;
523
524 phy->ops.power_up = e1000_power_up_phy_copper;
525 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
526
527
528
529
530 ret_val = e1000e_determine_phy_address(hw);
531 if (ret_val) {
532 phy->ops.write_reg = e1000e_write_phy_reg_bm;
533 phy->ops.read_reg = e1000e_read_phy_reg_bm;
534 ret_val = e1000e_determine_phy_address(hw);
535 if (ret_val) {
536 e_dbg("Cannot determine PHY addr. Erroring out\n");
537 return ret_val;
538 }
539 }
540
541 phy->id = 0;
542 while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
543 (i++ < 100)) {
544 usleep_range(1000, 1100);
545 ret_val = e1000e_get_phy_id(hw);
546 if (ret_val)
547 return ret_val;
548 }
549
550
551 switch (phy->id) {
552 case IGP03E1000_E_PHY_ID:
553 phy->type = e1000_phy_igp_3;
554 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
555 phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked;
556 phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked;
557 phy->ops.get_info = e1000e_get_phy_info_igp;
558 phy->ops.check_polarity = e1000_check_polarity_igp;
559 phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp;
560 break;
561 case IFE_E_PHY_ID:
562 case IFE_PLUS_E_PHY_ID:
563 case IFE_C_E_PHY_ID:
564 phy->type = e1000_phy_ife;
565 phy->autoneg_mask = E1000_ALL_NOT_GIG;
566 phy->ops.get_info = e1000_get_phy_info_ife;
567 phy->ops.check_polarity = e1000_check_polarity_ife;
568 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
569 break;
570 case BME1000_E_PHY_ID:
571 phy->type = e1000_phy_bm;
572 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
573 phy->ops.read_reg = e1000e_read_phy_reg_bm;
574 phy->ops.write_reg = e1000e_write_phy_reg_bm;
575 phy->ops.commit = e1000e_phy_sw_reset;
576 phy->ops.get_info = e1000e_get_phy_info_m88;
577 phy->ops.check_polarity = e1000_check_polarity_m88;
578 phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
579 break;
580 default:
581 return -E1000_ERR_PHY;
582 }
583
584 return 0;
585}
586
587
588
589
590
591
592
593
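/**
 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific NVM parameters and function pointers.  On
 *  SPT and newer parts the flash is accessed through the MAC BAR and its
 *  size is derived from the STRAP register; on older parts the gigabit
 *  flash region described by GFPREG is used.  The shadow RAM used for
 *  NVM accesses is also initialized here.
 */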
594static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
595{
596 struct e1000_nvm_info *nvm = &hw->nvm;
597 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
598 u32 gfpreg, sector_base_addr, sector_end_addr;
599 u16 i;
600 u32 nvm_size;
601
602 nvm->type = e1000_nvm_flash_sw;
603
604 if (hw->mac.type >= e1000_pch_spt) {
605
606
607
608
609
610
611 nvm->flash_base_addr = 0;
612 nvm_size = (((er32(STRAP) >> 1) & 0x1F) + 1)
613 * NVM_SIZE_MULTIPLIER;
614 nvm->flash_bank_size = nvm_size / 2;
615
616 nvm->flash_bank_size /= sizeof(u16);
617
618 hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
619 } else {
620
621 if (!hw->flash_address) {
622 e_dbg("ERROR: Flash registers not mapped\n");
623 return -E1000_ERR_CONFIG;
624 }
625
626 gfpreg = er32flash(ICH_FLASH_GFPREG);
627
628
629
630
631
632 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
633 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
634
635
636 nvm->flash_base_addr = sector_base_addr
637 << FLASH_SECTOR_ADDR_SHIFT;
638
639
640
641
642 nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
643 << FLASH_SECTOR_ADDR_SHIFT);
644 nvm->flash_bank_size /= 2;
645
646 nvm->flash_bank_size /= sizeof(u16);
647 }
648
649 nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
650
651
652 for (i = 0; i < nvm->word_size; i++) {
653 dev_spec->shadow_ram[i].modified = false;
654 dev_spec->shadow_ram[i].value = 0xFFFF;
655 }
656
657 return 0;
658}
659
660
661
662
663
664
665
666
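/**
 *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific MAC parameters and function pointers:
 *  receive address register count, LED and management-mode operations,
 *  and the LPT-specific RAR and copper link setup handlers.
 */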
667static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
668{
669 struct e1000_mac_info *mac = &hw->mac;
670
671
672 hw->phy.media_type = e1000_media_type_copper;
673
674
675 mac->mta_reg_count = 32;
676
677 mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
678 if (mac->type == e1000_ich8lan)
679 mac->rar_entry_count--;
680
681 mac->has_fwsm = true;
682
683 mac->arc_subsystem_valid = false;
684
685 mac->adaptive_ifs = true;
686
687
688 switch (mac->type) {
689 case e1000_ich8lan:
690 case e1000_ich9lan:
691 case e1000_ich10lan:
692
693 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
694
695 mac->ops.id_led_init = e1000e_id_led_init_generic;
696
697 mac->ops.blink_led = e1000e_blink_led_generic;
698
699 mac->ops.setup_led = e1000e_setup_led_generic;
700
701 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
702
703 mac->ops.led_on = e1000_led_on_ich8lan;
704 mac->ops.led_off = e1000_led_off_ich8lan;
705 break;
706 case e1000_pch2lan:
707 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
708 mac->ops.rar_set = e1000_rar_set_pch2lan;
709 fallthrough;
710 case e1000_pch_lpt:
711 case e1000_pch_spt:
712 case e1000_pch_cnp:
713 case e1000_pch_tgp:
714 case e1000_pch_adp:
715 case e1000_pch_mtp:
716 case e1000_pch_lnp:
717 case e1000_pchlan:
718
719 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
720
721 mac->ops.id_led_init = e1000_id_led_init_pchlan;
722
723 mac->ops.setup_led = e1000_setup_led_pchlan;
724
725 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
726
727 mac->ops.led_on = e1000_led_on_pchlan;
728 mac->ops.led_off = e1000_led_off_pchlan;
729 break;
730 default:
731 break;
732 }
733
734 if (mac->type >= e1000_pch_lpt) {
735 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
736 mac->ops.rar_set = e1000_rar_set_pch_lpt;
737 mac->ops.setup_physical_interface =
738 e1000_setup_copper_link_pch_lpt;
739 mac->ops.rar_get_count = e1000_rar_get_count_pch_lpt;
740 }
741
742
743 if (mac->type == e1000_ich8lan)
744 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
745
746 return 0;
747}
748
749
750
751
752
753
754
755
756
757
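/**
 *  __e1000_access_emi_reg_locked - Read/write EMI register
 *  @hw: pointer to the HW structure
 *  @address: EMI address to program into I82579_EMI_ADDR
 *  @data: value to read/write from/to the EMI address
 *  @read: boolean flag to indicate read or write
 *
 *  This helper function assumes the SW/FW/HW semaphore is already acquired.
 */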
758static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
759 u16 *data, bool read)
760{
761 s32 ret_val;
762
763 ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, address);
764 if (ret_val)
765 return ret_val;
766
767 if (read)
768 ret_val = e1e_rphy_locked(hw, I82579_EMI_DATA, data);
769 else
770 ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, *data);
771
772 return ret_val;
773}
774
775
776
777
778
779
780
781
782
783s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
784{
785 return __e1000_access_emi_reg_locked(hw, addr, data, true);
786}
787
788
789
790
791
792
793
794
795
796s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
797{
798 return __e1000_access_emi_reg_locked(hw, addr, &data, false);
799}
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
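/**
 *  e1000_set_eee_pchlan - Enable/disable EEE support
 *  @hw: pointer to the HW structure
 *
 *  Enable/disable EEE based on the setting in the dev_spec structure and
 *  the EEE ability of the link partner as read through the EMI registers
 *  of the 82579 or I217 PHY.  1000/100 EEE is only enabled when both
 *  sides advertise it (and, for 100 Mbps, the link resolves to full
 *  duplex).
 */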
815s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
816{
817 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
818 s32 ret_val;
819 u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
820
821 switch (hw->phy.type) {
822 case e1000_phy_82579:
823 lpa = I82579_EEE_LP_ABILITY;
824 pcs_status = I82579_EEE_PCS_STATUS;
825 adv_addr = I82579_EEE_ADVERTISEMENT;
826 break;
827 case e1000_phy_i217:
828 lpa = I217_EEE_LP_ABILITY;
829 pcs_status = I217_EEE_PCS_STATUS;
830 adv_addr = I217_EEE_ADVERTISEMENT;
831 break;
832 default:
833 return 0;
834 }
835
836 ret_val = hw->phy.ops.acquire(hw);
837 if (ret_val)
838 return ret_val;
839
840 ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
841 if (ret_val)
842 goto release;
843
844
845 lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
846
847
848 if (!dev_spec->eee_disable) {
849
850 ret_val = e1000_read_emi_reg_locked(hw, lpa,
851 &dev_spec->eee_lp_ability);
852 if (ret_val)
853 goto release;
854
855
856 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
857 if (ret_val)
858 goto release;
859
860
861
862
863 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
864 lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
865
866 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
867 e1e_rphy_locked(hw, MII_LPA, &data);
868 if (data & LPA_100FULL)
869 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
870 else
871
872
873
874
875 dev_spec->eee_lp_ability &=
876 ~I82579_EEE_100_SUPPORTED;
877 }
878 }
879
880 if (hw->phy.type == e1000_phy_82579) {
881 ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
882 &data);
883 if (ret_val)
884 goto release;
885
886 data &= ~I82579_LPI_100_PLL_SHUT;
887 ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
888 data);
889 }
890
891
892 ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
893 if (ret_val)
894 goto release;
895
896 ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
897release:
898 hw->phy.ops.release(hw);
899
900 return ret_val;
901}
902
903
904
905
906
907
908
909
910
911
912
913
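/**
 *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 *  @hw: pointer to the HW structure
 *  @link: link up bool flag
 *
 *  While the link is up at 1 Gbps, briefly disable K1 in the KMRN K1
 *  config register while setting the PLL clock request in FEXTNVM6, then
 *  restore K1.  At slower speeds (or with no link) release the PLL clock
 *  request and, for older PHY revisions, adjust the in-band link-status
 *  TX timeout and the K1 entry condition in FEXTNVM6.
 */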
914static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
915{
916 u32 fextnvm6 = er32(FEXTNVM6);
917 u32 status = er32(STATUS);
918 s32 ret_val = 0;
919 u16 reg;
920
921 if (link && (status & E1000_STATUS_SPEED_1000)) {
922 ret_val = hw->phy.ops.acquire(hw);
923 if (ret_val)
924 return ret_val;
925
926 ret_val =
927 e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
928					    &reg);
929 if (ret_val)
930 goto release;
931
932 ret_val =
933 e1000e_write_kmrn_reg_locked(hw,
934 E1000_KMRNCTRLSTA_K1_CONFIG,
935 reg &
936 ~E1000_KMRNCTRLSTA_K1_ENABLE);
937 if (ret_val)
938 goto release;
939
940 usleep_range(10, 20);
941
942 ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
943
944 ret_val =
945 e1000e_write_kmrn_reg_locked(hw,
946 E1000_KMRNCTRLSTA_K1_CONFIG,
947 reg);
948release:
949 hw->phy.ops.release(hw);
950 } else {
951
952 fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
953
954 if ((hw->phy.revision > 5) || !link ||
955 ((status & E1000_STATUS_SPEED_100) &&
956 (status & E1000_STATUS_FD)))
957 goto update_fextnvm6;
958
959 ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg);
960 if (ret_val)
961 return ret_val;
962
963
964 reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
965
966 if (status & E1000_STATUS_SPEED_100) {
967
968 reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
969
970
971 fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
972 } else {
973
974 reg |= 50 <<
975 I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
976
977
978 fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
979 }
980
981 ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg);
982 if (ret_val)
983 return ret_val;
984
985update_fextnvm6:
986 ew32(FEXTNVM6, fextnvm6);
987 }
988
989 return ret_val;
990}
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
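/**
 *  e1000_platform_pm_pch_lpt - Set platform power management values
 *  @hw: pointer to the HW structure
 *  @link: link up bool flag
 *
 *  Compute a Latency Tolerance Reporting (LTR) value from the time it
 *  takes to fill the Rx packet buffer at the current link speed, clamp it
 *  to the maximum reported in the PCIe LTR capability, and program it
 *  into the LTRV register (for both snoop and no-snoop requirements).
 */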
1008static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
1009{
1010 u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
1011 link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
1012 u16 max_ltr_enc_d = 0;
1013 u16 lat_enc_d = 0;
1014 u16 lat_enc = 0;
1015
1016 if (link) {
1017 u16 speed, duplex, scale = 0;
1018 u16 max_snoop, max_nosnoop;
1019 u16 max_ltr_enc;
1020 u64 value;
1021 u32 rxa;
1022
1023 if (!hw->adapter->max_frame_size) {
1024 e_dbg("max_frame_size not set.\n");
1025 return -E1000_ERR_CONFIG;
1026 }
1027
1028 hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
1029 if (!speed) {
1030 e_dbg("Speed not set.\n");
1031 return -E1000_ERR_CONFIG;
1032 }
1033
1034
1035 rxa = er32(PBA) & E1000_PBA_RXA_MASK;
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045 rxa *= 512;
1046 value = (rxa > hw->adapter->max_frame_size) ?
1047 (rxa - hw->adapter->max_frame_size) * (16000 / speed) :
1048 0;
1049
1050 while (value > PCI_LTR_VALUE_MASK) {
1051 scale++;
1052 value = DIV_ROUND_UP(value, BIT(5));
1053 }
1054 if (scale > E1000_LTRV_SCALE_MAX) {
1055 e_dbg("Invalid LTR latency scale %d\n", scale);
1056 return -E1000_ERR_CONFIG;
1057 }
1058 lat_enc = (u16)((scale << PCI_LTR_SCALE_SHIFT) | value);
1059
1060
1061 pci_read_config_word(hw->adapter->pdev, E1000_PCI_LTR_CAP_LPT,
1062 &max_snoop);
1063 pci_read_config_word(hw->adapter->pdev,
1064 E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
1065 max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);
1066
1067 lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) *
1068 (1U << (E1000_LTRV_SCALE_FACTOR *
1069 ((lat_enc & E1000_LTRV_SCALE_MASK)
1070 >> E1000_LTRV_SCALE_SHIFT)));
1071
1072 max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) *
1073 (1U << (E1000_LTRV_SCALE_FACTOR *
1074 ((max_ltr_enc & E1000_LTRV_SCALE_MASK)
1075 >> E1000_LTRV_SCALE_SHIFT)));
1076
1077 if (lat_enc_d > max_ltr_enc_d)
1078 lat_enc = max_ltr_enc;
1079 }
1080
1081
1082 reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
1083 ew32(LTRV, reg);
1084
1085 return 0;
1086}
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
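/**
 *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
 *  @hw: pointer to the HW structure
 *  @to_sx: boolean indicating a system power state transition to Sx
 *
 *  On I217/I218 parts, put the PHY into Ultra Low Power (ULP) mode.  When
 *  manageability firmware is valid this is requested through H2ME;
 *  otherwise the host forces SMBus mode, programs I218_ULP_CONFIG1
 *  (including WoL and sticky-ULP behaviour for Sx) and starts the ULP
 *  configuration itself.
 */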
1098s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1099{
1100 u32 mac_reg;
1101 s32 ret_val = 0;
1102 u16 phy_reg;
1103 u16 oem_reg = 0;
1104
1105 if ((hw->mac.type < e1000_pch_lpt) ||
1106 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1107 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
1108 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
1109 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
1110 (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1111 return 0;
1112
1113 if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
1114
1115 mac_reg = er32(H2ME);
1116 mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1117 ew32(H2ME, mac_reg);
1118
1119 goto out;
1120 }
1121
1122 if (!to_sx) {
1123 int i = 0;
1124
1125
1126 while (!(er32(FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1127
1128 if (er32(STATUS) & E1000_STATUS_LU)
1129 return -E1000_ERR_PHY;
1130
1131 if (i++ == 100)
1132 break;
1133
1134 msleep(50);
1135 }
1136 e_dbg("CABLE_DISCONNECTED %s set after %dmsec\n",
1137 (er32(FEXT) &
1138 E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not", i * 50);
1139 }
1140
1141 ret_val = hw->phy.ops.acquire(hw);
1142 if (ret_val)
1143 goto out;
1144
1145
1146 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1147 if (ret_val)
1148 goto release;
1149 phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1150 e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1151
1152
1153 mac_reg = er32(CTRL_EXT);
1154 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1155 ew32(CTRL_EXT, mac_reg);
1156
1157
1158
1159
1160 if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
1161 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
1162 &oem_reg);
1163 if (ret_val)
1164 goto release;
1165
1166 phy_reg = oem_reg;
1167 phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
1168
1169 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1170 phy_reg);
1171
1172 if (ret_val)
1173 goto release;
1174 }
1175
1176
1177
1178
1179 ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1180 if (ret_val)
1181 goto release;
1182 phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1183 I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1184 if (to_sx) {
1185 if (er32(WUFC) & E1000_WUFC_LNKC)
1186 phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1187 else
1188 phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1189
1190 phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1191 phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
1192 } else {
1193 phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1194 phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
1195 phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1196 }
1197 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1198
1199
1200 mac_reg = er32(FEXTNVM7);
1201 mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1202 ew32(FEXTNVM7, mac_reg);
1203
1204
1205 phy_reg |= I218_ULP_CONFIG1_START;
1206 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1207
1208 if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
1209 to_sx && (er32(STATUS) & E1000_STATUS_LU)) {
1210 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1211 oem_reg);
1212 if (ret_val)
1213 goto release;
1214 }
1215
1216release:
1217 hw->phy.ops.release(hw);
1218out:
1219 if (ret_val)
1220 e_dbg("Error in ULP enable flow: %d\n", ret_val);
1221 else
1222 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1223
1224 return ret_val;
1225}
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
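/**
 *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
 *  @hw: pointer to the HW structure
 *  @force: boolean indicating whether or not to force disabling ULP
 *
 *  Un-configure ULP mode when link is up, the system is transitioned from
 *  Sx or the driver is unloaded.  On ME-enabled systems, request the exit
 *  through H2ME and poll for ULP_CONFIG_DONE; otherwise (optionally)
 *  toggle LANPHYPC, un-force SMBus mode, re-enable K1 and clear the ULP
 *  configuration bits in the PHY.
 */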
1242static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1243{
1244 s32 ret_val = 0;
1245 u32 mac_reg;
1246 u16 phy_reg;
1247 int i = 0;
1248
1249 if ((hw->mac.type < e1000_pch_lpt) ||
1250 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1251 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
1252 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
1253 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
1254 (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1255 return 0;
1256
1257 if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
1258 struct e1000_adapter *adapter = hw->adapter;
1259 bool firmware_bug = false;
1260
1261 if (force) {
1262
1263 mac_reg = er32(H2ME);
1264 mac_reg &= ~E1000_H2ME_ULP;
1265 mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1266 ew32(H2ME, mac_reg);
1267 }
1268
1269
1270
1271
1272
1273 while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
1274 if (i++ == 250) {
1275 ret_val = -E1000_ERR_PHY;
1276 goto out;
1277 }
1278 if (i > 100 && !firmware_bug)
1279 firmware_bug = true;
1280
1281 usleep_range(10000, 11000);
1282 }
1283 if (firmware_bug)
1284 e_warn("ULP_CONFIG_DONE took %d msec. This is a firmware bug\n",
1285 i * 10);
1286 else
1287 e_dbg("ULP_CONFIG_DONE cleared after %d msec\n",
1288 i * 10);
1289
1290 if (force) {
1291 mac_reg = er32(H2ME);
1292 mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1293 ew32(H2ME, mac_reg);
1294 } else {
1295
1296 mac_reg = er32(H2ME);
1297 mac_reg &= ~E1000_H2ME_ULP;
1298 ew32(H2ME, mac_reg);
1299 }
1300
1301 goto out;
1302 }
1303
1304 ret_val = hw->phy.ops.acquire(hw);
1305 if (ret_val)
1306 goto out;
1307
1308 if (force)
1309
1310 e1000_toggle_lanphypc_pch_lpt(hw);
1311
1312
1313 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1314 if (ret_val) {
1315
1316
1317
1318 mac_reg = er32(CTRL_EXT);
1319 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1320 ew32(CTRL_EXT, mac_reg);
1321
1322 msleep(50);
1323
1324 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1325 &phy_reg);
1326 if (ret_val)
1327 goto release;
1328 }
1329 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1330 e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1331
1332
1333 mac_reg = er32(CTRL_EXT);
1334 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1335 ew32(CTRL_EXT, mac_reg);
1336
1337
1338
1339
1340 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1341 if (ret_val)
1342 goto release;
1343 phy_reg |= HV_PM_CTRL_K1_ENABLE;
1344 e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1345
1346
1347 ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1348 if (ret_val)
1349 goto release;
1350 phy_reg &= ~(I218_ULP_CONFIG1_IND |
1351 I218_ULP_CONFIG1_STICKY_ULP |
1352 I218_ULP_CONFIG1_RESET_TO_SMBUS |
1353 I218_ULP_CONFIG1_WOL_HOST |
1354 I218_ULP_CONFIG1_INBAND_EXIT |
1355 I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
1356 I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
1357 I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1358 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1359
1360
1361 phy_reg |= I218_ULP_CONFIG1_START;
1362 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1363
1364
1365 mac_reg = er32(FEXTNVM7);
1366 mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1367 ew32(FEXTNVM7, mac_reg);
1368
1369release:
1370 hw->phy.ops.release(hw);
1371 if (force) {
1372 e1000_phy_hw_reset(hw);
1373 msleep(50);
1374 }
1375out:
1376 if (ret_val)
1377 e_dbg("Error in ULP disable flow: %d\n", ret_val);
1378 else
1379 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1380
1381 return ret_val;
1382}
1383
1384
1385
1386
1387
1388
1389
1390
1391
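/**
 *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks whether the link status of the hardware has changed.  When a
 *  change is detected, the family-specific link workarounds are applied
 *  (K1, TIPG/Rx configuration per speed and duplex, LTR, EEE), and then
 *  collision distance and flow control are configured after link up.
 */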
1392static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1393{
1394 struct e1000_mac_info *mac = &hw->mac;
1395 s32 ret_val, tipg_reg = 0;
1396 u16 emi_addr, emi_val = 0;
1397 bool link;
1398 u16 phy_reg;
1399
1400
1401
1402
1403
1404
1405 if (!mac->get_link_status)
1406 return 0;
1407 mac->get_link_status = false;
1408
1409
1410
1411
1412
1413 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
1414 if (ret_val)
1415 goto out;
1416
1417 if (hw->mac.type == e1000_pchlan) {
1418 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1419 if (ret_val)
1420 goto out;
1421 }
1422
1423
1424
1425
1426
1427 if ((hw->mac.type >= e1000_pch2lan) && link) {
1428 u16 speed, duplex;
1429
1430 e1000e_get_speed_and_duplex_copper(hw, &speed, &duplex);
1431 tipg_reg = er32(TIPG);
1432 tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1433
1434 if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1435 tipg_reg |= 0xFF;
1436
1437 emi_val = 0;
1438 } else if (hw->mac.type >= e1000_pch_spt &&
1439 duplex == FULL_DUPLEX && speed != SPEED_1000) {
1440 tipg_reg |= 0xC;
1441 emi_val = 1;
1442 } else {
1443
1444
1445 tipg_reg |= 0x08;
1446 emi_val = 1;
1447 }
1448
1449 ew32(TIPG, tipg_reg);
1450
1451 ret_val = hw->phy.ops.acquire(hw);
1452 if (ret_val)
1453 goto out;
1454
1455 if (hw->mac.type == e1000_pch2lan)
1456 emi_addr = I82579_RX_CONFIG;
1457 else
1458 emi_addr = I217_RX_CONFIG;
1459 ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1460
1461 if (hw->mac.type >= e1000_pch_lpt) {
1462 u16 phy_reg;
1463
1464 e1e_rphy_locked(hw, I217_PLL_CLOCK_GATE_REG, &phy_reg);
1465 phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
1466 if (speed == SPEED_100 || speed == SPEED_10)
1467 phy_reg |= 0x3E8;
1468 else
1469 phy_reg |= 0xFA;
1470 e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg);
1471
1472 if (speed == SPEED_1000) {
1473 hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
1474 &phy_reg);
1475
1476 phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
1477
1478 hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
1479 phy_reg);
1480 }
1481 }
1482 hw->phy.ops.release(hw);
1483
1484 if (ret_val)
1485 goto out;
1486
1487 if (hw->mac.type >= e1000_pch_spt) {
1488 u16 data;
1489 u16 ptr_gap;
1490
1491 if (speed == SPEED_1000) {
1492 ret_val = hw->phy.ops.acquire(hw);
1493 if (ret_val)
1494 goto out;
1495
1496 ret_val = e1e_rphy_locked(hw,
1497 PHY_REG(776, 20),
1498 &data);
1499 if (ret_val) {
1500 hw->phy.ops.release(hw);
1501 goto out;
1502 }
1503
1504 ptr_gap = (data & (0x3FF << 2)) >> 2;
1505 if (ptr_gap < 0x18) {
1506 data &= ~(0x3FF << 2);
1507 data |= (0x18 << 2);
1508 ret_val =
1509 e1e_wphy_locked(hw,
1510 PHY_REG(776, 20),
1511 data);
1512 }
1513 hw->phy.ops.release(hw);
1514 if (ret_val)
1515 goto out;
1516 } else {
1517 ret_val = hw->phy.ops.acquire(hw);
1518 if (ret_val)
1519 goto out;
1520
1521 ret_val = e1e_wphy_locked(hw,
1522 PHY_REG(776, 20),
1523 0xC023);
1524 hw->phy.ops.release(hw);
1525 if (ret_val)
1526 goto out;
1527
1528 }
1529 }
1530 }
1531
1532
1533
1534
1535
1536
1537 if (hw->mac.type >= e1000_pch_lpt) {
1538 u32 mac_reg;
1539
1540 mac_reg = er32(FEXTNVM4);
1541 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1542 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1543 ew32(FEXTNVM4, mac_reg);
1544 }
1545
1546
1547 if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1548 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1549 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) ||
1550 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
1551 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1552 if (ret_val)
1553 goto out;
1554 }
1555 if (hw->mac.type >= e1000_pch_lpt) {
1556
1557
1558
1559 ret_val = e1000_platform_pm_pch_lpt(hw, link);
1560 if (ret_val)
1561 goto out;
1562 }
1563
1564
1565 hw->dev_spec.ich8lan.eee_lp_ability = 0;
1566
1567 if (hw->mac.type >= e1000_pch_lpt) {
1568 u32 fextnvm6 = er32(FEXTNVM6);
1569
1570 if (hw->mac.type == e1000_pch_spt) {
1571
1572 u32 pcieanacfg = er32(PCIEANACFG);
1573
1574 if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
1575 fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1576 else
1577 fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1578 }
1579
1580 ew32(FEXTNVM6, fextnvm6);
1581 }
1582
1583 if (!link)
1584 goto out;
1585
1586 switch (hw->mac.type) {
1587 case e1000_pch2lan:
1588 ret_val = e1000_k1_workaround_lv(hw);
1589 if (ret_val)
1590 return ret_val;
1591 fallthrough;
1592 case e1000_pchlan:
1593 if (hw->phy.type == e1000_phy_82578) {
1594 ret_val = e1000_link_stall_workaround_hv(hw);
1595 if (ret_val)
1596 return ret_val;
1597 }
1598
1599
1600
1601
1602
1603
1604 e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1605 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1606
1607 if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
1608 phy_reg |= BIT(HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1609
1610 e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1611 break;
1612 default:
1613 break;
1614 }
1615
1616
1617
1618
1619 e1000e_check_downshift(hw);
1620
1621
1622 if (hw->phy.type > e1000_phy_82579) {
1623 ret_val = e1000_set_eee_pchlan(hw);
1624 if (ret_val)
1625 return ret_val;
1626 }
1627
1628
1629
1630
1631 if (!mac->autoneg)
1632 return -E1000_ERR_CONFIG;
1633
1634
1635
1636
1637
1638 mac->ops.config_collision_dist(hw);
1639
1640
1641
1642
1643
1644
1645 ret_val = e1000e_config_fc_after_link_up(hw);
1646 if (ret_val)
1647 e_dbg("Error configuring flow control\n");
1648
1649 return ret_val;
1650
1651out:
1652 mac->get_link_status = true;
1653 return ret_val;
1654}
1655
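/**
 *  e1000_get_variants_ich8lan - Initialize family-specific parameters
 *  @adapter: board private structure
 *
 *  Initialize the MAC, NVM and PHY parameters for the detected MAC type
 *  and adjust the adapter flags (jumbo frames, gig speed drop, PCIm2PCI
 *  arbiter workaround) based on the PHY/MAC combination.
 */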
1656static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
1657{
1658 struct e1000_hw *hw = &adapter->hw;
1659 s32 rc;
1660
1661 rc = e1000_init_mac_params_ich8lan(hw);
1662 if (rc)
1663 return rc;
1664
1665 rc = e1000_init_nvm_params_ich8lan(hw);
1666 if (rc)
1667 return rc;
1668
1669 switch (hw->mac.type) {
1670 case e1000_ich8lan:
1671 case e1000_ich9lan:
1672 case e1000_ich10lan:
1673 rc = e1000_init_phy_params_ich8lan(hw);
1674 break;
1675 case e1000_pchlan:
1676 case e1000_pch2lan:
1677 case e1000_pch_lpt:
1678 case e1000_pch_spt:
1679 case e1000_pch_cnp:
1680 case e1000_pch_tgp:
1681 case e1000_pch_adp:
1682 case e1000_pch_mtp:
1683 case e1000_pch_lnp:
1684 rc = e1000_init_phy_params_pchlan(hw);
1685 break;
1686 default:
1687 break;
1688 }
1689 if (rc)
1690 return rc;
1691
1692
1693
1694
1695 if ((adapter->hw.phy.type == e1000_phy_ife) ||
1696 ((adapter->hw.mac.type >= e1000_pch2lan) &&
1697 (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
1698 adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
1699 adapter->max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
1700
1701 hw->mac.ops.blink_led = NULL;
1702 }
1703
1704 if ((adapter->hw.mac.type == e1000_ich8lan) &&
1705 (adapter->hw.phy.type != e1000_phy_ife))
1706 adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
1707
1708
1709 if ((adapter->hw.mac.type == e1000_pch2lan) &&
1710 (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
1711 adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
1712
1713 return 0;
1714}
1715
1716static DEFINE_MUTEX(nvm_mutex);
1717
1718
1719
1720
1721
1722
1723
1724static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw __always_unused *hw)
1725{
1726 mutex_lock(&nvm_mutex);
1727
1728 return 0;
1729}
1730
1731
1732
1733
1734
1735
1736
1737static void e1000_release_nvm_ich8lan(struct e1000_hw __always_unused *hw)
1738{
1739 mutex_unlock(&nvm_mutex);
1740}
1741
1742
1743
1744
1745
1746
1747
1748
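/**
 *  e1000_acquire_swflag_ich8lan - Acquire software control flag
 *  @hw: pointer to the HW structure
 *
 *  Acquires the software control flag for performing PHY and select MAC
 *  CSR accesses.  Waits for any previous owner to release the flag, then
 *  sets EXTCNF_CTRL.SWFLAG and confirms that the hardware granted it.
 */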
1749static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1750{
1751 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1752 s32 ret_val = 0;
1753
1754 if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE,
1755 &hw->adapter->state)) {
1756 e_dbg("contention for Phy access\n");
1757 return -E1000_ERR_PHY;
1758 }
1759
1760 while (timeout) {
1761 extcnf_ctrl = er32(EXTCNF_CTRL);
1762 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1763 break;
1764
1765 mdelay(1);
1766 timeout--;
1767 }
1768
1769 if (!timeout) {
1770 e_dbg("SW has already locked the resource.\n");
1771 ret_val = -E1000_ERR_CONFIG;
1772 goto out;
1773 }
1774
1775 timeout = SW_FLAG_TIMEOUT;
1776
1777 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1778 ew32(EXTCNF_CTRL, extcnf_ctrl);
1779
1780 while (timeout) {
1781 extcnf_ctrl = er32(EXTCNF_CTRL);
1782 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1783 break;
1784
1785 mdelay(1);
1786 timeout--;
1787 }
1788
1789 if (!timeout) {
1790 e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1791 er32(FWSM), extcnf_ctrl);
1792 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1793 ew32(EXTCNF_CTRL, extcnf_ctrl);
1794 ret_val = -E1000_ERR_CONFIG;
1795 goto out;
1796 }
1797
1798out:
1799 if (ret_val)
1800 clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
1801
1802 return ret_val;
1803}
1804
1805
1806
1807
1808
1809
1810
1811
1812static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1813{
1814 u32 extcnf_ctrl;
1815
1816 extcnf_ctrl = er32(EXTCNF_CTRL);
1817
1818 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1819 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1820 ew32(EXTCNF_CTRL, extcnf_ctrl);
1821 } else {
1822 e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
1823 }
1824
1825 clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
1826}
1827
1828
1829
1830
1831
1832
1833
1834
1835
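/**
 *  e1000_check_mng_mode_ich8lan - Checks management mode
 *  @hw: pointer to the HW structure
 *
 *  Returns true when the manageability firmware is valid and the firmware
 *  operating mode in FWSM is IAMT.
 */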
1836static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1837{
1838 u32 fwsm;
1839
1840 fwsm = er32(FWSM);
1841 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1842 ((fwsm & E1000_FWSM_MODE_MASK) ==
1843 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1844}
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1855{
1856 u32 fwsm;
1857
1858 fwsm = er32(FWSM);
1859 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1860 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1861}
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
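/**
 *  e1000_rar_set_pch2lan - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address
 *  @index: receive address array register
 *
 *  Sets the receive address array register at @index to @addr.  Index 0
 *  is written to RAL/RAH directly; other indices are written to the
 *  shared SHRAL/SHRAH registers under the software flag and then read
 *  back to detect whether the ME has the entry locked.
 */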
1874static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1875{
1876 u32 rar_low, rar_high;
1877
1878
1879
1880
1881 rar_low = ((u32)addr[0] |
1882 ((u32)addr[1] << 8) |
1883 ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
1884
1885 rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
1886
1887
1888 if (rar_low || rar_high)
1889 rar_high |= E1000_RAH_AV;
1890
1891 if (index == 0) {
1892 ew32(RAL(index), rar_low);
1893 e1e_flush();
1894 ew32(RAH(index), rar_high);
1895 e1e_flush();
1896 return 0;
1897 }
1898
1899
1900
1901
1902 if (index < (u32)(hw->mac.rar_entry_count)) {
1903 s32 ret_val;
1904
1905 ret_val = e1000_acquire_swflag_ich8lan(hw);
1906 if (ret_val)
1907 goto out;
1908
1909 ew32(SHRAL(index - 1), rar_low);
1910 e1e_flush();
1911 ew32(SHRAH(index - 1), rar_high);
1912 e1e_flush();
1913
1914 e1000_release_swflag_ich8lan(hw);
1915
1916
1917 if ((er32(SHRAL(index - 1)) == rar_low) &&
1918 (er32(SHRAH(index - 1)) == rar_high))
1919 return 0;
1920
1921 e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1922 (index - 1), er32(FWSM));
1923 }
1924
1925out:
1926 e_dbg("Failed to write receive address at index %d\n", index);
1927 return -E1000_ERR_CONFIG;
1928}
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw)
1941{
1942 u32 wlock_mac;
1943 u32 num_entries;
1944
1945 wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
1946 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1947
1948 switch (wlock_mac) {
1949 case 0:
1950
1951 num_entries = hw->mac.rar_entry_count;
1952 break;
1953 case 1:
1954
1955 num_entries = 1;
1956 break;
1957 default:
1958
1959 num_entries = wlock_mac + 1;
1960 break;
1961 }
1962
1963 return num_entries;
1964}
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
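/**
 *  e1000_rar_set_pch_lpt - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address
 *  @index: receive address array register
 *
 *  Sets the receive address register array at @index to @addr.  Index 0
 *  is written to RAL/RAH directly; other indices go to the LPT shared
 *  receive address registers, which may be blocked by the WLOCK_MAC field
 *  in FWSM.
 */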
1977static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1978{
1979 u32 rar_low, rar_high;
1980 u32 wlock_mac;
1981
1982
1983
1984
1985 rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
1986 ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
1987
1988 rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
1989
1990
1991 if (rar_low || rar_high)
1992 rar_high |= E1000_RAH_AV;
1993
1994 if (index == 0) {
1995 ew32(RAL(index), rar_low);
1996 e1e_flush();
1997 ew32(RAH(index), rar_high);
1998 e1e_flush();
1999 return 0;
2000 }
2001
2002
2003
2004
2005 if (index < hw->mac.rar_entry_count) {
2006 wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
2007 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
2008
2009
2010 if (wlock_mac == 1)
2011 goto out;
2012
2013 if ((wlock_mac == 0) || (index <= wlock_mac)) {
2014 s32 ret_val;
2015
2016 ret_val = e1000_acquire_swflag_ich8lan(hw);
2017
2018 if (ret_val)
2019 goto out;
2020
2021 ew32(SHRAL_PCH_LPT(index - 1), rar_low);
2022 e1e_flush();
2023 ew32(SHRAH_PCH_LPT(index - 1), rar_high);
2024 e1e_flush();
2025
2026 e1000_release_swflag_ich8lan(hw);
2027
2028
2029 if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
2030 (er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
2031 return 0;
2032 }
2033 }
2034
2035out:
2036 e_dbg("Failed to write receive address at index %d\n", index);
2037 return -E1000_ERR_CONFIG;
2038}
2039
2040
2041
2042
2043
2044
2045
2046
2047
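/**
 *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
 *  @hw: pointer to the HW structure
 *
 *  Checks if firmware is blocking the reset of the PHY.  Polls the
 *  RSPCIPHY bit in FWSM for up to ~300 ms and returns E1000_BLK_PHY_RESET
 *  if access is still blocked, otherwise 0.
 */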
2048static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2049{
2050 bool blocked = false;
2051 int i = 0;
2052
2053 while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
2054 (i++ < 30))
2055 usleep_range(10000, 11000);
2056 return blocked ? E1000_BLK_PHY_RESET : 0;
2057}
2058
2059
2060
2061
2062
2063
2064
2065
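/**
 *  e1000_write_smbus_addr - Write SMBus address to PHY
 *  @hw: pointer to the HW structure
 *
 *  Copies the SMBus address (and, on I217, the SMBus frequency) from the
 *  STRAP register into the PHY's HV_SMB_ADDR register and marks it valid.
 */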
2066static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2067{
2068 u16 phy_data;
2069 u32 strap = er32(STRAP);
2070 u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2071 E1000_STRAP_SMT_FREQ_SHIFT;
2072 s32 ret_val;
2073
2074 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2075
2076 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2077 if (ret_val)
2078 return ret_val;
2079
2080 phy_data &= ~HV_SMB_ADDR_MASK;
2081 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2082 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2083
2084 if (hw->phy.type == e1000_phy_i217) {
2085
2086 if (freq--) {
2087 phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2088 phy_data |= (freq & BIT(0)) <<
2089 HV_SMB_ADDR_FREQ_LOW_SHIFT;
2090 phy_data |= (freq & BIT(1)) <<
2091 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2092 } else {
2093 e_dbg("Unsupported SMB frequency in PHY\n");
2094 }
2095 }
2096
2097 return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2098}
2099
2100
2101
2102
2103
2104
2105
2106
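/**
 *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
 *  @hw: pointer to the HW structure
 *
 *  When the software config flag in FEXTNVM is set (and firmware is not
 *  already handling the configuration), copy the PHY register values from
 *  the NVM extended configuration region pointed to by EXTCNF_CTRL into
 *  the LCD, honouring any page-select entries along the way.
 */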
2107static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2108{
2109 struct e1000_phy_info *phy = &hw->phy;
2110 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2111 s32 ret_val = 0;
2112 u16 word_addr, reg_data, reg_addr, phy_page = 0;
2113
2114
2115
2116
2117
2118
2119
2120 switch (hw->mac.type) {
2121 case e1000_ich8lan:
2122 if (phy->type != e1000_phy_igp_3)
2123 return ret_val;
2124
2125 if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
2126 (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
2127 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2128 break;
2129 }
2130 fallthrough;
2131 case e1000_pchlan:
2132 case e1000_pch2lan:
2133 case e1000_pch_lpt:
2134 case e1000_pch_spt:
2135 case e1000_pch_cnp:
2136 case e1000_pch_tgp:
2137 case e1000_pch_adp:
2138 case e1000_pch_mtp:
2139 case e1000_pch_lnp:
2140 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2141 break;
2142 default:
2143 return ret_val;
2144 }
2145
2146 ret_val = hw->phy.ops.acquire(hw);
2147 if (ret_val)
2148 return ret_val;
2149
2150 data = er32(FEXTNVM);
2151 if (!(data & sw_cfg_mask))
2152 goto release;
2153
2154
2155
2156
2157 data = er32(EXTCNF_CTRL);
2158 if ((hw->mac.type < e1000_pch2lan) &&
2159 (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2160 goto release;
2161
2162 cnf_size = er32(EXTCNF_SIZE);
2163 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2164 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2165 if (!cnf_size)
2166 goto release;
2167
2168 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2169 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2170
2171 if (((hw->mac.type == e1000_pchlan) &&
2172 !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2173 (hw->mac.type > e1000_pchlan)) {
2174
2175
2176
2177
2178
2179 ret_val = e1000_write_smbus_addr(hw);
2180 if (ret_val)
2181 goto release;
2182
2183 data = er32(LEDCTL);
2184 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2185 (u16)data);
2186 if (ret_val)
2187 goto release;
2188 }
2189
2190
2191
2192
2193 word_addr = (u16)(cnf_base_addr << 1);
2194
2195 for (i = 0; i < cnf_size; i++) {
2196 ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, &reg_data);
2197 if (ret_val)
2198 goto release;
2199
2200 ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
2201					 1, &reg_addr);
2202 if (ret_val)
2203 goto release;
2204
2205
2206 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2207 phy_page = reg_data;
2208 continue;
2209 }
2210
2211 reg_addr &= PHY_REG_MASK;
2212 reg_addr |= phy_page;
2213
2214 ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data);
2215 if (ret_val)
2216 goto release;
2217 }
2218
2219release:
2220 hw->phy.ops.release(hw);
2221 return ret_val;
2222}
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
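/**
 *  e1000_k1_gig_workaround_hv - K1 Si workaround
 *  @hw: pointer to the HW structure
 *  @link: link up bool flag
 *
 *  On 82577/82578 PHYs, disable K1 while the link is resolved to 1 Gbps
 *  (otherwise keep the NVM default), write the link-up/link-down
 *  workaround value to PHY_REG(770, 19) and apply the K1 setting via
 *  e1000_configure_k1_ich8lan().
 */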
2234static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2235{
2236 s32 ret_val = 0;
2237 u16 status_reg = 0;
2238 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2239
2240 if (hw->mac.type != e1000_pchlan)
2241 return 0;
2242
2243
2244 ret_val = hw->phy.ops.acquire(hw);
2245 if (ret_val)
2246 return ret_val;
2247
2248
2249 if (link) {
2250 if (hw->phy.type == e1000_phy_82578) {
2251 ret_val = e1e_rphy_locked(hw, BM_CS_STATUS,
2252 &status_reg);
2253 if (ret_val)
2254 goto release;
2255
2256 status_reg &= (BM_CS_STATUS_LINK_UP |
2257 BM_CS_STATUS_RESOLVED |
2258 BM_CS_STATUS_SPEED_MASK);
2259
2260 if (status_reg == (BM_CS_STATUS_LINK_UP |
2261 BM_CS_STATUS_RESOLVED |
2262 BM_CS_STATUS_SPEED_1000))
2263 k1_enable = false;
2264 }
2265
2266 if (hw->phy.type == e1000_phy_82577) {
2267 ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg);
2268 if (ret_val)
2269 goto release;
2270
2271 status_reg &= (HV_M_STATUS_LINK_UP |
2272 HV_M_STATUS_AUTONEG_COMPLETE |
2273 HV_M_STATUS_SPEED_MASK);
2274
2275 if (status_reg == (HV_M_STATUS_LINK_UP |
2276 HV_M_STATUS_AUTONEG_COMPLETE |
2277 HV_M_STATUS_SPEED_1000))
2278 k1_enable = false;
2279 }
2280
2281
2282 ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100);
2283 if (ret_val)
2284 goto release;
2285
2286 } else {
2287
2288 ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100);
2289 if (ret_val)
2290 goto release;
2291 }
2292
2293 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2294
2295release:
2296 hw->phy.ops.release(hw);
2297
2298 return ret_val;
2299}
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
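/**
 *  e1000_configure_k1_ich8lan - Configure K1 power state
 *  @hw: pointer to the HW structure
 *  @k1_enable: K1 state to configure
 *
 *  Configure the K1 power state in the KMRN K1 config register, then
 *  briefly force the MAC speed/bypass settings so the new state takes
 *  effect.  Assumes the SW/FW/HW semaphore is already acquired.
 */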
2311s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2312{
2313 s32 ret_val;
2314 u32 ctrl_reg = 0;
2315 u32 ctrl_ext = 0;
2316 u32 reg = 0;
2317 u16 kmrn_reg = 0;
2318
2319 ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2320 &kmrn_reg);
2321 if (ret_val)
2322 return ret_val;
2323
2324 if (k1_enable)
2325 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2326 else
2327 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2328
2329 ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2330 kmrn_reg);
2331 if (ret_val)
2332 return ret_val;
2333
2334 usleep_range(20, 40);
2335 ctrl_ext = er32(CTRL_EXT);
2336 ctrl_reg = er32(CTRL);
2337
2338 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2339 reg |= E1000_CTRL_FRCSPD;
2340 ew32(CTRL, reg);
2341
2342 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2343 e1e_flush();
2344 usleep_range(20, 40);
2345 ew32(CTRL, ctrl_reg);
2346 ew32(CTRL_EXT, ctrl_ext);
2347 e1e_flush();
2348 usleep_range(20, 40);
2349
2350 return 0;
2351}
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
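/**
 *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
 *  @hw: pointer to the HW structure
 *  @d0_state: boolean if entering d0 or d3 device state
 *
 *  Configure the GbE Disable and LPLU OEM bits in HV_OEM_BITS from the
 *  corresponding PHY_CTRL settings for the requested power state, and
 *  restart auto-negotiation when a PHY reset is not blocked by the ME.
 */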
2362static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2363{
2364 s32 ret_val = 0;
2365 u32 mac_reg;
2366 u16 oem_reg;
2367
2368 if (hw->mac.type < e1000_pchlan)
2369 return ret_val;
2370
2371 ret_val = hw->phy.ops.acquire(hw);
2372 if (ret_val)
2373 return ret_val;
2374
2375 if (hw->mac.type == e1000_pchlan) {
2376 mac_reg = er32(EXTCNF_CTRL);
2377 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2378 goto release;
2379 }
2380
2381 mac_reg = er32(FEXTNVM);
2382 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2383 goto release;
2384
2385 mac_reg = er32(PHY_CTRL);
2386
2387 ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg);
2388 if (ret_val)
2389 goto release;
2390
2391 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2392
2393 if (d0_state) {
2394 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2395 oem_reg |= HV_OEM_BITS_GBE_DIS;
2396
2397 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2398 oem_reg |= HV_OEM_BITS_LPLU;
2399 } else {
2400 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2401 E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2402 oem_reg |= HV_OEM_BITS_GBE_DIS;
2403
2404 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2405 E1000_PHY_CTRL_NOND0A_LPLU))
2406 oem_reg |= HV_OEM_BITS_LPLU;
2407 }
2408
2409
2410 if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2411 !hw->phy.ops.check_reset_block(hw))
2412 oem_reg |= HV_OEM_BITS_RESTART_AN;
2413
2414 ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg);
2415
2416release:
2417 hw->phy.ops.release(hw);
2418
2419 return ret_val;
2420}
2421
2422
2423
2424
2425
2426static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2427{
2428 s32 ret_val;
2429 u16 data;
2430
2431 ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
2432 if (ret_val)
2433 return ret_val;
2434
2435 data |= HV_KMRN_MDIO_SLOW;
2436
2437 ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
2438
2439 return ret_val;
2440}
2441
2442
2443
2444
2445
2446
2447
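/**
 *  e1000_hv_phy_workarounds_ich8lan - apply PHY workarounds
 *  @hw: pointer to the HW structure
 *
 *  A series of PHY workarounds applied after every PHY reset on PCH
 *  (82577/82578) parts: slow MDIO mode, revision-specific register
 *  writes, selecting PHY page 0, the K1 gig workaround and the MSE
 *  threshold setting.
 */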
2448static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2449{
2450 s32 ret_val = 0;
2451 u16 phy_data;
2452
2453 if (hw->mac.type != e1000_pchlan)
2454 return 0;
2455
2456
2457 if (hw->phy.type == e1000_phy_82577) {
2458 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2459 if (ret_val)
2460 return ret_val;
2461 }
2462
2463 if (((hw->phy.type == e1000_phy_82577) &&
2464 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2465 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2466
2467 ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431);
2468 if (ret_val)
2469 return ret_val;
2470
2471
2472 ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
2473 if (ret_val)
2474 return ret_val;
2475 }
2476
2477 if (hw->phy.type == e1000_phy_82578) {
2478
2479
2480
2481 if (hw->phy.revision < 2) {
2482 e1000e_phy_sw_reset(hw);
2483 ret_val = e1e_wphy(hw, MII_BMCR, 0x3140);
2484 if (ret_val)
2485 return ret_val;
2486 }
2487 }
2488
2489
2490 ret_val = hw->phy.ops.acquire(hw);
2491 if (ret_val)
2492 return ret_val;
2493
2494 hw->phy.addr = 1;
2495 ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2496 hw->phy.ops.release(hw);
2497 if (ret_val)
2498 return ret_val;
2499
2500
2501
2502
2503 ret_val = e1000_k1_gig_workaround_hv(hw, true);
2504 if (ret_val)
2505 return ret_val;
2506
2507
2508 ret_val = hw->phy.ops.acquire(hw);
2509 if (ret_val)
2510 return ret_val;
2511 ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2512 if (ret_val)
2513 goto release;
2514 ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF);
2515 if (ret_val)
2516 goto release;
2517
2518
2519 ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2520release:
2521 hw->phy.ops.release(hw);
2522
2523 return ret_val;
2524}
2525
2526
2527
2528
2529
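/**
 *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
 *  @hw: pointer to the HW structure
 *
 *  Copies the receive address registers RAL/RAH into the corresponding
 *  BM_RAR_{L,M,H,CTRL} PHY wakeup registers.
 */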
2530void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2531{
2532 u32 mac_reg;
2533 u16 i, phy_reg = 0;
2534 s32 ret_val;
2535
2536 ret_val = hw->phy.ops.acquire(hw);
2537 if (ret_val)
2538 return;
2539 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2540 if (ret_val)
2541 goto release;
2542
2543
2544 for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2545 mac_reg = er32(RAL(i));
2546 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2547 (u16)(mac_reg & 0xFFFF));
2548 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2549 (u16)((mac_reg >> 16) & 0xFFFF));
2550
2551 mac_reg = er32(RAH(i));
2552 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2553 (u16)(mac_reg & 0xFFFF));
2554 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2555 (u16)((mac_reg & E1000_RAH_AV)
2556 >> 16));
2557 }
2558
2559 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2560
2561release:
2562 hw->phy.ops.release(hw);
2563}
2564
2565
2566
2567
2568
2569
2570
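/**
 *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
 *  @hw: pointer to the HW structure
 *  @enable: flag to enable/disable the workaround
 *
 *  Enable or disable the 82579/I217 jumbo frame receive workaround:
 *  program the MAC address CRCs into PCH_RAICC, copy the Rx addresses to
 *  the PHY and adjust the MAC, KMRN and PHY FIFO/packet-buffer settings
 *  (with the defaults restored when disabling).
 */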
2571s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2572{
2573 s32 ret_val = 0;
2574 u16 phy_reg, data;
2575 u32 mac_reg;
2576 u16 i;
2577
2578 if (hw->mac.type < e1000_pch2lan)
2579 return 0;
2580
2581
2582 e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
2583 ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | BIT(14));
2584 if (ret_val)
2585 return ret_val;
2586
2587 if (enable) {
2588
2589
2590
2591 for (i = 0; i < hw->mac.rar_entry_count; i++) {
2592 u8 mac_addr[ETH_ALEN] = { 0 };
2593 u32 addr_high, addr_low;
2594
2595 addr_high = er32(RAH(i));
2596 if (!(addr_high & E1000_RAH_AV))
2597 continue;
2598 addr_low = er32(RAL(i));
2599 mac_addr[0] = (addr_low & 0xFF);
2600 mac_addr[1] = ((addr_low >> 8) & 0xFF);
2601 mac_addr[2] = ((addr_low >> 16) & 0xFF);
2602 mac_addr[3] = ((addr_low >> 24) & 0xFF);
2603 mac_addr[4] = (addr_high & 0xFF);
2604 mac_addr[5] = ((addr_high >> 8) & 0xFF);
2605
2606 ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
2607 }
2608
2609
2610 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2611
2612
2613 mac_reg = er32(FFLT_DBG);
2614 mac_reg &= ~BIT(14);
2615 mac_reg |= (7 << 15);
2616 ew32(FFLT_DBG, mac_reg);
2617
2618 mac_reg = er32(RCTL);
2619 mac_reg |= E1000_RCTL_SECRC;
2620 ew32(RCTL, mac_reg);
2621
2622 ret_val = e1000e_read_kmrn_reg(hw,
2623 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2624 &data);
2625 if (ret_val)
2626 return ret_val;
2627 ret_val = e1000e_write_kmrn_reg(hw,
2628 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2629 data | BIT(0));
2630 if (ret_val)
2631 return ret_val;
2632 ret_val = e1000e_read_kmrn_reg(hw,
2633 E1000_KMRNCTRLSTA_HD_CTRL,
2634 &data);
2635 if (ret_val)
2636 return ret_val;
2637 data &= ~(0xF << 8);
2638 data |= (0xB << 8);
2639 ret_val = e1000e_write_kmrn_reg(hw,
2640 E1000_KMRNCTRLSTA_HD_CTRL,
2641 data);
2642 if (ret_val)
2643 return ret_val;
2644
2645
2646 e1e_rphy(hw, PHY_REG(769, 23), &data);
2647 data &= ~(0x7F << 5);
2648 data |= (0x37 << 5);
2649 ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
2650 if (ret_val)
2651 return ret_val;
2652 e1e_rphy(hw, PHY_REG(769, 16), &data);
2653 data &= ~BIT(13);
2654 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
2655 if (ret_val)
2656 return ret_val;
2657 e1e_rphy(hw, PHY_REG(776, 20), &data);
2658 data &= ~(0x3FF << 2);
2659 data |= (E1000_TX_PTR_GAP << 2);
2660 ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
2661 if (ret_val)
2662 return ret_val;
2663 ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100);
2664 if (ret_val)
2665 return ret_val;
2666 e1e_rphy(hw, HV_PM_CTRL, &data);
2667 ret_val = e1e_wphy(hw, HV_PM_CTRL, data | BIT(10));
2668 if (ret_val)
2669 return ret_val;
2670 } else {
2671
2672 mac_reg = er32(FFLT_DBG);
2673 mac_reg &= ~(0xF << 14);
2674 ew32(FFLT_DBG, mac_reg);
2675
2676 mac_reg = er32(RCTL);
2677 mac_reg &= ~E1000_RCTL_SECRC;
2678 ew32(RCTL, mac_reg);
2679
2680 ret_val = e1000e_read_kmrn_reg(hw,
2681 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2682 &data);
2683 if (ret_val)
2684 return ret_val;
2685 ret_val = e1000e_write_kmrn_reg(hw,
2686 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2687 data & ~BIT(0));
2688 if (ret_val)
2689 return ret_val;
2690 ret_val = e1000e_read_kmrn_reg(hw,
2691 E1000_KMRNCTRLSTA_HD_CTRL,
2692 &data);
2693 if (ret_val)
2694 return ret_val;
2695 data &= ~(0xF << 8);
2696 data |= (0xB << 8);
2697 ret_val = e1000e_write_kmrn_reg(hw,
2698 E1000_KMRNCTRLSTA_HD_CTRL,
2699 data);
2700 if (ret_val)
2701 return ret_val;
2702
2703
2704 e1e_rphy(hw, PHY_REG(769, 23), &data);
2705 data &= ~(0x7F << 5);
2706 ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
2707 if (ret_val)
2708 return ret_val;
2709 e1e_rphy(hw, PHY_REG(769, 16), &data);
2710 data |= BIT(13);
2711 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
2712 if (ret_val)
2713 return ret_val;
2714 e1e_rphy(hw, PHY_REG(776, 20), &data);
2715 data &= ~(0x3FF << 2);
2716 data |= (0x8 << 2);
2717 ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
2718 if (ret_val)
2719 return ret_val;
2720 ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
2721 if (ret_val)
2722 return ret_val;
2723 e1e_rphy(hw, HV_PM_CTRL, &data);
2724 ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~BIT(10));
2725 if (ret_val)
2726 return ret_val;
2727 }
2728
2729
2730 return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~BIT(14));
2731}
2732
2733
2734
2735
2736
2737
2738
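/**
 *  e1000_lv_phy_workarounds_ich8lan - apply 82579 (PCH2) PHY workarounds
 *  @hw: pointer to the HW structure
 *
 *  Set MDIO slow mode and program the MSE threshold and MSE link-down
 *  EMI registers.
 **/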
2739static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2740{
2741 s32 ret_val = 0;
2742
2743 if (hw->mac.type != e1000_pch2lan)
2744 return 0;
2745
2746
2747 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2748 if (ret_val)
2749 return ret_val;
2750
2751 ret_val = hw->phy.ops.acquire(hw);
2752 if (ret_val)
2753 return ret_val;
2754
2755 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2756 if (ret_val)
2757 goto release;
2758
2759 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2760release:
2761 hw->phy.ops.release(hw);
2762
2763 return ret_val;
2764}
2765
2766
2767
2768
2769
2770
2771
2772
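/**
 *  e1000_k1_workaround_lv - K1 Si workaround for 82579
 *  @hw: pointer to the HW structure
 *
 *  With link up and auto-negotiation complete, disable K1 at 1000/100
 *  speeds, otherwise (10 Mbps) set the K1 beacon duration to 16 usec.
 **/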
2773static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2774{
2775 s32 ret_val = 0;
2776 u16 status_reg = 0;
2777
2778 if (hw->mac.type != e1000_pch2lan)
2779 return 0;
2780
2781
2782 ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
2783 if (ret_val)
2784 return ret_val;
2785
2786 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2787 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2788 if (status_reg &
2789 (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2790 u16 pm_phy_reg;
2791
2792
2793 ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
2794 if (ret_val)
2795 return ret_val;
2796 pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2797 ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
2798 if (ret_val)
2799 return ret_val;
2800 } else {
2801 u32 mac_reg;
2802
2803 mac_reg = er32(FEXTNVM4);
2804 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2805 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2806 ew32(FEXTNVM4, mac_reg);
2807 }
2808 }
2809
2810 return ret_val;
2811}
2812
2813
2814
2815
2816
2817
2818
2819
2820
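/**
 *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
 *  @hw:   pointer to the HW structure
 *  @gate: boolean set to true to gate, false to ungate
 *
 *  Gate/ungate the automatic PHY configuration via hardware; perform
 *  the configuration via software instead.
 **/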
2821static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2822{
2823 u32 extcnf_ctrl;
2824
2825 if (hw->mac.type < e1000_pch2lan)
2826 return;
2827
2828 extcnf_ctrl = er32(EXTCNF_CTRL);
2829
2830 if (gate)
2831 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2832 else
2833 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2834
2835 ew32(EXTCNF_CTRL, extcnf_ctrl);
2836}
2837
2838
2839
2840
2841
2842
2843
2844
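/**
 *  e1000_lan_init_done_ich8lan - Check for PHY config completion
 *  @hw: pointer to the HW structure
 *
 *  Poll the STATUS register for the indication that the MAC has finished
 *  configuring the PHY after a software reset, then clear the bit.
 **/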
2845static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2846{
2847 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2848
2849
2850 do {
2851 data = er32(STATUS);
2852 data &= E1000_STATUS_LAN_INIT_DONE;
2853 usleep_range(100, 200);
2854 } while ((!data) && --loop);
2855
2856
2857
2858
2859
2860 if (loop == 0)
2861 e_dbg("LAN_INIT_DONE not set, increase timeout\n");
2862
2863
2864 data = er32(STATUS);
2865 data &= ~E1000_STATUS_LAN_INIT_DONE;
2866 ew32(STATUS, data);
2867}
2868
2869
2870
2871
2872
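/**
 *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
 *  @hw: pointer to the HW structure
 **/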
2873static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2874{
2875 s32 ret_val = 0;
2876 u16 reg;
2877
2878 if (hw->phy.ops.check_reset_block(hw))
2879 return 0;
2880
2881
2882 usleep_range(10000, 11000);
2883
2884
2885 switch (hw->mac.type) {
2886 case e1000_pchlan:
2887 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2888 if (ret_val)
2889 return ret_val;
2890 break;
2891 case e1000_pch2lan:
2892 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2893 if (ret_val)
2894 return ret_val;
2895 break;
2896 default:
2897 break;
2898 }
2899
2900
2901 if (hw->mac.type >= e1000_pchlan) {
2902 e1e_rphy(hw, BM_PORT_GEN_CFG, &reg);
2903 reg &= ~BM_WUC_HOST_WU_BIT;
2904 e1e_wphy(hw, BM_PORT_GEN_CFG, reg);
2905 }
2906
2907
2908 ret_val = e1000_sw_lcd_config_ich8lan(hw);
2909 if (ret_val)
2910 return ret_val;
2911
2912
2913 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2914
2915 if (hw->mac.type == e1000_pch2lan) {
2916
2917 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
2918 usleep_range(10000, 11000);
2919 e1000_gate_hw_phy_config_ich8lan(hw, false);
2920 }
2921
2922
2923 ret_val = hw->phy.ops.acquire(hw);
2924 if (ret_val)
2925 return ret_val;
2926 ret_val = e1000_write_emi_reg_locked(hw,
2927 I82579_LPI_UPDATE_TIMER,
2928 0x1387);
2929 hw->phy.ops.release(hw);
2930 }
2931
2932 return ret_val;
2933}
2934
2935
2936
2937
2938
2939
2940
2941
2942
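/**
 *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
 *  @hw: pointer to the HW structure
 *
 *  Resets the PHY and then performs the post-reset configuration steps.
 **/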
2943static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2944{
2945 s32 ret_val = 0;
2946
2947
2948 if ((hw->mac.type == e1000_pch2lan) &&
2949 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
2950 e1000_gate_hw_phy_config_ich8lan(hw, true);
2951
2952 ret_val = e1000e_phy_hw_reset_generic(hw);
2953 if (ret_val)
2954 return ret_val;
2955
2956 return e1000_post_phy_reset_ich8lan(hw);
2957}
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
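/**
 *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
 *  @hw: pointer to the HW structure
 *  @active: true to enable LPLU, false to disable
 *
 *  Sets the LPLU state in the HV_OEM_BITS PHY register and, if PHY resets
 *  are not blocked, restarts auto-negotiation.  D0 and D3 LPLU use the
 *  same function since they configure the same bit.
 **/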
2970static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2971{
2972 s32 ret_val;
2973 u16 oem_reg;
2974
2975 ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
2976 if (ret_val)
2977 return ret_val;
2978
2979 if (active)
2980 oem_reg |= HV_OEM_BITS_LPLU;
2981 else
2982 oem_reg &= ~HV_OEM_BITS_LPLU;
2983
2984 if (!hw->phy.ops.check_reset_block(hw))
2985 oem_reg |= HV_OEM_BITS_RESTART_AN;
2986
2987 return e1e_wphy(hw, HV_OEM_BITS, oem_reg);
2988}
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
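/**
 *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
 *  @hw: pointer to the HW structure
 *  @active: true to enable LPLU, false to disable
 *
 *  Sets the LPLU D0 state according to the active flag.  When
 *  activating LPLU this function also disables smart speed
 *  and vice versa.
 **/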
3003static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3004{
3005 struct e1000_phy_info *phy = &hw->phy;
3006 u32 phy_ctrl;
3007 s32 ret_val = 0;
3008 u16 data;
3009
3010 if (phy->type == e1000_phy_ife)
3011 return 0;
3012
3013 phy_ctrl = er32(PHY_CTRL);
3014
3015 if (active) {
3016 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3017 ew32(PHY_CTRL, phy_ctrl);
3018
3019 if (phy->type != e1000_phy_igp_3)
3020 return 0;
3021
3022
3023
3024
3025 if (hw->mac.type == e1000_ich8lan)
3026 e1000e_gig_downshift_workaround_ich8lan(hw);
3027
3028
3029 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
3030 if (ret_val)
3031 return ret_val;
3032 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3033 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
3034 if (ret_val)
3035 return ret_val;
3036 } else {
3037 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3038 ew32(PHY_CTRL, phy_ctrl);
3039
3040 if (phy->type != e1000_phy_igp_3)
3041 return 0;
3042
3043
3044
3045
3046
3047
3048 if (phy->smart_speed == e1000_smart_speed_on) {
3049 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3050 &data);
3051 if (ret_val)
3052 return ret_val;
3053
3054 data |= IGP01E1000_PSCFR_SMART_SPEED;
3055 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3056 data);
3057 if (ret_val)
3058 return ret_val;
3059 } else if (phy->smart_speed == e1000_smart_speed_off) {
3060 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3061 &data);
3062 if (ret_val)
3063 return ret_val;
3064
3065 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3066 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3067 data);
3068 if (ret_val)
3069 return ret_val;
3070 }
3071 }
3072
3073 return 0;
3074}
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
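/**
 *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
 *  @hw: pointer to the HW structure
 *  @active: true to enable LPLU, false to disable
 *
 *  Sets the LPLU D3 state according to the active flag.  When
 *  activating LPLU this function also disables smart speed
 *  and vice versa.  LPLU is only activated if the advertised speeds
 *  include 10 Mbps (all speeds, non-gig, or 10-only).
 **/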
3089static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3090{
3091 struct e1000_phy_info *phy = &hw->phy;
3092 u32 phy_ctrl;
3093 s32 ret_val = 0;
3094 u16 data;
3095
3096 phy_ctrl = er32(PHY_CTRL);
3097
3098 if (!active) {
3099 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3100 ew32(PHY_CTRL, phy_ctrl);
3101
3102 if (phy->type != e1000_phy_igp_3)
3103 return 0;
3104
3105
3106
3107
3108
3109
3110 if (phy->smart_speed == e1000_smart_speed_on) {
3111 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3112 &data);
3113 if (ret_val)
3114 return ret_val;
3115
3116 data |= IGP01E1000_PSCFR_SMART_SPEED;
3117 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3118 data);
3119 if (ret_val)
3120 return ret_val;
3121 } else if (phy->smart_speed == e1000_smart_speed_off) {
3122 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3123 &data);
3124 if (ret_val)
3125 return ret_val;
3126
3127 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3128 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3129 data);
3130 if (ret_val)
3131 return ret_val;
3132 }
3133 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3134 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3135 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3136 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3137 ew32(PHY_CTRL, phy_ctrl);
3138
3139 if (phy->type != e1000_phy_igp_3)
3140 return 0;
3141
3142
3143
3144
3145 if (hw->mac.type == e1000_ich8lan)
3146 e1000e_gig_downshift_workaround_ich8lan(hw);
3147
3148
3149 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
3150 if (ret_val)
3151 return ret_val;
3152
3153 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3154 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
3155 }
3156
3157 return ret_val;
3158}
3159
3160
3161
3162
3163
3164
3165
3166
3167
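/**
 *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
 *  @hw: pointer to the HW structure
 *  @bank: pointer to the variable that returns the active bank
 *
 *  Reads the signature byte from the NVM using the flash access registers
 *  (or the EECD register on ICH8/ICH9) to determine which flash bank holds
 *  a valid signature.
 **/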
3168static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3169{
3170 u32 eecd;
3171 struct e1000_nvm_info *nvm = &hw->nvm;
3172 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3173 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3174 u32 nvm_dword = 0;
3175 u8 sig_byte = 0;
3176 s32 ret_val;
3177
3178 switch (hw->mac.type) {
3179 case e1000_pch_spt:
3180 case e1000_pch_cnp:
3181 case e1000_pch_tgp:
3182 case e1000_pch_adp:
3183 case e1000_pch_mtp:
3184 case e1000_pch_lnp:
3185 bank1_offset = nvm->flash_bank_size;
3186 act_offset = E1000_ICH_NVM_SIG_WORD;
3187
3188
3189 *bank = 0;
3190
3191
3192 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
3193 &nvm_dword);
3194 if (ret_val)
3195 return ret_val;
3196 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3197 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3198 E1000_ICH_NVM_SIG_VALUE) {
3199 *bank = 0;
3200 return 0;
3201 }
3202
3203
3204 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
3205 bank1_offset,
3206 &nvm_dword);
3207 if (ret_val)
3208 return ret_val;
3209 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3210 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3211 E1000_ICH_NVM_SIG_VALUE) {
3212 *bank = 1;
3213 return 0;
3214 }
3215
3216 e_dbg("ERROR: No valid NVM bank present\n");
3217 return -E1000_ERR_NVM;
3218 case e1000_ich8lan:
3219 case e1000_ich9lan:
3220 eecd = er32(EECD);
3221 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3222 E1000_EECD_SEC1VAL_VALID_MASK) {
3223 if (eecd & E1000_EECD_SEC1VAL)
3224 *bank = 1;
3225 else
3226 *bank = 0;
3227
3228 return 0;
3229 }
3230 e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3231 fallthrough;
3232 default:
3233
3234 *bank = 0;
3235
3236
3237 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3238 &sig_byte);
3239 if (ret_val)
3240 return ret_val;
3241 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3242 E1000_ICH_NVM_SIG_VALUE) {
3243 *bank = 0;
3244 return 0;
3245 }
3246
3247
3248 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3249 bank1_offset,
3250 &sig_byte);
3251 if (ret_val)
3252 return ret_val;
3253 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3254 E1000_ICH_NVM_SIG_VALUE) {
3255 *bank = 1;
3256 return 0;
3257 }
3258
3259 e_dbg("ERROR: No valid NVM bank present\n");
3260 return -E1000_ERR_NVM;
3261 }
3262}
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
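/**
 *  e1000_read_nvm_spt - Read word(s) from the NVM (SPT and newer)
 *  @hw: pointer to the HW structure
 *  @offset: offset (in words) of the word(s) to read
 *  @words: number of words to read
 *  @data: buffer that receives the word(s) read
 *
 *  Reads word(s) from the NVM using 32-bit flash accesses, returning any
 *  modified shadow RAM values in place of the flash contents.
 **/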
3273static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3274 u16 *data)
3275{
3276 struct e1000_nvm_info *nvm = &hw->nvm;
3277 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3278 u32 act_offset;
3279 s32 ret_val = 0;
3280 u32 bank = 0;
3281 u32 dword = 0;
3282 u16 offset_to_read;
3283 u16 i;
3284
3285 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3286 (words == 0)) {
3287 e_dbg("nvm parameter(s) out of bounds\n");
3288 ret_val = -E1000_ERR_NVM;
3289 goto out;
3290 }
3291
3292 nvm->ops.acquire(hw);
3293
3294 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3295 if (ret_val) {
3296 e_dbg("Could not detect valid bank, assuming bank 0\n");
3297 bank = 0;
3298 }
3299
3300 act_offset = (bank) ? nvm->flash_bank_size : 0;
3301 act_offset += offset;
3302
3303 ret_val = 0;
3304
3305 for (i = 0; i < words; i += 2) {
3306 if (words - i == 1) {
3307 if (dev_spec->shadow_ram[offset + i].modified) {
3308 data[i] =
3309 dev_spec->shadow_ram[offset + i].value;
3310 } else {
3311 offset_to_read = act_offset + i -
3312 ((act_offset + i) % 2);
3313 ret_val =
3314 e1000_read_flash_dword_ich8lan(hw,
3315 offset_to_read,
3316 &dword);
3317 if (ret_val)
3318 break;
3319 if ((act_offset + i) % 2 == 0)
3320 data[i] = (u16)(dword & 0xFFFF);
3321 else
3322 data[i] = (u16)((dword >> 16) & 0xFFFF);
3323 }
3324 } else {
3325 offset_to_read = act_offset + i;
3326 if (!(dev_spec->shadow_ram[offset + i].modified) ||
3327 !(dev_spec->shadow_ram[offset + i + 1].modified)) {
3328 ret_val =
3329 e1000_read_flash_dword_ich8lan(hw,
3330 offset_to_read,
3331 &dword);
3332 if (ret_val)
3333 break;
3334 }
3335 if (dev_spec->shadow_ram[offset + i].modified)
3336 data[i] =
3337 dev_spec->shadow_ram[offset + i].value;
3338 else
3339 data[i] = (u16)(dword & 0xFFFF);
3340 if (dev_spec->shadow_ram[offset + i].modified)
3341 data[i + 1] =
3342 dev_spec->shadow_ram[offset + i + 1].value;
3343 else
3344 data[i + 1] = (u16)(dword >> 16 & 0xFFFF);
3345 }
3346 }
3347
3348 nvm->ops.release(hw);
3349
3350out:
3351 if (ret_val)
3352 e_dbg("NVM read error: %d\n", ret_val);
3353
3354 return ret_val;
3355}
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
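/**
 *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
 *  @hw: pointer to the HW structure
 *  @offset: offset (in words) of the word(s) to read
 *  @words: number of words to read
 *  @data: buffer that receives the word(s) read
 *
 *  Reads word(s) from the NVM, returning any modified shadow RAM values
 *  in place of the flash contents.
 **/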
3366static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3367 u16 *data)
3368{
3369 struct e1000_nvm_info *nvm = &hw->nvm;
3370 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3371 u32 act_offset;
3372 s32 ret_val = 0;
3373 u32 bank = 0;
3374 u16 i, word;
3375
3376 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3377 (words == 0)) {
3378 e_dbg("nvm parameter(s) out of bounds\n");
3379 ret_val = -E1000_ERR_NVM;
3380 goto out;
3381 }
3382
3383 nvm->ops.acquire(hw);
3384
3385 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3386 if (ret_val) {
3387 e_dbg("Could not detect valid bank, assuming bank 0\n");
3388 bank = 0;
3389 }
3390
3391 act_offset = (bank) ? nvm->flash_bank_size : 0;
3392 act_offset += offset;
3393
3394 ret_val = 0;
3395 for (i = 0; i < words; i++) {
3396 if (dev_spec->shadow_ram[offset + i].modified) {
3397 data[i] = dev_spec->shadow_ram[offset + i].value;
3398 } else {
3399 ret_val = e1000_read_flash_word_ich8lan(hw,
3400 act_offset + i,
3401 &word);
3402 if (ret_val)
3403 break;
3404 data[i] = word;
3405 }
3406 }
3407
3408 nvm->ops.release(hw);
3409
3410out:
3411 if (ret_val)
3412 e_dbg("NVM read error: %d\n", ret_val);
3413
3414 return ret_val;
3415}
3416
3417
3418
3419
3420
3421
3422
3423
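/**
 *  e1000_flash_cycle_init_ich8lan - Initialize flash
 *  @hw: pointer to the HW structure
 *
 *  This function does initial flash setup so that a new read/write/erase
 *  cycle can be started.
 **/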
3424static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3425{
3426 union ich8_hws_flash_status hsfsts;
3427 s32 ret_val = -E1000_ERR_NVM;
3428
3429 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3430
3431
3432 if (!hsfsts.hsf_status.fldesvalid) {
3433 e_dbg("Flash descriptor invalid. SW Sequencing must be used.\n");
3434 return -E1000_ERR_NVM;
3435 }
3436
3437
3438 hsfsts.hsf_status.flcerr = 1;
3439 hsfsts.hsf_status.dael = 1;
3440 if (hw->mac.type >= e1000_pch_spt)
3441 ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
3442 else
3443 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
3444
3445
3446
3447
3448
3449
3450
3451
3452
3453 if (!hsfsts.hsf_status.flcinprog) {
3454
3455
3456
3457
3458 hsfsts.hsf_status.flcdone = 1;
3459 if (hw->mac.type >= e1000_pch_spt)
3460 ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
3461 else
3462 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
3463 ret_val = 0;
3464 } else {
3465 s32 i;
3466
3467
3468
3469
3470 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3471 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3472 if (!hsfsts.hsf_status.flcinprog) {
3473 ret_val = 0;
3474 break;
3475 }
3476 udelay(1);
3477 }
3478 if (!ret_val) {
3479
3480
3481
3482 hsfsts.hsf_status.flcdone = 1;
3483 if (hw->mac.type >= e1000_pch_spt)
3484 ew32flash(ICH_FLASH_HSFSTS,
3485 hsfsts.regval & 0xFFFF);
3486 else
3487 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
3488 } else {
3489 e_dbg("Flash controller busy, cannot get access\n");
3490 }
3491 }
3492
3493 return ret_val;
3494}
3495
3496
3497
3498
3499
3500
3501
3502
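/**
 *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
 *  @hw: pointer to the HW structure
 *  @timeout: maximum time (in usec) to wait for completion
 *
 *  This function starts a flash cycle and waits for its completion.
 **/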
3503static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3504{
3505 union ich8_hws_flash_ctrl hsflctl;
3506 union ich8_hws_flash_status hsfsts;
3507 u32 i = 0;
3508
3509
3510 if (hw->mac.type >= e1000_pch_spt)
3511 hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
3512 else
3513 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
3514 hsflctl.hsf_ctrl.flcgo = 1;
3515
3516 if (hw->mac.type >= e1000_pch_spt)
3517 ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
3518 else
3519 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
3520
3521
3522 do {
3523 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3524 if (hsfsts.hsf_status.flcdone)
3525 break;
3526 udelay(1);
3527 } while (i++ < timeout);
3528
3529 if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3530 return 0;
3531
3532 return -E1000_ERR_NVM;
3533}
3534
3535
3536
3537
3538
3539
3540
3541
3542
3543
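/**
 *  e1000_read_flash_dword_ich8lan - Read dword from flash
 *  @hw: pointer to the HW structure
 *  @offset: offset to data location
 *  @data: pointer to the location for storing the data
 *
 *  Reads the flash dword at offset into data.  Offset is converted
 *  to bytes before the read.
 **/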
3544static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3545 u32 *data)
3546{
3547
3548 offset <<= 1;
3549 return e1000_read_flash_data32_ich8lan(hw, offset, data);
3550}
3551
3552
3553
3554
3555
3556
3557
3558
3559
3560
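/**
 *  e1000_read_flash_word_ich8lan - Read word from flash
 *  @hw: pointer to the HW structure
 *  @offset: offset to data location
 *  @data: pointer to the location for storing the data
 *
 *  Reads the flash word at offset into data.  Offset is converted
 *  to bytes before the read.
 **/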
3561static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3562 u16 *data)
3563{
3564
3565 offset <<= 1;
3566
3567 return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3568}
3569
3570
3571
3572
3573
3574
3575
3576
3577
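/**
 *  e1000_read_flash_byte_ich8lan - Read byte from flash
 *  @hw: pointer to the HW structure
 *  @offset: the offset of the byte to read
 *  @data: pointer to a byte to store the value read
 *
 *  Reads a single byte from the NVM using the flash access registers.
 *  Not supported on SPT and newer parts, which only allow 32-bit access.
 **/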
3578static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3579 u8 *data)
3580{
3581 s32 ret_val;
3582 u16 word = 0;
3583
3584
3585
3586
3587 if (hw->mac.type >= e1000_pch_spt)
3588 return -E1000_ERR_NVM;
3589 else
3590 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3591
3592 if (ret_val)
3593 return ret_val;
3594
3595 *data = (u8)word;
3596
3597 return 0;
3598}
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
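/**
 *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
 *  @hw: pointer to the HW structure
 *  @offset: the offset (in bytes) of the byte or word to read
 *  @size: size of data to read, 1=byte 2=word
 *  @data: pointer to the word to store the value read
 *
 *  Reads a byte or word from the NVM using the flash access registers.
 **/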
3609static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3610 u8 size, u16 *data)
3611{
3612 union ich8_hws_flash_status hsfsts;
3613 union ich8_hws_flash_ctrl hsflctl;
3614 u32 flash_linear_addr;
3615 u32 flash_data = 0;
3616 s32 ret_val = -E1000_ERR_NVM;
3617 u8 count = 0;
3618
3619 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3620 return -E1000_ERR_NVM;
3621
3622 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3623 hw->nvm.flash_base_addr);
3624
3625 do {
3626 udelay(1);
3627
3628 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3629 if (ret_val)
3630 break;
3631
3632 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
3633
3634 hsflctl.hsf_ctrl.fldbcount = size - 1;
3635 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3636 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
3637
3638 ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
3639
3640 ret_val =
3641 e1000_flash_cycle_ich8lan(hw,
3642 ICH_FLASH_READ_COMMAND_TIMEOUT);
3643
3644
3645
3646
3647
3648
3649 if (!ret_val) {
3650 flash_data = er32flash(ICH_FLASH_FDATA0);
3651 if (size == 1)
3652 *data = (u8)(flash_data & 0x000000FF);
3653 else if (size == 2)
3654 *data = (u16)(flash_data & 0x0000FFFF);
3655 break;
3656 } else {
3657
3658
3659
3660
3661
3662 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3663 if (hsfsts.hsf_status.flcerr) {
3664
3665 continue;
3666 } else if (!hsfsts.hsf_status.flcdone) {
3667 e_dbg("Timeout error - flash cycle did not complete.\n");
3668 break;
3669 }
3670 }
3671 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3672
3673 return ret_val;
3674}
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684
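/**
 *  e1000_read_flash_data32_ich8lan - Read dword from NVM
 *  @hw: pointer to the HW structure
 *  @offset: the offset (in bytes) of the dword to read
 *  @data: pointer to the dword to store the value read
 *
 *  Reads a dword from the NVM using the flash access registers.
 *  Only supported on SPT and newer flash controllers.
 **/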
3685static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3686 u32 *data)
3687{
3688 union ich8_hws_flash_status hsfsts;
3689 union ich8_hws_flash_ctrl hsflctl;
3690 u32 flash_linear_addr;
3691 s32 ret_val = -E1000_ERR_NVM;
3692 u8 count = 0;
3693
3694 if (offset > ICH_FLASH_LINEAR_ADDR_MASK || hw->mac.type < e1000_pch_spt)
3695 return -E1000_ERR_NVM;
3696 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3697 hw->nvm.flash_base_addr);
3698
3699 do {
3700 udelay(1);
3701
3702 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3703 if (ret_val)
3704 break;
3705
3706
3707
3708 hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
3709
3710
3711 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
3712 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3713
3714
3715
3716 ew32flash(ICH_FLASH_HSFSTS, (u32)hsflctl.regval << 16);
3717 ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
3718
3719 ret_val =
3720 e1000_flash_cycle_ich8lan(hw,
3721 ICH_FLASH_READ_COMMAND_TIMEOUT);
3722
3723
3724
3725
3726
3727
3728 if (!ret_val) {
3729 *data = er32flash(ICH_FLASH_FDATA0);
3730 break;
3731 } else {
3732
3733
3734
3735
3736
3737 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3738 if (hsfsts.hsf_status.flcerr) {
3739
3740 continue;
3741 } else if (!hsfsts.hsf_status.flcdone) {
3742 e_dbg("Timeout error - flash cycle did not complete.\n");
3743 break;
3744 }
3745 }
3746 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3747
3748 return ret_val;
3749}
3750
3751
3752
3753
3754
3755
3756
3757
3758
3759
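/**
 *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
 *  @hw: pointer to the HW structure
 *  @offset: offset (in words) of the word(s) to write
 *  @words: number of words to write
 *  @data: the word(s) to write
 *
 *  Stages the word(s) in the driver's shadow RAM; they are committed to
 *  the flash when the NVM checksum is next updated.
 **/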
3760static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3761 u16 *data)
3762{
3763 struct e1000_nvm_info *nvm = &hw->nvm;
3764 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3765 u16 i;
3766
3767 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3768 (words == 0)) {
3769 e_dbg("nvm parameter(s) out of bounds\n");
3770 return -E1000_ERR_NVM;
3771 }
3772
3773 nvm->ops.acquire(hw);
3774
3775 for (i = 0; i < words; i++) {
3776 dev_spec->shadow_ram[offset + i].modified = true;
3777 dev_spec->shadow_ram[offset + i].value = data[i];
3778 }
3779
3780 nvm->ops.release(hw);
3781
3782 return 0;
3783}
3784
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
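/**
 *  e1000_update_nvm_checksum_spt - Update the checksum for NVM (SPT and newer)
 *  @hw: pointer to the HW structure
 *
 *  Updates the generic NVM checksum, then commits the shadow RAM (including
 *  the bank signature) dword-by-dword to the inactive flash bank and
 *  invalidates the previously active bank.
 **/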
3796static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
3797{
3798 struct e1000_nvm_info *nvm = &hw->nvm;
3799 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3800 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3801 s32 ret_val;
3802 u32 dword = 0;
3803
3804 ret_val = e1000e_update_nvm_checksum_generic(hw);
3805 if (ret_val)
3806 goto out;
3807
3808 if (nvm->type != e1000_nvm_flash_sw)
3809 goto out;
3810
3811 nvm->ops.acquire(hw);
3812
3813
3814
3815
3816
3817 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3818 if (ret_val) {
3819 e_dbg("Could not detect valid bank, assuming bank 0\n");
3820 bank = 0;
3821 }
3822
3823 if (bank == 0) {
3824 new_bank_offset = nvm->flash_bank_size;
3825 old_bank_offset = 0;
3826 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3827 if (ret_val)
3828 goto release;
3829 } else {
3830 old_bank_offset = nvm->flash_bank_size;
3831 new_bank_offset = 0;
3832 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3833 if (ret_val)
3834 goto release;
3835 }
3836 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i += 2) {
3837
3838
3839
3840
3841 ret_val = e1000_read_flash_dword_ich8lan(hw,
3842 i + old_bank_offset,
3843 &dword);
3844
3845 if (dev_spec->shadow_ram[i].modified) {
3846 dword &= 0xffff0000;
3847 dword |= (dev_spec->shadow_ram[i].value & 0xffff);
3848 }
3849 if (dev_spec->shadow_ram[i + 1].modified) {
3850 dword &= 0x0000ffff;
3851 dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
3852 << 16);
3853 }
3854 if (ret_val)
3855 break;
3856
3857
3858
3859
3860
3861
3862
3863
3864 if (i == E1000_ICH_NVM_SIG_WORD - 1)
3865 dword |= E1000_ICH_NVM_SIG_MASK << 16;
3866
3867
3868 act_offset = (i + new_bank_offset) << 1;
3869
3870 usleep_range(100, 200);
3871
3872
3873 act_offset = i + new_bank_offset;
3874 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
3875 dword);
3876 if (ret_val)
3877 break;
3878 }
3879
3880
3881
3882
3883 if (ret_val) {
3884
3885 e_dbg("Flash commit failed.\n");
3886 goto release;
3887 }
3888
3889
3890
3891
3892
3893
3894 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
3895
3896
3897 --act_offset;
3898 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
3899
3900 if (ret_val)
3901 goto release;
3902
3903 dword &= 0xBFFFFFFF;
3904 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
3905
3906 if (ret_val)
3907 goto release;
3908
3909
3910 act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
3911 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
3912
3913 if (ret_val)
3914 goto release;
3915
3916 dword &= 0x00FFFFFF;
3917 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
3918
3919 if (ret_val)
3920 goto release;
3921
3922
3923 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
3924 dev_spec->shadow_ram[i].modified = false;
3925 dev_spec->shadow_ram[i].value = 0xFFFF;
3926 }
3927
3928release:
3929 nvm->ops.release(hw);
3930
3931
3932
3933
3934 if (!ret_val) {
3935 nvm->ops.reload(hw);
3936 usleep_range(10000, 11000);
3937 }
3938
3939out:
3940 if (ret_val)
3941 e_dbg("NVM update error: %d\n", ret_val);
3942
3943 return ret_val;
3944}
3945
3946
3947
3948
3949
3950
3951
3952
3953
3954
3955
3956
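/**
 *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
 *  @hw: pointer to the HW structure
 *
 *  Updates the generic NVM checksum, then commits the shadow RAM (including
 *  the bank signature) byte-by-byte to the inactive flash bank and
 *  invalidates the previously active bank.
 **/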
3957static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
3958{
3959 struct e1000_nvm_info *nvm = &hw->nvm;
3960 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3961 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3962 s32 ret_val;
3963 u16 data = 0;
3964
3965 ret_val = e1000e_update_nvm_checksum_generic(hw);
3966 if (ret_val)
3967 goto out;
3968
3969 if (nvm->type != e1000_nvm_flash_sw)
3970 goto out;
3971
3972 nvm->ops.acquire(hw);
3973
3974
3975
3976
3977
3978 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3979 if (ret_val) {
3980 e_dbg("Could not detect valid bank, assuming bank 0\n");
3981 bank = 0;
3982 }
3983
3984 if (bank == 0) {
3985 new_bank_offset = nvm->flash_bank_size;
3986 old_bank_offset = 0;
3987 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3988 if (ret_val)
3989 goto release;
3990 } else {
3991 old_bank_offset = nvm->flash_bank_size;
3992 new_bank_offset = 0;
3993 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3994 if (ret_val)
3995 goto release;
3996 }
3997 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
3998 if (dev_spec->shadow_ram[i].modified) {
3999 data = dev_spec->shadow_ram[i].value;
4000 } else {
4001 ret_val = e1000_read_flash_word_ich8lan(hw, i +
4002 old_bank_offset,
4003 &data);
4004 if (ret_val)
4005 break;
4006 }
4007
4008
4009
4010
4011
4012
4013
4014
4015 if (i == E1000_ICH_NVM_SIG_WORD)
4016 data |= E1000_ICH_NVM_SIG_MASK;
4017
4018
4019 act_offset = (i + new_bank_offset) << 1;
4020
4021 usleep_range(100, 200);
4022
4023 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4024 act_offset,
4025 (u8)data);
4026 if (ret_val)
4027 break;
4028
4029 usleep_range(100, 200);
4030 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4031 act_offset + 1,
4032 (u8)(data >> 8));
4033 if (ret_val)
4034 break;
4035 }
4036
4037
4038
4039
4040 if (ret_val) {
4041
4042 e_dbg("Flash commit failed.\n");
4043 goto release;
4044 }
4045
4046
4047
4048
4049
4050
4051 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4052 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
4053 if (ret_val)
4054 goto release;
4055
4056 data &= 0xBFFF;
4057 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4058 act_offset * 2 + 1,
4059 (u8)(data >> 8));
4060 if (ret_val)
4061 goto release;
4062
4063
4064
4065
4066
4067
4068 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4069 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
4070 if (ret_val)
4071 goto release;
4072
4073
4074 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
4075 dev_spec->shadow_ram[i].modified = false;
4076 dev_spec->shadow_ram[i].value = 0xFFFF;
4077 }
4078
4079release:
4080 nvm->ops.release(hw);
4081
4082
4083
4084
4085 if (!ret_val) {
4086 nvm->ops.reload(hw);
4087 usleep_range(10000, 11000);
4088 }
4089
4090out:
4091 if (ret_val)
4092 e_dbg("NVM update error: %d\n", ret_val);
4093
4094 return ret_val;
4095}
4096
4097
4098
4099
4100
4101
4102
4103
4104
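/**
 *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  If the valid-checksum bit in the compatibility/init word is clear, the
 *  checksum was never calculated; on parts older than CNP the bit is set
 *  and the checksum recomputed before the generic validation is run.
 **/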
4105static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4106{
4107 s32 ret_val;
4108 u16 data;
4109 u16 word;
4110 u16 valid_csum_mask;
4111
4112
4113
4114
4115
4116
4117 switch (hw->mac.type) {
4118 case e1000_pch_lpt:
4119 case e1000_pch_spt:
4120 case e1000_pch_cnp:
4121 case e1000_pch_tgp:
4122 case e1000_pch_adp:
4123 case e1000_pch_mtp:
4124 case e1000_pch_lnp:
4125 word = NVM_COMPAT;
4126 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4127 break;
4128 default:
4129 word = NVM_FUTURE_INIT_WORD1;
4130 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4131 break;
4132 }
4133
4134 ret_val = e1000_read_nvm(hw, word, 1, &data);
4135 if (ret_val)
4136 return ret_val;
4137
4138 if (!(data & valid_csum_mask)) {
4139 e_dbg("NVM Checksum Invalid\n");
4140
4141 if (hw->mac.type < e1000_pch_cnp) {
4142 data |= valid_csum_mask;
4143 ret_val = e1000_write_nvm(hw, word, 1, &data);
4144 if (ret_val)
4145 return ret_val;
4146 ret_val = e1000e_update_nvm_checksum(hw);
4147 if (ret_val)
4148 return ret_val;
4149 }
4150 }
4151
4152 return e1000e_validate_nvm_checksum_generic(hw);
4153}
4154
4155
4156
4157
4158
4159
4160
4161
4162
4163
4164
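/**
 *  e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
 *  @hw: pointer to the HW structure
 *
 *  To prevent malicious write/erase of the NVM, set it to be read-only
 *  by configuring protected range 0 and locking the flash registers with
 *  FLOCKDN until the next hardware reset.
 **/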
4165void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
4166{
4167 struct e1000_nvm_info *nvm = &hw->nvm;
4168 union ich8_flash_protected_range pr0;
4169 union ich8_hws_flash_status hsfsts;
4170 u32 gfpreg;
4171
4172 nvm->ops.acquire(hw);
4173
4174 gfpreg = er32flash(ICH_FLASH_GFPREG);
4175
4176
4177 pr0.regval = er32flash(ICH_FLASH_PR0);
4178 pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
4179 pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
4180 pr0.range.wpe = true;
4181 ew32flash(ICH_FLASH_PR0, pr0.regval);
4182
4183
4184
4185
4186
4187
4188 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4189 hsfsts.hsf_status.flockdn = true;
4190 ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
4191
4192 nvm->ops.release(hw);
4193}
4194
4195
4196
4197
4198
4199
4200
4201
4202
4203
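/**
 *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
 *  @hw: pointer to the HW structure
 *  @offset: the offset (in bytes) at which to write
 *  @size: size of data to write, 1=byte 2=word
 *  @data: the byte(s) to write to the NVM
 *
 *  Writes one or two bytes to the NVM using the flash access registers.
 **/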
4204static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4205 u8 size, u16 data)
4206{
4207 union ich8_hws_flash_status hsfsts;
4208 union ich8_hws_flash_ctrl hsflctl;
4209 u32 flash_linear_addr;
4210 u32 flash_data = 0;
4211 s32 ret_val;
4212 u8 count = 0;
4213
4214 if (hw->mac.type >= e1000_pch_spt) {
4215 if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4216 return -E1000_ERR_NVM;
4217 } else {
4218 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4219 return -E1000_ERR_NVM;
4220 }
4221
4222 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4223 hw->nvm.flash_base_addr);
4224
4225 do {
4226 udelay(1);
4227
4228 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4229 if (ret_val)
4230 break;
4231
4232
4233
4234 if (hw->mac.type >= e1000_pch_spt)
4235 hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
4236 else
4237 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
4238
4239
4240 hsflctl.hsf_ctrl.fldbcount = size - 1;
4241 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4242
4243
4244
4245
4246 if (hw->mac.type >= e1000_pch_spt)
4247 ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
4248 else
4249 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
4250
4251 ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
4252
4253 if (size == 1)
4254 flash_data = (u32)data & 0x00FF;
4255 else
4256 flash_data = (u32)data;
4257
4258 ew32flash(ICH_FLASH_FDATA0, flash_data);
4259
4260
4261
4262
4263 ret_val =
4264 e1000_flash_cycle_ich8lan(hw,
4265 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4266 if (!ret_val)
4267 break;
4268
4269
4270
4271
4272
4273
4274 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4275 if (hsfsts.hsf_status.flcerr)
4276
4277 continue;
4278 if (!hsfsts.hsf_status.flcdone) {
4279 e_dbg("Timeout error - flash cycle did not complete.\n");
4280 break;
4281 }
4282 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4283
4284 return ret_val;
4285}
4286
4287
4288
4289
4290
4291
4292
4293
4294
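/**
 *  e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
 *  @hw: pointer to the HW structure
 *  @offset: the offset (in bytes) of the dword to write
 *  @data: the 4 bytes to write to the NVM
 *
 *  Writes a dword to the NVM using the flash access registers.
 **/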
4295static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4296 u32 data)
4297{
4298 union ich8_hws_flash_status hsfsts;
4299 union ich8_hws_flash_ctrl hsflctl;
4300 u32 flash_linear_addr;
4301 s32 ret_val;
4302 u8 count = 0;
4303
4304 if (hw->mac.type >= e1000_pch_spt) {
4305 if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4306 return -E1000_ERR_NVM;
4307 }
4308 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4309 hw->nvm.flash_base_addr);
4310 do {
4311 udelay(1);
4312
4313 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4314 if (ret_val)
4315 break;
4316
4317
4318
4319
4320 if (hw->mac.type >= e1000_pch_spt)
4321 hsflctl.regval = er32flash(ICH_FLASH_HSFSTS)
4322 >> 16;
4323 else
4324 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
4325
4326 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4327 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4328
4329
4330
4331
4332
4333 if (hw->mac.type >= e1000_pch_spt)
4334 ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
4335 else
4336 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
4337
4338 ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
4339
4340 ew32flash(ICH_FLASH_FDATA0, data);
4341
4342
4343
4344
4345 ret_val =
4346 e1000_flash_cycle_ich8lan(hw,
4347 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4348
4349 if (!ret_val)
4350 break;
4351
4352
4353
4354
4355
4356
4357 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4358
4359 if (hsfsts.hsf_status.flcerr)
4360
4361 continue;
4362 if (!hsfsts.hsf_status.flcdone) {
4363 e_dbg("Timeout error - flash cycle did not complete.\n");
4364 break;
4365 }
4366 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4367
4368 return ret_val;
4369}
4370
4371
4372
4373
4374
4375
4376
4377
4378
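/**
 *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
 *  @hw: pointer to the HW structure
 *  @offset: the offset of the byte to write
 *  @data: the byte to write to the NVM
 *
 *  Writes a single byte to the NVM using the flash access registers.
 **/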
4379static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4380 u8 data)
4381{
4382 u16 word = (u16)data;
4383
4384 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4385}
4386
4387
4388
4389
4390
4391
4392
4393
4394
4395
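/**
 *  e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
 *  @hw: pointer to the HW structure
 *  @offset: the offset (in words) of the dword to write
 *  @dword: the dword to write to the NVM
 *
 *  Writes a single dword to the NVM using the flash access registers.
 *  Goes through a retry algorithm before giving up.
 **/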
4396static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4397 u32 offset, u32 dword)
4398{
4399 s32 ret_val;
4400 u16 program_retries;
4401
4402
4403 offset <<= 1;
4404 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4405
4406 if (!ret_val)
4407 return ret_val;
4408 for (program_retries = 0; program_retries < 100; program_retries++) {
4409 e_dbg("Retrying Byte %8.8X at offset %u\n", dword, offset);
4410 usleep_range(100, 200);
4411 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4412 if (!ret_val)
4413 break;
4414 }
4415 if (program_retries == 100)
4416 return -E1000_ERR_NVM;
4417
4418 return 0;
4419}
4420
4421
4422
4423
4424
4425
4426
4427
4428
4429
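/**
 *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
 *  @hw: pointer to the HW structure
 *  @offset: the offset of the byte to write
 *  @byte: the byte to write to the NVM
 *
 *  Writes a single byte to the NVM using the flash access registers.
 *  Goes through a retry algorithm before giving up.
 **/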
4430static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4431 u32 offset, u8 byte)
4432{
4433 s32 ret_val;
4434 u16 program_retries;
4435
4436 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4437 if (!ret_val)
4438 return ret_val;
4439
4440 for (program_retries = 0; program_retries < 100; program_retries++) {
4441 e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
4442 usleep_range(100, 200);
4443 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4444 if (!ret_val)
4445 break;
4446 }
4447 if (program_retries == 100)
4448 return -E1000_ERR_NVM;
4449
4450 return 0;
4451}
4452
4453
4454
4455
4456
4457
4458
4459
4460
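/**
 *  e1000_erase_flash_bank_ich8lan - Erase a flash bank from the NVM
 *  @hw: pointer to the HW structure
 *  @bank: 0 for first bank, 1 for second bank, etc.
 *
 *  Erases the specified flash bank one sector at a time, using the erase
 *  sector size reported by the flash controller.
 **/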
4461static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4462{
4463 struct e1000_nvm_info *nvm = &hw->nvm;
4464 union ich8_hws_flash_status hsfsts;
4465 union ich8_hws_flash_ctrl hsflctl;
4466 u32 flash_linear_addr;
4467
4468 u32 flash_bank_size = nvm->flash_bank_size * 2;
4469 s32 ret_val;
4470 s32 count = 0;
4471 s32 j, iteration, sector_size;
4472
4473 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4474
4475
4476
4477
4478
4479
4480
4481
4482
4483
4484
4485
4486
4487 switch (hsfsts.hsf_status.berasesz) {
4488 case 0:
4489
4490 sector_size = ICH_FLASH_SEG_SIZE_256;
4491 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4492 break;
4493 case 1:
4494 sector_size = ICH_FLASH_SEG_SIZE_4K;
4495 iteration = 1;
4496 break;
4497 case 2:
4498 sector_size = ICH_FLASH_SEG_SIZE_8K;
4499 iteration = 1;
4500 break;
4501 case 3:
4502 sector_size = ICH_FLASH_SEG_SIZE_64K;
4503 iteration = 1;
4504 break;
4505 default:
4506 return -E1000_ERR_NVM;
4507 }
4508
4509
4510 flash_linear_addr = hw->nvm.flash_base_addr;
4511 flash_linear_addr += (bank) ? flash_bank_size : 0;
4512
4513 for (j = 0; j < iteration; j++) {
4514 do {
4515 u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4516
4517
4518 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4519 if (ret_val)
4520 return ret_val;
4521
4522
4523
4524
4525 if (hw->mac.type >= e1000_pch_spt)
4526 hsflctl.regval =
4527 er32flash(ICH_FLASH_HSFSTS) >> 16;
4528 else
4529 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
4530
4531 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4532 if (hw->mac.type >= e1000_pch_spt)
4533 ew32flash(ICH_FLASH_HSFSTS,
4534 hsflctl.regval << 16);
4535 else
4536 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
4537
4538
4539
4540
4541
4542 flash_linear_addr += (j * sector_size);
4543 ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
4544
4545 ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4546 if (!ret_val)
4547 break;
4548
4549
4550
4551
4552
4553 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4554 if (hsfsts.hsf_status.flcerr)
4555
4556 continue;
4557 else if (!hsfsts.hsf_status.flcdone)
4558 return ret_val;
4559 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4560 }
4561
4562 return 0;
4563}
4564
4565
4566
4567
4568
4569
4570
4571
4572
4573
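/**
 *  e1000_valid_led_default_ich8lan - Set the default LED settings
 *  @hw: pointer to the HW structure
 *  @data: pointer to the LED settings
 *
 *  Reads the LED default settings from the NVM into data.  If the NVM LED
 *  settings are all 0's or F's, set the LED default to a valid LED default
 *  setting.
 **/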
4574static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4575{
4576 s32 ret_val;
4577
4578 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
4579 if (ret_val) {
4580 e_dbg("NVM Read Error\n");
4581 return ret_val;
4582 }
4583
4584 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4585 *data = ID_LED_DEFAULT_ICH8LAN;
4586
4587 return 0;
4588}
4589
4590
4591
4592
4593
4594
4595
4596
4597
4598
4599
4600
4601
4602
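/**
 *  e1000_id_led_init_pchlan - store LED configurations
 *  @hw: pointer to the HW structure
 *
 *  PCH does not control LEDs via the LEDCTL register, rather it uses
 *  the PHY LED configuration register.  PCH also lacks an "always on" or
 *  "always off" mode, so "link-up" and "link-up inverted" are used to
 *  blink the LEDs for identification.
 **/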
4603static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4604{
4605 struct e1000_mac_info *mac = &hw->mac;
4606 s32 ret_val;
4607 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4608 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4609 u16 data, i, temp, shift;
4610
4611
4612 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4613 if (ret_val)
4614 return ret_val;
4615
4616 mac->ledctl_default = er32(LEDCTL);
4617 mac->ledctl_mode1 = mac->ledctl_default;
4618 mac->ledctl_mode2 = mac->ledctl_default;
4619
4620 for (i = 0; i < 4; i++) {
4621 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4622 shift = (i * 5);
4623 switch (temp) {
4624 case ID_LED_ON1_DEF2:
4625 case ID_LED_ON1_ON2:
4626 case ID_LED_ON1_OFF2:
4627 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4628 mac->ledctl_mode1 |= (ledctl_on << shift);
4629 break;
4630 case ID_LED_OFF1_DEF2:
4631 case ID_LED_OFF1_ON2:
4632 case ID_LED_OFF1_OFF2:
4633 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4634 mac->ledctl_mode1 |= (ledctl_off << shift);
4635 break;
4636 default:
4637
4638 break;
4639 }
4640 switch (temp) {
4641 case ID_LED_DEF1_ON2:
4642 case ID_LED_ON1_ON2:
4643 case ID_LED_OFF1_ON2:
4644 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4645 mac->ledctl_mode2 |= (ledctl_on << shift);
4646 break;
4647 case ID_LED_DEF1_OFF2:
4648 case ID_LED_ON1_OFF2:
4649 case ID_LED_OFF1_OFF2:
4650 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4651 mac->ledctl_mode2 |= (ledctl_off << shift);
4652 break;
4653 default:
4654
4655 break;
4656 }
4657 }
4658
4659 return 0;
4660}
4661
4662
4663
4664
4665
4666
4667
4668
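/**
 *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
 *  @hw: pointer to the HW structure
 *
 *  ICH8 uses the PCI Express bus but does not contain a PCI Express
 *  Capability register, so the bus width is hard coded to x1 when it
 *  cannot be determined.
 **/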
4669static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4670{
4671 struct e1000_bus_info *bus = &hw->bus;
4672 s32 ret_val;
4673
4674 ret_val = e1000e_get_bus_info_pcie(hw);
4675
4676
4677
4678
4679
4680
4681 if (bus->width == e1000_bus_width_unknown)
4682 bus->width = e1000_bus_width_pcie_x1;
4683
4684 return ret_val;
4685}
4686
4687
4688
4689
4690
4691
4692
4693
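/**
 *  e1000_reset_hw_ich8lan - Reset the hardware
 *  @hw: pointer to the HW structure
 *
 *  Does a full reset of the hardware which includes a reset of the PHY and
 *  MAC.
 **/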
4694static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4695{
4696 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4697 u16 kum_cfg;
4698 u32 ctrl, reg;
4699 s32 ret_val;
4700
4701
4702
4703
4704 ret_val = e1000e_disable_pcie_master(hw);
4705 if (ret_val)
4706 e_dbg("PCI-E Master disable polling has failed.\n");
4707
4708 e_dbg("Masking off all interrupts\n");
4709 ew32(IMC, 0xffffffff);
4710
4711
4712
4713
4714
4715 ew32(RCTL, 0);
4716 ew32(TCTL, E1000_TCTL_PSP);
4717 e1e_flush();
4718
4719 usleep_range(10000, 11000);
4720
4721
4722 if (hw->mac.type == e1000_ich8lan) {
4723
4724 ew32(PBA, E1000_PBA_8K);
4725
4726 ew32(PBS, E1000_PBS_16K);
4727 }
4728
4729 if (hw->mac.type == e1000_pchlan) {
4730
4731 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4732 if (ret_val)
4733 return ret_val;
4734
4735 if (kum_cfg & E1000_NVM_K1_ENABLE)
4736 dev_spec->nvm_k1_enabled = true;
4737 else
4738 dev_spec->nvm_k1_enabled = false;
4739 }
4740
4741 ctrl = er32(CTRL);
4742
4743 if (!hw->phy.ops.check_reset_block(hw)) {
4744
4745
4746
4747
4748 ctrl |= E1000_CTRL_PHY_RST;
4749
4750
4751
4752
4753 if ((hw->mac.type == e1000_pch2lan) &&
4754 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
4755 e1000_gate_hw_phy_config_ich8lan(hw, true);
4756 }
4757 ret_val = e1000_acquire_swflag_ich8lan(hw);
4758 e_dbg("Issuing a global reset to ich8lan\n");
4759 ew32(CTRL, (ctrl | E1000_CTRL_RST));
4760
4761 msleep(20);
4762
4763
4764 if (hw->mac.type == e1000_pch2lan) {
4765 reg = er32(FEXTNVM3);
4766 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4767 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4768 ew32(FEXTNVM3, reg);
4769 }
4770
4771 if (!ret_val)
4772 clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
4773
4774 if (ctrl & E1000_CTRL_PHY_RST) {
4775 ret_val = hw->phy.ops.get_cfg_done(hw);
4776 if (ret_val)
4777 return ret_val;
4778
4779 ret_val = e1000_post_phy_reset_ich8lan(hw);
4780 if (ret_val)
4781 return ret_val;
4782 }
4783
4784
4785
4786
4787
4788 if (hw->mac.type == e1000_pchlan)
4789 ew32(CRC_OFFSET, 0x65656565);
4790
4791 ew32(IMC, 0xffffffff);
4792 er32(ICR);
4793
4794 reg = er32(KABGTXD);
4795 reg |= E1000_KABGTXD_BGSQLBIAS;
4796 ew32(KABGTXD, reg);
4797
4798 return 0;
4799}
4800
4801
4802
4803
4804
4805
4806
4807
4808
4809
4810
4811
4812
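/**
 *  e1000_init_hw_ich8lan - Initialize the hardware
 *  @hw: pointer to the HW structure
 *
 *  Prepares the hardware for transmit and receive by doing the following:
 *   - initialize hardware bits
 *   - initialize LED identification
 *   - setup receive address registers
 *   - setup flow control
 *   - setup transmit descriptors
 *   - clear statistics
 **/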
4813static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
4814{
4815 struct e1000_mac_info *mac = &hw->mac;
4816 u32 ctrl_ext, txdctl, snoop, fflt_dbg;
4817 s32 ret_val;
4818 u16 i;
4819
4820 e1000_initialize_hw_bits_ich8lan(hw);
4821
4822
4823 ret_val = mac->ops.id_led_init(hw);
4824
4825 if (ret_val)
4826 e_dbg("Error initializing identification LED\n");
4827
4828
4829 e1000e_init_rx_addrs(hw, mac->rar_entry_count);
4830
4831
4832 e_dbg("Zeroing the MTA\n");
4833 for (i = 0; i < mac->mta_reg_count; i++)
4834 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
4835
4836
4837
4838
4839
4840 if (hw->phy.type == e1000_phy_82578) {
4841 e1e_rphy(hw, BM_PORT_GEN_CFG, &i);
4842 i &= ~BM_WUC_HOST_WU_BIT;
4843 e1e_wphy(hw, BM_PORT_GEN_CFG, i);
4844 ret_val = e1000_phy_hw_reset_ich8lan(hw);
4845 if (ret_val)
4846 return ret_val;
4847 }
4848
4849
4850 ret_val = mac->ops.setup_link(hw);
4851
4852
4853 txdctl = er32(TXDCTL(0));
4854 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4855 E1000_TXDCTL_FULL_TX_DESC_WB);
4856 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4857 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4858 ew32(TXDCTL(0), txdctl);
4859 txdctl = er32(TXDCTL(1));
4860 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4861 E1000_TXDCTL_FULL_TX_DESC_WB);
4862 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4863 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4864 ew32(TXDCTL(1), txdctl);
4865
4866
4867
4868
4869 if (mac->type == e1000_ich8lan)
4870 snoop = PCIE_ICH8_SNOOP_ALL;
4871 else
4872 snoop = (u32)~(PCIE_NO_SNOOP_ALL);
4873 e1000e_set_pcie_no_snoop(hw, snoop);
4874
4875
4876
4877
4878 if (mac->type >= e1000_pch_tgp) {
4879 fflt_dbg = er32(FFLT_DBG);
4880 fflt_dbg |= E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK;
4881 ew32(FFLT_DBG, fflt_dbg);
4882 }
4883
4884 ctrl_ext = er32(CTRL_EXT);
4885 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
4886 ew32(CTRL_EXT, ctrl_ext);
4887
4888
4889
4890
4891
4892
4893 e1000_clear_hw_cntrs_ich8lan(hw);
4894
4895 return ret_val;
4896}
4897
4898
4899
4900
4901
4902
4903
4904
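/**
 *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
 *  @hw: pointer to the HW structure
 *
 *  Sets/Clears required hardware bits necessary for correctly setting up
 *  the hardware for transmit and receive.
 **/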
4905static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4906{
4907 u32 reg;
4908
4909
4910 reg = er32(CTRL_EXT);
4911 reg |= BIT(22);
4912
4913 if (hw->mac.type >= e1000_pchlan)
4914 reg |= E1000_CTRL_EXT_PHYPDEN;
4915 ew32(CTRL_EXT, reg);
4916
4917
4918 reg = er32(TXDCTL(0));
4919 reg |= BIT(22);
4920 ew32(TXDCTL(0), reg);
4921
4922
4923 reg = er32(TXDCTL(1));
4924 reg |= BIT(22);
4925 ew32(TXDCTL(1), reg);
4926
4927
4928 reg = er32(TARC(0));
4929 if (hw->mac.type == e1000_ich8lan)
4930 reg |= BIT(28) | BIT(29);
4931 reg |= BIT(23) | BIT(24) | BIT(26) | BIT(27);
4932 ew32(TARC(0), reg);
4933
4934
4935 reg = er32(TARC(1));
4936 if (er32(TCTL) & E1000_TCTL_MULR)
4937 reg &= ~BIT(28);
4938 else
4939 reg |= BIT(28);
4940 reg |= BIT(24) | BIT(26) | BIT(30);
4941 ew32(TARC(1), reg);
4942
4943
4944 if (hw->mac.type == e1000_ich8lan) {
4945 reg = er32(STATUS);
4946 reg &= ~BIT(31);
4947 ew32(STATUS, reg);
4948 }
4949
4950
4951
4952
4953 reg = er32(RFCTL);
4954 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4955
4956
4957
4958
4959 if (hw->mac.type == e1000_ich8lan)
4960 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4961 ew32(RFCTL, reg);
4962
4963
4964 if (hw->mac.type >= e1000_pch_lpt) {
4965 reg = er32(PBECCSTS);
4966 reg |= E1000_PBECCSTS_ECC_ENABLE;
4967 ew32(PBECCSTS, reg);
4968
4969 reg = er32(CTRL);
4970 reg |= E1000_CTRL_MEHE;
4971 ew32(CTRL, reg);
4972 }
4973}
4974
4975
4976
4977
4978
4979
4980
4981
4982
4983
4984
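/**
 *  e1000_setup_link_ich8lan - Setup flow control and link settings
 *  @hw: pointer to the HW structure
 *
 *  Determines which flow control settings to use, then configures flow
 *  control and calls the media-specific link configuration function.
 *  Assumes the hardware has previously been reset and the transmitter
 *  and receiver are not enabled.
 **/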
4985static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4986{
4987 s32 ret_val;
4988
4989 if (hw->phy.ops.check_reset_block(hw))
4990 return 0;
4991
4992
4993
4994
4995
4996 if (hw->fc.requested_mode == e1000_fc_default) {
4997
4998 if (hw->mac.type == e1000_pchlan)
4999 hw->fc.requested_mode = e1000_fc_rx_pause;
5000 else
5001 hw->fc.requested_mode = e1000_fc_full;
5002 }
5003
5004
5005
5006
5007 hw->fc.current_mode = hw->fc.requested_mode;
5008
5009 e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
5010
5011
5012 ret_val = hw->mac.ops.setup_physical_interface(hw);
5013 if (ret_val)
5014 return ret_val;
5015
5016 ew32(FCTTV, hw->fc.pause_time);
5017 if ((hw->phy.type == e1000_phy_82578) ||
5018 (hw->phy.type == e1000_phy_82579) ||
5019 (hw->phy.type == e1000_phy_i217) ||
5020 (hw->phy.type == e1000_phy_82577)) {
5021 ew32(FCRTV_PCH, hw->fc.refresh_time);
5022
5023 ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
5024 hw->fc.pause_time);
5025 if (ret_val)
5026 return ret_val;
5027 }
5028
5029 return e1000e_set_fc_watermarks(hw);
5030}
5031
5032
5033
5034
5035
5036
5037
5038
5039
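/**
 *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
 *  @hw: pointer to the HW structure
 *
 *  Configures the Kumeran interface to the PHY to wait the appropriate time
 *  when polling the PHY, then calls the generic setup_copper_link to finish
 *  configuring the copper link.
 **/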
5040static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
5041{
5042 u32 ctrl;
5043 s32 ret_val;
5044 u16 reg_data;
5045
5046 ctrl = er32(CTRL);
5047 ctrl |= E1000_CTRL_SLU;
5048 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5049 ew32(CTRL, ctrl);
5050
5051
5052
5053
5054
5055 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
5056 if (ret_val)
5057 return ret_val;
5058 ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
5059 &reg_data);
5060 if (ret_val)
5061 return ret_val;
5062 reg_data |= 0x3F;
5063 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
5064 reg_data);
5065 if (ret_val)
5066 return ret_val;
5067
5068 switch (hw->phy.type) {
5069 case e1000_phy_igp_3:
5070 ret_val = e1000e_copper_link_setup_igp(hw);
5071 if (ret_val)
5072 return ret_val;
5073 break;
5074 case e1000_phy_bm:
5075 case e1000_phy_82578:
5076 ret_val = e1000e_copper_link_setup_m88(hw);
5077 if (ret_val)
5078 return ret_val;
5079 break;
5080 case e1000_phy_82577:
5081 case e1000_phy_82579:
5082 ret_val = e1000_copper_link_setup_82577(hw);
5083 if (ret_val)
5084 return ret_val;
5085 break;
5086 case e1000_phy_ife:
5087 ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
5088 if (ret_val)
5089 return ret_val;
5090
5091 reg_data &= ~IFE_PMC_AUTO_MDIX;
5092
5093 switch (hw->phy.mdix) {
5094 case 1:
5095 reg_data &= ~IFE_PMC_FORCE_MDIX;
5096 break;
5097 case 2:
5098 reg_data |= IFE_PMC_FORCE_MDIX;
5099 break;
5100 case 0:
5101 default:
5102 reg_data |= IFE_PMC_AUTO_MDIX;
5103 break;
5104 }
5105 ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
5106 if (ret_val)
5107 return ret_val;
5108 break;
5109 default:
5110 break;
5111 }
5112
5113 return e1000e_setup_copper_link(hw);
5114}
5115
5116
5117
5118
5119
5120
5121
5122
5123
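/**
 *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
 *  @hw: pointer to the HW structure
 *
 *  Calls the PHY specific link setup function and then calls the
 *  generic setup_copper_link to finish configuring the link for
 *  Lynxpoint PCH devices.
 **/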
5124static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
5125{
5126 u32 ctrl;
5127 s32 ret_val;
5128
5129 ctrl = er32(CTRL);
5130 ctrl |= E1000_CTRL_SLU;
5131 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5132 ew32(CTRL, ctrl);
5133
5134 ret_val = e1000_copper_link_setup_82577(hw);
5135 if (ret_val)
5136 return ret_val;
5137
5138 return e1000e_setup_copper_link(hw);
5139}
5140
5141
5142
5143
5144
5145
5146
5147
5148
5149
5150
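/**
 *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
 *  @hw: pointer to the HW structure
 *  @speed: pointer to store current link speed
 *  @duplex: pointer to store the current link duplex
 *
 *  Calls the generic get_speed_and_duplex to retrieve the current link
 *  information and then calls the Kumeran lock loss workaround for links
 *  at gigabit speeds on ICH8.
 **/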
5151static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
5152 u16 *duplex)
5153{
5154 s32 ret_val;
5155
5156 ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
5157 if (ret_val)
5158 return ret_val;
5159
5160 if ((hw->mac.type == e1000_ich8lan) &&
5161 (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
5162 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
5163 }
5164
5165 return ret_val;
5166}
5167
5168
5169
5170
5171
5172
5173
5174
5175
5176
5177
5178
5179
5180
5181
5182
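/**
 *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
 *  @hw: pointer to the HW structure
 *
 *  Work-around for ICH8 Kumeran PCS lock loss:
 *    1) check the Kumeran diagnostic register PCS lock loss bit
 *    2) if not set, the link is locked (all is good), otherwise reset the
 *       PHY and repeat up to 10 times
 *  If still not locked, disable gigabit and return an error so the caller
 *  resets the MAC/PHY.
 **/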
5183static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
5184{
5185 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5186 u32 phy_ctrl;
5187 s32 ret_val;
5188 u16 i, data;
5189 bool link;
5190
5191 if (!dev_spec->kmrn_lock_loss_workaround_enabled)
5192 return 0;
5193
5194
5195
5196
5197
5198 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
5199 if (!link)
5200 return 0;
5201
5202 for (i = 0; i < 10; i++) {
5203
5204 ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
5205 if (ret_val)
5206 return ret_val;
5207
5208 ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
5209 if (ret_val)
5210 return ret_val;
5211
5212
5213 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
5214 return 0;
5215
5216
5217 e1000_phy_hw_reset(hw);
5218 mdelay(5);
5219 }
5220
5221 phy_ctrl = er32(PHY_CTRL);
5222 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
5223 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5224 ew32(PHY_CTRL, phy_ctrl);
5225
5226
5227
5228
5229 e1000e_gig_downshift_workaround_ich8lan(hw);
5230
5231
5232 return -E1000_ERR_PHY;
5233}
5234
5235
5236
5237
5238
5239
5240
5241
5242
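/**
 *  e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
 *  @hw: pointer to the HW structure
 *  @state: boolean value used to set the current Kumeran workaround state
 *
 *  If ICH8, set the current Kumeran workaround state (enabled - true
 *  /disabled - false).
 **/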
5243void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5244 bool state)
5245{
5246 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5247
5248 if (hw->mac.type != e1000_ich8lan) {
5249 e_dbg("Workaround applies to ICH8 only.\n");
5250 return;
5251 }
5252
5253 dev_spec->kmrn_lock_loss_workaround_enabled = state;
5254}
5255
5256
5257
5258
5259
5260
5261
5262
5263
5264
5265
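/**
 *  e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
 *  @hw: pointer to the HW structure
 *
 *  Workaround for IGP3 power-down on D3 entry:
 *    1) disable gigabit link
 *    2) write the VR power-down enable bit
 *    3) read it back; continue if successful, else issue an LCD reset
 *       and retry once
 **/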
5266void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
5267{
5268 u32 reg;
5269 u16 data;
5270 u8 retry = 0;
5271
5272 if (hw->phy.type != e1000_phy_igp_3)
5273 return;
5274
5275
5276 do {
5277
5278 reg = er32(PHY_CTRL);
5279 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
5280 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5281 ew32(PHY_CTRL, reg);
5282
5283
5284
5285
5286 if (hw->mac.type == e1000_ich8lan)
5287 e1000e_gig_downshift_workaround_ich8lan(hw);
5288
5289
5290 e1e_rphy(hw, IGP3_VR_CTRL, &data);
5291 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5292 e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN);
5293
5294
5295 e1e_rphy(hw, IGP3_VR_CTRL, &data);
5296 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5297 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
5298 break;
5299
5300
5301 reg = er32(CTRL);
5302 ew32(CTRL, reg | E1000_CTRL_PHY_RST);
5303 retry++;
5304 } while (retry);
5305}
5306
5307
5308
5309
5310
5311
5312
5313
5314
5315
5316
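/**
 *  e1000e_gig_downshift_workaround_ich8lan - WoL from S5 workaround
 *  @hw: pointer to the HW structure
 *
 *  Steps to take when dropping from 1Gb/s (e.g. link cable removal, LPLU,
 *  Gig disable, MDIC PHY reset):
 *    1) set Kumeran near-end loopback
 *    2) clear Kumeran near-end loopback
 *  Should only be called for ICH8[m] devices with a 1G PHY.
 **/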
5317void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
5318{
5319 s32 ret_val;
5320 u16 reg_data;
5321
5322 if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife))
5323 return;
5324
5325 ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5326 &reg_data);
5327 if (ret_val)
5328 return;
5329 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
5330 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5331 reg_data);
5332 if (ret_val)
5333 return;
5334 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
5335 e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data);
5336}
5337
5338
5339
5340
5341
5342
5343
5344
5345
5346
5347
5348
5349
5350
5351
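/**
 *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
 *  @hw: pointer to the HW structure
 *
 *  Before entering a low-power state, force 'Gig Disable' so the link
 *  renegotiates to a lower speed, configure the OEM bits on PCH and newer
 *  parts, and set up the i217 low-power (EEE/proxy/SMBus) state when an
 *  i217 PHY is present.
 **/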
5352void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
5353{
5354 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5355 u32 phy_ctrl;
5356 s32 ret_val;
5357
5358 phy_ctrl = er32(PHY_CTRL);
5359 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
5360
5361 if (hw->phy.type == e1000_phy_i217) {
5362 u16 phy_reg, device_id = hw->adapter->pdev->device;
5363
5364 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
5365 (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
5366 (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
5367 (device_id == E1000_DEV_ID_PCH_I218_V3) ||
5368 (hw->mac.type >= e1000_pch_spt)) {
5369 u32 fextnvm6 = er32(FEXTNVM6);
5370
5371 ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
5372 }
5373
5374 ret_val = hw->phy.ops.acquire(hw);
5375 if (ret_val)
5376 goto out;
5377
5378 if (!dev_spec->eee_disable) {
5379 u16 eee_advert;
5380
5381 ret_val =
5382 e1000_read_emi_reg_locked(hw,
5383 I217_EEE_ADVERTISEMENT,
5384 &eee_advert);
5385 if (ret_val)
5386 goto release;
5387
5388
5389
5390
5391
5392
5393 if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
5394 (dev_spec->eee_lp_ability &
5395 I82579_EEE_100_SUPPORTED) &&
5396 (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
5397 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
5398 E1000_PHY_CTRL_NOND0A_LPLU);
5399
5400
5401 e1e_rphy_locked(hw,
5402 I217_LPI_GPIO_CTRL, &phy_reg);
5403 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5404 e1e_wphy_locked(hw,
5405 I217_LPI_GPIO_CTRL, phy_reg);
5406 }
5407 }
5408
5409
5410
5411
5412
5413
5414
5415
5416 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
5417
5418 e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
5419 phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
5420 e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg);
5421
5422
5423
5424
5425 e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
5426 phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
5427 e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
5428
5429
5430 e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
5431 phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
5432 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
5433 }
5434
5435
5436
5437
5438 e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
5439 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
5440 e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
5441
5442release:
5443 hw->phy.ops.release(hw);
5444 }
5445out:
5446 ew32(PHY_CTRL, phy_ctrl);
5447
5448 if (hw->mac.type == e1000_ich8lan)
5449 e1000e_gig_downshift_workaround_ich8lan(hw);
5450
5451 if (hw->mac.type >= e1000_pchlan) {
5452 e1000_oem_bits_config_ich8lan(hw, false);
5453
5454
5455 if (hw->mac.type == e1000_pchlan)
5456 e1000e_phy_hw_reset_generic(hw);
5457
5458 ret_val = hw->phy.ops.acquire(hw);
5459 if (ret_val)
5460 return;
5461 e1000_write_smbus_addr(hw);
5462 hw->phy.ops.release(hw);
5463 }
5464}
5465
5466
5467
5468
5469
5470
5471
5472
5473
5474
5475
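/**
 *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
 *  @hw: pointer to the HW structure
 *
 *  During Sx to S0 transitions on non-managed devices or managed devices
 *  on which PHY resets are not blocked, if the PHY registers cannot be
 *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
 *  the PHY.
 *  On i217, set up Intel Rapid Start Technology.
 **/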
5476void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5477{
5478 s32 ret_val;
5479
5480 if (hw->mac.type < e1000_pch2lan)
5481 return;
5482
5483 ret_val = e1000_init_phy_workarounds_pchlan(hw);
5484 if (ret_val) {
5485 e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val);
5486 return;
5487 }
5488
5489
5490
5491
5492
5493
5494 if (hw->phy.type == e1000_phy_i217) {
5495 u16 phy_reg;
5496
5497 ret_val = hw->phy.ops.acquire(hw);
5498 if (ret_val) {
5499 e_dbg("Failed to setup iRST\n");
5500 return;
5501 }
5502
5503
5504 e1e_rphy_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5505 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5506 e1e_wphy_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5507
5508 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
5509
5510
5511
5512 ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
5513 if (ret_val)
5514 goto release;
5515 phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5516 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
5517
5518
5519 e1e_wphy_locked(hw, I217_PROXY_CTRL, 0);
5520 }
5521
5522 ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
5523 if (ret_val)
5524 goto release;
5525 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5526 e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
5527release:
5528 if (ret_val)
5529 e_dbg("Error %d in resume workarounds\n", ret_val);
5530 hw->phy.ops.release(hw);
5531 }
5532}
5533
5534
5535
5536
5537
5538
5539
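/**
 *  e1000_cleanup_led_ich8lan - Restore the default LED operation
 *  @hw: pointer to the HW structure
 *
 *  Return the LED back to the default configuration.
 **/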
5540static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5541{
5542 if (hw->phy.type == e1000_phy_ife)
5543 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
5544
5545 ew32(LEDCTL, hw->mac.ledctl_default);
5546 return 0;
5547}
5548
5549
5550
5551
5552
5553
5554
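/**
 *  e1000_led_on_ich8lan - Turn LEDs on
 *  @hw: pointer to the HW structure
 *
 *  Turn on the LEDs.
 **/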
5555static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5556{
5557 if (hw->phy.type == e1000_phy_ife)
5558 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5559 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5560
5561 ew32(LEDCTL, hw->mac.ledctl_mode2);
5562 return 0;
5563}
5564
5565
5566
5567
5568
5569
5570
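/**
 *  e1000_led_off_ich8lan - Turn LEDs off
 *  @hw: pointer to the HW structure
 *
 *  Turn off the LEDs.
 **/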
5571static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5572{
5573 if (hw->phy.type == e1000_phy_ife)
5574 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5575 (IFE_PSCL_PROBE_MODE |
5576 IFE_PSCL_PROBE_LEDS_OFF));
5577
5578 ew32(LEDCTL, hw->mac.ledctl_mode1);
5579 return 0;
5580}
5581
5582
5583
5584
5585
5586
5587
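/**
 *  e1000_setup_led_pchlan - Configures SW controllable LED
 *  @hw: pointer to the HW structure
 *
 *  This prepares the SW controllable LED for use.
 **/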
5588static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5589{
5590 return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
5591}
5592
5593
5594
5595
5596
5597
5598
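/**
 *  e1000_cleanup_led_pchlan - Restore the default LED operation
 *  @hw: pointer to the HW structure
 *
 *  Return the LED back to the default configuration.
 **/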
5599static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5600{
5601 return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
5602}
5603
5604
5605
5606
5607
5608
5609
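/**
 *  e1000_led_on_pchlan - Turn LEDs on
 *  @hw: pointer to the HW structure
 *
 *  Turn on the LEDs.
 **/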
5610static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5611{
5612 u16 data = (u16)hw->mac.ledctl_mode2;
5613 u32 i, led;
5614
5615
5616
5617
5618 if (!(er32(STATUS) & E1000_STATUS_LU)) {
5619 for (i = 0; i < 3; i++) {
5620 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5621 if ((led & E1000_PHY_LED0_MODE_MASK) !=
5622 E1000_LEDCTL_MODE_LINK_UP)
5623 continue;
5624 if (led & E1000_PHY_LED0_IVRT)
5625 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5626 else
5627 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5628 }
5629 }
5630
5631 return e1e_wphy(hw, HV_LED_CONFIG, data);
5632}
5633
5634
5635
5636
5637
5638
5639
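/**
 *  e1000_led_off_pchlan - Turn LEDs off
 *  @hw: pointer to the HW structure
 *
 *  Turn off the LEDs.
 **/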
5640static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5641{
5642 u16 data = (u16)hw->mac.ledctl_mode1;
5643 u32 i, led;
5644
5645
5646
5647
5648 if (!(er32(STATUS) & E1000_STATUS_LU)) {
5649 for (i = 0; i < 3; i++) {
5650 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5651 if ((led & E1000_PHY_LED0_MODE_MASK) !=
5652 E1000_LEDCTL_MODE_LINK_UP)
5653 continue;
5654 if (led & E1000_PHY_LED0_IVRT)
5655 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5656 else
5657 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5658 }
5659 }
5660
5661 return e1e_wphy(hw, HV_LED_CONFIG, data);
5662}
5663
5664
5665
5666
5667
5668
5669
5670
5671
5672
5673
5674
5675
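/**
 *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
 *  @hw: pointer to the HW structure
 *
 *  Read appropriate register for the config done bit for completion status
 *  and configure the PHY through s/w for EEPROM-less parts.
 *
 *  NOTE: some silicon which is EEPROM-less will fail trying to read the
 *  config done bit, so only an error is logged and the function continues.
 *  If we were to return with an error, EEPROM-less silicon would not be
 *  able to be reset or change link.
 **/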
5676static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5677{
5678 s32 ret_val = 0;
5679 u32 bank = 0;
5680 u32 status;
5681
5682 e1000e_get_cfg_done_generic(hw);
5683
5684
5685 if (hw->mac.type >= e1000_ich10lan) {
5686 e1000_lan_init_done_ich8lan(hw);
5687 } else {
5688 ret_val = e1000e_get_auto_rd_done(hw);
5689 if (ret_val) {
5690
5691
5692
5693
5694 e_dbg("Auto Read Done did not complete\n");
5695 ret_val = 0;
5696 }
5697 }
5698
5699
5700 status = er32(STATUS);
5701 if (status & E1000_STATUS_PHYRA)
5702 ew32(STATUS, status & ~E1000_STATUS_PHYRA);
5703 else
5704 e_dbg("PHY Reset Asserted not set - needs delay\n");
5705
5706
5707 if (hw->mac.type <= e1000_ich9lan) {
5708 if (!(er32(EECD) & E1000_EECD_PRES) &&
5709 (hw->phy.type == e1000_phy_igp_3)) {
5710 e1000e_phy_init_script_igp3(hw);
5711 }
5712 } else {
5713 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
5714
5715 e_dbg("EEPROM not present\n");
5716 ret_val = -E1000_ERR_CONFIG;
5717 }
5718 }
5719
5720 return ret_val;
5721}
5722
5723
5724
5725
5726
5727
5728
5729
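/**
 *  e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
 *  @hw: pointer to the HW structure
 *
 *  In the case of a PHY power down to save power, or to turn off link during
 *  a driver unload, remove the link unless manageability is enabled or a
 *  PHY reset is blocked.
 **/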
5730static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5731{
5732
5733 if (!(hw->mac.ops.check_mng_mode(hw) ||
5734 hw->phy.ops.check_reset_block(hw)))
5735 e1000_power_down_phy_copper(hw);
5736}
5737
5738
5739
5740
5741
5742
5743
5744
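/**
 *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
 *  @hw: pointer to the HW structure
 *
 *  Clears hardware counters specific to the silicon family and calls
 *  clear_hw_cntrs_generic to clear all general purpose counters.
 **/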
5745static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
5746{
5747 u16 phy_data;
5748 s32 ret_val;
5749
5750 e1000e_clear_hw_cntrs_base(hw);
5751
5752 er32(ALGNERRC);
5753 er32(RXERRC);
5754 er32(TNCRS);
5755 er32(CEXTERR);
5756 er32(TSCTC);
5757 er32(TSCTFC);
5758
5759 er32(MGTPRC);
5760 er32(MGTPDC);
5761 er32(MGTPTC);
5762
5763 er32(IAC);
5764 er32(ICRXOC);
5765
5766
5767 if ((hw->phy.type == e1000_phy_82578) ||
5768 (hw->phy.type == e1000_phy_82579) ||
5769 (hw->phy.type == e1000_phy_i217) ||
5770 (hw->phy.type == e1000_phy_82577)) {
5771 ret_val = hw->phy.ops.acquire(hw);
5772 if (ret_val)
5773 return;
5774 ret_val = hw->phy.ops.set_page(hw,
5775 HV_STATS_PAGE << IGP_PAGE_SHIFT);
5776 if (ret_val)
5777 goto release;
5778 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
5779 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
5780 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
5781 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
5782 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
5783 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
5784 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
5785 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
5786 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
5787 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
5788 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
5789 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
5790 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
5791 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
5792release:
5793 hw->phy.ops.release(hw);
5794 }
5795}
5796
5797static const struct e1000_mac_operations ich8_mac_ops = {
5798
5799 .check_for_link = e1000_check_for_copper_link_ich8lan,
5800
5801 .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
5802 .get_bus_info = e1000_get_bus_info_ich8lan,
5803 .set_lan_id = e1000_set_lan_id_single_port,
5804 .get_link_up_info = e1000_get_link_up_info_ich8lan,
5805
5806
5807 .update_mc_addr_list = e1000e_update_mc_addr_list_generic,
5808 .reset_hw = e1000_reset_hw_ich8lan,
5809 .init_hw = e1000_init_hw_ich8lan,
5810 .setup_link = e1000_setup_link_ich8lan,
5811 .setup_physical_interface = e1000_setup_copper_link_ich8lan,
5812
5813 .config_collision_dist = e1000e_config_collision_dist_generic,
5814 .rar_set = e1000e_rar_set_generic,
5815 .rar_get_count = e1000e_rar_get_count_generic,
5816};
5817
5818static const struct e1000_phy_operations ich8_phy_ops = {
5819 .acquire = e1000_acquire_swflag_ich8lan,
5820 .check_reset_block = e1000_check_reset_block_ich8lan,
5821 .commit = NULL,
5822 .get_cfg_done = e1000_get_cfg_done_ich8lan,
5823 .get_cable_length = e1000e_get_cable_length_igp_2,
5824 .read_reg = e1000e_read_phy_reg_igp,
5825 .release = e1000_release_swflag_ich8lan,
5826 .reset = e1000_phy_hw_reset_ich8lan,
5827 .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan,
5828 .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan,
5829 .write_reg = e1000e_write_phy_reg_igp,
5830};
5831
5832static const struct e1000_nvm_operations ich8_nvm_ops = {
5833 .acquire = e1000_acquire_nvm_ich8lan,
5834 .read = e1000_read_nvm_ich8lan,
5835 .release = e1000_release_nvm_ich8lan,
5836 .reload = e1000e_reload_nvm_generic,
5837 .update = e1000_update_nvm_checksum_ich8lan,
5838 .valid_led_default = e1000_valid_led_default_ich8lan,
5839 .validate = e1000_validate_nvm_checksum_ich8lan,
5840 .write = e1000_write_nvm_ich8lan,
5841};
5842
5843static const struct e1000_nvm_operations spt_nvm_ops = {
5844 .acquire = e1000_acquire_nvm_ich8lan,
5845 .release = e1000_release_nvm_ich8lan,
5846 .read = e1000_read_nvm_spt,
5847 .update = e1000_update_nvm_checksum_spt,
5848 .reload = e1000e_reload_nvm_generic,
5849 .valid_led_default = e1000_valid_led_default_ich8lan,
5850 .validate = e1000_validate_nvm_checksum_ich8lan,
5851 .write = e1000_write_nvm_ich8lan,
5852};
5853
5854const struct e1000_info e1000_ich8_info = {
5855 .mac = e1000_ich8lan,
5856 .flags = FLAG_HAS_WOL
5857 | FLAG_IS_ICH
5858 | FLAG_HAS_CTRLEXT_ON_LOAD
5859 | FLAG_HAS_AMT
5860 | FLAG_HAS_FLASH
5861 | FLAG_APME_IN_WUC,
5862 .pba = 8,
5863 .max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN,
5864 .get_variants = e1000_get_variants_ich8lan,
5865 .mac_ops = &ich8_mac_ops,
5866 .phy_ops = &ich8_phy_ops,
5867 .nvm_ops = &ich8_nvm_ops,
5868};
5869
5870const struct e1000_info e1000_ich9_info = {
5871 .mac = e1000_ich9lan,
5872 .flags = FLAG_HAS_JUMBO_FRAMES
5873 | FLAG_IS_ICH
5874 | FLAG_HAS_WOL
5875 | FLAG_HAS_CTRLEXT_ON_LOAD
5876 | FLAG_HAS_AMT
5877 | FLAG_HAS_FLASH
5878 | FLAG_APME_IN_WUC,
5879 .pba = 18,
5880 .max_hw_frame_size = DEFAULT_JUMBO,
5881 .get_variants = e1000_get_variants_ich8lan,
5882 .mac_ops = &ich8_mac_ops,
5883 .phy_ops = &ich8_phy_ops,
5884 .nvm_ops = &ich8_nvm_ops,
5885};
5886
5887const struct e1000_info e1000_ich10_info = {
5888 .mac = e1000_ich10lan,
5889 .flags = FLAG_HAS_JUMBO_FRAMES
5890 | FLAG_IS_ICH
5891 | FLAG_HAS_WOL
5892 | FLAG_HAS_CTRLEXT_ON_LOAD
5893 | FLAG_HAS_AMT
5894 | FLAG_HAS_FLASH
5895 | FLAG_APME_IN_WUC,
5896 .pba = 18,
5897 .max_hw_frame_size = DEFAULT_JUMBO,
5898 .get_variants = e1000_get_variants_ich8lan,
5899 .mac_ops = &ich8_mac_ops,
5900 .phy_ops = &ich8_phy_ops,
5901 .nvm_ops = &ich8_nvm_ops,
5902};
5903
5904const struct e1000_info e1000_pch_info = {
5905 .mac = e1000_pchlan,
5906 .flags = FLAG_IS_ICH
5907 | FLAG_HAS_WOL
5908 | FLAG_HAS_CTRLEXT_ON_LOAD
5909 | FLAG_HAS_AMT
5910 | FLAG_HAS_FLASH
5911 | FLAG_HAS_JUMBO_FRAMES
5912 | FLAG_DISABLE_FC_PAUSE_TIME
5913 | FLAG_APME_IN_WUC,
5914 .flags2 = FLAG2_HAS_PHY_STATS,
5915 .pba = 26,
5916 .max_hw_frame_size = 4096,
5917 .get_variants = e1000_get_variants_ich8lan,
5918 .mac_ops = &ich8_mac_ops,
5919 .phy_ops = &ich8_phy_ops,
5920 .nvm_ops = &ich8_nvm_ops,
5921};
5922
5923const struct e1000_info e1000_pch2_info = {
5924 .mac = e1000_pch2lan,
5925 .flags = FLAG_IS_ICH
5926 | FLAG_HAS_WOL
5927 | FLAG_HAS_HW_TIMESTAMP
5928 | FLAG_HAS_CTRLEXT_ON_LOAD
5929 | FLAG_HAS_AMT
5930 | FLAG_HAS_FLASH
5931 | FLAG_HAS_JUMBO_FRAMES
5932 | FLAG_APME_IN_WUC,
5933 .flags2 = FLAG2_HAS_PHY_STATS
5934 | FLAG2_HAS_EEE
5935 | FLAG2_CHECK_SYSTIM_OVERFLOW,
5936 .pba = 26,
5937 .max_hw_frame_size = 9022,
5938 .get_variants = e1000_get_variants_ich8lan,
5939 .mac_ops = &ich8_mac_ops,
5940 .phy_ops = &ich8_phy_ops,
5941 .nvm_ops = &ich8_nvm_ops,
5942};
5943
5944const struct e1000_info e1000_pch_lpt_info = {
5945 .mac = e1000_pch_lpt,
5946 .flags = FLAG_IS_ICH
5947 | FLAG_HAS_WOL
5948 | FLAG_HAS_HW_TIMESTAMP
5949 | FLAG_HAS_CTRLEXT_ON_LOAD
5950 | FLAG_HAS_AMT
5951 | FLAG_HAS_FLASH
5952 | FLAG_HAS_JUMBO_FRAMES
5953 | FLAG_APME_IN_WUC,
5954 .flags2 = FLAG2_HAS_PHY_STATS
5955 | FLAG2_HAS_EEE
5956 | FLAG2_CHECK_SYSTIM_OVERFLOW,
5957 .pba = 26,
5958 .max_hw_frame_size = 9022,
5959 .get_variants = e1000_get_variants_ich8lan,
5960 .mac_ops = &ich8_mac_ops,
5961 .phy_ops = &ich8_phy_ops,
5962 .nvm_ops = &ich8_nvm_ops,
5963};
5964
5965const struct e1000_info e1000_pch_spt_info = {
5966 .mac = e1000_pch_spt,
5967 .flags = FLAG_IS_ICH
5968 | FLAG_HAS_WOL
5969 | FLAG_HAS_HW_TIMESTAMP
5970 | FLAG_HAS_CTRLEXT_ON_LOAD
5971 | FLAG_HAS_AMT
5972 | FLAG_HAS_FLASH
5973 | FLAG_HAS_JUMBO_FRAMES
5974 | FLAG_APME_IN_WUC,
5975 .flags2 = FLAG2_HAS_PHY_STATS
5976 | FLAG2_HAS_EEE,
5977 .pba = 26,
5978 .max_hw_frame_size = 9022,
5979 .get_variants = e1000_get_variants_ich8lan,
5980 .mac_ops = &ich8_mac_ops,
5981 .phy_ops = &ich8_phy_ops,
5982 .nvm_ops = &spt_nvm_ops,
5983};
5984
5985const struct e1000_info e1000_pch_cnp_info = {
5986 .mac = e1000_pch_cnp,
5987 .flags = FLAG_IS_ICH
5988 | FLAG_HAS_WOL
5989 | FLAG_HAS_HW_TIMESTAMP
5990 | FLAG_HAS_CTRLEXT_ON_LOAD
5991 | FLAG_HAS_AMT
5992 | FLAG_HAS_FLASH
5993 | FLAG_HAS_JUMBO_FRAMES
5994 | FLAG_APME_IN_WUC,
5995 .flags2 = FLAG2_HAS_PHY_STATS
5996 | FLAG2_HAS_EEE,
5997 .pba = 26,
5998 .max_hw_frame_size = 9022,
5999 .get_variants = e1000_get_variants_ich8lan,
6000 .mac_ops = &ich8_mac_ops,
6001 .phy_ops = &ich8_phy_ops,
6002 .nvm_ops = &spt_nvm_ops,
6003};
6004
6005const struct e1000_info e1000_pch_tgp_info = {
6006 .mac = e1000_pch_tgp,
6007 .flags = FLAG_IS_ICH
6008 | FLAG_HAS_WOL
6009 | FLAG_HAS_HW_TIMESTAMP
6010 | FLAG_HAS_CTRLEXT_ON_LOAD
6011 | FLAG_HAS_AMT
6012 | FLAG_HAS_FLASH
6013 | FLAG_HAS_JUMBO_FRAMES
6014 | FLAG_APME_IN_WUC,
6015 .flags2 = FLAG2_HAS_PHY_STATS
6016 | FLAG2_HAS_EEE,
6017 .pba = 26,
6018 .max_hw_frame_size = 9022,
6019 .get_variants = e1000_get_variants_ich8lan,
6020 .mac_ops = &ich8_mac_ops,
6021 .phy_ops = &ich8_phy_ops,
6022 .nvm_ops = &spt_nvm_ops,
6023};
6024