1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <linux/if_ether.h>
29#include <linux/delay.h>
30
31#include "e1000_mac.h"
32#include "e1000_nvm.h"
33
34
35
36
37
38
39
40
/**
 *  igb_raise_eec_clk - Raise EEPROM clock
 *  @hw: pointer to the HW structure
 *  @eecd: pointer to the software-cached EECD register value
 *
 *  Raise the EEPROM's clock (SK) bit, flush the write with a register
 *  readback, and wait the NVM-specified delay so the part sees a clean
 *  clock edge.  The updated EECD value is written back through @eecd.
 **/
static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
{
	*eecd = *eecd | E1000_EECD_SK;
	wr32(E1000_EECD, *eecd);
	wrfl();
	udelay(hw->nvm.delay_usec);
}
48
49
50
51
52
53
54
55
/**
 *  igb_lower_eec_clk - Lower EEPROM clock
 *  @hw: pointer to the HW structure
 *  @eecd: pointer to the software-cached EECD register value
 *
 *  Clear the EEPROM's clock (SK) bit, flush the write with a register
 *  readback, and wait the NVM-specified delay to complete the falling
 *  clock edge.  The updated EECD value is written back through @eecd.
 **/
static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
{
	*eecd = *eecd & ~E1000_EECD_SK;
	wr32(E1000_EECD, *eecd);
	wrfl();
	udelay(hw->nvm.delay_usec);
}
63
64
65
66
67
68
69
70
71
72
73
74static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
75{
76 struct e1000_nvm_info *nvm = &hw->nvm;
77 u32 eecd = rd32(E1000_EECD);
78 u32 mask;
79
80 mask = 0x01 << (count - 1);
81 if (nvm->type == e1000_nvm_eeprom_spi)
82 eecd |= E1000_EECD_DO;
83
84 do {
85 eecd &= ~E1000_EECD_DI;
86
87 if (data & mask)
88 eecd |= E1000_EECD_DI;
89
90 wr32(E1000_EECD, eecd);
91 wrfl();
92
93 udelay(nvm->delay_usec);
94
95 igb_raise_eec_clk(hw, &eecd);
96 igb_lower_eec_clk(hw, &eecd);
97
98 mask >>= 1;
99 } while (mask);
100
101 eecd &= ~E1000_EECD_DI;
102 wr32(E1000_EECD, eecd);
103}
104
105
106
107
108
109
110
111
112
113
114
115
116static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
117{
118 u32 eecd;
119 u32 i;
120 u16 data;
121
122 eecd = rd32(E1000_EECD);
123
124 eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
125 data = 0;
126
127 for (i = 0; i < count; i++) {
128 data <<= 1;
129 igb_raise_eec_clk(hw, &eecd);
130
131 eecd = rd32(E1000_EECD);
132
133 eecd &= ~E1000_EECD_DI;
134 if (eecd & E1000_EECD_DO)
135 data |= 1;
136
137 igb_lower_eec_clk(hw, &eecd);
138 }
139
140 return data;
141}
142
143
144
145
146
147
148
149
150
151static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
152{
153 u32 attempts = 100000;
154 u32 i, reg = 0;
155 s32 ret_val = -E1000_ERR_NVM;
156
157 for (i = 0; i < attempts; i++) {
158 if (ee_reg == E1000_NVM_POLL_READ)
159 reg = rd32(E1000_EERD);
160 else
161 reg = rd32(E1000_EEWR);
162
163 if (reg & E1000_NVM_RW_REG_DONE) {
164 ret_val = 0;
165 break;
166 }
167
168 udelay(5);
169 }
170
171 return ret_val;
172}
173
174
175
176
177
178
179
180
181
182s32 igb_acquire_nvm(struct e1000_hw *hw)
183{
184 u32 eecd = rd32(E1000_EECD);
185 s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
186 s32 ret_val = 0;
187
188
189 wr32(E1000_EECD, eecd | E1000_EECD_REQ);
190 eecd = rd32(E1000_EECD);
191
192 while (timeout) {
193 if (eecd & E1000_EECD_GNT)
194 break;
195 udelay(5);
196 eecd = rd32(E1000_EECD);
197 timeout--;
198 }
199
200 if (!timeout) {
201 eecd &= ~E1000_EECD_REQ;
202 wr32(E1000_EECD, eecd);
203 hw_dbg("Could not acquire NVM grant\n");
204 ret_val = -E1000_ERR_NVM;
205 }
206
207 return ret_val;
208}
209
210
211
212
213
214
215
/**
 *  igb_standby_nvm - Return EEPROM to standby state
 *  @hw: pointer to the HW structure
 *
 *  For SPI EEPROMs, returns the device to standby by pulsing chip select:
 *  CS is driven high and then low again, with the NVM delay after each
 *  edge so the part registers a clean deselect/reselect.  No-op for other
 *  NVM types.
 **/
static void igb_standby_nvm(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = rd32(E1000_EECD);

	if (nvm->type == e1000_nvm_eeprom_spi) {
		/* Toggle CS to flush commands */
		eecd |= E1000_EECD_CS;
		wr32(E1000_EECD, eecd);
		wrfl();
		udelay(nvm->delay_usec);
		eecd &= ~E1000_EECD_CS;
		wr32(E1000_EECD, eecd);
		wrfl();
		udelay(nvm->delay_usec);
	}
}
233
234
235
236
237
238
239
/**
 *  e1000_stop_nvm - Terminate EEPROM command
 *  @hw: pointer to the HW structure
 *
 *  Terminates the current SPI EEPROM command by pulling chip select high
 *  and lowering the clock.  No-op for other NVM types.
 **/
static void e1000_stop_nvm(struct e1000_hw *hw)
{
	u32 eecd;

	eecd = rd32(E1000_EECD);
	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
		/* Pull CS high, then drop SK (write happens in the helper) */
		eecd |= E1000_EECD_CS;
		igb_lower_eec_clk(hw, &eecd);
	}
}
251
252
253
254
255
256
257
258void igb_release_nvm(struct e1000_hw *hw)
259{
260 u32 eecd;
261
262 e1000_stop_nvm(hw);
263
264 eecd = rd32(E1000_EECD);
265 eecd &= ~E1000_EECD_REQ;
266 wr32(E1000_EECD, eecd);
267}
268
269
270
271
272
273
274
275static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
276{
277 struct e1000_nvm_info *nvm = &hw->nvm;
278 u32 eecd = rd32(E1000_EECD);
279 s32 ret_val = 0;
280 u16 timeout = 0;
281 u8 spi_stat_reg;
282
283
284 if (nvm->type == e1000_nvm_eeprom_spi) {
285
286 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
287 wr32(E1000_EECD, eecd);
288 wrfl();
289 udelay(1);
290 timeout = NVM_MAX_RETRY_SPI;
291
292
293
294
295
296
297 while (timeout) {
298 igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
299 hw->nvm.opcode_bits);
300 spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8);
301 if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
302 break;
303
304 udelay(5);
305 igb_standby_nvm(hw);
306 timeout--;
307 }
308
309 if (!timeout) {
310 hw_dbg("SPI NVM Status error\n");
311 ret_val = -E1000_ERR_NVM;
312 goto out;
313 }
314 }
315
316out:
317 return ret_val;
318}
319
320
321
322
323
324
325
326
327
328
/**
 *  igb_read_nvm_spi - Read EEPROM's using SPI
 *  @hw: pointer to the HW structure
 *  @offset: offset of word in the EEPROM to read
 *  @words: number of words to read
 *  @data: word(s) read from the EEPROM
 *
 *  Reads @words 16-bit words starting at word @offset by bit-banging a
 *  SPI READ command.  Acquires the NVM, waits for the device to be ready,
 *  shifts out the opcode and byte address (offset * 2), then clocks in
 *  the data, byte-swapping each word from the device's byte order.
 *  Returns 0 on success or a negative E1000 error code.
 **/
s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i = 0;
	s32 ret_val;
	u16 word_in;
	u8 read_opcode = NVM_READ_OPCODE_SPI;

	/* A check for invalid values:  offset too large, too many words,
	 * and zero words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	ret_val = nvm->ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = igb_ready_nvm_eeprom(hw);
	if (ret_val)
		goto release;

	igb_standby_nvm(hw);

	/* Small (<=256 byte) parts use 8 address bits plus an opcode bit
	 * to select the upper 128 words.
	 */
	if ((nvm->address_bits == 8) && (offset >= 128))
		read_opcode |= NVM_A8_OPCODE_SPI;

	/* Send the READ command (opcode + byte address) */
	igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
	igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);

	/* Read the data.  SPI NVMs increment the address with each byte
	 * read and will roll over if reading beyond the end, so the whole
	 * range can be clocked in with a single command.
	 */
	for (i = 0; i < words; i++) {
		word_in = igb_shift_in_eec_bits(hw, 16);
		data[i] = (word_in >> 8) | (word_in << 8);
	}

release:
	nvm->ops.release(hw);

out:
	return ret_val;
}
379
380
381
382
383
384
385
386
387
388
389s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
390{
391 struct e1000_nvm_info *nvm = &hw->nvm;
392 u32 i, eerd = 0;
393 s32 ret_val = 0;
394
395
396
397
398 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
399 (words == 0)) {
400 hw_dbg("nvm parameter(s) out of bounds\n");
401 ret_val = -E1000_ERR_NVM;
402 goto out;
403 }
404
405 for (i = 0; i < words; i++) {
406 eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
407 E1000_NVM_RW_REG_START;
408
409 wr32(E1000_EERD, eerd);
410 ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
411 if (ret_val)
412 break;
413
414 data[i] = (rd32(E1000_EERD) >>
415 E1000_NVM_RW_REG_DATA);
416 }
417
418out:
419 return ret_val;
420}
421
422
423
424
425
426
427
428
429
430
431
432
433
/**
 *  igb_write_nvm_spi - Write to EEPROM using SPI
 *  @hw: pointer to the HW structure
 *  @offset: offset of the word(s) to write within the EEPROM
 *  @words: number of words to write
 *  @data: word(s) to write to the EEPROM
 *
 *  Writes @words 16-bit words to the EEPROM by bit-banging SPI WRITE
 *  commands, one EEPROM page at a time.  For each page: the NVM is
 *  acquired, the device polled ready, write-enable (WREN) issued, then
 *  the opcode, byte address and byte-swapped data are shifted out until
 *  a page boundary (nvm->page_size bytes) is reached.  The grant is
 *  released between pages so other users are not starved during the
 *  internal write cycle.  Returns 0 on success or a negative E1000
 *  error code.
 **/
s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	s32 ret_val = -E1000_ERR_NVM;
	u16 widx = 0;

	/* A check for invalid values:  offset too large, too many words,
	 * and zero words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg("nvm parameter(s) out of bounds\n");
		return ret_val;
	}

	while (widx < words) {
		u8 write_opcode = NVM_WRITE_OPCODE_SPI;

		ret_val = nvm->ops.acquire(hw);
		if (ret_val)
			return ret_val;

		ret_val = igb_ready_nvm_eeprom(hw);
		if (ret_val) {
			nvm->ops.release(hw);
			return ret_val;
		}

		igb_standby_nvm(hw);

		/* Send the WRITE ENABLE command (8 bit opcode) */
		igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
					 nvm->opcode_bits);

		igb_standby_nvm(hw);

		/* Some SPI eeproms use the 8th address bit embedded in the
		 * opcode
		 */
		if ((nvm->address_bits == 8) && (offset >= 128))
			write_opcode |= NVM_A8_OPCODE_SPI;

		/* Send the Write command (8-bit opcode + addr) */
		igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
		igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
					 nvm->address_bits);

		/* Loop to allow for up to whole page write of eeprom */
		while (widx < words) {
			u16 word_out = data[widx];
			word_out = (word_out >> 8) | (word_out << 8);
			igb_shift_out_eec_bits(hw, word_out, 16);
			widx++;

			/* Page boundary reached: stand by and start a new
			 * WRITE command for the next page.
			 */
			if ((((offset + widx) * 2) % nvm->page_size) == 0) {
				igb_standby_nvm(hw);
				break;
			}
		}
		/* Let the internal write cycle complete before releasing. */
		usleep_range(1000, 2000);
		nvm->ops.release(hw);
	}

	return ret_val;
}
499
500
501
502
503
504
505
506
507
508
509s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size)
510{
511 s32 ret_val;
512 u16 nvm_data;
513 u16 pointer;
514 u16 offset;
515 u16 length;
516
517 if (part_num == NULL) {
518 hw_dbg("PBA string buffer was null\n");
519 ret_val = E1000_ERR_INVALID_ARGUMENT;
520 goto out;
521 }
522
523 ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
524 if (ret_val) {
525 hw_dbg("NVM Read Error\n");
526 goto out;
527 }
528
529 ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer);
530 if (ret_val) {
531 hw_dbg("NVM Read Error\n");
532 goto out;
533 }
534
535
536
537
538
539 if (nvm_data != NVM_PBA_PTR_GUARD) {
540 hw_dbg("NVM PBA number is not stored as string\n");
541
542
543 if (part_num_size < 11) {
544 hw_dbg("PBA string buffer too small\n");
545 return E1000_ERR_NO_SPACE;
546 }
547
548
549 part_num[0] = (nvm_data >> 12) & 0xF;
550 part_num[1] = (nvm_data >> 8) & 0xF;
551 part_num[2] = (nvm_data >> 4) & 0xF;
552 part_num[3] = nvm_data & 0xF;
553 part_num[4] = (pointer >> 12) & 0xF;
554 part_num[5] = (pointer >> 8) & 0xF;
555 part_num[6] = '-';
556 part_num[7] = 0;
557 part_num[8] = (pointer >> 4) & 0xF;
558 part_num[9] = pointer & 0xF;
559
560
561 part_num[10] = '\0';
562
563
564 for (offset = 0; offset < 10; offset++) {
565 if (part_num[offset] < 0xA)
566 part_num[offset] += '0';
567 else if (part_num[offset] < 0x10)
568 part_num[offset] += 'A' - 0xA;
569 }
570
571 goto out;
572 }
573
574 ret_val = hw->nvm.ops.read(hw, pointer, 1, &length);
575 if (ret_val) {
576 hw_dbg("NVM Read Error\n");
577 goto out;
578 }
579
580 if (length == 0xFFFF || length == 0) {
581 hw_dbg("NVM PBA number section invalid length\n");
582 ret_val = E1000_ERR_NVM_PBA_SECTION;
583 goto out;
584 }
585
586 if (part_num_size < (((u32)length * 2) - 1)) {
587 hw_dbg("PBA string buffer too small\n");
588 ret_val = E1000_ERR_NO_SPACE;
589 goto out;
590 }
591
592
593 pointer++;
594 length--;
595
596 for (offset = 0; offset < length; offset++) {
597 ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data);
598 if (ret_val) {
599 hw_dbg("NVM Read Error\n");
600 goto out;
601 }
602 part_num[offset * 2] = (u8)(nvm_data >> 8);
603 part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
604 }
605 part_num[offset * 2] = '\0';
606
607out:
608 return ret_val;
609}
610
611
612
613
614
615
616
617
618
619s32 igb_read_mac_addr(struct e1000_hw *hw)
620{
621 u32 rar_high;
622 u32 rar_low;
623 u16 i;
624
625 rar_high = rd32(E1000_RAH(0));
626 rar_low = rd32(E1000_RAL(0));
627
628 for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
629 hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
630
631 for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
632 hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
633
634 for (i = 0; i < ETH_ALEN; i++)
635 hw->mac.addr[i] = hw->mac.perm_addr[i];
636
637 return 0;
638}
639
640
641
642
643
644
645
646
647s32 igb_validate_nvm_checksum(struct e1000_hw *hw)
648{
649 s32 ret_val = 0;
650 u16 checksum = 0;
651 u16 i, nvm_data;
652
653 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
654 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
655 if (ret_val) {
656 hw_dbg("NVM Read Error\n");
657 goto out;
658 }
659 checksum += nvm_data;
660 }
661
662 if (checksum != (u16) NVM_SUM) {
663 hw_dbg("NVM Checksum Invalid\n");
664 ret_val = -E1000_ERR_NVM;
665 goto out;
666 }
667
668out:
669 return ret_val;
670}
671
672
673
674
675
676
677
678
679
680s32 igb_update_nvm_checksum(struct e1000_hw *hw)
681{
682 s32 ret_val;
683 u16 checksum = 0;
684 u16 i, nvm_data;
685
686 for (i = 0; i < NVM_CHECKSUM_REG; i++) {
687 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
688 if (ret_val) {
689 hw_dbg("NVM Read Error while updating checksum.\n");
690 goto out;
691 }
692 checksum += nvm_data;
693 }
694 checksum = (u16) NVM_SUM - checksum;
695 ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
696 if (ret_val)
697 hw_dbg("NVM Write Error while updating checksum.\n");
698
699out:
700 return ret_val;
701}
702
703
704
705
706
707
708
709
/**
 *  igb_get_fw_version - Get firmware version
 *  @hw: pointer to the HW structure
 *  @fw_vers: pointer to output structure, zeroed and then filled in
 *
 *  Populates @fw_vers from the NVM: the basic EEPROM major/minor
 *  version, the eTrack ID, and (on i210/i350/i354) the combined
 *  OEM/option-ROM version if a valid combo pointer is present.  i211
 *  parts read their version from iNVM instead; unknown MAC types leave
 *  @fw_vers zeroed.
 **/
void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
{
	u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
	u16 fw_version;

	memset(fw_vers, 0, sizeof(struct e1000_fw_version));

	switch (hw->mac.type) {
	case e1000_i211:
		igb_read_invm_version(hw, fw_vers);
		return;
	case e1000_82575:
	case e1000_82576:
	case e1000_82580:
	case e1000_i354:
	case e1000_i350:
	case e1000_i210:
		break;
	default:
		return;
	}

	/* basic eeprom version numbers */
	hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
	fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
	fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK);

	/* etrack id: two consecutive words, low word first */
	hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
	hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
	fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | eeprom_verl;

	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i354:
	case e1000_i350:
		/* find combo image version via its pointer word */
		hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
		if ((comb_offset != 0x0) && (comb_offset != NVM_VER_INVALID)) {

			hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
					 + 1), 1, &comb_verh);
			hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
					 1, &comb_verl);

			/* only report when both halves are non-zero and
			 * not the erased-flash pattern
			 */
			if ((comb_verh && comb_verl) &&
			    ((comb_verh != NVM_VER_INVALID) &&
			     (comb_verl != NVM_VER_INVALID))) {

				fw_vers->or_valid = true;
				fw_vers->or_major =
					comb_verl >> NVM_COMB_VER_SHFT;
				fw_vers->or_build =
					((comb_verl << NVM_COMB_VER_SHFT)
					 | (comb_verh >> NVM_COMB_VER_SHFT));
				fw_vers->or_patch =
					comb_verh & NVM_COMB_VER_MASK;
			}
		}
		break;
	default:
		break;
	}
	return;
}
775