// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2018 Intel Corporation. */

/* e1000_i210
 * e1000_i211
 */

#include <linux/types.h>
#include <linux/if_ether.h>

#include "e1000_hw.h"
#include "e1000_i210.h"

static s32 igb_update_flash_i210(struct e1000_hw *hw);

/**
 * igb_get_hw_semaphore_i210 - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore to access the PHY or NVM
 */
static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
{
	u32 swsm;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		/* In rare circumstances, the SW semaphore may already be held
		 * unintentionally. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._82575.clear_semaphore_once) {
			hw->dev_spec._82575.clear_semaphore_once = false;
			igb_put_hw_semaphore(hw);
			for (i = 0; i < timeout; i++) {
				swsm = rd32(E1000_SWSM);
				if (!(swsm & E1000_SWSM_SMBI))
					break;

				udelay(50);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			hw_dbg("Driver can't access device - SMBI bit is set.\n");
			return -E1000_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return 0;
}

/**
 * igb_acquire_nvm_i210 - Request for access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Acquire the necessary semaphores for exclusive access to the EEPROM.
 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
 * Return successful if access grant bit set, else clear the request for
 * EEPROM access and return -E1000_ERR_NVM (-1).
 **/
static s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
{
	return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}

/**
 * igb_release_nvm_i210 - Release exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
 * then release the semaphores acquired.
 **/
static void igb_release_nvm_i210(struct e1000_hw *hw)
{
	igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}

/**
 * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
 * will also specify which port we're acquiring the lock for.
 **/
s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;
	s32 ret_val = 0;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

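	/* The low 16 bits of SW_FW_SYNC carry the software ownership flags
	 * and the high 16 bits the matching firmware flags, so wait until
	 * neither side holds the requested resource.
	 */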
	while (i < timeout) {
		if (igb_get_hw_semaphore_i210(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = rd32(E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask) */
		igb_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	swfw_sync |= swmask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
out:
	return ret_val;
}

/**
 * igb_release_swfw_sync_i210 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to release
 *
 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
 * will also specify which port we're releasing the lock for.
 **/
void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	while (igb_get_hw_semaphore_i210(hw))
		; /* Empty */

	swfw_sync = rd32(E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
}

/**
 * igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
 * @hw: pointer to the HW structure
 * @offset: offset of word in the Shadow Ram to read
 * @words: number of words to read
 * @data: word read from the Shadow Ram
 *
 * Reads a 16 bit word from the Shadow Ram using the EERD register.
 * Uses necessary synchronization semaphores.
 **/
static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
				  u16 *data)
{
	s32 status = 0;
	u16 i, count;

	/* We cannot hold synchronization semaphores for too long,
	 * because of forceful takeover procedure. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
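		/* Clamp this burst to at most E1000_EERD_EEWR_MAX_COUNT
		 * words so the semaphore is released between bursts.
		 */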
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (!(hw->nvm.ops.acquire(hw))) {
			status = igb_read_nvm_eerd(hw, offset, count,
						   data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status)
			break;
	}

	return status;
}

/**
 * igb_write_nvm_srwr - Write to Shadow Ram using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow Ram to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow Ram
 *
 * Writes data to Shadow Ram at offset using EEWR register.
 *
 * If igb_update_nvm_checksum is not called after this function, the
 * Shadow Ram will most likely contain an invalid checksum.
 **/
static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
			      u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, k, eewr = 0;
	u32 attempts = 100000;
	s32 ret_val = 0;

	/* A check for invalid values: offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		eewr = ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) |
			(data[i] << E1000_NVM_RW_REG_DATA) |
			E1000_NVM_RW_REG_START;

		wr32(E1000_SRWR, eewr);

		for (k = 0; k < attempts; k++) {
			if (E1000_NVM_RW_REG_DONE &
			    rd32(E1000_SRWR)) {
				ret_val = 0;
				break;
			}
			udelay(5);
		}

		if (ret_val) {
			hw_dbg("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

out:
	return ret_val;
}

/**
 * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow RAM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow RAM
 *
 * Writes data to Shadow RAM at offset using EEWR register.
 *
 * If e1000_update_nvm_checksum is not called after this function, the
 * data will not be committed to FLASH and also Shadow RAM will most likely
 * contain an invalid checksum.
 *
 * If error code is returned, data and Shadow RAM may be inconsistent - buffer
 * partially written.
 **/
static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
				   u16 *data)
{
	s32 status = 0;
	u16 i, count;

	/* We cannot hold synchronization semaphores for too long,
	 * because of forceful takeover procedure. However it is more efficient
	 * to write in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
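		/* Clamp this burst to at most E1000_EERD_EEWR_MAX_COUNT
		 * words so the semaphore is released between bursts.
		 */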
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (!(hw->nvm.ops.acquire(hw))) {
			status = igb_write_nvm_srwr(hw, offset, count,
						    data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status)
			break;
	}

	return status;
}

/**
 * igb_read_invm_word_i210 - Reads OTP
 * @hw: pointer to the HW structure
 * @address: the word address (aka eeprom offset) to read
 * @data: pointer to the data read
 *
 * Reads 16-bit words from the OTP. Return error when the word is not
 * stored in OTP.
 **/
static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
{
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u32 invm_dword;
	u16 i;
	u8 record_type, word_address;

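	/* The iNVM (OTP) holds a flat list of 32-bit records. Skip over the
	 * multi-dword CSR autoload and RSA key structures, and stop at the
	 * first word-autoload record whose address matches the request.
	 */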
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = rd32(E1000_INVM_DATA_REG(i));
		/* Get record type */
		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
			break;
		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
			if (word_address == address) {
				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
				hw_dbg("Read INVM Word 0x%02x = %x\n",
				       address, *data);
				status = 0;
				break;
			}
		}
	}
	if (status)
		hw_dbg("Requested word 0x%02x not found in OTP\n", address);
	return status;
}

/**
 * igb_read_invm_i210 - Read invm wrapper function for I210/I211
 * @hw: pointer to the HW structure
 * @offset: offset to read from
 * @words: number of words to read (unused)
 * @data: pointer to the data read
 *
 * Wrapper function to return data formerly found in the NVM.
 **/
static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
			      u16 __always_unused words, u16 *data)
{
	s32 ret_val = 0;

	/* Only the MAC addr is required to be present in the iNVM */
	switch (offset) {
	case NVM_MAC_ADDR:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]);
		ret_val |= igb_read_invm_word_i210(hw, (u8)offset + 1,
						   &data[1]);
		ret_val |= igb_read_invm_word_i210(hw, (u8)offset + 2,
						   &data[2]);
		if (ret_val)
			hw_dbg("MAC Addr not found in iNVM\n");
		break;
	case NVM_INIT_CTRL_2:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_INIT_CTRL_4:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_LED_1_CFG:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_LED_1_CFG_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_LED_0_2_CFG:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_ID_LED_SETTINGS:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = ID_LED_RESERVED_FFFF;
			ret_val = 0;
		}
		break;
	case NVM_SUB_DEV_ID:
		*data = hw->subsystem_device_id;
		break;
	case NVM_SUB_VEN_ID:
		*data = hw->subsystem_vendor_id;
		break;
	case NVM_DEV_ID:
		*data = hw->device_id;
		break;
	case NVM_VEN_ID:
		*data = hw->vendor_id;
		break;
	default:
		hw_dbg("NVM word 0x%02x is not mapped.\n", offset);
		*data = NVM_RESERVED_WORD;
		break;
	}
	return ret_val;
}

/**
 * igb_read_invm_version - Reads iNVM version and image type
 * @hw: pointer to the HW structure
 * @invm_ver: version structure for the version read
 *
 * Reads iNVM version and image type.
 **/
s32 igb_read_invm_version(struct e1000_hw *hw,
			  struct e1000_fw_version *invm_ver)
{
	u32 *record = NULL;
	u32 *next_record = NULL;
	u32 i = 0;
	u32 invm_dword = 0;
	u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
					     E1000_INVM_RECORD_SIZE_IN_BYTES);
	u32 buffer[E1000_INVM_SIZE];
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u16 version = 0;

	/* Read iNVM memory */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = rd32(E1000_INVM_DATA_REG(i));
		buffer[i] = invm_dword;
	}

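	/* The version and image-type words live in the last-programmed
	 * record of the autoload area, so scan backwards from the end of
	 * that area (invm_blocks) toward the start.
	 */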
	/* Read version number */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have first version location used */
		if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
			version = 0;
			status = 0;
			break;
		}
		/* Check if we have second version location used */
		else if ((i == 1) &&
			 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = 0;
			break;
		}
		/* Check if we have odd version location
		 * used and it is the last one used
		 */
		else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
			  ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
			 (i != 1))) {
			version = (*next_record & E1000_INVM_VER_FIELD_TWO)
				  >> 13;
			status = 0;
			break;
		}
		/* Check if we have even version location
		 * used and it is the last one used
		 */
		else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
			 ((*record & 0x3) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = 0;
			break;
		}
	}

	if (!status) {
		invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
					>> E1000_INVM_MAJOR_SHIFT;
		invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
	}
	/* Read Image Type */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have image type in first location used */
		if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
			invm_ver->invm_img_type = 0;
			status = 0;
			break;
		}
		/* Check if we have image type in the last location used */
		else if ((((*record & 0x3) == 0) &&
			  ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
			 ((((*record & 0x3) != 0) && (i != 1)))) {
			invm_ver->invm_img_type =
				(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
			status = 0;
			break;
		}
	}
	return status;
}

/**
 * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
 **/
static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 status = 0;
	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);

	if (!(hw->nvm.ops.acquire(hw))) {

		/* Temporarily swap in a read function that does not grab the
		 * semaphore itself, since we already hold the semaphore here
		 * and the regular read op would try to take it again.
		 */
		read_op_ptr = hw->nvm.ops.read;
		hw->nvm.ops.read = igb_read_nvm_eerd;

		status = igb_validate_nvm_checksum(hw);

		/* Revert original read operation. */
		hw->nvm.ops.read = read_op_ptr;

		hw->nvm.ops.release(hw);
	} else {
		status = E1000_ERR_SWFW_SYNC;
	}

	return status;
}

/**
 * igb_update_nvm_checksum_i210 - Update EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
 * up to the checksum. Then calculates the EEPROM checksum and writes the
 * value to the EEPROM. Next commit EEPROM data onto the Flash.
 **/
static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 checksum = 0;
	u16 i, nvm_data;

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val) {
		hw_dbg("EEPROM read failed\n");
		goto out;
	}

	if (!(hw->nvm.ops.acquire(hw))) {
		/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */

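		/* Sum all words before the checksum word, then store
		 * NVM_SUM (0xBABA) minus that sum so that the whole EEPROM
		 * sums back to NVM_SUM, as the validate routine expects.
		 */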
		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				hw->nvm.ops.release(hw);
				hw_dbg("NVM Read Error while updating checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		checksum = (u16) NVM_SUM - checksum;
		ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
					     &checksum);
		if (ret_val) {
			hw->nvm.ops.release(hw);
			hw_dbg("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		ret_val = igb_update_flash_i210(hw);
	} else {
		ret_val = -E1000_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}

/**
 * igb_pool_flash_update_done_i210 - Poll FLUDONE status
 * @hw: pointer to the HW structure
 *
 **/
static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
{
	s32 ret_val = -E1000_ERR_NVM;
	u32 i, reg;

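	/* Poll the flash-update-done (FLUDONE) bit in EECD until the
	 * hardware reports that the last flash update has completed.
	 */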
	for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
		reg = rd32(E1000_EECD);
		if (reg & E1000_EECD_FLUDONE_I210) {
			ret_val = 0;
			break;
		}
		udelay(5);
	}

	return ret_val;
}

/**
 * igb_get_flash_presence_i210 - Check if flash device is detected.
 * @hw: pointer to the HW structure
 *
 **/
bool igb_get_flash_presence_i210(struct e1000_hw *hw)
{
	u32 eec = 0;
	bool ret_val = false;

	eec = rd32(E1000_EECD);
	if (eec & E1000_EECD_FLASH_DETECTED_I210)
		ret_val = true;

	return ret_val;
}

/**
 * igb_update_flash_i210 - Commit EEPROM to the flash
 * @hw: pointer to the HW structure
 *
 **/
static s32 igb_update_flash_i210(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 flup;

	ret_val = igb_pool_flash_update_done_i210(hw);
	if (ret_val == -E1000_ERR_NVM) {
		hw_dbg("Flash update time out\n");
		goto out;
	}

	flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210;
	wr32(E1000_EECD, flup);

	ret_val = igb_pool_flash_update_done_i210(hw);
	if (ret_val)
		hw_dbg("Flash update time out\n");
	else
		hw_dbg("Flash update complete\n");

out:
	return ret_val;
}

/**
 * igb_valid_led_default_i210 - Verify a valid default LED config
 * @hw: pointer to the HW structure
 * @data: pointer to the NVM (EEPROM)
 *
 * Read the EEPROM for the current default LED configuration. If the
 * LED configuration is not valid, set to a valid LED configuration.
 **/
s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch (hw->phy.media_type) {
		case e1000_media_type_internal_serdes:
			*data = ID_LED_DEFAULT_I210_SERDES;
			break;
		case e1000_media_type_copper:
		default:
			*data = ID_LED_DEFAULT_I210;
			break;
		}
	}
out:
	return ret_val;
}

/**
 * __igb_access_xmdio_reg - Read/write XMDIO register
 * @hw: pointer to the HW structure
 * @address: XMDIO address to program
 * @dev_addr: device address to program
 * @data: pointer to value to read/write from/to the XMDIO address
 * @read: boolean flag to indicate read or write
 **/
static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
				  u8 dev_addr, u16 *data, bool read)
{
	s32 ret_val = 0;

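	/* Indirect MMD access through the PHY's MMD access control (MMDAC)
	 * and address/data (MMDAAD) registers: select the MMD device and
	 * register address first, then switch the control register to data
	 * mode and perform the actual read or write.
	 */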
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
					dev_addr);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
	else
		ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
	if (ret_val)
		return ret_val;

	/* Reset the MMD access control register back to 0 */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
	if (ret_val)
		return ret_val;

	return ret_val;
}

/**
 * igb_read_xmdio_reg - Read XMDIO register
 * @hw: pointer to the HW structure
 * @addr: XMDIO address to program
 * @dev_addr: device address to program
 * @data: value to be read from the EMI address
 **/
s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
{
	return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true);
}

/**
 * igb_write_xmdio_reg - Write XMDIO register
 * @hw: pointer to the HW structure
 * @addr: XMDIO address to program
 * @dev_addr: device address to program
 * @data: value to be written to the XMDIO address
 **/
s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
{
	return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
}

/**
 * igb_init_nvm_params_i210 - Init NVM func ptrs.
 * @hw: pointer to the HW structure
 **/
s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;

	nvm->ops.acquire = igb_acquire_nvm_i210;
	nvm->ops.release = igb_release_nvm_i210;
	nvm->ops.valid_led_default = igb_valid_led_default_i210;

	/* NVM Function Pointers */
	if (igb_get_flash_presence_i210(hw)) {
		hw->nvm.type = e1000_nvm_flash_hw;
		nvm->ops.read = igb_read_nvm_srrd_i210;
		nvm->ops.write = igb_write_nvm_srwr_i210;
		nvm->ops.validate = igb_validate_nvm_checksum_i210;
		nvm->ops.update = igb_update_nvm_checksum_i210;
	} else {
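		/* No flash detected (e.g. i211): fall back to the iNVM (OTP),
		 * which cannot be written, validated or updated at runtime.
		 */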
		hw->nvm.type = e1000_nvm_invm;
		nvm->ops.read = igb_read_invm_i210;
		nvm->ops.write = NULL;
		nvm->ops.validate = NULL;
		nvm->ops.update = NULL;
	}
	return 0;
}

/**
 * igb_pll_workaround_i210
 * @hw: pointer to the HW structure
 *
 * Works around an errata in the PLL circuit where it occasionally
 * provides the wrong clock frequency after power up.
 **/
s32 igb_pll_workaround_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
	u16 nvm_word, phy_word, pci_word, tmp_nvm;
	int i;

	/* Get and set needed register values */
	wuc = rd32(E1000_WUC);
	mdicnfg = rd32(E1000_MDICNFG);
	reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
	wr32(E1000_MDICNFG, reg_val);

	/* Get data from NVM, or set default */
	ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
					  &nvm_word);
	if (ret_val)
		nvm_word = E1000_INVM_DEFAULT_AL;
	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
	igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT,
				E1000_PHY_PLL_FREQ_PAGE);
	phy_word = E1000_PHY_PLL_UNCONF;
	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
		/* check current state directly from internal PHY */
		igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word);
		if ((phy_word & E1000_PHY_PLL_UNCONF)
		    != E1000_PHY_PLL_UNCONF) {
			ret_val = 0;
			break;
		} else {
			ret_val = -E1000_ERR_PHY;
		}
		/* directly reset the internal PHY */
		ctrl = rd32(E1000_CTRL);
		wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);

		ctrl_ext = rd32(E1000_CTRL_EXT);
		ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
		wr32(E1000_CTRL_EXT, ctrl_ext);

		wr32(E1000_WUC, 0);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
		wr32(E1000_EEARBC_I210, reg_val);

		igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		pci_word |= E1000_PCI_PMCSR_D3;
		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		usleep_range(1000, 2000);
		pci_word &= ~E1000_PCI_PMCSR_D3;
		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
		wr32(E1000_EEARBC_I210, reg_val);

		/* restore WUC register */
		wr32(E1000_WUC, wuc);
	}
	igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);
	/* restore MDICNFG setting */
	wr32(E1000_MDICNFG, mdicnfg);
	return ret_val;
}

/**
 * igb_get_cfg_done_i210 - Read config done bit
 * @hw: pointer to the HW structure
 *
 * Read the management control register for the config done bit for
 * completion status. NOTE: silicon which is EEPROM-less will fail trying
 * to read the config done bit, so the error is *ONLY* logged and 0 is
 * returned. If we were to return with error, EEPROM-less silicon
 * would not be able to be reset or change link.
 **/
s32 igb_get_cfg_done_i210(struct e1000_hw *hw)
{
	s32 timeout = PHY_CFG_TIMEOUT;
	u32 mask = E1000_NVM_CFG_DONE_PORT_0;

	while (timeout) {
		if (rd32(E1000_EEMNGCTL_I210) & mask)
			break;
		usleep_range(1000, 2000);
		timeout--;
	}
	if (!timeout)
		hw_dbg("MNG configuration cycle has not completed.\n");

	return 0;
}