1 /******************************************************************************
2 SPDX-License-Identifier: BSD-3-Clause
3
4 Copyright (c) 2001-2015, Intel Corporation
5 All rights reserved.
6
7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are met:
9
10 1. Redistributions of source code must retain the above copyright notice,
11 this list of conditions and the following disclaimer.
12
13 2. Redistributions in binary form must reproduce the above copyright
14 notice, this list of conditions and the following disclaimer in the
15 documentation and/or other materials provided with the distribution.
16
17 3. Neither the name of the Intel Corporation nor the names of its
18 contributors may be used to endorse or promote products derived from
19 this software without specific prior written permission.
20
21 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 POSSIBILITY OF SUCH DAMAGE.
32
33 ******************************************************************************/
34 /*$FreeBSD$*/
35
36 #include "e1000_api.h"
37
38 static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw);
39 static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
40 static void e1000_config_collision_dist_generic(struct e1000_hw *hw);
41 static int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
42
43 /**
44 * e1000_init_mac_ops_generic - Initialize MAC function pointers
45 * @hw: pointer to the HW structure
46 *
47  * Sets up the function pointers to no-op functions
48 **/
49 void e1000_init_mac_ops_generic(struct e1000_hw *hw)
50 {
51 struct e1000_mac_info *mac = &hw->mac;
52 DEBUGFUNC("e1000_init_mac_ops_generic");
53
54 /* General Setup */
55 mac->ops.init_params = e1000_null_ops_generic;
56 mac->ops.init_hw = e1000_null_ops_generic;
57 mac->ops.reset_hw = e1000_null_ops_generic;
58 mac->ops.setup_physical_interface = e1000_null_ops_generic;
59 mac->ops.get_bus_info = e1000_null_ops_generic;
60 mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pcie;
61 mac->ops.read_mac_addr = e1000_read_mac_addr_generic;
62 mac->ops.config_collision_dist = e1000_config_collision_dist_generic;
63 mac->ops.clear_hw_cntrs = e1000_null_mac_generic;
64 /* LED */
65 mac->ops.cleanup_led = e1000_null_ops_generic;
66 mac->ops.setup_led = e1000_null_ops_generic;
67 mac->ops.blink_led = e1000_null_ops_generic;
68 mac->ops.led_on = e1000_null_ops_generic;
69 mac->ops.led_off = e1000_null_ops_generic;
70 /* LINK */
71 mac->ops.setup_link = e1000_null_ops_generic;
72 mac->ops.get_link_up_info = e1000_null_link_info;
73 mac->ops.check_for_link = e1000_null_ops_generic;
74 mac->ops.set_obff_timer = e1000_null_set_obff_timer;
75 /* Management */
76 mac->ops.check_mng_mode = e1000_null_mng_mode;
77 /* VLAN, MC, etc. */
78 mac->ops.update_mc_addr_list = e1000_null_update_mc;
79 mac->ops.clear_vfta = e1000_null_mac_generic;
80 mac->ops.write_vfta = e1000_null_write_vfta;
81 mac->ops.rar_set = e1000_rar_set_generic;
82 mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic;
83 }
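
#if 0
/**
 * Illustrative sketch only, not part of the shared code: a MAC-family
 * init routine is typically layered over these defaults and replaces only
 * the pointers that the family actually implements.  The function name
 * and the particular overrides below are hypothetical examples.
 **/
static void e1000_init_mac_ops_example(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	/* Start from the safe no-op table... */
	e1000_init_mac_ops_generic(hw);

	/* ...then override what this hypothetical family supports. */
	mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
	mac->ops.setup_link = e1000_setup_link_generic;
	mac->ops.check_for_link = e1000_check_for_copper_link_generic;
	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_base_generic;
}
#endif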
84
85 /**
86 * e1000_null_ops_generic - No-op function, returns 0
87 * @hw: pointer to the HW structure
88 **/
89 s32 e1000_null_ops_generic(struct e1000_hw E1000_UNUSEDARG *hw)
90 {
91 DEBUGFUNC("e1000_null_ops_generic");
92 return E1000_SUCCESS;
93 }
94
95 /**
96 * e1000_null_mac_generic - No-op function, return void
97 * @hw: pointer to the HW structure
98 **/
99 void e1000_null_mac_generic(struct e1000_hw E1000_UNUSEDARG *hw)
100 {
101 DEBUGFUNC("e1000_null_mac_generic");
102 return;
103 }
104
105 /**
106 * e1000_null_link_info - No-op function, return 0
107 * @hw: pointer to the HW structure
108 **/
109 s32 e1000_null_link_info(struct e1000_hw E1000_UNUSEDARG *hw,
110 u16 E1000_UNUSEDARG *s, u16 E1000_UNUSEDARG *d)
111 {
112 DEBUGFUNC("e1000_null_link_info");
113 return E1000_SUCCESS;
114 }
115
116 /**
117 * e1000_null_mng_mode - No-op function, return FALSE
118 * @hw: pointer to the HW structure
119 **/
120 bool e1000_null_mng_mode(struct e1000_hw E1000_UNUSEDARG *hw)
121 {
122 DEBUGFUNC("e1000_null_mng_mode");
123 return FALSE;
124 }
125
126 /**
127 * e1000_null_update_mc - No-op function, return void
128 * @hw: pointer to the HW structure
129 **/
130 void e1000_null_update_mc(struct e1000_hw E1000_UNUSEDARG *hw,
131 u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a)
132 {
133 DEBUGFUNC("e1000_null_update_mc");
134 return;
135 }
136
137 /**
138 * e1000_null_write_vfta - No-op function, return void
139 * @hw: pointer to the HW structure
140 **/
141 void e1000_null_write_vfta(struct e1000_hw E1000_UNUSEDARG *hw,
142 u32 E1000_UNUSEDARG a, u32 E1000_UNUSEDARG b)
143 {
144 DEBUGFUNC("e1000_null_write_vfta");
145 return;
146 }
147
148 /**
149 * e1000_null_rar_set - No-op function, return 0
150 * @hw: pointer to the HW structure
151 **/
152 int e1000_null_rar_set(struct e1000_hw E1000_UNUSEDARG *hw,
153 u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a)
154 {
155 DEBUGFUNC("e1000_null_rar_set");
156 return E1000_SUCCESS;
157 }
158
159 /**
160 * e1000_null_set_obff_timer - No-op function, return 0
161 * @hw: pointer to the HW structure
162 **/
163 s32 e1000_null_set_obff_timer(struct e1000_hw E1000_UNUSEDARG *hw,
164 u32 E1000_UNUSEDARG a)
165 {
166 DEBUGFUNC("e1000_null_set_obff_timer");
167 return E1000_SUCCESS;
168 }
169
170 /**
171 * e1000_get_bus_info_pci_generic - Get PCI(x) bus information
172 * @hw: pointer to the HW structure
173 *
174 * Determines and stores the system bus information for a particular
175 * network interface. The following bus information is determined and stored:
176  * bus speed, bus width, type (PCI/PCI-X), and PCI(-X) function.
177 **/
178 s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw)
179 {
180 struct e1000_mac_info *mac = &hw->mac;
181 struct e1000_bus_info *bus = &hw->bus;
182 u32 status = E1000_READ_REG(hw, E1000_STATUS);
183 s32 ret_val = E1000_SUCCESS;
184
185 DEBUGFUNC("e1000_get_bus_info_pci_generic");
186
187 /* PCI or PCI-X? */
188 bus->type = (status & E1000_STATUS_PCIX_MODE)
189 ? e1000_bus_type_pcix
190 : e1000_bus_type_pci;
191
192 /* Bus speed */
193 if (bus->type == e1000_bus_type_pci) {
194 bus->speed = (status & E1000_STATUS_PCI66)
195 ? e1000_bus_speed_66
196 : e1000_bus_speed_33;
197 } else {
198 switch (status & E1000_STATUS_PCIX_SPEED) {
199 case E1000_STATUS_PCIX_SPEED_66:
200 bus->speed = e1000_bus_speed_66;
201 break;
202 case E1000_STATUS_PCIX_SPEED_100:
203 bus->speed = e1000_bus_speed_100;
204 break;
205 case E1000_STATUS_PCIX_SPEED_133:
206 bus->speed = e1000_bus_speed_133;
207 break;
208 default:
209 bus->speed = e1000_bus_speed_reserved;
210 break;
211 }
212 }
213
214 /* Bus width */
215 bus->width = (status & E1000_STATUS_BUS64)
216 ? e1000_bus_width_64
217 : e1000_bus_width_32;
218
219 /* Which PCI(-X) function? */
220 mac->ops.set_lan_id(hw);
221
222 return ret_val;
223 }
224
225 /**
226 * e1000_get_bus_info_pcie_generic - Get PCIe bus information
227 * @hw: pointer to the HW structure
228 *
229 * Determines and stores the system bus information for a particular
230 * network interface. The following bus information is determined and stored:
231 * bus speed, bus width, type (PCIe), and PCIe function.
232 **/
233 s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw)
234 {
235 struct e1000_mac_info *mac = &hw->mac;
236 struct e1000_bus_info *bus = &hw->bus;
237 s32 ret_val;
238 u16 pcie_link_status;
239
240 DEBUGFUNC("e1000_get_bus_info_pcie_generic");
241
242 bus->type = e1000_bus_type_pci_express;
243
244 ret_val = e1000_read_pcie_cap_reg(hw, PCIE_LINK_STATUS,
245 &pcie_link_status);
246 if (ret_val) {
247 bus->width = e1000_bus_width_unknown;
248 bus->speed = e1000_bus_speed_unknown;
249 } else {
250 switch (pcie_link_status & PCIE_LINK_SPEED_MASK) {
251 case PCIE_LINK_SPEED_2500:
252 bus->speed = e1000_bus_speed_2500;
253 break;
254 case PCIE_LINK_SPEED_5000:
255 bus->speed = e1000_bus_speed_5000;
256 break;
257 default:
258 bus->speed = e1000_bus_speed_unknown;
259 break;
260 }
261
262 bus->width = (enum e1000_bus_width)((pcie_link_status &
263 PCIE_LINK_WIDTH_MASK) >> PCIE_LINK_WIDTH_SHIFT);
264 }
265
266 mac->ops.set_lan_id(hw);
267
268 return E1000_SUCCESS;
269 }
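
/*
 * Illustrative decode, sketch only: assuming the standard PCIe Link Status
 * layout that the PCIE_LINK_* masks above encode (link speed in the low
 * bits, negotiated width in the field above it), a raw pcie_link_status
 * value of 0x0042 would be recorded by the code above as
 * bus->speed = e1000_bus_speed_5000 and bus->width = 4 (an x4 link).
 */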
270
271 /**
272 * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
273 *
274 * @hw: pointer to the HW structure
275 *
276 * Determines the LAN function id by reading memory-mapped registers
277 * and swaps the port value if requested.
278 **/
279 static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
280 {
281 struct e1000_bus_info *bus = &hw->bus;
282 u32 reg;
283
284 /* The status register reports the correct function number
285 * for the device regardless of function swap state.
286 */
287 reg = E1000_READ_REG(hw, E1000_STATUS);
288 bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
289 }
290
291 /**
292 * e1000_set_lan_id_multi_port_pci - Set LAN id for PCI multiple port devices
293 * @hw: pointer to the HW structure
294 *
295 * Determines the LAN function id by reading PCI config space.
296 **/
297 void e1000_set_lan_id_multi_port_pci(struct e1000_hw *hw)
298 {
299 struct e1000_bus_info *bus = &hw->bus;
300 u16 pci_header_type;
301 u32 status;
302
303 e1000_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type);
304 if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
305 status = E1000_READ_REG(hw, E1000_STATUS);
306 bus->func = (status & E1000_STATUS_FUNC_MASK)
307 >> E1000_STATUS_FUNC_SHIFT;
308 } else {
309 bus->func = 0;
310 }
311 }
312
313 /**
314 * e1000_set_lan_id_single_port - Set LAN id for a single port device
315 * @hw: pointer to the HW structure
316 *
317 * Sets the LAN function id to zero for a single port device.
318 **/
319 void e1000_set_lan_id_single_port(struct e1000_hw *hw)
320 {
321 struct e1000_bus_info *bus = &hw->bus;
322
323 bus->func = 0;
324 }
325
326 /**
327 * e1000_clear_vfta_generic - Clear VLAN filter table
328 * @hw: pointer to the HW structure
329 *
330 * Clears the register array which contains the VLAN filter table by
331 * setting all the values to 0.
332 **/
333 void e1000_clear_vfta_generic(struct e1000_hw *hw)
334 {
335 u32 offset;
336
337 DEBUGFUNC("e1000_clear_vfta_generic");
338
339 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
340 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
341 E1000_WRITE_FLUSH(hw);
342 }
343 }
344
345 /**
346 * e1000_write_vfta_generic - Write value to VLAN filter table
347 * @hw: pointer to the HW structure
348 * @offset: register offset in VLAN filter table
349 * @value: register value written to VLAN filter table
350 *
351 * Writes value at the given offset in the register array which stores
352 * the VLAN filter table.
353 **/
354 void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
355 {
356 DEBUGFUNC("e1000_write_vfta_generic");
357
358 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
359 E1000_WRITE_FLUSH(hw);
360 }
361
362 /**
363  * e1000_init_rx_addrs_generic - Initialize receive addresses
364  * @hw: pointer to the HW structure
365  * @rar_count: number of receive address registers
366  *
367  * Sets up the receive address registers by setting the base receive address
368  * register to the device's MAC address and clearing all the other receive
369 * address registers to 0.
370 **/
371 void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count)
372 {
373 u32 i;
374 u8 mac_addr[ETH_ADDR_LEN] = {0};
375
376 DEBUGFUNC("e1000_init_rx_addrs_generic");
377
378 /* Setup the receive address */
379 DEBUGOUT("Programming MAC Address into RAR[0]\n");
380
381 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
382
383 /* Zero out the other (rar_entry_count - 1) receive addresses */
384 DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1);
385 for (i = 1; i < rar_count; i++)
386 hw->mac.ops.rar_set(hw, mac_addr, i);
387 }
388
389 /**
390 * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
391 * @hw: pointer to the HW structure
392 *
393  * Checks the NVM for an alternate MAC address. An alternate MAC address
394  * can be set up by pre-boot software and must be treated like a permanent
395  * address, overriding the actual permanent MAC address. If an
396 * alternate MAC address is found it is programmed into RAR0, replacing
397 * the permanent address that was installed into RAR0 by the Si on reset.
398 * This function will return SUCCESS unless it encounters an error while
399 * reading the EEPROM.
400 **/
401 s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
402 {
403 u32 i;
404 s32 ret_val;
405 u16 offset, nvm_alt_mac_addr_offset, nvm_data;
406 u8 alt_mac_addr[ETH_ADDR_LEN];
407
408 DEBUGFUNC("e1000_check_alt_mac_addr_generic");
409
410 ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data);
411 if (ret_val)
412 return ret_val;
413
414 /* not supported on older hardware or 82573 */
415 if ((hw->mac.type < e1000_82571) || (hw->mac.type == e1000_82573))
416 return E1000_SUCCESS;
417
418 /* Alternate MAC address is handled by the option ROM for 82580
419 * and newer. SW support not required.
420 */
421 if (hw->mac.type >= e1000_82580)
422 return E1000_SUCCESS;
423
424 ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
425 &nvm_alt_mac_addr_offset);
426 if (ret_val) {
427 DEBUGOUT("NVM Read Error\n");
428 return ret_val;
429 }
430
431 if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
432 (nvm_alt_mac_addr_offset == 0x0000))
433 /* There is no Alternate MAC Address */
434 return E1000_SUCCESS;
435
436 if (hw->bus.func == E1000_FUNC_1)
437 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
438 if (hw->bus.func == E1000_FUNC_2)
439 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;
440
441 if (hw->bus.func == E1000_FUNC_3)
442 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
443 for (i = 0; i < ETH_ADDR_LEN; i += 2) {
444 offset = nvm_alt_mac_addr_offset + (i >> 1);
445 ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
446 if (ret_val) {
447 DEBUGOUT("NVM Read Error\n");
448 return ret_val;
449 }
450
451 alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
452 alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
453 }
454
455 /* if multicast bit is set, the alternate address will not be used */
456 if (alt_mac_addr[0] & 0x01) {
457 DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n");
458 return E1000_SUCCESS;
459 }
460
461 /* We have a valid alternate MAC address, and we want to treat it the
462 * same as the normal permanent MAC address stored by the HW into the
463 * RAR. Do this by mapping this address into RAR0.
464 */
465 hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
466
467 return E1000_SUCCESS;
468 }
469
470 /**
471 * e1000_rar_set_generic - Set receive address register
472 * @hw: pointer to the HW structure
473 * @addr: pointer to the receive address
474 * @index: receive address array register
475 *
476 * Sets the receive address array register at index to the address passed
477 * in by addr.
478 **/
479 static int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
480 {
481 u32 rar_low, rar_high;
482
483 DEBUGFUNC("e1000_rar_set_generic");
484
485 /* HW expects these in little endian so we reverse the byte order
486 * from network order (big endian) to little endian
487 */
488 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
489 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
490
491 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
492
493 /* If MAC address zero, no need to set the AV bit */
494 if (rar_low || rar_high)
495 rar_high |= E1000_RAH_AV;
496
497 /* Some bridges will combine consecutive 32-bit writes into
498 * a single burst write, which will malfunction on some parts.
499 * The flushes avoid this.
500 */
501 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
502 E1000_WRITE_FLUSH(hw);
503 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
504 E1000_WRITE_FLUSH(hw);
505
506 return E1000_SUCCESS;
507 }
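
/*
 * Worked example, sketch only: for a hypothetical address 00:1B:21:12:34:56
 * passed to e1000_rar_set_generic(), the little-endian packing above yields
 *   rar_low  = 0x12211B00   (addr[0] in the least significant byte)
 *   rar_high = 0x00005634 | E1000_RAH_AV
 */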
508
509 /**
510 * e1000_hash_mc_addr_generic - Generate a multicast hash value
511 * @hw: pointer to the HW structure
512 * @mc_addr: pointer to a multicast address
513 *
514 * Generates a multicast address hash value which is used to determine
515 * the multicast filter table array address and new table value.
516 **/
517 u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr)
518 {
519 u32 hash_value, hash_mask;
520 u8 bit_shift = 0;
521
522 DEBUGFUNC("e1000_hash_mc_addr_generic");
523
524 /* Register count multiplied by bits per register */
525 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
526
527 /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
528 * where 0xFF would still fall within the hash mask.
529 */
530 while (hash_mask >> bit_shift != 0xFF)
531 bit_shift++;
532
533 /* The portion of the address that is used for the hash table
534 * is determined by the mc_filter_type setting.
535 * The algorithm is such that there is a total of 8 bits of shifting.
536 * The bit_shift for a mc_filter_type of 0 represents the number of
537 * left-shifts where the MSB of mc_addr[5] would still fall within
538 * the hash_mask. Case 0 does this exactly. Since there are a total
539 * of 8 bits of shifting, then mc_addr[4] will shift right the
540 * remaining number of bits. Thus 8 - bit_shift. The rest of the
541 * cases are a variation of this algorithm...essentially raising the
542 * number of bits to shift mc_addr[5] left, while still keeping the
543 * 8-bit shifting total.
544 *
545 * For example, given the following Destination MAC Address and an
546 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
547 * we can see that the bit_shift for case 0 is 4. These are the hash
548 * values resulting from each mc_filter_type...
549 * [0] [1] [2] [3] [4] [5]
550 * 01 AA 00 12 34 56
551 * LSB MSB
552 *
553 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
554 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
555 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
556 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
557 */
558 switch (hw->mac.mc_filter_type) {
559 default:
560 case 0:
561 break;
562 case 1:
563 bit_shift += 1;
564 break;
565 case 2:
566 bit_shift += 2;
567 break;
568 case 3:
569 bit_shift += 4;
570 break;
571 }
572
573 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
574 (((u16) mc_addr[5]) << bit_shift)));
575
576 return hash_value;
577 }
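
#if 0
/**
 * Illustrative sketch only, not part of the shared code: reproduces the
 * case 0 example from the comment above for 128 MTA registers (hash_mask
 * 0xFFF, bit_shift 4) and the hypothetical multicast address
 * 01:AA:00:12:34:56.
 **/
static u32 e1000_hash_mc_addr_example(void)
{
	u8 mc_addr[ETH_ADDR_LEN] = {0x01, 0xAA, 0x00, 0x12, 0x34, 0x56};
	u32 hash_mask = (128 * 32) - 1;		/* 0xFFF */
	u8 bit_shift = 4;			/* 0xFFF >> 4 == 0xFF */

	/* ((0x34 >> 4) | (0x56 << 4)) & 0xFFF == 0x563 */
	return hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
	    (((u16) mc_addr[5]) << bit_shift)));
}
#endif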
578
579 /**
580 * e1000_update_mc_addr_list_generic - Update Multicast addresses
581 * @hw: pointer to the HW structure
582 * @mc_addr_list: array of multicast addresses to program
583 * @mc_addr_count: number of multicast addresses to program
584 *
585 * Updates entire Multicast Table Array.
586 * The caller must have a packed mc_addr_list of multicast addresses.
587 **/
588 void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
589 u8 *mc_addr_list, u32 mc_addr_count)
590 {
591 u32 hash_value, hash_bit, hash_reg;
592 int i;
593
594 DEBUGFUNC("e1000_update_mc_addr_list_generic");
595
596 /* clear mta_shadow */
597 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
598
599 /* update mta_shadow from mc_addr_list */
600 for (i = 0; (u32) i < mc_addr_count; i++) {
601 hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list);
602
603 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
604 hash_bit = hash_value & 0x1F;
605
606 hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
607 mc_addr_list += (ETH_ADDR_LEN);
608 }
609
610 /* replace the entire MTA table */
611 for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
612 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
613 E1000_WRITE_FLUSH(hw);
614 }
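
#if 0
/**
 * Illustrative usage sketch only, not part of the shared code: the caller
 * passes e1000_update_mc_addr_list_generic() a packed array of 6-byte
 * addresses laid end to end.  The two multicast addresses below are
 * hypothetical examples.
 **/
static void e1000_update_mc_addr_list_example(struct e1000_hw *hw)
{
	u8 mc_list[2 * ETH_ADDR_LEN] = {
		0x01, 0x00, 0x5E, 0x00, 0x00, 0x01,	/* first address */
		0x01, 0x00, 0x5E, 0x00, 0x00, 0xFB	/* second address */
	};

	hw->mac.ops.update_mc_addr_list(hw, mc_list, 2);
}
#endif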
615
616 /**
617 * e1000_pcix_mmrbc_workaround_generic - Fix incorrect MMRBC value
618 * @hw: pointer to the HW structure
619 *
620 * In certain situations, a system BIOS may report that the PCIx maximum
621  * memory read byte count (MMRBC) value is higher than the actual
622  * value. We check the PCI-X command register against the PCI-X status
623  * register and reduce the MMRBC if necessary.
624 **/
625 void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw)
626 {
627 u16 cmd_mmrbc;
628 u16 pcix_cmd;
629 u16 pcix_stat_hi_word;
630 u16 stat_mmrbc;
631
632 DEBUGFUNC("e1000_pcix_mmrbc_workaround_generic");
633
634 /* Workaround for PCI-X issue when BIOS sets MMRBC incorrectly */
635 if (hw->bus.type != e1000_bus_type_pcix)
636 return;
637
638 e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd);
639 e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, &pcix_stat_hi_word);
640 cmd_mmrbc = (pcix_cmd & PCIX_COMMAND_MMRBC_MASK) >>
641 PCIX_COMMAND_MMRBC_SHIFT;
642 stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >>
643 PCIX_STATUS_HI_MMRBC_SHIFT;
644 if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K)
645 stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K;
646 if (cmd_mmrbc > stat_mmrbc) {
647 pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK;
648 pcix_cmd |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT;
649 e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd);
650 }
651 }
652
653 /**
654 * e1000_clear_hw_cntrs_base_generic - Clear base hardware counters
655 * @hw: pointer to the HW structure
656 *
657 * Clears the base hardware counters by reading the counter registers.
658 **/
659 void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw)
660 {
661 DEBUGFUNC("e1000_clear_hw_cntrs_base_generic");
662
663 E1000_READ_REG(hw, E1000_CRCERRS);
664 E1000_READ_REG(hw, E1000_SYMERRS);
665 E1000_READ_REG(hw, E1000_MPC);
666 E1000_READ_REG(hw, E1000_SCC);
667 E1000_READ_REG(hw, E1000_ECOL);
668 E1000_READ_REG(hw, E1000_MCC);
669 E1000_READ_REG(hw, E1000_LATECOL);
670 E1000_READ_REG(hw, E1000_COLC);
671 E1000_READ_REG(hw, E1000_DC);
672 E1000_READ_REG(hw, E1000_SEC);
673 E1000_READ_REG(hw, E1000_RLEC);
674 E1000_READ_REG(hw, E1000_XONRXC);
675 E1000_READ_REG(hw, E1000_XONTXC);
676 E1000_READ_REG(hw, E1000_XOFFRXC);
677 E1000_READ_REG(hw, E1000_XOFFTXC);
678 E1000_READ_REG(hw, E1000_FCRUC);
679 E1000_READ_REG(hw, E1000_GPRC);
680 E1000_READ_REG(hw, E1000_BPRC);
681 E1000_READ_REG(hw, E1000_MPRC);
682 E1000_READ_REG(hw, E1000_GPTC);
683 E1000_READ_REG(hw, E1000_GORCL);
684 E1000_READ_REG(hw, E1000_GORCH);
685 E1000_READ_REG(hw, E1000_GOTCL);
686 E1000_READ_REG(hw, E1000_GOTCH);
687 E1000_READ_REG(hw, E1000_RNBC);
688 E1000_READ_REG(hw, E1000_RUC);
689 E1000_READ_REG(hw, E1000_RFC);
690 E1000_READ_REG(hw, E1000_ROC);
691 E1000_READ_REG(hw, E1000_RJC);
692 E1000_READ_REG(hw, E1000_TORL);
693 E1000_READ_REG(hw, E1000_TORH);
694 E1000_READ_REG(hw, E1000_TOTL);
695 E1000_READ_REG(hw, E1000_TOTH);
696 E1000_READ_REG(hw, E1000_TPR);
697 E1000_READ_REG(hw, E1000_TPT);
698 E1000_READ_REG(hw, E1000_MPTC);
699 E1000_READ_REG(hw, E1000_BPTC);
700 }
701
702 /**
703 * e1000_check_for_copper_link_generic - Check for link (Copper)
704 * @hw: pointer to the HW structure
705 *
706  * Checks to see if the link status of the hardware has changed. If a
707 * change in link status has been detected, then we read the PHY registers
708 * to get the current speed/duplex if link exists.
709 **/
710 s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw)
711 {
712 struct e1000_mac_info *mac = &hw->mac;
713 s32 ret_val;
714 bool link;
715
716 DEBUGFUNC("e1000_check_for_copper_link");
717
718 /* We only want to go out to the PHY registers to see if Auto-Neg
719 * has completed and/or if our link status has changed. The
720 * get_link_status flag is set upon receiving a Link Status
721 * Change or Rx Sequence Error interrupt.
722 */
723 if (!mac->get_link_status)
724 return E1000_SUCCESS;
725
726 /* First we want to see if the MII Status Register reports
727 * link. If so, then we want to get the current speed/duplex
728 * of the PHY.
729 */
730 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
731 if (ret_val)
732 return ret_val;
733
734 if (!link)
735 return E1000_SUCCESS; /* No link detected */
736
737 mac->get_link_status = FALSE;
738
739 	/* Check if there was DownShift; this must be checked
740 	 * immediately after link-up
741 */
742 e1000_check_downshift_generic(hw);
743
744 /* If we are forcing speed/duplex, then we simply return since
745 * we have already determined whether we have link or not.
746 */
747 if (!mac->autoneg)
748 return -E1000_ERR_CONFIG;
749
750 /* Auto-Neg is enabled. Auto Speed Detection takes care
751 * of MAC speed/duplex configuration. So we only need to
752 * configure Collision Distance in the MAC.
753 */
754 mac->ops.config_collision_dist(hw);
755
756 /* Configure Flow Control now that Auto-Neg has completed.
757 * First, we need to restore the desired flow control
758 * settings because we may have had to re-autoneg with a
759 * different link partner.
760 */
761 ret_val = e1000_config_fc_after_link_up_generic(hw);
762 if (ret_val)
763 DEBUGOUT("Error configuring flow control\n");
764
765 return ret_val;
766 }
767
768 /**
769 * e1000_check_for_fiber_link_generic - Check for link (Fiber)
770 * @hw: pointer to the HW structure
771 *
772 * Checks for link up on the hardware. If link is not up and we have
773 * a signal, then we need to force link up.
774 **/
775 s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw)
776 {
777 struct e1000_mac_info *mac = &hw->mac;
778 u32 rxcw;
779 u32 ctrl;
780 u32 status;
781 s32 ret_val;
782
783 DEBUGFUNC("e1000_check_for_fiber_link_generic");
784
785 ctrl = E1000_READ_REG(hw, E1000_CTRL);
786 status = E1000_READ_REG(hw, E1000_STATUS);
787 rxcw = E1000_READ_REG(hw, E1000_RXCW);
788
789 /* If we don't have link (auto-negotiation failed or link partner
790 * cannot auto-negotiate), the cable is plugged in (we have signal),
791 * and our link partner is not trying to auto-negotiate with us (we
792 * are receiving idles or data), we need to force link up. We also
793 * need to give auto-negotiation time to complete, in case the cable
794 * was just plugged in. The autoneg_failed flag does this.
795 */
796 /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
797 if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) &&
798 !(rxcw & E1000_RXCW_C)) {
799 if (!mac->autoneg_failed) {
800 mac->autoneg_failed = TRUE;
801 return E1000_SUCCESS;
802 }
803 DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
804
805 /* Disable auto-negotiation in the TXCW register */
806 E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
807
808 /* Force link-up and also force full-duplex. */
809 ctrl = E1000_READ_REG(hw, E1000_CTRL);
810 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
811 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
812
813 /* Configure Flow Control after forcing link up. */
814 ret_val = e1000_config_fc_after_link_up_generic(hw);
815 if (ret_val) {
816 DEBUGOUT("Error configuring flow control\n");
817 return ret_val;
818 }
819 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
820 /* If we are forcing link and we are receiving /C/ ordered
821 * sets, re-enable auto-negotiation in the TXCW register
822 * and disable forced link in the Device Control register
823 * in an attempt to auto-negotiate with our link partner.
824 */
825 DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
826 E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
827 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
828
829 mac->serdes_has_link = TRUE;
830 }
831
832 return E1000_SUCCESS;
833 }
834
835 /**
836 * e1000_check_for_serdes_link_generic - Check for link (Serdes)
837 * @hw: pointer to the HW structure
838 *
839 * Checks for link up on the hardware. If link is not up and we have
840 * a signal, then we need to force link up.
841 **/
842 s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
843 {
844 struct e1000_mac_info *mac = &hw->mac;
845 u32 rxcw;
846 u32 ctrl;
847 u32 status;
848 s32 ret_val;
849
850 DEBUGFUNC("e1000_check_for_serdes_link_generic");
851
852 ctrl = E1000_READ_REG(hw, E1000_CTRL);
853 status = E1000_READ_REG(hw, E1000_STATUS);
854 rxcw = E1000_READ_REG(hw, E1000_RXCW);
855
856 /* If we don't have link (auto-negotiation failed or link partner
857 * cannot auto-negotiate), and our link partner is not trying to
858 * auto-negotiate with us (we are receiving idles or data),
859 * we need to force link up. We also need to give auto-negotiation
860 * time to complete.
861 */
862 /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
863 if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) {
864 if (!mac->autoneg_failed) {
865 mac->autoneg_failed = TRUE;
866 return E1000_SUCCESS;
867 }
868 DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
869
870 /* Disable auto-negotiation in the TXCW register */
871 E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
872
873 /* Force link-up and also force full-duplex. */
874 ctrl = E1000_READ_REG(hw, E1000_CTRL);
875 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
876 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
877
878 /* Configure Flow Control after forcing link up. */
879 ret_val = e1000_config_fc_after_link_up_generic(hw);
880 if (ret_val) {
881 DEBUGOUT("Error configuring flow control\n");
882 return ret_val;
883 }
884 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
885 /* If we are forcing link and we are receiving /C/ ordered
886 * sets, re-enable auto-negotiation in the TXCW register
887 * and disable forced link in the Device Control register
888 * in an attempt to auto-negotiate with our link partner.
889 */
890 DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
891 E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
892 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
893
894 mac->serdes_has_link = TRUE;
895 } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) {
896 /* If we force link for non-auto-negotiation switch, check
897 * link status based on MAC synchronization for internal
898 * serdes media type.
899 */
900 /* SYNCH bit and IV bit are sticky. */
901 usec_delay(10);
902 rxcw = E1000_READ_REG(hw, E1000_RXCW);
903 if (rxcw & E1000_RXCW_SYNCH) {
904 if (!(rxcw & E1000_RXCW_IV)) {
905 mac->serdes_has_link = TRUE;
906 DEBUGOUT("SERDES: Link up - forced.\n");
907 }
908 } else {
909 mac->serdes_has_link = FALSE;
910 DEBUGOUT("SERDES: Link down - force failed.\n");
911 }
912 }
913
914 if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) {
915 status = E1000_READ_REG(hw, E1000_STATUS);
916 if (status & E1000_STATUS_LU) {
917 /* SYNCH bit and IV bit are sticky, so reread rxcw. */
918 usec_delay(10);
919 rxcw = E1000_READ_REG(hw, E1000_RXCW);
920 if (rxcw & E1000_RXCW_SYNCH) {
921 if (!(rxcw & E1000_RXCW_IV)) {
922 mac->serdes_has_link = TRUE;
923 DEBUGOUT("SERDES: Link up - autoneg completed successfully.\n");
924 } else {
925 mac->serdes_has_link = FALSE;
926 DEBUGOUT("SERDES: Link down - invalid codewords detected in autoneg.\n");
927 }
928 } else {
929 mac->serdes_has_link = FALSE;
930 DEBUGOUT("SERDES: Link down - no sync.\n");
931 }
932 } else {
933 mac->serdes_has_link = FALSE;
934 DEBUGOUT("SERDES: Link down - autoneg failed\n");
935 }
936 }
937
938 return E1000_SUCCESS;
939 }
940
941 /**
942 * e1000_set_default_fc_generic - Set flow control default values
943 * @hw: pointer to the HW structure
944 *
945 * Read the EEPROM for the default values for flow control and store the
946 * values.
947 **/
948 s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
949 {
950 s32 ret_val;
951 u16 nvm_data;
952 u16 nvm_offset = 0;
953
954 DEBUGFUNC("e1000_set_default_fc_generic");
955
956 /* Read and store word 0x0F of the EEPROM. This word contains bits
957 * that determine the hardware's default PAUSE (flow control) mode,
958 * a bit that determines whether the HW defaults to enabling or
959 * disabling auto-negotiation, and the direction of the
960 * SW defined pins. If there is no SW over-ride of the flow
961 * control setting, then the variable hw->fc will
962 * be initialized based on a value in the EEPROM.
963 */
964 if (hw->mac.type == e1000_i350) {
965 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
966 ret_val = hw->nvm.ops.read(hw,
967 NVM_INIT_CONTROL2_REG +
968 nvm_offset,
969 1, &nvm_data);
970 } else {
971 ret_val = hw->nvm.ops.read(hw,
972 NVM_INIT_CONTROL2_REG,
973 1, &nvm_data);
974 }
975
976
977 if (ret_val) {
978 DEBUGOUT("NVM Read Error\n");
979 return ret_val;
980 }
981
982 if (!(nvm_data & NVM_WORD0F_PAUSE_MASK))
983 hw->fc.requested_mode = e1000_fc_none;
984 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
985 NVM_WORD0F_ASM_DIR)
986 hw->fc.requested_mode = e1000_fc_tx_pause;
987 else
988 hw->fc.requested_mode = e1000_fc_full;
989
990 return E1000_SUCCESS;
991 }
992
993 /**
994 * e1000_setup_link_generic - Setup flow control and link settings
995 * @hw: pointer to the HW structure
996 *
997 * Determines which flow control settings to use, then configures flow
998 * control. Calls the appropriate media-specific link configuration
999 * function. Assuming the adapter has a valid link partner, a valid link
1000 * should be established. Assumes the hardware has previously been reset
1001 * and the transmitter and receiver are not enabled.
1002 **/
1003 s32 e1000_setup_link_generic(struct e1000_hw *hw)
1004 {
1005 s32 ret_val;
1006
1007 DEBUGFUNC("e1000_setup_link_generic");
1008
1009 /* In the case of the phy reset being blocked, we already have a link.
1010 * We do not need to set it up again.
1011 */
1012 if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
1013 return E1000_SUCCESS;
1014
1015 /* If requested flow control is set to default, set flow control
1016 * based on the EEPROM flow control settings.
1017 */
1018 if (hw->fc.requested_mode == e1000_fc_default) {
1019 ret_val = e1000_set_default_fc_generic(hw);
1020 if (ret_val)
1021 return ret_val;
1022 }
1023
1024 /* Save off the requested flow control mode for use later. Depending
1025 * on the link partner's capabilities, we may or may not use this mode.
1026 */
1027 hw->fc.current_mode = hw->fc.requested_mode;
1028
1029 DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
1030 hw->fc.current_mode);
1031
1032 /* Call the necessary media_type subroutine to configure the link. */
1033 ret_val = hw->mac.ops.setup_physical_interface(hw);
1034 if (ret_val)
1035 return ret_val;
1036
1037 /* Initialize the flow control address, type, and PAUSE timer
1038 * registers to their default values. This is done even if flow
1039 * control is disabled, because it does not hurt anything to
1040 * initialize these registers.
1041 */
1042 DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
1043 E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE);
1044 E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
1045 E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
1046
1047 E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
1048
1049 return e1000_set_fc_watermarks_generic(hw);
1050 }
1051
1052 /**
1053 * e1000_commit_fc_settings_generic - Configure flow control
1054 * @hw: pointer to the HW structure
1055 *
1056 * Write the flow control settings to the Transmit Config Word Register (TXCW)
1057  * based on the flow control settings in e1000_mac_info.
1058 **/
1059 s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
1060 {
1061 struct e1000_mac_info *mac = &hw->mac;
1062 u32 txcw;
1063
1064 DEBUGFUNC("e1000_commit_fc_settings_generic");
1065
1066 /* Check for a software override of the flow control settings, and
1067 * setup the device accordingly. If auto-negotiation is enabled, then
1068 * software will have to set the "PAUSE" bits to the correct value in
1069 * the Transmit Config Word Register (TXCW) and re-start auto-
1070 * negotiation. However, if auto-negotiation is disabled, then
1071 * software will have to manually configure the two flow control enable
1072 * bits in the CTRL register.
1073 *
1074 * The possible values of the "fc" parameter are:
1075 * 0: Flow control is completely disabled
1076 * 1: Rx flow control is enabled (we can receive pause frames,
1077 * but not send pause frames).
1078 * 2: Tx flow control is enabled (we can send pause frames but we
1079 * do not support receiving pause frames).
1080 * 3: Both Rx and Tx flow control (symmetric) are enabled.
1081 */
1082 switch (hw->fc.current_mode) {
1083 case e1000_fc_none:
1084 /* Flow control completely disabled by a software over-ride. */
1085 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
1086 break;
1087 case e1000_fc_rx_pause:
1088 /* Rx Flow control is enabled and Tx Flow control is disabled
1089 * by a software over-ride. Since there really isn't a way to
1090 * advertise that we are capable of Rx Pause ONLY, we will
1091 * advertise that we support both symmetric and asymmetric Rx
1092 * PAUSE. Later, we will disable the adapter's ability to send
1093 * PAUSE frames.
1094 */
1095 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
1096 break;
1097 case e1000_fc_tx_pause:
1098 /* Tx Flow control is enabled, and Rx Flow control is disabled,
1099 * by a software over-ride.
1100 */
1101 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
1102 break;
1103 case e1000_fc_full:
1104 /* Flow control (both Rx and Tx) is enabled by a software
1105 * over-ride.
1106 */
1107 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
1108 break;
1109 default:
1110 DEBUGOUT("Flow control param set incorrectly\n");
1111 return -E1000_ERR_CONFIG;
1112 break;
1113 }
1114
1115 E1000_WRITE_REG(hw, E1000_TXCW, txcw);
1116 mac->txcw = txcw;
1117
1118 return E1000_SUCCESS;
1119 }
1120
1121 /**
1122 * e1000_poll_fiber_serdes_link_generic - Poll for link up
1123 * @hw: pointer to the HW structure
1124 *
1125 * Polls for link up by reading the status register, if link fails to come
1126 * up with auto-negotiation, then the link is forced if a signal is detected.
1127 **/
1128 s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
1129 {
1130 struct e1000_mac_info *mac = &hw->mac;
1131 u32 i, status;
1132 s32 ret_val;
1133
1134 DEBUGFUNC("e1000_poll_fiber_serdes_link_generic");
1135
1136 /* If we have a signal (the cable is plugged in, or assumed TRUE for
1137 * serdes media) then poll for a "Link-Up" indication in the Device
1138 	 * Status Register. Time out if a link isn't seen in 500 milliseconds
1139 	 * (Auto-negotiation should complete in less than 500
1140 * milliseconds even if the other end is doing it in SW).
1141 */
1142 for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
1143 msec_delay(10);
1144 status = E1000_READ_REG(hw, E1000_STATUS);
1145 if (status & E1000_STATUS_LU)
1146 break;
1147 }
1148 if (i == FIBER_LINK_UP_LIMIT) {
1149 DEBUGOUT("Never got a valid link from auto-neg!!!\n");
1150 mac->autoneg_failed = TRUE;
1151 /* AutoNeg failed to achieve a link, so we'll call
1152 * mac->check_for_link. This routine will force the
1153 * link up if we detect a signal. This will allow us to
1154 * communicate with non-autonegotiating link partners.
1155 */
1156 ret_val = mac->ops.check_for_link(hw);
1157 if (ret_val) {
1158 DEBUGOUT("Error while checking for link\n");
1159 return ret_val;
1160 }
1161 mac->autoneg_failed = FALSE;
1162 } else {
1163 mac->autoneg_failed = FALSE;
1164 DEBUGOUT("Valid Link Found\n");
1165 }
1166
1167 return E1000_SUCCESS;
1168 }
1169
1170 /**
1171 * e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes
1172 * @hw: pointer to the HW structure
1173 *
1174 * Configures collision distance and flow control for fiber and serdes
1175 * links. Upon successful setup, poll for link.
1176 **/
1177 s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw)
1178 {
1179 u32 ctrl;
1180 s32 ret_val;
1181
1182 DEBUGFUNC("e1000_setup_fiber_serdes_link_generic");
1183
1184 ctrl = E1000_READ_REG(hw, E1000_CTRL);
1185
1186 /* Take the link out of reset */
1187 ctrl &= ~E1000_CTRL_LRST;
1188
1189 hw->mac.ops.config_collision_dist(hw);
1190
1191 ret_val = e1000_commit_fc_settings_generic(hw);
1192 if (ret_val)
1193 return ret_val;
1194
1195 /* Since auto-negotiation is enabled, take the link out of reset (the
1196 * link will be in reset, because we previously reset the chip). This
1197 * will restart auto-negotiation. If auto-negotiation is successful
1198 * then the link-up status bit will be set and the flow control enable
1199 * bits (RFCE and TFCE) will be set according to their negotiated value.
1200 */
1201 DEBUGOUT("Auto-negotiation enabled\n");
1202
1203 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
1204 E1000_WRITE_FLUSH(hw);
1205 msec_delay(1);
1206
1207 /* For these adapters, the SW definable pin 1 is set when the optics
1208 * detect a signal. If we have a signal, then poll for a "Link-Up"
1209 * indication.
1210 */
1211 if (hw->phy.media_type == e1000_media_type_internal_serdes ||
1212 (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) {
1213 ret_val = e1000_poll_fiber_serdes_link_generic(hw);
1214 } else {
1215 DEBUGOUT("No signal detected\n");
1216 }
1217
1218 return ret_val;
1219 }
1220
1221 /**
1222 * e1000_config_collision_dist_generic - Configure collision distance
1223 * @hw: pointer to the HW structure
1224 *
1225 * Configures the collision distance to the default value and is used
1226 * during link setup.
1227 **/
1228 static void e1000_config_collision_dist_generic(struct e1000_hw *hw)
1229 {
1230 u32 tctl;
1231
1232 DEBUGFUNC("e1000_config_collision_dist_generic");
1233
1234 tctl = E1000_READ_REG(hw, E1000_TCTL);
1235
1236 tctl &= ~E1000_TCTL_COLD;
1237 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
1238
1239 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
1240 E1000_WRITE_FLUSH(hw);
1241 }
1242
1243 /**
1244 * e1000_set_fc_watermarks_generic - Set flow control high/low watermarks
1245 * @hw: pointer to the HW structure
1246 *
1247 * Sets the flow control high/low threshold (watermark) registers. If
1248 * flow control XON frame transmission is enabled, then set XON frame
1249 * transmission as well.
1250 **/
1251 s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw)
1252 {
1253 u32 fcrtl = 0, fcrth = 0;
1254
1255 DEBUGFUNC("e1000_set_fc_watermarks_generic");
1256
1257 /* Set the flow control receive threshold registers. Normally,
1258 * these registers will be set to a default threshold that may be
1259 * adjusted later by the driver's runtime code. However, if the
1260 * ability to transmit pause frames is not enabled, then these
1261 * registers will be set to 0.
1262 */
1263 if (hw->fc.current_mode & e1000_fc_tx_pause) {
1264 /* We need to set up the Receive Threshold high and low water
1265 * marks as well as (optionally) enabling the transmission of
1266 * XON frames.
1267 */
1268 fcrtl = hw->fc.low_water;
1269 if (hw->fc.send_xon)
1270 fcrtl |= E1000_FCRTL_XONE;
1271
1272 fcrth = hw->fc.high_water;
1273 }
1274 E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl);
1275 E1000_WRITE_REG(hw, E1000_FCRTH, fcrth);
1276
1277 return E1000_SUCCESS;
1278 }
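
#if 0
/**
 * Illustrative usage sketch only, not part of the shared code: a driver
 * normally derives the watermarks from its Rx FIFO size before link setup.
 * The values below are hypothetical placeholders.
 **/
static void e1000_fc_watermarks_example(struct e1000_hw *hw)
{
	hw->fc.high_water = 0x2000;		/* hypothetical high threshold */
	hw->fc.low_water = hw->fc.high_water - 8;
	hw->fc.send_xon = TRUE;
	hw->fc.pause_time = 0x0680;		/* hypothetical pause timer */

	e1000_set_fc_watermarks_generic(hw);
}
#endif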
1279
1280 /**
1281 * e1000_force_mac_fc_generic - Force the MAC's flow control settings
1282 * @hw: pointer to the HW structure
1283 *
1284 * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
1285 * device control register to reflect the adapter settings. TFCE and RFCE
1286 * need to be explicitly set by software when a copper PHY is used because
1287 * autonegotiation is managed by the PHY rather than the MAC. Software must
1288 * also configure these bits when link is forced on a fiber connection.
1289 **/
1290 s32 e1000_force_mac_fc_generic(struct e1000_hw *hw)
1291 {
1292 u32 ctrl;
1293
1294 DEBUGFUNC("e1000_force_mac_fc_generic");
1295
1296 ctrl = E1000_READ_REG(hw, E1000_CTRL);
1297
1298 /* Because we didn't get link via the internal auto-negotiation
1299 * mechanism (we either forced link or we got link via PHY
1300 	 * auto-neg), we have to manually enable/disable transmit and
1301 * receive flow control.
1302 *
1303 	 * The switch statement below enables/disables flow control
1304 * according to the "hw->fc.current_mode" parameter.
1305 *
1306 * The possible values of the "fc" parameter are:
1307 * 0: Flow control is completely disabled
1308 * 1: Rx flow control is enabled (we can receive pause
1309 * frames but not send pause frames).
1310 	 *      2:  Tx flow control is enabled (we can send pause frames
1311 	 *          but we do not receive pause frames).
1312 	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
1313 * other: No other values should be possible at this point.
1314 */
1315 DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode);
1316
1317 switch (hw->fc.current_mode) {
1318 case e1000_fc_none:
1319 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
1320 break;
1321 case e1000_fc_rx_pause:
1322 ctrl &= (~E1000_CTRL_TFCE);
1323 ctrl |= E1000_CTRL_RFCE;
1324 break;
1325 case e1000_fc_tx_pause:
1326 ctrl &= (~E1000_CTRL_RFCE);
1327 ctrl |= E1000_CTRL_TFCE;
1328 break;
1329 case e1000_fc_full:
1330 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
1331 break;
1332 default:
1333 DEBUGOUT("Flow control param set incorrectly\n");
1334 return -E1000_ERR_CONFIG;
1335 }
1336
1337 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
1338
1339 return E1000_SUCCESS;
1340 }
1341
1342 /**
1343 * e1000_config_fc_after_link_up_generic - Configures flow control after link
1344 * @hw: pointer to the HW structure
1345 *
1346 * Checks the status of auto-negotiation after link up to ensure that the
1347 * speed and duplex were not forced. If the link needed to be forced, then
1348 * flow control needs to be forced also. If auto-negotiation is enabled
1349 * and did not fail, then we configure flow control based on our link
1350 * partner.
1351 **/
1352 s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
1353 {
1354 struct e1000_mac_info *mac = &hw->mac;
1355 s32 ret_val = E1000_SUCCESS;
1356 u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
1357 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
1358 u16 speed, duplex;
1359
1360 DEBUGFUNC("e1000_config_fc_after_link_up_generic");
1361
1362 /* Check for the case where we have fiber media and auto-neg failed
1363 * so we had to force link. In this case, we need to force the
1364 * configuration of the MAC to match the "fc" parameter.
1365 */
1366 if (mac->autoneg_failed) {
1367 if (hw->phy.media_type == e1000_media_type_fiber ||
1368 hw->phy.media_type == e1000_media_type_internal_serdes)
1369 ret_val = e1000_force_mac_fc_generic(hw);
1370 } else {
1371 if (hw->phy.media_type == e1000_media_type_copper)
1372 ret_val = e1000_force_mac_fc_generic(hw);
1373 }
1374
1375 if (ret_val) {
1376 DEBUGOUT("Error forcing flow control settings\n");
1377 return ret_val;
1378 }
1379
1380 /* Check for the case where we have copper media and auto-neg is
1381 * enabled. In this case, we need to check and see if Auto-Neg
1382 	 * has completed and, if so, how the PHY and link partner have
1383 * flow control configured.
1384 */
1385 if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
1386 /* Read the MII Status Register and check to see if AutoNeg
1387 * has completed. We read this twice because this reg has
1388 * some "sticky" (latched) bits.
1389 */
1390 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
1391 if (ret_val)
1392 return ret_val;
1393 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
1394 if (ret_val)
1395 return ret_val;
1396
1397 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
1398 DEBUGOUT("Copper PHY and Auto Neg has not completed.\n");
1399 return ret_val;
1400 }
1401
1402 /* The AutoNeg process has completed, so we now need to
1403 * read both the Auto Negotiation Advertisement
1404 * Register (Address 4) and the Auto_Negotiation Base
1405 * Page Ability Register (Address 5) to determine how
1406 * flow control was negotiated.
1407 */
1408 ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
1409 &mii_nway_adv_reg);
1410 if (ret_val)
1411 return ret_val;
1412 ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
1413 &mii_nway_lp_ability_reg);
1414 if (ret_val)
1415 return ret_val;
1416
1417 /* Two bits in the Auto Negotiation Advertisement Register
1418 * (Address 4) and two bits in the Auto Negotiation Base
1419 * Page Ability Register (Address 5) determine flow control
1420 * for both the PHY and the link partner. The following
1421 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
1422 * 1999, describes these PAUSE resolution bits and how flow
1423 * control is determined based upon these settings.
1424 * NOTE: DC = Don't Care
1425 *
1426 * LOCAL DEVICE | LINK PARTNER
1427 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
1428 *-------|---------|-------|---------|--------------------
1429 * 0 | 0 | DC | DC | e1000_fc_none
1430 * 0 | 1 | 0 | DC | e1000_fc_none
1431 * 0 | 1 | 1 | 0 | e1000_fc_none
1432 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1433 * 1 | 0 | 0 | DC | e1000_fc_none
1434 * 1 | DC | 1 | DC | e1000_fc_full
1435 * 1 | 1 | 0 | 0 | e1000_fc_none
1436 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1437 *
1438 * Are both PAUSE bits set to 1? If so, this implies
1439 * Symmetric Flow Control is enabled at both ends. The
1440 * ASM_DIR bits are irrelevant per the spec.
1441 *
1442 * For Symmetric Flow Control:
1443 *
1444 * LOCAL DEVICE | LINK PARTNER
1445 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1446 *-------|---------|-------|---------|--------------------
1447 * 1 | DC | 1 | DC | E1000_fc_full
1448 *
1449 */
1450 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1451 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
1452 			/* Now we need to check if the user selected reception
1453 			 * of PAUSE frames only. In this case, we had to advertise
1454 * FULL flow control because we could not advertise Rx
1455 * ONLY. Hence, we must now check to see if we need to
1456 * turn OFF the TRANSMISSION of PAUSE frames.
1457 */
1458 if (hw->fc.requested_mode == e1000_fc_full) {
1459 hw->fc.current_mode = e1000_fc_full;
1460 DEBUGOUT("Flow Control = FULL.\n");
1461 } else {
1462 hw->fc.current_mode = e1000_fc_rx_pause;
1463 DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
1464 }
1465 }
1466 /* For receiving PAUSE frames ONLY.
1467 *
1468 * LOCAL DEVICE | LINK PARTNER
1469 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1470 *-------|---------|-------|---------|--------------------
1471 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1472 */
1473 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1474 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1475 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1476 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1477 hw->fc.current_mode = e1000_fc_tx_pause;
1478 DEBUGOUT("Flow Control = Tx PAUSE frames only.\n");
1479 }
1480 /* For transmitting PAUSE frames ONLY.
1481 *
1482 * LOCAL DEVICE | LINK PARTNER
1483 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1484 *-------|---------|-------|---------|--------------------
1485 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1486 */
1487 else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1488 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1489 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1490 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1491 hw->fc.current_mode = e1000_fc_rx_pause;
1492 DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
1493 } else {
1494 /* Per the IEEE spec, at this point flow control
1495 * should be disabled.
1496 */
1497 hw->fc.current_mode = e1000_fc_none;
1498 DEBUGOUT("Flow Control = NONE.\n");
1499 }
1500
1501 /* Now we need to do one last check... If we auto-
1502 * negotiated to HALF DUPLEX, flow control should not be
1503 * enabled per IEEE 802.3 spec.
1504 */
1505 ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
1506 if (ret_val) {
1507 DEBUGOUT("Error getting link speed and duplex\n");
1508 return ret_val;
1509 }
1510
1511 if (duplex == HALF_DUPLEX)
1512 hw->fc.current_mode = e1000_fc_none;
1513
1514 /* Now we call a subroutine to actually force the MAC
1515 * controller to use the correct flow control settings.
1516 */
1517 ret_val = e1000_force_mac_fc_generic(hw);
1518 if (ret_val) {
1519 DEBUGOUT("Error forcing flow control settings\n");
1520 return ret_val;
1521 }
1522 }
1523
1524 /* Check for the case where we have SerDes media and auto-neg is
1525 * enabled. In this case, we need to check and see if Auto-Neg
1526 	 * has completed and, if so, how the PHY and link partner have
1527 * flow control configured.
1528 */
1529 if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
1530 mac->autoneg) {
1531 /* Read the PCS_LSTS and check to see if AutoNeg
1532 * has completed.
1533 */
1534 pcs_status_reg = E1000_READ_REG(hw, E1000_PCS_LSTAT);
1535
1536 if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
1537 DEBUGOUT("PCS Auto Neg has not completed.\n");
1538 return ret_val;
1539 }
1540
1541 /* The AutoNeg process has completed, so we now need to
1542 * read both the Auto Negotiation Advertisement
1543 * Register (PCS_ANADV) and the Auto_Negotiation Base
1544 * Page Ability Register (PCS_LPAB) to determine how
1545 * flow control was negotiated.
1546 */
1547 pcs_adv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV);
1548 pcs_lp_ability_reg = E1000_READ_REG(hw, E1000_PCS_LPAB);
1549
1550 /* Two bits in the Auto Negotiation Advertisement Register
1551 * (PCS_ANADV) and two bits in the Auto Negotiation Base
1552 * Page Ability Register (PCS_LPAB) determine flow control
1553 * for both the PHY and the link partner. The following
1554 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
1555 * 1999, describes these PAUSE resolution bits and how flow
1556 * control is determined based upon these settings.
1557 * NOTE: DC = Don't Care
1558 *
1559 * LOCAL DEVICE | LINK PARTNER
1560 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
1561 *-------|---------|-------|---------|--------------------
1562 * 0 | 0 | DC | DC | e1000_fc_none
1563 * 0 | 1 | 0 | DC | e1000_fc_none
1564 * 0 | 1 | 1 | 0 | e1000_fc_none
1565 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1566 * 1 | 0 | 0 | DC | e1000_fc_none
1567 * 1 | DC | 1 | DC | e1000_fc_full
1568 * 1 | 1 | 0 | 0 | e1000_fc_none
1569 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1570 *
1571 * Are both PAUSE bits set to 1? If so, this implies
1572 * Symmetric Flow Control is enabled at both ends. The
1573 * ASM_DIR bits are irrelevant per the spec.
1574 *
1575 * For Symmetric Flow Control:
1576 *
1577 * LOCAL DEVICE | LINK PARTNER
1578 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1579 *-------|---------|-------|---------|--------------------
1580 * 1 | DC | 1 | DC | e1000_fc_full
1581 *
1582 */
1583 if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
1584 (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
1585 /* Now we need to check if the user selected Rx ONLY
1586 * pause frames. In this case, we had to advertise
1587 * FULL flow control because we could not advertise Rx
1588 * ONLY. Hence, we must now check to see if we need to
1589 * turn OFF the TRANSMISSION of PAUSE frames.
1590 */
1591 if (hw->fc.requested_mode == e1000_fc_full) {
1592 hw->fc.current_mode = e1000_fc_full;
1593 DEBUGOUT("Flow Control = FULL.\n");
1594 } else {
1595 hw->fc.current_mode = e1000_fc_rx_pause;
1596 DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
1597 }
1598 }
1599 /* For receiving PAUSE frames ONLY, i.e. the link partner honors the
1600 * PAUSE frames we send, but we do not act on received PAUSE:
1601 * LOCAL DEVICE | LINK PARTNER
1602 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1603 *-------|---------|-------|---------|--------------------
1604 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1605 */
1606 else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
1607 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
1608 (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
1609 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
1610 hw->fc.current_mode = e1000_fc_tx_pause;
1611 DEBUGOUT("Flow Control = Tx PAUSE frames only.\n");
1612 }
1613 /* For transmitting PAUSE frames ONLY, i.e. the link partner sends
1614 * PAUSE frames which we honor, but we do not send our own:
1615 * LOCAL DEVICE | LINK PARTNER
1616 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1617 *-------|---------|-------|---------|--------------------
1618 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1619 */
1620 else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
1621 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
1622 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
1623 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
1624 hw->fc.current_mode = e1000_fc_rx_pause;
1625 DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
1626 } else {
1627 /* Per the IEEE spec, at this point flow control
1628 * should be disabled.
1629 */
1630 hw->fc.current_mode = e1000_fc_none;
1631 DEBUGOUT("Flow Control = NONE.\n");
1632 }
1633
1634 /* Now we call a subroutine to actually force the MAC
1635 * controller to use the correct flow control settings.
1636 */
1637 pcs_ctrl_reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
1638 pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1639 E1000_WRITE_REG(hw, E1000_PCS_LCTL, pcs_ctrl_reg);
1640
1641 ret_val = e1000_force_mac_fc_generic(hw);
1642 if (ret_val) {
1643 DEBUGOUT("Error forcing flow control settings\n");
1644 return ret_val;
1645 }
1646 }
1647
1648 return E1000_SUCCESS;
1649 }
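
/* Note: the resolved hw->fc.current_mode is applied to the MAC by
 * e1000_force_mac_fc_generic(), which (roughly speaking) maps the mode onto
 * the CTRL register's flow-control enables: rx_pause sets RFCE only,
 * tx_pause sets TFCE only, full sets both, and none clears both.
 */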
1650
1651 /**
1652 * e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex
1653 * @hw: pointer to the HW structure
1654 * @speed: stores the current speed
1655 * @duplex: stores the current duplex
1656 *
1657 * Read the status register for the current speed/duplex and store the current
1658 * speed and duplex for copper connections.
1659 **/
1660 s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
1661 u16 *duplex)
1662 {
1663 u32 status;
1664
1665 DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic");
1666
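/* The STATUS register latches the resolved link speed (SPEED_1000 or
 * SPEED_100 bits; neither set means 10 Mb/s) and the duplex (FD bit).
 */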
1667 status = E1000_READ_REG(hw, E1000_STATUS);
1668 if (status & E1000_STATUS_SPEED_1000) {
1669 *speed = SPEED_1000;
1670 DEBUGOUT("1000 Mbs, ");
1671 } else if (status & E1000_STATUS_SPEED_100) {
1672 *speed = SPEED_100;
1673 DEBUGOUT("100 Mbs, ");
1674 } else {
1675 *speed = SPEED_10;
1676 DEBUGOUT("10 Mbs, ");
1677 }
1678
1679 if (status & E1000_STATUS_FD) {
1680 *duplex = FULL_DUPLEX;
1681 DEBUGOUT("Full Duplex\n");
1682 } else {
1683 *duplex = HALF_DUPLEX;
1684 DEBUGOUT("Half Duplex\n");
1685 }
1686
1687 return E1000_SUCCESS;
1688 }
1689
1690 /**
1691 * e1000_get_speed_and_duplex_fiber_serdes_generic - Retrieve current speed/duplex
1692 * @hw: pointer to the HW structure
1693 * @speed: stores the current speed
1694 * @duplex: stores the current duplex
1695 *
1696 * Sets the speed and duplex to gigabit full duplex (the only possible option)
1697 * for fiber/serdes links.
1698 **/
1699 s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw E1000_UNUSEDARG *hw,
1700 u16 *speed, u16 *duplex)
1701 {
1702 DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic");
1703
1704 *speed = SPEED_1000;
1705 *duplex = FULL_DUPLEX;
1706
1707 return E1000_SUCCESS;
1708 }
1709
1710 /**
1711 * e1000_get_auto_rd_done_generic - Check for auto read completion
1712 * @hw: pointer to the HW structure
1713 *
1714 * Check EEPROM for Auto Read done bit.
1715 **/
1716 s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw)
1717 {
1718 s32 i = 0;
1719
1720 DEBUGFUNC("e1000_get_auto_rd_done_generic");
1721
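/* After reset the hardware auto-loads its configuration from the NVM and
 * sets EECD.AUTO_RD when that auto-read completes; poll for it for up to
 * AUTO_READ_DONE_TIMEOUT milliseconds (1 ms per iteration).
 */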
1722 while (i < AUTO_READ_DONE_TIMEOUT) {
1723 if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD)
1724 break;
1725 msec_delay(1);
1726 i++;
1727 }
1728
1729 if (i == AUTO_READ_DONE_TIMEOUT) {
1730 DEBUGOUT("Auto read by HW from NVM has not completed.\n");
1731 return -E1000_ERR_RESET;
1732 }
1733
1734 return E1000_SUCCESS;
1735 }
1736
1737 /**
1738 * e1000_valid_led_default_generic - Verify a valid default LED config
1739 * @hw: pointer to the HW structure
1740 * @data: pointer to where the default LED configuration read from the NVM (EEPROM) is stored
1741 *
1742 * Read the EEPROM for the current default LED configuration. If the
1743 * LED configuration is not valid, set to a valid LED configuration.
1744 **/
1745 s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data)
1746 {
1747 s32 ret_val;
1748
1749 DEBUGFUNC("e1000_valid_led_default_generic");
1750
1751 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1752 if (ret_val) {
1753 DEBUGOUT("NVM Read Error\n");
1754 return ret_val;
1755 }
1756
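/* An ID LED word of all zeros or all ones is treated as unprogrammed
 * (reserved), so fall back to the driver's ID_LED_DEFAULT value.
 */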
1757 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
1758 *data = ID_LED_DEFAULT;
1759
1760 return E1000_SUCCESS;
1761 }
1762
1763 /**
1764 * e1000_id_led_init_generic - Initialize ID LED settings
1765 * @hw: pointer to the HW structure
1766 *
 * Read the ID LED configuration from the NVM and store the LEDCTL values
 * used by the generic led_on (ledctl_mode2) and led_off (ledctl_mode1) code.
1767 **/
1768 s32 e1000_id_led_init_generic(struct e1000_hw *hw)
1769 {
1770 struct e1000_mac_info *mac = &hw->mac;
1771 s32 ret_val;
1772 const u32 ledctl_mask = 0x000000FF;
1773 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
1774 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
1775 u16 data, i, temp;
1776 const u16 led_mask = 0x0F;
1777
1778 DEBUGFUNC("e1000_id_led_init_generic");
1779
1780 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
1781 if (ret_val)
1782 return ret_val;
1783
1784 mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
1785 mac->ledctl_mode1 = mac->ledctl_default;
1786 mac->ledctl_mode2 = mac->ledctl_default;
1787
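/* The NVM ID LED word carries a 4-bit setting per LED (hence the i << 2
 * shift with led_mask), while LEDCTL uses an 8-bit field per LED (hence
 * the i << 3 shift with ledctl_mask when building mode1/mode2).
 */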
1788 for (i = 0; i < 4; i++) {
1789 temp = (data >> (i << 2)) & led_mask;
1790 switch (temp) {
1791 case ID_LED_ON1_DEF2:
1792 case ID_LED_ON1_ON2:
1793 case ID_LED_ON1_OFF2:
1794 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1795 mac->ledctl_mode1 |= ledctl_on << (i << 3);
1796 break;
1797 case ID_LED_OFF1_DEF2:
1798 case ID_LED_OFF1_ON2:
1799 case ID_LED_OFF1_OFF2:
1800 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1801 mac->ledctl_mode1 |= ledctl_off << (i << 3);
1802 break;
1803 default:
1804 /* Do nothing */
1805 break;
1806 }
1807 switch (temp) {
1808 case ID_LED_DEF1_ON2:
1809 case ID_LED_ON1_ON2:
1810 case ID_LED_OFF1_ON2:
1811 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1812 mac->ledctl_mode2 |= ledctl_on << (i << 3);
1813 break;
1814 case ID_LED_DEF1_OFF2:
1815 case ID_LED_ON1_OFF2:
1816 case ID_LED_OFF1_OFF2:
1817 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1818 mac->ledctl_mode2 |= ledctl_off << (i << 3);
1819 break;
1820 default:
1821 /* Do nothing */
1822 break;
1823 }
1824 }
1825
1826 return E1000_SUCCESS;
1827 }
1828
1829 /**
1830 * e1000_setup_led_generic - Configures SW controllable LED
1831 * @hw: pointer to the HW structure
1832 *
1833 * This prepares the SW controllable LED for use and saves the current state
1834 * of the LED so it can be later restored.
1835 **/
1836 s32 e1000_setup_led_generic(struct e1000_hw *hw)
1837 {
1838 u32 ledctl;
1839
1840 DEBUGFUNC("e1000_setup_led_generic");
1841
1842 if (hw->mac.ops.setup_led != e1000_setup_led_generic)
1843 return -E1000_ERR_CONFIG;
1844
1845 if (hw->phy.media_type == e1000_media_type_fiber) {
1846 ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
1847 hw->mac.ledctl_default = ledctl;
1848 /* Turn off LED0 */
1849 ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK |
1850 E1000_LEDCTL_LED0_MODE_MASK);
1851 ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
1852 E1000_LEDCTL_LED0_MODE_SHIFT);
1853 E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
1854 } else if (hw->phy.media_type == e1000_media_type_copper) {
1855 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
1856 }
1857
1858 return E1000_SUCCESS;
1859 }
1860
1861 /**
1862 * e1000_cleanup_led_generic - Set LED config to default operation
1863 * @hw: pointer to the HW structure
1864 *
1865 * Remove the current LED configuration and set the LED configuration
1866 * to the default value, saved from the EEPROM.
1867 **/
1868 s32 e1000_cleanup_led_generic(struct e1000_hw *hw)
1869 {
1870 DEBUGFUNC("e1000_cleanup_led_generic");
1871
1872 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
1873 return E1000_SUCCESS;
1874 }
1875
1876 /**
1877 * e1000_blink_led_generic - Blink LED
1878 * @hw: pointer to the HW structure
1879 *
1880 * Blink the LEDs which are set to be on.
1881 **/
1882 s32 e1000_blink_led_generic(struct e1000_hw *hw)
1883 {
1884 u32 ledctl_blink = 0;
1885 u32 i;
1886
1887 DEBUGFUNC("e1000_blink_led_generic");
1888
1889 if (hw->phy.media_type == e1000_media_type_fiber) {
1890 /* always blink LED0 for PCI-E fiber */
1891 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1892 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1893 } else {
1894 /* Set the blink bit for each LED that's "on" (0x0E)
1895 * (or "off" if inverted) in ledctl_mode2. The blink
1896 * logic in hardware only works when mode is set to "on"
1897 * so it must be changed accordingly when the mode is
1898 * "off" and inverted.
1899 */
1900 ledctl_blink = hw->mac.ledctl_mode2;
1901 for (i = 0; i < 32; i += 8) {
1902 u32 mode = (hw->mac.ledctl_mode2 >> i) &
1903 E1000_LEDCTL_LED0_MODE_MASK;
1904 u32 led_default = hw->mac.ledctl_default >> i;
1905
1906 if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
1907 (mode == E1000_LEDCTL_MODE_LED_ON)) ||
1908 ((led_default & E1000_LEDCTL_LED0_IVRT) &&
1909 (mode == E1000_LEDCTL_MODE_LED_OFF))) {
1910 ledctl_blink &=
1911 ~(E1000_LEDCTL_LED0_MODE_MASK << i);
1912 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
1913 E1000_LEDCTL_MODE_LED_ON) << i;
1914 }
1915 }
1916 }
1917
1918 E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink);
1919
1920 return E1000_SUCCESS;
1921 }
1922
1923 /**
1924 * e1000_led_on_generic - Turn LED on
1925 * @hw: pointer to the HW structure
1926 *
1927 * Turn LED on.
1928 **/
1929 s32 e1000_led_on_generic(struct e1000_hw *hw)
1930 {
1931 u32 ctrl;
1932
1933 DEBUGFUNC("e1000_led_on_generic");
1934
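/* On fiber adapters the LED is typically wired to software-definable pin 0
 * (active low), so it is driven directly through CTRL by configuring
 * SWDPIO0 as an output and pulling SWDPIN0 low; copper adapters use the
 * LEDCTL "mode 2" (LED on) configuration instead.
 */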
1935 switch (hw->phy.media_type) {
1936 case e1000_media_type_fiber:
1937 ctrl = E1000_READ_REG(hw, E1000_CTRL);
1938 ctrl &= ~E1000_CTRL_SWDPIN0;
1939 ctrl |= E1000_CTRL_SWDPIO0;
1940 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
1941 break;
1942 case e1000_media_type_copper:
1943 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
1944 break;
1945 default:
1946 break;
1947 }
1948
1949 return E1000_SUCCESS;
1950 }
1951
1952 /**
1953 * e1000_led_off_generic - Turn LED off
1954 * @hw: pointer to the HW structure
1955 *
1956 * Turn LED off.
1957 **/
1958 s32 e1000_led_off_generic(struct e1000_hw *hw)
1959 {
1960 u32 ctrl;
1961
1962 DEBUGFUNC("e1000_led_off_generic");
1963
1964 switch (hw->phy.media_type) {
1965 case e1000_media_type_fiber:
1966 ctrl = E1000_READ_REG(hw, E1000_CTRL);
1967 ctrl |= E1000_CTRL_SWDPIN0;
1968 ctrl |= E1000_CTRL_SWDPIO0;
1969 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
1970 break;
1971 case e1000_media_type_copper:
1972 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
1973 break;
1974 default:
1975 break;
1976 }
1977
1978 return E1000_SUCCESS;
1979 }
1980
1981 /**
1982 * e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities
1983 * @hw: pointer to the HW structure
1984 * @no_snoop: bitmap of events for which snooping is to be disabled
1985 *
1986 * Set the PCI-Express GCR register so that snooping is disabled for the events set in 'no_snoop'.
1987 **/
1988 void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop)
1989 {
1990 u32 gcr;
1991
1992 DEBUGFUNC("e1000_set_pcie_no_snoop_generic");
1993
1994 if (hw->bus.type != e1000_bus_type_pci_express)
1995 return;
1996
1997 if (no_snoop) {
1998 gcr = E1000_READ_REG(hw, E1000_GCR);
1999 gcr &= ~(PCIE_NO_SNOOP_ALL);
2000 gcr |= no_snoop;
2001 E1000_WRITE_REG(hw, E1000_GCR, gcr);
2002 }
2003 }
2004
2005 /**
2006 * e1000_disable_pcie_master_generic - Disables PCI-express master access
2007 * @hw: pointer to the HW structure
2008 *
2009 * Returns E1000_SUCCESS if successful, else returns -10
2010 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if setting the master disable bit
2011 * did not cause pending master requests to be disabled in time.
2012 *
2013 * Disables PCI-Express master access and verifies there are no pending
2014 * requests.
2015 **/
2016 s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw)
2017 {
2018 u32 ctrl;
2019 s32 timeout = MASTER_DISABLE_TIMEOUT;
2020
2021 DEBUGFUNC("e1000_disable_pcie_master_generic");
2022
2023 if (hw->bus.type != e1000_bus_type_pci_express)
2024 return E1000_SUCCESS;
2025
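/* Set GIO_MASTER_DISABLE, then poll STATUS.GIO_MASTER_ENABLE until all
 * outstanding master (DMA) requests have completed, giving up after
 * MASTER_DISABLE_TIMEOUT iterations of 100 us (or if the device has been
 * surprise-removed).
 */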
2026 ctrl = E1000_READ_REG(hw, E1000_CTRL);
2027 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
2028 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
2029
2030 while (timeout) {
2031 if (!(E1000_READ_REG(hw, E1000_STATUS) &
2032 E1000_STATUS_GIO_MASTER_ENABLE) ||
2033 E1000_REMOVED(hw->hw_addr))
2034 break;
2035 usec_delay(100);
2036 timeout--;
2037 }
2038
2039 if (!timeout) {
2040 DEBUGOUT("Master requests are pending.\n");
2041 return -E1000_ERR_MASTER_REQUESTS_PENDING;
2042 }
2043
2044 return E1000_SUCCESS;
2045 }
2046
2047 /**
2048 * e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing
2049 * @hw: pointer to the HW structure
2050 *
2051 * Reset the Adaptive Interframe Spacing throttle to default values.
2052 **/
2053 void e1000_reset_adaptive_generic(struct e1000_hw *hw)
2054 {
2055 struct e1000_mac_info *mac = &hw->mac;
2056
2057 DEBUGFUNC("e1000_reset_adaptive_generic");
2058
2059 if (!mac->adaptive_ifs) {
2060 DEBUGOUT("Not in Adaptive IFS mode!\n");
2061 return;
2062 }
2063
2064 mac->current_ifs_val = 0;
2065 mac->ifs_min_val = IFS_MIN;
2066 mac->ifs_max_val = IFS_MAX;
2067 mac->ifs_step_size = IFS_STEP;
2068 mac->ifs_ratio = IFS_RATIO;
2069
2070 mac->in_ifs_mode = FALSE;
2071 E1000_WRITE_REG(hw, E1000_AIT, 0);
2072 }
2073
2074 /**
2075 * e1000_update_adaptive_generic - Update Adaptive Interframe Spacing
2076 * @hw: pointer to the HW structure
2077 *
2078 * Update the Adaptive Interframe Spacing Throttle value based on the
2079 * time between transmitted packets and time between collisions.
2080 **/
2081 void e1000_update_adaptive_generic(struct e1000_hw *hw)
2082 {
2083 struct e1000_mac_info *mac = &hw->mac;
2084
2085 DEBUGFUNC("e1000_update_adaptive_generic");
2086
2087 if (!mac->adaptive_ifs) {
2088 DEBUGOUT("Not in Adaptive IFS mode!\n");
2089 return;
2090 }
2091
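/* Heuristic: when collisions are high relative to transmitted packets
 * (collision_delta * ifs_ratio > tx_packet_delta), ramp AIT from
 * ifs_min_val in ifs_step_size increments up to ifs_max_val; otherwise
 * fall back to an AIT value of zero.
 */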
2092 if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
2093 if (mac->tx_packet_delta > MIN_NUM_XMITS) {
2094 mac->in_ifs_mode = TRUE;
2095 if (mac->current_ifs_val < mac->ifs_max_val) {
2096 if (!mac->current_ifs_val)
2097 mac->current_ifs_val = mac->ifs_min_val;
2098 else
2099 mac->current_ifs_val +=
2100 mac->ifs_step_size;
2101 E1000_WRITE_REG(hw, E1000_AIT,
2102 mac->current_ifs_val);
2103 }
2104 }
2105 } else {
2106 if (mac->in_ifs_mode &&
2107 (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
2108 mac->current_ifs_val = 0;
2109 mac->in_ifs_mode = FALSE;
2110 E1000_WRITE_REG(hw, E1000_AIT, 0);
2111 }
2112 }
2113 }
2114
2115 /**
2116 * e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings
2117 * @hw: pointer to the HW structure
2118 *
2119 * Verify that, when auto-negotiation is not used, MDI/MDI-X is not left in
2120 * an automatic or invalid state; if it is, force MDI mode and return an error.
2121 **/
2122 static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw)
2123 {
2124 DEBUGFUNC("e1000_validate_mdi_setting_generic");
2125
2126 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
2127 DEBUGOUT("Invalid MDI setting detected\n");
2128 hw->phy.mdix = 1;
2129 return -E1000_ERR_CONFIG;
2130 }
2131
2132 return E1000_SUCCESS;
2133 }
2134
2135 /**
2136 * e1000_validate_mdi_setting_crossover_generic - Verify MDI/MDIx settings
2137 * @hw: pointer to the HW structure
2138 *
2139 * Validate the MDI/MDIx setting, allowing for auto-crossover during forced
2140 * operation.
2141 **/
2142 s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw E1000_UNUSEDARG *hw)
2143 {
2144 DEBUGFUNC("e1000_validate_mdi_setting_crossover_generic");
2145
2146 return E1000_SUCCESS;
2147 }
2148
2149 /**
2150 * e1000_write_8bit_ctrl_reg_generic - Write an 8-bit CTRL register
2151 * @hw: pointer to the HW structure
2152 * @reg: 32-bit register offset such as E1000_SCTL
2153 * @offset: register offset to write to
2154 * @data: data to write at register offset
2155 *
2156 * Writes an address/data control type register. There are several such
2157 * registers, and they all have the format (address << 8) | data; bit 31
2158 * is polled for completion.
2159 **/
2160 s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
2161 u32 offset, u8 data)
2162 {
2163 u32 i, regvalue = 0;
2164
2165 DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic");
2166
2167 /* Set up the address and data */
2168 regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
2169 E1000_WRITE_REG(hw, reg, regvalue);
2170
2171 /* Poll the ready bit to see if the write completed */
2172 for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
2173 usec_delay(5);
2174 regvalue = E1000_READ_REG(hw, reg);
2175 if (regvalue & E1000_GEN_CTL_READY)
2176 break;
2177 }
2178 if (!(regvalue & E1000_GEN_CTL_READY)) {
2179 DEBUGOUT1("Reg %08x did not indicate ready\n", reg);
2180 return -E1000_ERR_PHY;
2181 }
2182
2183 return E1000_SUCCESS;
2184 }
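
/* Illustrative example with hypothetical values: writing data 0x12 to
 * offset 0x3 of a control register such as E1000_SCTL amounts to
 *
 *	E1000_WRITE_REG(hw, E1000_SCTL, (0x3 << E1000_GEN_CTL_ADDRESS_SHIFT) | 0x12);
 *
 * followed by polling until the hardware sets E1000_GEN_CTL_READY (bit 31).
 */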
2185
2186 /**
2187 * e1000_get_hw_semaphore - Acquire hardware semaphore
2188 * @hw: pointer to the HW structure
2189 *
2190 * Acquire the HW semaphore to access the PHY or NVM
2191 **/
2192 s32 e1000_get_hw_semaphore(struct e1000_hw *hw)
2193 {
2194 u32 swsm;
2195 s32 fw_timeout = hw->nvm.word_size + 1;
2196 s32 sw_timeout = hw->nvm.word_size + 1;
2197 s32 i = 0;
2198
2199 DEBUGFUNC("e1000_get_hw_semaphore");
2200
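/* Acquisition is two-staged: first take the inter-port software semaphore
 * (SWSM.SMBI, which the hardware grants to the first reader that sees it
 * clear), then set SWSM.SWESMBI to lock out firmware and read it back to
 * confirm the bit latched.
 */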
2201 /* _82571 */
2202 /* If we have timed out 3 times trying to acquire
2203 * the inter-port SMBI semaphore, there is old code
2204 * operating on the other port, and it is not
2205 * releasing SMBI. Modify the number of times that
2206 * we try for the semaphore to interwork with this
2207 * older code.
2208 */
2209 if (hw->dev_spec._82571.smb_counter > 2)
2210 sw_timeout = 1;
2211
2212
2213 /* Get the SW semaphore */
2214 while (i < sw_timeout) {
2215 swsm = E1000_READ_REG(hw, E1000_SWSM);
2216 if (!(swsm & E1000_SWSM_SMBI))
2217 break;
2218
2219 usec_delay(50);
2220 i++;
2221 }
2222
2223 if (i == sw_timeout) {
2224 DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
2225 hw->dev_spec._82571.smb_counter++;
2226 }
2227
2228 /* In rare circumstances, the SW semaphore may already be held
2229 * unintentionally. Clear the semaphore once before giving up.
2230 */
2231 if (hw->dev_spec._82575.clear_semaphore_once) {
2232 hw->dev_spec._82575.clear_semaphore_once = FALSE;
2233 e1000_put_hw_semaphore(hw);
2234 for (i = 0; i < fw_timeout; i++) {
2235 swsm = E1000_READ_REG(hw, E1000_SWSM);
2236 if (!(swsm & E1000_SWSM_SMBI))
2237 break;
2238
2239 usec_delay(50);
2240 }
2241 }
2242
2243 /* Get the FW semaphore. */
2244 for (i = 0; i < fw_timeout; i++) {
2245 swsm = E1000_READ_REG(hw, E1000_SWSM);
2246 E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
2247
2248 /* Semaphore acquired if bit latched */
2249 if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
2250 break;
2251
2252 usec_delay(50);
2253 }
2254
2255 if (i == fw_timeout) {
2256 /* Release semaphores */
2257 e1000_put_hw_semaphore(hw);
2258 DEBUGOUT("Driver can't access the NVM\n");
2259 return -E1000_ERR_NVM;
2260 }
2261
2262 return E1000_SUCCESS;
2263 }
2264
2265 /**
2266 * e1000_put_hw_semaphore - Release hardware semaphore
2267 * @hw: pointer to the HW structure
2268 *
2269 * Release hardware semaphore used to access the PHY or NVM
2270 **/
2271 void e1000_put_hw_semaphore(struct e1000_hw *hw)
2272 {
2273 u32 swsm;
2274
2275 DEBUGFUNC("e1000_put_hw_semaphore");
2276
2277 swsm = E1000_READ_REG(hw, E1000_SWSM);
2278
2279 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
2280
2281 E1000_WRITE_REG(hw, E1000_SWSM, swsm);
2282 }
2283
2284
2285 /**
2286 * e1000_acquire_swfw_sync - Acquire SW/FW semaphore
2287 * @hw: pointer to the HW structure
2288 * @mask: specifies which semaphore to acquire
2289 *
2290 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
2291 * will also specify which port we're acquiring the lock for.
2292 **/
2293 s32
2294 e1000_acquire_swfw_sync(struct e1000_hw *hw, u16 mask)
2295 {
2296 u32 swfw_sync;
2297 u32 swmask = mask;
2298 u32 fwmask = mask << 16;
2299 s32 ret_val = E1000_SUCCESS;
2300 s32 i = 0, timeout = 200;
2301
2302 DEBUGFUNC("e1000_acquire_swfw_sync");
2303 ASSERT_NO_LOCKS();
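/* SW_FW_SYNC carries software ownership flags in its low 16 bits and the
 * corresponding firmware flags shifted up by 16 (hence fwmask), so the
 * resource is free only when neither swmask nor fwmask is set.
 */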
2304 while (i < timeout) {
2305 if (e1000_get_hw_semaphore(hw)) {
2306 ret_val = -E1000_ERR_SWFW_SYNC;
2307 goto out;
2308 }
2309
2310 swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
2311 if (!(swfw_sync & (fwmask | swmask)))
2312 break;
2313
2314 /*
2315 * Firmware currently using resource (fwmask)
2316 * or other software thread using resource (swmask)
2317 */
2318 e1000_put_hw_semaphore(hw);
2319 msec_delay_irq(5);
2320 i++;
2321 }
2322
2323 if (i == timeout) {
2324 DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
2325 ret_val = -E1000_ERR_SWFW_SYNC;
2326 goto out;
2327 }
2328
2329 swfw_sync |= swmask;
2330 E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
2331
2332 e1000_put_hw_semaphore(hw);
2333
2334 out:
2335 return ret_val;
2336 }
2337
2338 /**
2339 * e1000_release_swfw_sync - Release SW/FW semaphore
2340 * @hw: pointer to the HW structure
2341 * @mask: specifies which semaphore to release
2342 *
2343 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
2344 * will also specify which port we're releasing the lock for.
2345 **/
2346 void
2347 e1000_release_swfw_sync(struct e1000_hw *hw, u16 mask)
2348 {
2349 u32 swfw_sync;
2350
2351 DEBUGFUNC("e1000_release_swfw_sync");
2352
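/* Take the HW semaphore (retrying until granted), clear this port's
 * software ownership bits in SW_FW_SYNC, then drop the HW semaphore.
 */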
2353 while (e1000_get_hw_semaphore(hw) != E1000_SUCCESS)
2354 ; /* Empty */
2355
2356 swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
2357 swfw_sync &= ~mask;
2358 E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
2359
2360 e1000_put_hw_semaphore(hw);
2361 }
2362
2363