Line data Source code
1 : /* $OpenBSD: ixgbe.c,v 1.23 2016/12/02 15:22:57 mikeb Exp $ */
2 :
3 : /******************************************************************************
4 :
5 : Copyright (c) 2001-2015, Intel Corporation
6 : All rights reserved.
7 :
8 : Redistribution and use in source and binary forms, with or without
9 : modification, are permitted provided that the following conditions are met:
10 :
11 : 1. Redistributions of source code must retain the above copyright notice,
12 : this list of conditions and the following disclaimer.
13 :
14 : 2. Redistributions in binary form must reproduce the above copyright
15 : notice, this list of conditions and the following disclaimer in the
16 : documentation and/or other materials provided with the distribution.
17 :
18 : 3. Neither the name of the Intel Corporation nor the names of its
19 : contributors may be used to endorse or promote products derived from
20 : this software without specific prior written permission.
21 :
22 : THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 : AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 : IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 : ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 : LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 : CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 : SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 : INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 : CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 : ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 : POSSIBILITY OF SUCH DAMAGE.
33 :
34 : ******************************************************************************/
35 : /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_common.c 299200 2016-05-06 22:54:56Z pfg $*/
36 : /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_mbx.c 299200 2016-05-06 22:54:56Z pfg $*/
37 :
38 : #include <dev/pci/ixgbe.h>
39 :
40 : #ifdef __sparc64__
41 : #include <dev/ofw/openfirm.h>
42 : #endif
43 :
44 : void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw,
45 : uint16_t link_status);
46 :
47 : int32_t ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
48 : int32_t ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
49 : void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
50 : int32_t ixgbe_ready_eeprom(struct ixgbe_hw *hw);
51 : void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
52 : void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, uint16_t data,
53 : uint16_t count);
54 : uint16_t ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, uint16_t count);
55 : void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec);
56 : void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec);
57 : void ixgbe_release_eeprom(struct ixgbe_hw *hw);
58 :
59 : int32_t ixgbe_mta_vector(struct ixgbe_hw *hw, uint8_t *mc_addr);
60 : int32_t ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
61 : int32_t ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
62 : int32_t ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
63 : bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
64 : int32_t ixgbe_negotiate_fc(struct ixgbe_hw *hw, uint32_t adv_reg,
65 : uint32_t lp_reg, uint32_t adv_sym, uint32_t adv_asm,
66 : uint32_t lp_sym, uint32_t lp_asm);
67 :
68 : int32_t prot_autoc_read_generic(struct ixgbe_hw *, bool *, uint32_t *);
69 : int32_t prot_autoc_write_generic(struct ixgbe_hw *, uint32_t, bool);
70 :
71 : int32_t ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, uint32_t vlan);
72 :
73 : /* MBX */
74 : int32_t ixgbe_poll_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id);
75 : int32_t ixgbe_poll_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id);
76 : uint32_t ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw);
77 : int32_t ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, uint32_t mask,
78 : int32_t index);
79 : int32_t ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, uint16_t vf_number);
80 : int32_t ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, uint16_t vf_number);
81 : int32_t ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, uint16_t vf_number);
82 : int32_t ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, uint16_t vf_number);
83 : int32_t ixgbe_write_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
84 : uint16_t vf_number);
85 : int32_t ixgbe_read_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
86 : uint16_t vf_number);
87 :
88 :
89 : /**
90 : * ixgbe_init_ops_generic - Inits function ptrs
91 : * @hw: pointer to the hardware structure
92 : *
93 : * Initialize the function pointers.
94 : **/
95 0 : int32_t ixgbe_init_ops_generic(struct ixgbe_hw *hw)
96 : {
97 0 : struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
98 0 : struct ixgbe_mac_info *mac = &hw->mac;
99 0 : uint32_t eec = IXGBE_READ_REG(hw, IXGBE_EEC);
100 :
101 : DEBUGFUNC("ixgbe_init_ops_generic");
102 :
103 : /* EEPROM */
104 0 : eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
105 : /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
106 0 : if (eec & IXGBE_EEC_PRES)
107 0 : eeprom->ops.read = ixgbe_read_eerd_generic;
108 : else
109 0 : eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
110 0 : eeprom->ops.write = ixgbe_write_eeprom_generic;
111 0 : eeprom->ops.validate_checksum =
112 : ixgbe_validate_eeprom_checksum_generic;
113 0 : eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
114 0 : eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;
115 :
116 : /* MAC */
117 0 : mac->ops.init_hw = ixgbe_init_hw_generic;
118 0 : mac->ops.reset_hw = NULL;
119 0 : mac->ops.start_hw = ixgbe_start_hw_generic;
120 0 : mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
121 0 : mac->ops.get_media_type = NULL;
122 0 : mac->ops.get_supported_physical_layer = NULL;
123 0 : mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
124 0 : mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
125 0 : mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
126 0 : mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
127 0 : mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
128 0 : mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
129 0 : mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
130 0 : mac->ops.prot_autoc_read = prot_autoc_read_generic;
131 0 : mac->ops.prot_autoc_write = prot_autoc_write_generic;
132 :
133 : /* LEDs */
134 0 : mac->ops.led_on = ixgbe_led_on_generic;
135 0 : mac->ops.led_off = ixgbe_led_off_generic;
136 0 : mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
137 0 : mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
138 :
139 : /* RAR, Multicast, VLAN */
140 0 : mac->ops.set_rar = ixgbe_set_rar_generic;
141 0 : mac->ops.clear_rar = ixgbe_clear_rar_generic;
142 0 : mac->ops.insert_mac_addr = NULL;
143 0 : mac->ops.set_vmdq = NULL;
144 0 : mac->ops.clear_vmdq = NULL;
145 0 : mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
146 0 : mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
147 0 : mac->ops.enable_mc = ixgbe_enable_mc_generic;
148 0 : mac->ops.disable_mc = ixgbe_disable_mc_generic;
149 0 : mac->ops.clear_vfta = NULL;
150 0 : mac->ops.set_vfta = NULL;
151 0 : mac->ops.init_uta_tables = NULL;
152 0 : mac->ops.enable_rx = ixgbe_enable_rx_generic;
153 0 : mac->ops.disable_rx = ixgbe_disable_rx_generic;
154 :
155 : /* Flow Control */
156 0 : mac->ops.fc_enable = ixgbe_fc_enable_generic;
157 0 : mac->ops.setup_fc = ixgbe_setup_fc_generic;
158 :
159 : /* Link */
160 0 : mac->ops.get_link_capabilities = NULL;
161 0 : mac->ops.setup_link = NULL;
162 0 : mac->ops.check_link = NULL;
163 :
164 0 : return IXGBE_SUCCESS;
165 : }
166 :
167 : /**
168 : * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
169 : * of flow control
170 : * @hw: pointer to hardware structure
171 : *
172 : * This function returns TRUE if the device supports flow control
173 : * autonegotiation, and FALSE if it does not.
174 : *
175 : **/
176 0 : bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
177 : {
178 : bool supported = FALSE;
179 0 : ixgbe_link_speed speed;
180 0 : bool link_up;
181 :
182 : DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
183 :
184 0 : switch (hw->phy.media_type) {
185 : case ixgbe_media_type_fiber_fixed:
186 : case ixgbe_media_type_fiber_qsfp:
187 : case ixgbe_media_type_fiber:
188 0 : hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
189 : /* if link is down, assume supported */
190 0 : if (link_up)
191 0 : supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
192 : TRUE : FALSE;
193 : else
194 : supported = TRUE;
195 : break;
196 : case ixgbe_media_type_backplane:
197 : supported = TRUE;
198 0 : break;
199 : case ixgbe_media_type_copper:
200 : /* only some copper devices support flow control autoneg */
201 0 : switch (hw->device_id) {
202 : case IXGBE_DEV_ID_82599_T3_LOM:
203 : case IXGBE_DEV_ID_X540T:
204 : case IXGBE_DEV_ID_X540T1:
205 : case IXGBE_DEV_ID_X540_BYPASS:
206 : case IXGBE_DEV_ID_X550T:
207 : case IXGBE_DEV_ID_X550T1:
208 : case IXGBE_DEV_ID_X550EM_X_10G_T:
209 : supported = TRUE;
210 0 : break;
211 : default:
212 : supported = FALSE;
213 0 : }
214 : default:
215 : break;
216 : }
217 :
218 : if (!supported) {
219 : ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
220 : "Device %x does not support flow control autoneg",
221 : hw->device_id);
222 : }
223 :
224 0 : return supported;
225 0 : }
226 :
227 : /**
228 : * ixgbe_setup_fc_generic - Set up flow control
229 : * @hw: pointer to hardware structure
230 : *
231 : * Called at init time to set up flow control.
232 : **/
233 0 : int32_t ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
234 : {
235 : int32_t ret_val = IXGBE_SUCCESS;
236 0 : uint32_t reg = 0, reg_bp = 0;
237 0 : uint16_t reg_cu = 0;
238 0 : bool locked = FALSE;
239 :
240 : DEBUGFUNC("ixgbe_setup_fc");
241 :
242 : /* Validate the requested mode */
243 0 : if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
244 : ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
245 : "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
246 : ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
247 0 : goto out;
248 : }
249 :
250 : /*
251 : * 10gig parts do not have a word in the EEPROM to determine the
252 : * default flow control setting, so we explicitly set it to full.
253 : */
254 0 : if (hw->fc.requested_mode == ixgbe_fc_default)
255 0 : hw->fc.requested_mode = ixgbe_fc_full;
256 :
257 : /*
258 : * Set up the 1G and 10G flow control advertisement registers so the
259 : * HW will be able to do fc autoneg once the cable is plugged in. If
260 : * we link at 10G, the 1G advertisement is harmless and vice versa.
261 : */
262 0 : switch (hw->phy.media_type) {
263 : case ixgbe_media_type_backplane:
264 : /* some MAC's need RMW protection on AUTOC */
265 0 : ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
266 0 : if (ret_val != IXGBE_SUCCESS)
267 : goto out;
268 :
269 : /* only backplane uses autoc so fall through */
270 : case ixgbe_media_type_fiber_fixed:
271 : case ixgbe_media_type_fiber_qsfp:
272 : case ixgbe_media_type_fiber:
273 0 : reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
274 :
275 0 : break;
276 : case ixgbe_media_type_copper:
277 0 : hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
278 : IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
279 0 : break;
280 : default:
281 : break;
282 : }
283 :
284 : /*
285 : * The possible values of fc.requested_mode are:
286 : * 0: Flow control is completely disabled
287 : * 1: Rx flow control is enabled (we can receive pause frames,
288 : * but not send pause frames).
289 : * 2: Tx flow control is enabled (we can send pause frames but
290 : * we do not support receiving pause frames).
291 : * 3: Both Rx and Tx flow control (symmetric) are enabled.
292 : * other: Invalid.
293 : */
294 0 : switch (hw->fc.requested_mode) {
295 : case ixgbe_fc_none:
296 : /* Flow control completely disabled by software override. */
297 0 : reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
298 0 : if (hw->phy.media_type == ixgbe_media_type_backplane)
299 0 : reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
300 : IXGBE_AUTOC_ASM_PAUSE);
301 0 : else if (hw->phy.media_type == ixgbe_media_type_copper)
302 0 : reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
303 : break;
304 : case ixgbe_fc_tx_pause:
305 : /*
306 : * Tx Flow control is enabled, and Rx Flow control is
307 : * disabled by software override.
308 : */
309 0 : reg |= IXGBE_PCS1GANA_ASM_PAUSE;
310 0 : reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
311 0 : if (hw->phy.media_type == ixgbe_media_type_backplane) {
312 0 : reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
313 0 : reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
314 0 : } else if (hw->phy.media_type == ixgbe_media_type_copper) {
315 0 : reg_cu |= IXGBE_TAF_ASM_PAUSE;
316 0 : reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
317 0 : }
318 : break;
319 : case ixgbe_fc_rx_pause:
320 : /*
321 : * Rx Flow control is enabled and Tx Flow control is
322 : * disabled by software override. Since there really
323 : * isn't a way to advertise that we are capable of RX
324 : * Pause ONLY, we will advertise that we support both
325 : * symmetric and asymmetric Rx PAUSE, as such we fall
326 : * through to the fc_full statement. Later, we will
327 : * disable the adapter's ability to send PAUSE frames.
328 : */
329 : case ixgbe_fc_full:
330 : /* Flow control (both Rx and Tx) is enabled by SW override. */
331 0 : reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
332 0 : if (hw->phy.media_type == ixgbe_media_type_backplane)
333 0 : reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
334 : IXGBE_AUTOC_ASM_PAUSE;
335 0 : else if (hw->phy.media_type == ixgbe_media_type_copper)
336 0 : reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
337 : break;
338 : default:
339 : ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
340 : "Flow control param set incorrectly\n");
341 : ret_val = IXGBE_ERR_CONFIG;
342 0 : goto out;
343 : break;
344 : }
345 :
346 0 : if (hw->mac.type < ixgbe_mac_X540) {
347 : /*
348 : * Enable auto-negotiation between the MAC & PHY;
349 : * the MAC will advertise clause 37 flow control.
350 : */
351 0 : IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
352 0 : reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
353 :
354 : /* Disable AN timeout */
355 0 : if (hw->fc.strict_ieee)
356 0 : reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
357 :
358 0 : IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
359 : DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
360 0 : }
361 :
362 : /*
363 : * AUTOC restart handles negotiation of 1G and 10G on backplane
364 : * and copper. There is no need to set the PCS1GCTL register.
365 : *
366 : */
367 0 : if (hw->phy.media_type == ixgbe_media_type_backplane) {
368 0 : reg_bp |= IXGBE_AUTOC_AN_RESTART;
369 0 : ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
370 0 : if (ret_val)
371 : goto out;
372 0 : } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
373 0 : (ixgbe_device_supports_autoneg_fc(hw))) {
374 0 : hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
375 0 : IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
376 0 : }
377 :
378 : DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
379 : out:
380 0 : return ret_val;
381 0 : }
382 :
383 : /**
384 : * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
385 : * @hw: pointer to hardware structure
386 : *
387 : * Starts the hardware by filling the bus info structure and media type, clears
388 : * all on chip counters, initializes receive address registers, multicast
389 : * table, VLAN filter table, calls routine to set up link and flow control
390 : * settings, and leaves transmit and receive units disabled and uninitialized
391 : **/
392 0 : int32_t ixgbe_start_hw_generic(struct ixgbe_hw *hw)
393 : {
394 : int32_t ret_val = IXGBE_SUCCESS;
395 : uint32_t ctrl_ext;
396 :
397 : DEBUGFUNC("ixgbe_start_hw_generic");
398 :
399 : /* Set the media type */
400 0 : hw->phy.media_type = hw->mac.ops.get_media_type(hw);
401 :
402 : /* PHY ops initialization must be done in reset_hw() */
403 :
404 : /* Clear the VLAN filter table */
405 0 : hw->mac.ops.clear_vfta(hw);
406 :
407 : /* Clear statistics registers */
408 0 : hw->mac.ops.clear_hw_cntrs(hw);
409 :
410 : /* Set No Snoop Disable */
411 0 : ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
412 0 : ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
413 0 : IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
414 0 : IXGBE_WRITE_FLUSH(hw);
415 :
416 : /* Setup flow control */
417 0 : if (hw->mac.ops.setup_fc) {
418 0 : ret_val = hw->mac.ops.setup_fc(hw);
419 0 : if (ret_val != IXGBE_SUCCESS)
420 : goto out;
421 : }
422 :
423 : /* Clear adapter stopped flag */
424 0 : hw->adapter_stopped = FALSE;
425 :
426 : out:
427 0 : return ret_val;
428 : }
429 :
430 : /**
431 : * ixgbe_start_hw_gen2 - Init sequence for common device family
432 : * @hw: pointer to hw structure
433 : *
434 : * Performs the init sequence common to the second generation
435 : * of 10 GbE devices.
436 : * Devices in the second generation:
437 : * 82599
438 : * X540
439 : **/
440 0 : int32_t ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
441 : {
442 : uint32_t i;
443 : uint32_t regval;
444 :
445 : /* Clear the rate limiters */
446 0 : for (i = 0; i < hw->mac.max_tx_queues; i++) {
447 0 : IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
448 0 : IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
449 : }
450 0 : IXGBE_WRITE_FLUSH(hw);
451 :
452 : /* Disable relaxed ordering */
453 0 : for (i = 0; i < hw->mac.max_tx_queues; i++) {
454 0 : regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
455 0 : regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
456 0 : IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
457 : }
458 :
459 0 : for (i = 0; i < hw->mac.max_rx_queues; i++) {
460 0 : regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
461 0 : regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
462 : IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
463 0 : IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
464 : }
465 :
466 0 : return IXGBE_SUCCESS;
467 : }
468 :
469 : /**
470 : * ixgbe_init_hw_generic - Generic hardware initialization
471 : * @hw: pointer to hardware structure
472 : *
473 : * Initialize the hardware by resetting the hardware, filling the bus info
474 : * structure and media type, clears all on chip counters, initializes receive
475 : * address registers, multicast table, VLAN filter table, calls routine to set
476 : * up link and flow control settings, and leaves transmit and receive units
477 : * disabled and uninitialized
478 : **/
479 0 : int32_t ixgbe_init_hw_generic(struct ixgbe_hw *hw)
480 : {
481 : int32_t status;
482 :
483 : DEBUGFUNC("ixgbe_init_hw_generic");
484 :
485 : /* Reset the hardware */
486 0 : status = hw->mac.ops.reset_hw(hw);
487 :
488 0 : if (status == IXGBE_SUCCESS) {
489 : /* Start the HW */
490 0 : status = hw->mac.ops.start_hw(hw);
491 0 : }
492 :
493 0 : return status;
494 : }
495 :
496 : /**
497 : * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
498 : * @hw: pointer to hardware structure
499 : *
500 : * Clears all hardware statistics counters by reading them from the hardware.
501 : * Statistics counters are clear on read.
502 : **/
503 0 : int32_t ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
504 : {
505 0 : uint16_t i = 0;
506 :
507 : DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
508 :
509 0 : IXGBE_READ_REG(hw, IXGBE_CRCERRS);
510 0 : IXGBE_READ_REG(hw, IXGBE_ILLERRC);
511 0 : IXGBE_READ_REG(hw, IXGBE_ERRBC);
512 0 : IXGBE_READ_REG(hw, IXGBE_MSPDC);
513 0 : for (i = 0; i < 8; i++)
514 0 : IXGBE_READ_REG(hw, IXGBE_MPC(i));
515 :
516 0 : IXGBE_READ_REG(hw, IXGBE_MLFC);
517 0 : IXGBE_READ_REG(hw, IXGBE_MRFC);
518 0 : IXGBE_READ_REG(hw, IXGBE_RLEC);
519 0 : IXGBE_READ_REG(hw, IXGBE_LXONTXC);
520 0 : IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
521 0 : if (hw->mac.type >= ixgbe_mac_82599EB) {
522 0 : IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
523 0 : IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
524 0 : } else {
525 0 : IXGBE_READ_REG(hw, IXGBE_LXONRXC);
526 0 : IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
527 : }
528 :
529 0 : for (i = 0; i < 8; i++) {
530 0 : IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
531 0 : IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
532 0 : if (hw->mac.type >= ixgbe_mac_82599EB) {
533 0 : IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
534 0 : IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
535 0 : } else {
536 0 : IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
537 0 : IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
538 : }
539 : }
540 0 : if (hw->mac.type >= ixgbe_mac_82599EB)
541 0 : for (i = 0; i < 8; i++)
542 0 : IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
543 0 : IXGBE_READ_REG(hw, IXGBE_PRC64);
544 0 : IXGBE_READ_REG(hw, IXGBE_PRC127);
545 0 : IXGBE_READ_REG(hw, IXGBE_PRC255);
546 0 : IXGBE_READ_REG(hw, IXGBE_PRC511);
547 0 : IXGBE_READ_REG(hw, IXGBE_PRC1023);
548 0 : IXGBE_READ_REG(hw, IXGBE_PRC1522);
549 0 : IXGBE_READ_REG(hw, IXGBE_GPRC);
550 0 : IXGBE_READ_REG(hw, IXGBE_BPRC);
551 0 : IXGBE_READ_REG(hw, IXGBE_MPRC);
552 0 : IXGBE_READ_REG(hw, IXGBE_GPTC);
553 0 : IXGBE_READ_REG(hw, IXGBE_GORCL);
554 0 : IXGBE_READ_REG(hw, IXGBE_GORCH);
555 0 : IXGBE_READ_REG(hw, IXGBE_GOTCL);
556 0 : IXGBE_READ_REG(hw, IXGBE_GOTCH);
557 0 : if (hw->mac.type == ixgbe_mac_82598EB)
558 0 : for (i = 0; i < 8; i++)
559 0 : IXGBE_READ_REG(hw, IXGBE_RNBC(i));
560 0 : IXGBE_READ_REG(hw, IXGBE_RUC);
561 0 : IXGBE_READ_REG(hw, IXGBE_RFC);
562 0 : IXGBE_READ_REG(hw, IXGBE_ROC);
563 0 : IXGBE_READ_REG(hw, IXGBE_RJC);
564 0 : IXGBE_READ_REG(hw, IXGBE_MNGPRC);
565 0 : IXGBE_READ_REG(hw, IXGBE_MNGPDC);
566 0 : IXGBE_READ_REG(hw, IXGBE_MNGPTC);
567 0 : IXGBE_READ_REG(hw, IXGBE_TORL);
568 0 : IXGBE_READ_REG(hw, IXGBE_TORH);
569 0 : IXGBE_READ_REG(hw, IXGBE_TPR);
570 0 : IXGBE_READ_REG(hw, IXGBE_TPT);
571 0 : IXGBE_READ_REG(hw, IXGBE_PTC64);
572 0 : IXGBE_READ_REG(hw, IXGBE_PTC127);
573 0 : IXGBE_READ_REG(hw, IXGBE_PTC255);
574 0 : IXGBE_READ_REG(hw, IXGBE_PTC511);
575 0 : IXGBE_READ_REG(hw, IXGBE_PTC1023);
576 0 : IXGBE_READ_REG(hw, IXGBE_PTC1522);
577 0 : IXGBE_READ_REG(hw, IXGBE_MPTC);
578 0 : IXGBE_READ_REG(hw, IXGBE_BPTC);
579 0 : for (i = 0; i < 16; i++) {
580 0 : IXGBE_READ_REG(hw, IXGBE_QPRC(i));
581 0 : IXGBE_READ_REG(hw, IXGBE_QPTC(i));
582 0 : if (hw->mac.type >= ixgbe_mac_82599EB) {
583 : IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
584 0 : IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
585 0 : IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
586 0 : IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
587 0 : IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
588 0 : } else {
589 : IXGBE_READ_REG(hw, IXGBE_QBRC(i));
590 0 : IXGBE_READ_REG(hw, IXGBE_QBTC(i));
591 : }
592 : }
593 :
594 0 : if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
595 0 : if (hw->phy.id == 0)
596 0 : ixgbe_identify_phy(hw);
597 0 : hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
598 : IXGBE_MDIO_PCS_DEV_TYPE, &i);
599 0 : hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
600 : IXGBE_MDIO_PCS_DEV_TYPE, &i);
601 0 : hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
602 : IXGBE_MDIO_PCS_DEV_TYPE, &i);
603 0 : hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
604 : IXGBE_MDIO_PCS_DEV_TYPE, &i);
605 0 : }
606 :
607 0 : return IXGBE_SUCCESS;
608 0 : }
609 :
610 : /**
611 : * ixgbe_get_mac_addr_generic - Generic get MAC address
612 : * @hw: pointer to hardware structure
613 : * @mac_addr: Adapter MAC address
614 : *
615 : * Reads the adapter's MAC address from first Receive Address Register (RAR0).
616 : * A reset of the adapter must be performed prior to calling this function
617 : * in order for the MAC address to have been loaded from the EEPROM into RAR0
618 : **/
619 0 : int32_t ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, uint8_t *mac_addr)
620 : {
621 : uint32_t rar_high;
622 : uint32_t rar_low;
623 : uint16_t i;
624 :
625 : DEBUGFUNC("ixgbe_get_mac_addr_generic");
626 :
627 : #ifdef __sparc64__
628 : struct ixgbe_osdep *os = hw->back;
629 :
630 : if (OF_getprop(PCITAG_NODE(os->os_pa.pa_tag), "local-mac-address",
631 : mac_addr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN)
632 : return IXGBE_SUCCESS;
633 : #endif
634 :
635 0 : rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
636 0 : rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
637 :
638 0 : for (i = 0; i < 4; i++)
639 0 : mac_addr[i] = (uint8_t)(rar_low >> (i*8));
640 :
641 0 : for (i = 0; i < 2; i++)
642 0 : mac_addr[i+4] = (uint8_t)(rar_high >> (i*8));
643 :
644 0 : return IXGBE_SUCCESS;
645 : }
646 :
647 : /**
648 : * ixgbe_set_pci_config_data_generic - Generic store PCI bus info
649 : * @hw: pointer to hardware structure
650 : * @link_status: the link status returned by the PCI config space
651 : *
652 : * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
653 : **/
654 0 : void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw,
655 : uint16_t link_status)
656 : {
657 0 : struct ixgbe_mac_info *mac = &hw->mac;
658 :
659 0 : hw->bus.type = ixgbe_bus_type_pci_express;
660 :
661 0 : switch (link_status & IXGBE_PCI_LINK_WIDTH) {
662 : case IXGBE_PCI_LINK_WIDTH_1:
663 0 : hw->bus.width = ixgbe_bus_width_pcie_x1;
664 0 : break;
665 : case IXGBE_PCI_LINK_WIDTH_2:
666 0 : hw->bus.width = ixgbe_bus_width_pcie_x2;
667 0 : break;
668 : case IXGBE_PCI_LINK_WIDTH_4:
669 0 : hw->bus.width = ixgbe_bus_width_pcie_x4;
670 0 : break;
671 : case IXGBE_PCI_LINK_WIDTH_8:
672 0 : hw->bus.width = ixgbe_bus_width_pcie_x8;
673 0 : break;
674 : default:
675 0 : hw->bus.width = ixgbe_bus_width_unknown;
676 0 : break;
677 : }
678 :
679 0 : switch (link_status & IXGBE_PCI_LINK_SPEED) {
680 : case IXGBE_PCI_LINK_SPEED_2500:
681 0 : hw->bus.speed = ixgbe_bus_speed_2500;
682 0 : break;
683 : case IXGBE_PCI_LINK_SPEED_5000:
684 0 : hw->bus.speed = ixgbe_bus_speed_5000;
685 0 : break;
686 : case IXGBE_PCI_LINK_SPEED_8000:
687 0 : hw->bus.speed = ixgbe_bus_speed_8000;
688 0 : break;
689 : default:
690 0 : hw->bus.speed = ixgbe_bus_speed_unknown;
691 0 : break;
692 : }
693 :
694 0 : mac->ops.set_lan_id(hw);
695 0 : }
696 :
697 : /**
698 : * ixgbe_get_bus_info_generic - Generic set PCI bus info
699 : * @hw: pointer to hardware structure
700 : *
701 : * Gets the PCI bus info (speed, width, type) then calls helper function to
702 : * store this data within the ixgbe_hw structure.
703 : **/
704 0 : int32_t ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
705 : {
706 : uint16_t link_status;
707 :
708 : DEBUGFUNC("ixgbe_get_bus_info_generic");
709 :
710 : /* Get the negotiated link width and speed from PCI config space */
711 0 : link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
712 :
713 0 : ixgbe_set_pci_config_data_generic(hw, link_status);
714 :
715 0 : return IXGBE_SUCCESS;
716 : }
717 :
718 : /**
719 : * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
720 : * @hw: pointer to the HW structure
721 : *
722 : * Determines the LAN function id by reading memory-mapped registers
723 : * and swaps the port value if requested.
724 : **/
725 0 : void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
726 : {
727 0 : struct ixgbe_bus_info *bus = &hw->bus;
728 : uint32_t reg;
729 :
730 : DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
731 :
732 0 : reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
733 0 : bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
734 0 : bus->lan_id = bus->func;
735 :
736 : /* check for a port swap */
737 0 : reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
738 0 : if (reg & IXGBE_FACTPS_LFS)
739 0 : bus->func ^= 0x1;
740 0 : }
741 :
742 : /**
743 : * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
744 : * @hw: pointer to hardware structure
745 : *
746 : * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
747 : * disables transmit and receive units. The adapter_stopped flag is used by
748 : * the shared code and drivers to determine if the adapter is in a stopped
749 : * state and should not touch the hardware.
750 : **/
751 0 : int32_t ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
752 : {
753 : uint32_t reg_val;
754 : uint16_t i;
755 :
756 : DEBUGFUNC("ixgbe_stop_adapter_generic");
757 :
758 : /*
759 : * Set the adapter_stopped flag so other driver functions stop touching
760 : * the hardware
761 : */
762 0 : hw->adapter_stopped = TRUE;
763 :
764 : /* Disable the receive unit */
765 0 : ixgbe_disable_rx(hw);
766 :
767 : /* Clear interrupt mask to stop interrupts from being generated */
768 0 : IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
769 :
770 : /* Clear any pending interrupts, flush previous writes */
771 0 : IXGBE_READ_REG(hw, IXGBE_EICR);
772 :
773 : /* Disable the transmit unit. Each queue must be disabled. */
774 0 : for (i = 0; i < hw->mac.max_tx_queues; i++)
775 0 : IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
776 :
777 : /* Disable the receive unit by stopping each queue */
778 0 : for (i = 0; i < hw->mac.max_rx_queues; i++) {
779 0 : reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
780 0 : reg_val &= ~IXGBE_RXDCTL_ENABLE;
781 0 : reg_val |= IXGBE_RXDCTL_SWFLSH;
782 0 : IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
783 : }
784 :
785 : /* flush all queue disables */
786 0 : IXGBE_WRITE_FLUSH(hw);
787 0 : msec_delay(2);
788 :
789 : /*
790 : * Prevent the PCI-E bus from hanging by disabling PCI-E master
791 : * access and verify no pending requests
792 : */
793 0 : return ixgbe_disable_pcie_master(hw);
794 : }
795 :
796 : /**
797 : * ixgbe_led_on_generic - Turns on the software controllable LEDs.
798 : * @hw: pointer to hardware structure
799 : * @index: led number to turn on
800 : **/
801 0 : int32_t ixgbe_led_on_generic(struct ixgbe_hw *hw, uint32_t index)
802 : {
803 0 : uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
804 :
805 : DEBUGFUNC("ixgbe_led_on_generic");
806 :
807 : /* To turn on the LED, set mode to ON. */
808 0 : led_reg &= ~IXGBE_LED_MODE_MASK(index);
809 0 : led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
810 0 : IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
811 0 : IXGBE_WRITE_FLUSH(hw);
812 :
813 0 : return IXGBE_SUCCESS;
814 : }
815 :
816 : /**
817 : * ixgbe_led_off_generic - Turns off the software controllable LEDs.
818 : * @hw: pointer to hardware structure
819 : * @index: led number to turn off
820 : **/
821 0 : int32_t ixgbe_led_off_generic(struct ixgbe_hw *hw, uint32_t index)
822 : {
823 0 : uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
824 :
825 : DEBUGFUNC("ixgbe_led_off_generic");
826 :
827 : /* To turn off the LED, set mode to OFF. */
828 0 : led_reg &= ~IXGBE_LED_MODE_MASK(index);
829 0 : led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
830 0 : IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
831 0 : IXGBE_WRITE_FLUSH(hw);
832 :
833 0 : return IXGBE_SUCCESS;
834 : }
835 :
836 : /**
837 : * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
838 : * @hw: pointer to hardware structure
839 : *
840 : * Initializes the EEPROM parameters ixgbe_eeprom_info within the
841 : * ixgbe_hw struct in order to set up EEPROM access.
842 : **/
843 0 : int32_t ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
844 : {
845 0 : struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
846 : uint32_t eec;
847 : uint16_t eeprom_size;
848 :
849 : DEBUGFUNC("ixgbe_init_eeprom_params_generic");
850 :
851 0 : if (eeprom->type == ixgbe_eeprom_uninitialized) {
852 0 : eeprom->type = ixgbe_eeprom_none;
853 : /* Set default semaphore delay to 10ms which is a well
854 : * tested value */
855 0 : eeprom->semaphore_delay = 10;
856 : /* Clear EEPROM page size, it will be initialized as needed */
857 0 : eeprom->word_page_size = 0;
858 :
859 : /*
860 : * Check for EEPROM present first.
861 : * If not present leave as none
862 : */
863 0 : eec = IXGBE_READ_REG(hw, IXGBE_EEC);
864 0 : if (eec & IXGBE_EEC_PRES) {
865 0 : eeprom->type = ixgbe_eeprom_spi;
866 :
867 : /*
868 : * SPI EEPROM is assumed here. This code would need to
869 : * change if a future EEPROM is not SPI.
870 : */
871 0 : eeprom_size = (uint16_t)((eec & IXGBE_EEC_SIZE) >>
872 : IXGBE_EEC_SIZE_SHIFT);
873 0 : eeprom->word_size = 1 << (eeprom_size +
874 : IXGBE_EEPROM_WORD_SIZE_SHIFT);
875 0 : }
876 :
877 0 : if (eec & IXGBE_EEC_ADDR_SIZE)
878 0 : eeprom->address_bits = 16;
879 : else
880 0 : eeprom->address_bits = 8;
881 : DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
882 : "%d\n", eeprom->type, eeprom->word_size,
883 : eeprom->address_bits);
884 : }
885 :
886 0 : return IXGBE_SUCCESS;
887 : }
888 :
889 : /**
890 : * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
891 : * @hw: pointer to hardware structure
892 : * @offset: offset within the EEPROM to be written to
893 : * @words: number of word(s)
894 : * @data: 16 bit word(s) to be written to the EEPROM
895 : *
896 : * If ixgbe_eeprom_update_checksum is not called after this function, the
897 : * EEPROM will most likely contain an invalid checksum.
898 : **/
899 0 : static int32_t ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, uint16_t offset,
900 : uint16_t words, uint16_t *data)
901 : {
902 : int32_t status;
903 : uint16_t word;
904 : uint16_t page_size;
905 : uint16_t i;
906 : uint8_t write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
907 :
908 : DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
909 :
910 : /* Prepare the EEPROM for writing */
911 0 : status = ixgbe_acquire_eeprom(hw);
912 :
913 0 : if (status == IXGBE_SUCCESS) {
914 0 : if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
915 0 : ixgbe_release_eeprom(hw);
916 : status = IXGBE_ERR_EEPROM;
917 0 : }
918 : }
919 :
920 0 : if (status == IXGBE_SUCCESS) {
921 0 : for (i = 0; i < words; i++) {
922 0 : ixgbe_standby_eeprom(hw);
923 :
924 : /* Send the WRITE ENABLE command (8 bit opcode ) */
925 0 : ixgbe_shift_out_eeprom_bits(hw,
926 : IXGBE_EEPROM_WREN_OPCODE_SPI,
927 : IXGBE_EEPROM_OPCODE_BITS);
928 :
929 0 : ixgbe_standby_eeprom(hw);
930 :
931 : /*
932 : * Some SPI eeproms use the 8th address bit embedded
933 : * in the opcode
934 : */
935 0 : if ((hw->eeprom.address_bits == 8) &&
936 0 : ((offset + i) >= 128))
937 0 : write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
938 :
939 : /* Send the Write command (8-bit opcode + addr) */
940 0 : ixgbe_shift_out_eeprom_bits(hw, write_opcode,
941 : IXGBE_EEPROM_OPCODE_BITS);
942 0 : ixgbe_shift_out_eeprom_bits(hw, (uint16_t)((offset + i) * 2),
943 0 : hw->eeprom.address_bits);
944 :
945 0 : page_size = hw->eeprom.word_page_size;
946 :
947 : /* Send the data in burst via SPI */
948 0 : do {
949 0 : word = data[i];
950 0 : word = (word >> 8) | (word << 8);
951 0 : ixgbe_shift_out_eeprom_bits(hw, word, 16);
952 :
953 0 : if (page_size == 0)
954 : break;
955 :
956 : /* do not wrap around page */
957 0 : if (((offset + i) & (page_size - 1)) ==
958 : (page_size - 1))
959 : break;
960 0 : } while (++i < words);
961 :
962 0 : ixgbe_standby_eeprom(hw);
963 0 : msec_delay(10);
964 : }
965 : /* Done with writing - release the EEPROM */
966 0 : ixgbe_release_eeprom(hw);
967 0 : }
968 :
969 0 : return status;
970 : }
971 :
972 : /**
973 : * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
974 : * @hw: pointer to hardware structure
975 : * @offset: offset within the EEPROM to be written to
976 : * @data: 16 bit word to be written to the EEPROM
977 : *
978 : * If ixgbe_eeprom_update_checksum is not called after this function, the
979 : * EEPROM will most likely contain an invalid checksum.
980 : **/
981 0 : int32_t ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t data)
982 : {
983 : int32_t status;
984 :
985 : DEBUGFUNC("ixgbe_write_eeprom_generic");
986 :
987 0 : hw->eeprom.ops.init_params(hw);
988 :
989 0 : if (offset >= hw->eeprom.word_size) {
990 : status = IXGBE_ERR_EEPROM;
991 0 : goto out;
992 : }
993 :
994 0 : status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
995 :
996 : out:
997 0 : return status;
998 : }
999 :
1000 : /**
1001 : * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1002 : * @hw: pointer to hardware structure
1003 : * @offset: offset within the EEPROM to be read
1004 : * @words: number of word(s)
1005 : * @data: read 16 bit word(s) from EEPROM
1006 : *
1007 : * Reads 16 bit word(s) from EEPROM through bit-bang method
1008 : **/
1009 0 : static int32_t ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, uint16_t offset,
1010 : uint16_t words, uint16_t *data)
1011 : {
1012 : int32_t status;
1013 : uint16_t word_in;
1014 : uint8_t read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1015 : uint16_t i;
1016 :
1017 : DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1018 :
1019 : /* Prepare the EEPROM for reading */
1020 0 : status = ixgbe_acquire_eeprom(hw);
1021 :
1022 0 : if (status == IXGBE_SUCCESS) {
1023 0 : if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1024 0 : ixgbe_release_eeprom(hw);
1025 : status = IXGBE_ERR_EEPROM;
1026 0 : }
1027 : }
1028 :
1029 0 : if (status == IXGBE_SUCCESS) {
1030 0 : for (i = 0; i < words; i++) {
1031 0 : ixgbe_standby_eeprom(hw);
1032 : /*
1033 : * Some SPI eeproms use the 8th address bit embedded
1034 : * in the opcode
1035 : */
1036 0 : if ((hw->eeprom.address_bits == 8) &&
1037 0 : ((offset + i) >= 128))
1038 0 : read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1039 :
1040 : /* Send the READ command (opcode + addr) */
1041 0 : ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1042 : IXGBE_EEPROM_OPCODE_BITS);
1043 0 : ixgbe_shift_out_eeprom_bits(hw, (uint16_t)((offset + i) * 2),
1044 0 : hw->eeprom.address_bits);
1045 :
1046 : /* Read the data. */
1047 0 : word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1048 0 : data[i] = (word_in >> 8) | (word_in << 8);
1049 : }
1050 :
1051 : /* End this read operation */
1052 0 : ixgbe_release_eeprom(hw);
1053 0 : }
1054 :
1055 0 : return status;
1056 : }
1057 :
1058 : /**
1059 : * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1060 : * @hw: pointer to hardware structure
1061 : * @offset: offset within the EEPROM to be read
1062 : * @data: read 16 bit value from EEPROM
1063 : *
1064 : * Reads 16 bit value from EEPROM through bit-bang method
1065 : **/
1066 0 : int32_t ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, uint16_t offset,
1067 : uint16_t *data)
1068 : {
1069 : int32_t status;
1070 :
1071 : DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1072 :
1073 0 : hw->eeprom.ops.init_params(hw);
1074 :
1075 0 : if (offset >= hw->eeprom.word_size) {
1076 : status = IXGBE_ERR_EEPROM;
1077 0 : goto out;
1078 : }
1079 :
1080 0 : status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1081 :
1082 : out:
1083 0 : return status;
1084 : }
1085 :
1086 : /**
1087 : * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1088 : * @hw: pointer to hardware structure
1089 : * @offset: offset of word in the EEPROM to read
1090 : * @words: number of word(s)
1091 : * @data: 16 bit word(s) from the EEPROM
1092 : *
1093 : * Reads 16 bit word(s) from the EEPROM using the EERD register.
1094 : **/
1095 0 : int32_t ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, uint16_t offset,
1096 : uint16_t words, uint16_t *data)
1097 : {
1098 : uint32_t eerd;
1099 : int32_t status = IXGBE_SUCCESS;
1100 : uint32_t i;
1101 :
1102 : DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1103 :
1104 0 : hw->eeprom.ops.init_params(hw);
1105 :
1106 0 : if (words == 0) {
1107 : status = IXGBE_ERR_INVALID_ARGUMENT;
1108 : ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1109 0 : goto out;
1110 : }
1111 :
1112 0 : if (offset >= hw->eeprom.word_size) {
1113 : status = IXGBE_ERR_EEPROM;
1114 : ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1115 0 : goto out;
1116 : }
1117 :
1118 0 : for (i = 0; i < words; i++) {
1119 0 : eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1120 : IXGBE_EEPROM_RW_REG_START;
1121 :
1122 0 : IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1123 0 : status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1124 :
1125 0 : if (status == IXGBE_SUCCESS) {
1126 0 : data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1127 : IXGBE_EEPROM_RW_REG_DATA);
1128 : } else {
1129 : DEBUGOUT("Eeprom read timed out\n");
1130 : goto out;
1131 : }
1132 : }
1133 : out:
1134 0 : return status;
1135 : }
1136 :
1137 : /**
1138 : * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1139 : * @hw: pointer to hardware structure
1140 : * @offset: offset of word in the EEPROM to read
1141 : * @data: word read from the EEPROM
1142 : *
1143 : * Reads a 16 bit word from the EEPROM using the EERD register.
1144 : **/
1145 0 : int32_t ixgbe_read_eerd_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t *data)
1146 : {
1147 0 : return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1148 : }
1149 :
1150 : /**
1151 : * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1152 : * @hw: pointer to hardware structure
1153 : * @offset: offset of word in the EEPROM to write
1154 : * @words: number of word(s)
1155 : * @data: word(s) to write to the EEPROM
1156 : *
1157 : * Write 16 bit word(s) to the EEPROM using the EEWR register.
1158 : **/
1159 0 : int32_t ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, uint16_t offset,
1160 : uint16_t words, uint16_t *data)
1161 : {
1162 : uint32_t eewr;
1163 : int32_t status = IXGBE_SUCCESS;
1164 : uint16_t i;
1165 :
1166 : DEBUGFUNC("ixgbe_write_eewr_generic");
1167 :
1168 0 : hw->eeprom.ops.init_params(hw);
1169 :
1170 0 : if (words == 0) {
1171 : status = IXGBE_ERR_INVALID_ARGUMENT;
1172 : ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1173 0 : goto out;
1174 : }
1175 :
1176 0 : if (offset >= hw->eeprom.word_size) {
1177 : status = IXGBE_ERR_EEPROM;
1178 : ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1179 0 : goto out;
1180 : }
1181 :
1182 0 : for (i = 0; i < words; i++) {
1183 0 : eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1184 0 : (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1185 : IXGBE_EEPROM_RW_REG_START;
1186 :
1187 0 : status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1188 0 : if (status != IXGBE_SUCCESS) {
1189 : DEBUGOUT("Eeprom write EEWR timed out\n");
1190 : goto out;
1191 : }
1192 :
1193 0 : IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1194 :
1195 0 : status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1196 0 : if (status != IXGBE_SUCCESS) {
1197 : DEBUGOUT("Eeprom write EEWR timed out\n");
1198 : goto out;
1199 : }
1200 : }
1201 :
1202 : out:
1203 0 : return status;
1204 : }
1205 :
1206 : /**
1207 : * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1208 : * @hw: pointer to hardware structure
1209 : * @offset: offset of word in the EEPROM to write
1210 : * @data: word to write to the EEPROM
1211 : *
1212 : * Write a 16 bit word to the EEPROM using the EEWR register.
1213 : **/
1214 0 : int32_t ixgbe_write_eewr_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t data)
1215 : {
1216 0 : return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1217 : }
1218 :
1219 : /**
1220 : * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1221 : * @hw: pointer to hardware structure
1222 : * @ee_reg: EEPROM flag for polling
1223 : *
1224 : * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1225 : * read or write is done respectively.
1226 : **/
1227 0 : int32_t ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, uint32_t ee_reg)
1228 : {
1229 : uint32_t i;
1230 : uint32_t reg;
1231 : int32_t status = IXGBE_ERR_EEPROM;
1232 :
1233 : DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1234 :
1235 0 : for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1236 0 : if (ee_reg == IXGBE_NVM_POLL_READ)
1237 0 : reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1238 : else
1239 0 : reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1240 :
1241 0 : if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1242 : status = IXGBE_SUCCESS;
1243 0 : break;
1244 : }
1245 0 : usec_delay(5);
1246 : }
1247 :
1248 : if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1249 : ERROR_REPORT1(IXGBE_ERROR_POLLING,
1250 : "EEPROM read/write done polling timed out");
1251 :
1252 0 : return status;
1253 : }
1254 :
1255 : /**
1256 : * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1257 : * @hw: pointer to hardware structure
1258 : *
1259 : * Prepares EEPROM for access using bit-bang method. This function should
1260 : * be called before issuing a command to the EEPROM.
1261 : **/
1262 0 : int32_t ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1263 : {
1264 : int32_t status = IXGBE_SUCCESS;
1265 : uint32_t eec;
1266 : uint32_t i;
1267 :
1268 : DEBUGFUNC("ixgbe_acquire_eeprom");
1269 :
1270 0 : if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1271 0 : != IXGBE_SUCCESS)
1272 0 : status = IXGBE_ERR_SWFW_SYNC;
1273 :
1274 0 : if (status == IXGBE_SUCCESS) {
1275 0 : eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1276 :
1277 : /* Request EEPROM Access */
1278 0 : eec |= IXGBE_EEC_REQ;
1279 0 : IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1280 :
1281 0 : for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1282 0 : eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1283 0 : if (eec & IXGBE_EEC_GNT)
1284 : break;
1285 0 : usec_delay(5);
1286 : }
1287 :
1288 : /* Release if grant not acquired */
1289 0 : if (!(eec & IXGBE_EEC_GNT)) {
1290 0 : eec &= ~IXGBE_EEC_REQ;
1291 0 : IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1292 : DEBUGOUT("Could not acquire EEPROM grant\n");
1293 :
1294 0 : hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1295 : status = IXGBE_ERR_EEPROM;
1296 0 : }
1297 :
1298 : /* Setup EEPROM for Read/Write */
1299 0 : if (status == IXGBE_SUCCESS) {
1300 : /* Clear CS and SK */
1301 0 : eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1302 0 : IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1303 0 : IXGBE_WRITE_FLUSH(hw);
1304 0 : usec_delay(1);
1305 0 : }
1306 : }
1307 0 : return status;
1308 : }
1309 :
1310 : /**
1311 : * ixgbe_get_eeprom_semaphore - Get hardware semaphore
1312 : * @hw: pointer to hardware structure
1313 : *
1314 : * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1315 : **/
1316 0 : int32_t ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1317 : {
1318 : int32_t status = IXGBE_ERR_EEPROM;
1319 : uint32_t timeout = 2000;
1320 : uint32_t i;
1321 : uint32_t swsm;
1322 :
1323 : DEBUGFUNC("ixgbe_get_eeprom_semaphore");
1324 :
1325 :
1326 : /* Get SMBI software semaphore between device drivers first */
1327 0 : for (i = 0; i < timeout; i++) {
1328 : /*
1329 : * If the SMBI bit is 0 when we read it, then the bit will be
1330 : * set and we have the semaphore
1331 : */
1332 0 : swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1333 0 : if (!(swsm & IXGBE_SWSM_SMBI)) {
1334 : status = IXGBE_SUCCESS;
1335 0 : break;
1336 : }
1337 0 : usec_delay(50);
1338 : }
1339 :
1340 0 : if (i == timeout) {
1341 : DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
1342 : "not granted.\n");
1343 : /*
1344 : * this release is particularly important because our attempts
1345 : * above to get the semaphore may have succeeded, and if there
1346 : * was a timeout, we should unconditionally clear the semaphore
1347 : * bits to free the driver to make progress
1348 : */
1349 0 : ixgbe_release_eeprom_semaphore(hw);
1350 :
1351 0 : usec_delay(50);
1352 : /*
1353 : * one last try
1354 : * If the SMBI bit is 0 when we read it, then the bit will be
1355 : * set and we have the semaphore
1356 : */
1357 0 : swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1358 0 : if (!(swsm & IXGBE_SWSM_SMBI))
1359 0 : status = IXGBE_SUCCESS;
1360 : }
1361 :
1362 : /* Now get the semaphore between SW/FW through the SWESMBI bit */
1363 0 : if (status == IXGBE_SUCCESS) {
1364 0 : for (i = 0; i < timeout; i++) {
1365 0 : swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1366 :
1367 : /* Set the SW EEPROM semaphore bit to request access */
1368 0 : swsm |= IXGBE_SWSM_SWESMBI;
1369 0 : IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1370 :
1371 : /*
1372 : * If we set the bit successfully then we got the
1373 : * semaphore.
1374 : */
1375 0 : swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1376 0 : if (swsm & IXGBE_SWSM_SWESMBI)
1377 : break;
1378 :
1379 0 : usec_delay(50);
1380 : }
1381 :
1382 : /*
1383 : * Release semaphores and return error if SW EEPROM semaphore
1384 : * was not granted because we don't have access to the EEPROM
1385 : */
1386 0 : if (i >= timeout) {
1387 : ERROR_REPORT1(IXGBE_ERROR_POLLING,
1388 : "SWESMBI Software EEPROM semaphore not granted.\n");
1389 0 : ixgbe_release_eeprom_semaphore(hw);
1390 : status = IXGBE_ERR_EEPROM;
1391 0 : }
1392 : } else {
1393 : ERROR_REPORT1(IXGBE_ERROR_POLLING,
1394 : "Software semaphore SMBI between device drivers "
1395 : "not granted.\n");
1396 : }
1397 :
1398 0 : return status;
1399 : }
1400 :
1401 : /**
1402 : * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1403 : * @hw: pointer to hardware structure
1404 : *
1405 : * This function clears hardware semaphore bits.
1406 : **/
1407 0 : void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1408 : {
1409 : uint32_t swsm;
1410 :
1411 : DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1412 :
1413 0 : swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1414 :
1415 : /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1416 0 : swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1417 0 : IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1418 0 : IXGBE_WRITE_FLUSH(hw);
1419 0 : }
1420 :
1421 : /**
1422 : * ixgbe_ready_eeprom - Polls for EEPROM ready
1423 : * @hw: pointer to hardware structure
1424 : **/
1425 0 : int32_t ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1426 : {
1427 : int32_t status = IXGBE_SUCCESS;
1428 : uint16_t i;
1429 : uint8_t spi_stat_reg;
1430 :
1431 : DEBUGFUNC("ixgbe_ready_eeprom");
1432 :
1433 : /*
1434 : * Read "Status Register" repeatedly until the LSB is cleared. The
1435 : * EEPROM will signal that the command has been completed by clearing
1436 : * bit 0 of the internal status register. If it's not cleared within
1437 : * 5 milliseconds, then error out.
1438 : */
1439 0 : for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1440 0 : ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1441 : IXGBE_EEPROM_OPCODE_BITS);
1442 0 : spi_stat_reg = (uint8_t)ixgbe_shift_in_eeprom_bits(hw, 8);
1443 0 : if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1444 : break;
1445 :
1446 0 : usec_delay(5);
1447 0 : ixgbe_standby_eeprom(hw);
1448 : }
1449 :
1450 : /*
1451 : * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1452 : * devices (and only 0-5mSec on 5V devices)
1453 : */
1454 0 : if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1455 : DEBUGOUT("SPI EEPROM Status error\n");
1456 : status = IXGBE_ERR_EEPROM;
1457 0 : }
1458 :
1459 0 : return status;
1460 : }
1461 :
1462 : /**
1463 : * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1464 : * @hw: pointer to hardware structure
1465 : **/
1466 0 : void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1467 : {
1468 : uint32_t eec;
1469 :
1470 : DEBUGFUNC("ixgbe_standby_eeprom");
1471 :
1472 0 : eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1473 :
1474 : /* Toggle CS to flush commands */
1475 0 : eec |= IXGBE_EEC_CS;
1476 0 : IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1477 0 : IXGBE_WRITE_FLUSH(hw);
1478 0 : usec_delay(1);
1479 0 : eec &= ~IXGBE_EEC_CS;
1480 0 : IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1481 0 : IXGBE_WRITE_FLUSH(hw);
1482 0 : usec_delay(1);
1483 0 : }
1484 :
1485 : /**
1486 : * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1487 : * @hw: pointer to hardware structure
1488 : * @data: data to send to the EEPROM
1489 : * @count: number of bits to shift out
1490 : **/
1491 0 : void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, uint16_t data,
1492 : uint16_t count)
1493 : {
1494 0 : uint32_t eec;
1495 : uint32_t mask;
1496 : uint32_t i;
1497 :
1498 : DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
1499 :
1500 0 : eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1501 :
1502 : /*
1503 : * Mask is used to shift "count" bits of "data" out to the EEPROM
1504 : * one bit at a time. Determine the starting bit based on count
1505 : */
1506 0 : mask = 0x01 << (count - 1);
1507 :
1508 0 : for (i = 0; i < count; i++) {
1509 : /*
1510 : * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1511 : * "1", and then raising and then lowering the clock (the SK
1512 : * bit controls the clock input to the EEPROM). A "0" is
1513 : * shifted out to the EEPROM by setting "DI" to "0" and then
1514 : * raising and then lowering the clock.
1515 : */
1516 0 : if (data & mask)
1517 0 : eec |= IXGBE_EEC_DI;
1518 : else
1519 0 : eec &= ~IXGBE_EEC_DI;
1520 :
1521 0 : IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1522 0 : IXGBE_WRITE_FLUSH(hw);
1523 :
1524 0 : usec_delay(1);
1525 :
1526 0 : ixgbe_raise_eeprom_clk(hw, &eec);
1527 0 : ixgbe_lower_eeprom_clk(hw, &eec);
1528 :
1529 : /*
1530 : * Shift mask to signify next bit of data to shift in to the
1531 : * EEPROM
1532 : */
1533 0 : mask = mask >> 1;
1534 : }
1535 :
1536 : /* We leave the "DI" bit set to "0" when we leave this routine. */
1537 0 : eec &= ~IXGBE_EEC_DI;
1538 0 : IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1539 0 : IXGBE_WRITE_FLUSH(hw);
1540 0 : }
1541 :
1542 : /**
1543 : * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
1544 : * @hw: pointer to hardware structure
1545 : **/
1546 0 : uint16_t ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, uint16_t count)
1547 : {
1548 0 : uint32_t eec;
1549 : uint32_t i;
1550 : uint16_t data = 0;
1551 :
1552 : DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
1553 :
1554 : /*
1555 : * In order to read a register from the EEPROM, we need to shift
1556 : * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
1557 : * the clock input to the EEPROM (setting the SK bit), and then reading
1558 : * the value of the "DO" bit. During this "shifting in" process the
1559 : * "DI" bit should always be clear.
1560 : */
1561 0 : eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1562 :
1563 0 : eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
1564 :
1565 0 : for (i = 0; i < count; i++) {
1566 0 : data = data << 1;
1567 0 : ixgbe_raise_eeprom_clk(hw, &eec);
1568 :
1569 0 : eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1570 :
1571 0 : eec &= ~(IXGBE_EEC_DI);
1572 0 : if (eec & IXGBE_EEC_DO)
1573 0 : data |= 1;
1574 :
1575 0 : ixgbe_lower_eeprom_clk(hw, &eec);
1576 : }
1577 :
1578 0 : return data;
1579 0 : }
1580 :
1581 : /**
1582 : * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
1583 : * @hw: pointer to hardware structure
1584 : * @eec: EEC register's current value
1585 : **/
1586 0 : void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec)
1587 : {
1588 : DEBUGFUNC("ixgbe_raise_eeprom_clk");
1589 :
1590 : /*
1591 : * Raise the clock input to the EEPROM
1592 : * (setting the SK bit), then delay
1593 : */
1594 0 : *eec = *eec | IXGBE_EEC_SK;
1595 0 : IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1596 0 : IXGBE_WRITE_FLUSH(hw);
1597 0 : usec_delay(1);
1598 0 : }
1599 :
1600 : /**
1601 : * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
1602 : * @hw: pointer to hardware structure
1603 : * @eec: EEC register's current value
1604 : **/
1605 0 : void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec)
1606 : {
1607 : DEBUGFUNC("ixgbe_lower_eeprom_clk");
1608 :
1609 : /*
1610 : * Lower the clock input to the EEPROM (clearing the SK bit), then
1611 : * delay
1612 : */
1613 0 : *eec = *eec & ~IXGBE_EEC_SK;
1614 0 : IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1615 0 : IXGBE_WRITE_FLUSH(hw);
1616 0 : usec_delay(1);
1617 0 : }
1618 :
1619 : /**
1620 : * ixgbe_release_eeprom - Release EEPROM, release semaphores
1621 : * @hw: pointer to hardware structure
1622 : **/
1623 0 : void ixgbe_release_eeprom(struct ixgbe_hw *hw)
1624 : {
1625 : uint32_t eec;
1626 :
1627 : DEBUGFUNC("ixgbe_release_eeprom");
1628 :
1629 0 : eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1630 :
1631 0 : eec |= IXGBE_EEC_CS; /* Pull CS high */
1632 0 : eec &= ~IXGBE_EEC_SK; /* Lower SCK */
1633 :
1634 0 : IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1635 0 : IXGBE_WRITE_FLUSH(hw);
1636 :
1637 0 : usec_delay(1);
1638 :
1639 : /* Stop requesting EEPROM access */
1640 0 : eec &= ~IXGBE_EEC_REQ;
1641 0 : IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1642 :
1643 0 : hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1644 :
1645 : /* Delay before attempt to obtain semaphore again to allow FW access */
1646 0 : msec_delay(hw->eeprom.semaphore_delay);
1647 0 : }
1648 :
1649 : /**
1650 : * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
1651 : * @hw: pointer to hardware structure
1652 : *
1653 : * Returns a negative error code on error, or the 16-bit checksum
1654 : **/
1655 0 : int32_t ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
1656 : {
1657 : uint16_t i;
1658 : uint16_t j;
1659 : uint16_t checksum = 0;
1660 0 : uint16_t length = 0;
1661 0 : uint16_t pointer = 0;
1662 0 : uint16_t word = 0;
1663 :
1664 : DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
1665 :
1666 : /* Include 0x0-0x3F in the checksum */
1667 0 : for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
1668 0 : if (hw->eeprom.ops.read(hw, i, &word)) {
1669 : DEBUGOUT("EEPROM read failed\n");
1670 0 : return IXGBE_ERR_EEPROM;
1671 : }
1672 0 : checksum += word;
1673 : }
1674 :
1675 : /* Include all data from pointers except for the fw pointer */
1676 0 : for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
1677 0 : if (hw->eeprom.ops.read(hw, i, &pointer)) {
1678 : DEBUGOUT("EEPROM read failed\n");
1679 0 : return IXGBE_ERR_EEPROM;
1680 : }
1681 :
1682 : /* If the pointer seems invalid */
1683 0 : if (pointer == 0xFFFF || pointer == 0)
1684 : continue;
1685 :
1686 0 : if (hw->eeprom.ops.read(hw, pointer, &length)) {
1687 : DEBUGOUT("EEPROM read failed\n");
1688 0 : return IXGBE_ERR_EEPROM;
1689 : }
1690 :
1691 0 : if (length == 0xFFFF || length == 0)
1692 : continue;
1693 :
1694 0 : for (j = pointer + 1; j <= pointer + length; j++) {
1695 0 : if (hw->eeprom.ops.read(hw, j, &word)) {
1696 : DEBUGOUT("EEPROM read failed\n");
1697 0 : return IXGBE_ERR_EEPROM;
1698 : }
1699 0 : checksum += word;
1700 : }
1701 : }
1702 :
1703 0 : checksum = (uint16_t)IXGBE_EEPROM_SUM - checksum;
1704 :
1705 0 : return (int32_t)checksum;
1706 0 : }
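
/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * calculation above chooses the checksum so that the 16-bit sum of the
 * covered words plus the stored checksum word equals IXGBE_EEPROM_SUM
 * (0xBABA).  A minimal host-side verifier over an already-read word
 * buffer, ignoring the pointer-referenced regions for brevity, could
 * look like the hypothetical helper below.
 */
#include <stdint.h>
#include <stdbool.h>

#define EX_EEPROM_SUM		0xBABA	/* mirrors IXGBE_EEPROM_SUM */
#define EX_CHECKSUM_WORD	0x3F	/* mirrors IXGBE_EEPROM_CHECKSUM */

static bool
ex_eeprom_checksum_ok(const uint16_t *words)
{
	uint16_t sum = 0;
	int i;

	/* Sum words 0x00-0x3E; 16-bit overflow wraps, as in the driver. */
	for (i = 0; i < EX_CHECKSUM_WORD; i++)
		sum += words[i];

	/* The stored checksum must bring the total up to 0xBABA. */
	return (uint16_t)(sum + words[EX_CHECKSUM_WORD]) == EX_EEPROM_SUM;
}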
1707 :
1708 : /**
1709 : * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
1710 : * @hw: pointer to hardware structure
1711 : * @checksum_val: calculated checksum
1712 : *
1713 : * Performs checksum calculation and validates the EEPROM checksum. If the
1714 : * caller does not need checksum_val, the value can be NULL.
1715 : **/
1716 0 : int32_t ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
1717 : uint16_t *checksum_val)
1718 : {
1719 : int32_t status;
1720 0 : uint16_t checksum;
1721 0 : uint16_t read_checksum = 0;
1722 :
1723 : DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
1724 :
1725 : /* Read the first word from the EEPROM. If this times out or fails, do
1726 : * not continue or we could be in for a very long wait while every
1727 : * EEPROM read fails
1728 : */
1729 0 : status = hw->eeprom.ops.read(hw, 0, &checksum);
1730 0 : if (status) {
1731 : DEBUGOUT("EEPROM read failed\n");
1732 0 : return status;
1733 : }
1734 :
1735 0 : status = hw->eeprom.ops.calc_checksum(hw);
1736 0 : if (status < 0)
1737 0 : return status;
1738 :
1739 0 : checksum = (uint16_t)(status & 0xffff);
1740 :
1741 0 : status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
1742 0 : if (status) {
1743 : DEBUGOUT("EEPROM read failed\n");
1744 0 : return status;
1745 : }
1746 :
1747 : /* Verify read checksum from EEPROM is the same as
1748 : * calculated checksum
1749 : */
1750 0 : if (read_checksum != checksum)
1751 0 : status = IXGBE_ERR_EEPROM_CHECKSUM;
1752 :
1753 : /* If the user cares, return the calculated checksum */
1754 0 : if (checksum_val)
1755 0 : *checksum_val = checksum;
1756 :
1757 0 : return status;
1758 0 : }
1759 :
1760 : /**
1761 : * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
1762 : * @hw: pointer to hardware structure
1763 : **/
1764 0 : int32_t ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1765 : {
1766 : int32_t status;
1767 0 : uint16_t checksum;
1768 :
1769 : DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
1770 :
1771 : /* Read the first word from the EEPROM. If this times out or fails, do
1772 : * not continue or we could be in for a very long wait while every
1773 : * EEPROM read fails
1774 : */
1775 0 : status = hw->eeprom.ops.read(hw, 0, &checksum);
1776 0 : if (status) {
1777 : DEBUGOUT("EEPROM read failed\n");
1778 0 : return status;
1779 : }
1780 :
1781 0 : status = hw->eeprom.ops.calc_checksum(hw);
1782 0 : if (status < 0)
1783 0 : return status;
1784 :
1785 0 : checksum = (uint16_t)(status & 0xffff);
1786 :
1787 0 : status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
1788 :
1789 0 : return status;
1790 0 : }
1791 :
1792 : /**
1793 : * ixgbe_validate_mac_addr - Validate MAC address
1794 : * @mac_addr: pointer to MAC address.
1795 : *
1796 : * Tests a MAC address to ensure it is a valid Individual Address
1797 : **/
1798 0 : int32_t ixgbe_validate_mac_addr(uint8_t *mac_addr)
1799 : {
1800 : int32_t status = IXGBE_SUCCESS;
1801 :
1802 : DEBUGFUNC("ixgbe_validate_mac_addr");
1803 :
1804 : /* Make sure it is not a multicast address */
1805 0 : if (IXGBE_IS_MULTICAST(mac_addr)) {
1806 : DEBUGOUT("MAC address is multicast\n");
1807 : status = IXGBE_ERR_INVALID_MAC_ADDR;
1808 : /* Not a broadcast address */
1809 0 : } else if (IXGBE_IS_BROADCAST(mac_addr)) {
1810 : DEBUGOUT("MAC address is broadcast\n");
1811 : status = IXGBE_ERR_INVALID_MAC_ADDR;
1812 : /* Reject the zero address */
1813 0 : } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
1814 0 : mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
1815 : DEBUGOUT("MAC address is all zeros\n");
1816 : status = IXGBE_ERR_INVALID_MAC_ADDR;
1817 0 : }
1818 0 : return status;
1819 : }
1820 :
1821 : /**
1822 : * ixgbe_set_rar_generic - Set Rx address register
1823 : * @hw: pointer to hardware structure
1824 : * @index: Receive address register to write
1825 : * @addr: Address to put into receive address register
1826 : * @vmdq: VMDq "set" or "pool" index
1827 : * @enable_addr: set flag that address is active
1828 : *
1829 : * Puts an ethernet address into a receive address register.
1830 : **/
1831 0 : int32_t ixgbe_set_rar_generic(struct ixgbe_hw *hw, uint32_t index, uint8_t *addr,
1832 : uint32_t vmdq, uint32_t enable_addr)
1833 : {
1834 : uint32_t rar_low, rar_high;
1835 0 : uint32_t rar_entries = hw->mac.num_rar_entries;
1836 :
1837 : DEBUGFUNC("ixgbe_set_rar_generic");
1838 :
1839 : /* Make sure we are using a valid rar index range */
1840 0 : if (index >= rar_entries) {
1841 : ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
1842 : "RAR index %d is out of range.\n", index);
1843 0 : return IXGBE_ERR_INVALID_ARGUMENT;
1844 : }
1845 :
1846 : /* setup VMDq pool selection before this RAR gets enabled */
1847 0 : hw->mac.ops.set_vmdq(hw, index, vmdq);
1848 :
1849 : /*
1850 : * HW expects these in little endian so we reverse the byte
1851 : * order from network order (big endian) to little endian
1852 : */
1853 0 : rar_low = ((uint32_t)addr[0] |
1854 0 : ((uint32_t)addr[1] << 8) |
1855 0 : ((uint32_t)addr[2] << 16) |
1856 0 : ((uint32_t)addr[3] << 24));
1857 : /*
1858 : * Some parts put the VMDq setting in the extra RAH bits,
1859 : * so save everything except the lower 16 bits that hold part
1860 : * of the address and the address valid bit.
1861 : */
1862 0 : rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1863 0 : rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1864 0 : rar_high |= ((uint32_t)addr[4] | ((uint32_t)addr[5] << 8));
1865 :
1866 0 : if (enable_addr != 0)
1867 0 : rar_high |= IXGBE_RAH_AV;
1868 :
1869 0 : IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
1870 0 : IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1871 :
1872 0 : return IXGBE_SUCCESS;
1873 0 : }
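
/*
 * Illustrative sketch (editor's addition, not part of the driver): for a
 * MAC address written aa:bb:cc:dd:ee:ff, the packing above yields
 * RAL = 0xddccbbaa and low RAH bits = 0xffee (the address-valid flag and,
 * on some parts, VMDq bits occupy the upper half of RAH).  A standalone
 * packer:
 */
#include <stdint.h>

static void
ex_pack_rar(const uint8_t addr[6], uint32_t *ral, uint16_t *rah_addr)
{
	/* Byte 0 lands in the least significant byte of RAL. */
	*ral = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
	    ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
	/* Bytes 4-5 occupy only the low 16 bits of RAH. */
	*rah_addr = (uint16_t)addr[4] | ((uint16_t)addr[5] << 8);
}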
1874 :
1875 : /**
1876 : * ixgbe_clear_rar_generic - Remove Rx address register
1877 : * @hw: pointer to hardware structure
1878 : * @index: Receive address register to write
1879 : *
1880 : * Clears an ethernet address from a receive address register.
1881 : **/
1882 0 : int32_t ixgbe_clear_rar_generic(struct ixgbe_hw *hw, uint32_t index)
1883 : {
1884 : uint32_t rar_high;
1885 0 : uint32_t rar_entries = hw->mac.num_rar_entries;
1886 :
1887 : DEBUGFUNC("ixgbe_clear_rar_generic");
1888 :
1889 : /* Make sure we are using a valid rar index range */
1890 0 : if (index >= rar_entries) {
1891 : ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
1892 : "RAR index %d is out of range.\n", index);
1893 0 : return IXGBE_ERR_INVALID_ARGUMENT;
1894 : }
1895 :
1896 : /*
1897 : * Some parts put the VMDq setting in the extra RAH bits,
1898 : * so save everything except the lower 16 bits that hold part
1899 : * of the address and the address valid bit.
1900 : */
1901 0 : rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1902 0 : rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1903 :
1904 0 : IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1905 0 : IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1906 :
1907 : /* clear VMDq pool/queue selection for this RAR */
1908 0 : hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
1909 :
1910 0 : return IXGBE_SUCCESS;
1911 0 : }
1912 :
1913 : /**
1914 : * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
1915 : * @hw: pointer to hardware structure
1916 : *
1917 : * Places the MAC address in receive address register 0 and clears the rest
1918 : * of the receive address registers. Clears the multicast table. Assumes
1919 : * the receiver is in reset when the routine is called.
1920 : **/
1921 0 : int32_t ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1922 : {
1923 : uint32_t i;
1924 0 : uint32_t rar_entries = hw->mac.num_rar_entries;
1925 :
1926 : DEBUGFUNC("ixgbe_init_rx_addrs_generic");
1927 :
1928 : /*
1929 : * If the current mac address is valid, assume it is a software override
1930 : * to the permanent address.
1931 : * Otherwise, use the permanent address from the eeprom.
1932 : */
1933 0 : if (ixgbe_validate_mac_addr(hw->mac.addr) ==
1934 : IXGBE_ERR_INVALID_MAC_ADDR) {
1935 : /* Get the MAC address from the RAR0 for later reference */
1936 0 : hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
1937 :
1938 : DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
1939 : hw->mac.addr[0], hw->mac.addr[1],
1940 : hw->mac.addr[2]);
1941 : DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
1942 : hw->mac.addr[4], hw->mac.addr[5]);
1943 0 : } else {
1944 : /* Setup the receive address. */
1945 : DEBUGOUT("Overriding MAC Address in RAR[0]\n");
1946 : DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
1947 : hw->mac.addr[0], hw->mac.addr[1],
1948 : hw->mac.addr[2]);
1949 : DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
1950 : hw->mac.addr[4], hw->mac.addr[5]);
1951 :
1952 0 : hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1953 :
1954 : /* clear VMDq pool/queue selection for RAR 0 */
1955 0 : hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
1956 : }
1957 0 : hw->addr_ctrl.overflow_promisc = 0;
1958 :
1959 0 : hw->addr_ctrl.rar_used_count = 1;
1960 :
1961 : /* Zero out the other receive addresses. */
1962 : DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
1963 0 : for (i = 1; i < rar_entries; i++) {
1964 0 : IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
1965 0 : IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
1966 : }
1967 :
1968 : /* Clear the MTA */
1969 0 : hw->addr_ctrl.mta_in_use = 0;
1970 0 : IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1971 :
1972 : DEBUGOUT(" Clearing MTA\n");
1973 0 : for (i = 0; i < hw->mac.mcft_size; i++)
1974 0 : IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
1975 :
1976 0 : ixgbe_init_uta_tables(hw);
1977 :
1978 0 : return IXGBE_SUCCESS;
1979 : }
1980 :
1981 : /**
1982 : * ixgbe_add_uc_addr - Adds a secondary unicast address.
1983 : * @hw: pointer to hardware structure
1984 : * @addr: new address
 * @vmdq: VMDq pool to assign
1985 : *
1986 : * Adds it to unused receive address register or goes into promiscuous mode.
1987 : **/
1988 0 : void ixgbe_add_uc_addr(struct ixgbe_hw *hw, uint8_t *addr, uint32_t vmdq)
1989 : {
1990 0 : uint32_t rar_entries = hw->mac.num_rar_entries;
1991 : uint32_t rar;
1992 :
1993 : DEBUGFUNC("ixgbe_add_uc_addr");
1994 :
1995 : DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
1996 : addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
1997 :
1998 : /*
1999 : * Place this address in the RAR if there is room,
2000 : * else put the controller into promiscuous mode
2001 : */
2002 0 : if (hw->addr_ctrl.rar_used_count < rar_entries) {
2003 : rar = hw->addr_ctrl.rar_used_count;
2004 0 : hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2005 : DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2006 0 : hw->addr_ctrl.rar_used_count++;
2007 0 : } else {
2008 0 : hw->addr_ctrl.overflow_promisc++;
2009 : }
2010 :
2011 : DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2012 0 : }
2013 :
2014 : /**
2015 : * ixgbe_mta_vector - Determines bit-vector in multicast table to set
2016 : * @hw: pointer to hardware structure
2017 : * @mc_addr: the multicast address
2018 : *
2019 : * Extracts 12 bits from a multicast address to determine which
2020 : * bit-vector to set in the multicast table. The hardware uses 12 bits
2021 : * of incoming rx multicast addresses to determine the bit-vector to
2022 : * check in the MTA. Which of the 4 combinations of 12 bits the hardware
2023 : * uses is selected by the MO field of MCSTCTRL. The MO field is set
2024 : * during initialization to mc_filter_type.
2025 : **/
2026 0 : int32_t ixgbe_mta_vector(struct ixgbe_hw *hw, uint8_t *mc_addr)
2027 : {
2028 : uint32_t vector = 0;
2029 :
2030 : DEBUGFUNC("ixgbe_mta_vector");
2031 :
2032 0 : switch (hw->mac.mc_filter_type) {
2033 : case 0: /* use bits [47:36] of the address */
2034 0 : vector = ((mc_addr[4] >> 4) | (((uint16_t)mc_addr[5]) << 4));
2035 0 : break;
2036 : case 1: /* use bits [46:35] of the address */
2037 0 : vector = ((mc_addr[4] >> 3) | (((uint16_t)mc_addr[5]) << 5));
2038 0 : break;
2039 : case 2: /* use bits [45:34] of the address */
2040 0 : vector = ((mc_addr[4] >> 2) | (((uint16_t)mc_addr[5]) << 6));
2041 0 : break;
2042 : case 3: /* use bits [43:32] of the address */
2043 0 : vector = ((mc_addr[4]) | (((uint16_t)mc_addr[5]) << 8));
2044 0 : break;
2045 : default: /* Invalid mc_filter_type */
2046 : DEBUGOUT("MC filter type param set incorrectly\n");
2047 0 : panic("incorrect multicast filter type");
2048 : break;
2049 : }
2050 :
2051 : /* vector can only be 12-bits or boundary will be exceeded */
2052 0 : vector &= 0xFFF;
2053 0 : return vector;
2054 : }
2055 :
2056 : /**
2057 : * ixgbe_set_mta - Set bit-vector in multicast table
2058 : * @hw: pointer to hardware structure
2059 : * @mc_addr: multicast address to hash into the table
2060 : *
2061 : * Sets the bit-vector in the multicast table.
2062 : **/
2063 0 : void ixgbe_set_mta(struct ixgbe_hw *hw, uint8_t *mc_addr)
2064 : {
2065 : uint32_t vector;
2066 : uint32_t vector_bit;
2067 : uint32_t vector_reg;
2068 :
2069 : DEBUGFUNC("ixgbe_set_mta");
2070 :
2071 0 : hw->addr_ctrl.mta_in_use++;
2072 :
2073 0 : vector = ixgbe_mta_vector(hw, mc_addr);
2074 : DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2075 :
2076 : /*
2077 : * The MTA is a register array of 128 32-bit registers. It is treated
2078 : * like an array of 4096 bits. We want to set bit
2079 : * BitArray[vector_value]. So we figure out what register the bit is
2080 : * in, read it, OR in the new bit, then write back the new value. The
2081 : * register is determined by the upper 7 bits of the vector value and
2082 : * the bit within that register is determined by the lower 5 bits of
2083 : * the value.
2084 : */
2085 0 : vector_reg = (vector >> 5) & 0x7F;
2086 0 : vector_bit = vector & 0x1F;
2087 0 : hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2088 0 : }
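
/*
 * Illustrative sketch (editor's addition, not part of the driver): with
 * mc_filter_type 0 the vector comes from address bits [47:36], i.e. all
 * of byte 5 plus the top nibble of byte 4, and is then split into a
 * 7-bit MTA register index and a 5-bit bit index as described above.
 */
#include <stdint.h>

static void
ex_mta_lookup(const uint8_t mc_addr[6], uint32_t *reg, uint32_t *bit)
{
	uint32_t vector;

	/* Filter type 0: bits [47:36] of the address, 12 bits total. */
	vector = ((mc_addr[4] >> 4) | ((uint32_t)mc_addr[5] << 4)) & 0xFFF;

	*reg = (vector >> 5) & 0x7F;	/* which of the 128 MTA registers */
	*bit = vector & 0x1F;		/* which bit within that register */
}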
2089 :
2090 : /**
2091 : * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2092 : * @hw: pointer to hardware structure
2093 : * @mc_addr_list: the list of new multicast addresses
2094 : * @mc_addr_count: number of addresses
2095 : * @next: iterator function to walk the multicast address list
2096 : * @clear: flag, when set clears the table beforehand
2097 : *
2098 : * When the clear flag is set, the given list replaces any existing list.
2099 : * Hashes the given addresses into the multicast table.
2100 : **/
2101 0 : int32_t ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, uint8_t *mc_addr_list,
2102 : uint32_t mc_addr_count, ixgbe_mc_addr_itr next,
2103 : bool clear)
2104 : {
2105 : uint32_t i;
2106 0 : uint32_t vmdq;
2107 :
2108 : DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2109 :
2110 : /*
2111 : * Set the new number of MC addresses that we are being requested to
2112 : * use.
2113 : */
2114 0 : hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2115 0 : hw->addr_ctrl.mta_in_use = 0;
2116 :
2117 : /* Clear mta_shadow */
2118 0 : if (clear) {
2119 : DEBUGOUT(" Clearing MTA\n");
2120 0 : memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2121 0 : }
2122 :
2123 : /* Update mta_shadow */
2124 0 : for (i = 0; i < mc_addr_count; i++) {
2125 : DEBUGOUT(" Adding the multicast addresses:\n");
2126 0 : ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2127 : }
2128 :
2129 : /* Enable mta */
2130 0 : for (i = 0; i < hw->mac.mcft_size; i++)
2131 0 : IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2132 : hw->mac.mta_shadow[i]);
2133 :
2134 0 : if (hw->addr_ctrl.mta_in_use > 0)
2135 0 : IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2136 : IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2137 :
2138 : DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2139 0 : return IXGBE_SUCCESS;
2140 0 : }
2141 :
2142 : /**
2143 : * ixgbe_enable_mc_generic - Enable multicast address in RAR
2144 : * @hw: pointer to hardware structure
2145 : *
2146 : * Enables multicast address in RAR and the use of the multicast hash table.
2147 : **/
2148 0 : int32_t ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2149 : {
2150 0 : struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2151 :
2152 : DEBUGFUNC("ixgbe_enable_mc_generic");
2153 :
2154 0 : if (a->mta_in_use > 0)
2155 0 : IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2156 : hw->mac.mc_filter_type);
2157 :
2158 0 : return IXGBE_SUCCESS;
2159 : }
2160 :
2161 : /**
2162 : * ixgbe_disable_mc_generic - Disable multicast address in RAR
2163 : * @hw: pointer to hardware structure
2164 : *
2165 : * Disables multicast address in RAR and the use of the multicast hash table.
2166 : **/
2167 0 : int32_t ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2168 : {
2169 0 : struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2170 :
2171 : DEBUGFUNC("ixgbe_disable_mc_generic");
2172 :
2173 0 : if (a->mta_in_use > 0)
2174 0 : IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2175 :
2176 0 : return IXGBE_SUCCESS;
2177 : }
2178 :
2179 : /**
2180 : * ixgbe_fc_enable_generic - Enable flow control
2181 : * @hw: pointer to hardware structure
2182 : *
2183 : * Enable flow control according to the current settings.
2184 : **/
2185 0 : int32_t ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2186 : {
2187 : int32_t ret_val = IXGBE_SUCCESS;
2188 : uint32_t mflcn_reg, fccfg_reg;
2189 : uint32_t reg;
2190 : uint32_t fcrtl, fcrth;
2191 : int i;
2192 :
2193 : DEBUGFUNC("ixgbe_fc_enable_generic");
2194 :
2195 : /* Validate the water mark configuration */
2196 0 : if (!hw->fc.pause_time) {
2197 : ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2198 0 : goto out;
2199 : }
2200 :
2201 : /* Low water mark of zero causes XOFF floods */
2202 0 : for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2203 0 : if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2204 0 : hw->fc.high_water[i]) {
2205 0 : if (!hw->fc.low_water[i] ||
2206 0 : hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2207 : DEBUGOUT("Invalid water mark configuration\n");
2208 : ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2209 0 : goto out;
2210 : }
2211 : }
2212 : }
2213 :
2214 : /* Negotiate the fc mode to use */
2215 0 : ixgbe_fc_autoneg(hw);
2216 :
2217 : /* Disable any previous flow control settings */
2218 0 : mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2219 0 : mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2220 :
2221 0 : fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2222 0 : fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2223 :
2224 : /*
2225 : * The possible values of fc.current_mode are:
2226 : * 0: Flow control is completely disabled
2227 : * 1: Rx flow control is enabled (we can receive pause frames,
2228 : * but not send pause frames).
2229 : * 2: Tx flow control is enabled (we can send pause frames but
2230 : * we do not support receiving pause frames).
2231 : * 3: Both Rx and Tx flow control (symmetric) are enabled.
2232 : * other: Invalid.
2233 : */
2234 0 : switch (hw->fc.current_mode) {
2235 : case ixgbe_fc_none:
2236 : /*
2237 : * Flow control is disabled by software override or autoneg.
2238 : * The code below will actually disable it in the HW.
2239 : */
2240 : break;
2241 : case ixgbe_fc_rx_pause:
2242 : /*
2243 : * Rx Flow control is enabled and Tx Flow control is
2244 : * disabled by software override. Since there really
2245 : * isn't a way to advertise that we are capable of RX
2246 : * Pause ONLY, we will advertise that we support both
2247 : * symmetric and asymmetric Rx PAUSE. Later, we will
2248 : * disable the adapter's ability to send PAUSE frames.
2249 : */
2250 0 : mflcn_reg |= IXGBE_MFLCN_RFCE;
2251 0 : break;
2252 : case ixgbe_fc_tx_pause:
2253 : /*
2254 : * Tx Flow control is enabled, and Rx Flow control is
2255 : * disabled by software override.
2256 : */
2257 0 : fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2258 0 : break;
2259 : case ixgbe_fc_full:
2260 : /* Flow control (both Rx and Tx) is enabled by SW override. */
2261 0 : mflcn_reg |= IXGBE_MFLCN_RFCE;
2262 0 : fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2263 0 : break;
2264 : default:
2265 : ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
2266 : "Flow control param set incorrectly\n");
2267 : ret_val = IXGBE_ERR_CONFIG;
2268 0 : goto out;
2269 : break;
2270 : }
2271 :
2272 : /* Set 802.3x based flow control settings. */
2273 0 : mflcn_reg |= IXGBE_MFLCN_DPF;
2274 0 : IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2275 0 : IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2276 :
2277 :
2278 : /* Set up and enable Rx high/low water mark thresholds, enable XON. */
2279 0 : for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2280 0 : if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2281 0 : hw->fc.high_water[i]) {
2282 0 : fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2283 0 : IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2284 0 : fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2285 0 : } else {
2286 0 : IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2287 : /*
2288 : * In order to prevent Tx hangs when the internal Tx
2289 : * switch is enabled we must set the high water mark
2290 : * to the Rx packet buffer size - 24KB. This allows
2291 : * the Tx switch to function even under heavy Rx
2292 : * workloads.
2293 : */
2294 0 : fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 0x6000;
2295 : }
2296 :
2297 0 : IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2298 : }
2299 :
2300 : /* Configure pause time (2 TCs per register) */
2301 0 : reg = hw->fc.pause_time * 0x00010001;
2302 0 : for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2303 0 : IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2304 :
2305 : /* Configure flow control refresh threshold value */
2306 0 : IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2307 :
2308 : out:
2309 0 : return ret_val;
2310 : }
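
/*
 * Illustrative sketch (editor's addition, not part of the driver): each
 * FCTTV register carries the pause time for two traffic classes in its
 * two 16-bit halves, which is why the code above multiplies the 16-bit
 * pause_time by 0x00010001.  E.g. 0x0680 becomes 0x06800680.
 */
#include <stdint.h>

static uint32_t
ex_pack_pause_time(uint16_t pause_time)
{
	/* Equivalent to ((uint32_t)pause_time << 16) | pause_time. */
	return (uint32_t)pause_time * 0x00010001U;
}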
2311 :
2312 : /**
2313 : * ixgbe_negotiate_fc - Negotiate flow control
2314 : * @hw: pointer to hardware structure
2315 : * @adv_reg: flow control advertised settings
2316 : * @lp_reg: link partner's flow control settings
2317 : * @adv_sym: symmetric pause bit in advertisement
2318 : * @adv_asm: asymmetric pause bit in advertisement
2319 : * @lp_sym: symmetric pause bit in link partner advertisement
2320 : * @lp_asm: asymmetric pause bit in link partner advertisement
2321 : *
2322 : * Find the intersection between advertised settings and link partner's
2323 : * advertised settings
2324 : **/
2325 0 : int32_t ixgbe_negotiate_fc(struct ixgbe_hw *hw, uint32_t adv_reg,
2326 : uint32_t lp_reg, uint32_t adv_sym,
2327 : uint32_t adv_asm, uint32_t lp_sym,
2328 : uint32_t lp_asm)
2329 : {
2330 0 : if ((!(adv_reg)) || (!(lp_reg))) {
2331 : ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2332 : "Local or link partner's advertised flow control "
2333 : "settings are NULL. Local: %x, link partner: %x\n",
2334 : adv_reg, lp_reg);
2335 0 : return IXGBE_ERR_FC_NOT_NEGOTIATED;
2336 : }
2337 :
2338 0 : if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2339 : /*
2340 : * Now we need to check if the user selected Rx ONLY
2341 : * pause frames. In this case, we had to advertise
2342 : * FULL flow control because we could not advertise RX
2343 : * ONLY. Hence, we must now check to see if we need to
2344 : * turn OFF the TRANSMISSION of PAUSE frames.
2345 : */
2346 0 : if (hw->fc.requested_mode == ixgbe_fc_full) {
2347 0 : hw->fc.current_mode = ixgbe_fc_full;
2348 : DEBUGOUT("Flow Control = FULL.\n");
2349 0 : } else {
2350 0 : hw->fc.current_mode = ixgbe_fc_rx_pause;
2351 : DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2352 : }
2353 0 : } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2354 0 : (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2355 0 : hw->fc.current_mode = ixgbe_fc_tx_pause;
2356 : DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2357 0 : } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2358 0 : !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2359 0 : hw->fc.current_mode = ixgbe_fc_rx_pause;
2360 : DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2361 0 : } else {
2362 0 : hw->fc.current_mode = ixgbe_fc_none;
2363 : DEBUGOUT("Flow Control = NONE.\n");
2364 : }
2365 0 : return IXGBE_SUCCESS;
2366 0 : }
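
/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * clause 28B-style resolution above boils down to this truth table over
 * the local and link partner symmetric/asymmetric pause bits.  The enum
 * and helper names are hypothetical stand-ins for the ixgbe_fc_* modes.
 */
enum ex_fc_mode { EX_FC_NONE, EX_FC_RX_PAUSE, EX_FC_TX_PAUSE, EX_FC_FULL };

static enum ex_fc_mode
ex_resolve_fc(int adv_sym, int adv_asm, int lp_sym, int lp_asm,
    enum ex_fc_mode requested)
{
	if (adv_sym && lp_sym)
		/* Both sides offer symmetric pause: honour the request. */
		return (requested == EX_FC_FULL) ? EX_FC_FULL : EX_FC_RX_PAUSE;
	if (!adv_sym && adv_asm && lp_sym && lp_asm)
		return EX_FC_TX_PAUSE;	/* we may send PAUSE, partner obeys */
	if (adv_sym && adv_asm && !lp_sym && lp_asm)
		return EX_FC_RX_PAUSE;	/* partner may send PAUSE, we obey */
	return EX_FC_NONE;
}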
2367 :
2368 : /**
2369 : * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2370 : * @hw: pointer to hardware structure
2371 : *
2372 : * Enable flow control on 1 gig fiber.
2373 : **/
2374 0 : int32_t ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2375 : {
2376 : uint32_t pcs_anadv_reg, pcs_lpab_reg, linkstat;
2377 : int32_t ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2378 :
2379 : /*
2380 : * On multispeed fiber at 1g, bail out if
2381 : * - link is up but AN did not complete, or if
2382 : * - link is up and AN completed but timed out
2383 : */
2384 :
2385 0 : linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2386 0 : if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2387 0 : (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
2388 : DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
2389 : goto out;
2390 : }
2391 :
2392 0 : pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2393 0 : pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2394 :
2395 0 : ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2396 : pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2397 : IXGBE_PCS1GANA_ASM_PAUSE,
2398 : IXGBE_PCS1GANA_SYM_PAUSE,
2399 : IXGBE_PCS1GANA_ASM_PAUSE);
2400 :
2401 : out:
2402 0 : return ret_val;
2403 : }
2404 :
2405 : /**
2406 : * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2407 : * @hw: pointer to hardware structure
2408 : *
2409 : * Enable flow control according to IEEE clause 37.
2410 : **/
2411 0 : int32_t ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2412 : {
2413 : uint32_t links2, anlp1_reg, autoc_reg, links;
2414 : int32_t ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2415 :
2416 : /*
2417 : * On backplane, bail out if
2418 : * - backplane autoneg was not completed, or if
2419 : * - we are 82599 and link partner is not AN enabled
2420 : */
2421 0 : links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2422 0 : if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
2423 : DEBUGOUT("Auto-Negotiation did not complete\n");
2424 : goto out;
2425 : }
2426 :
2427 0 : if (hw->mac.type == ixgbe_mac_82599EB) {
2428 0 : links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2429 0 : if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
2430 : DEBUGOUT("Link partner is not AN enabled\n");
2431 : goto out;
2432 : }
2433 : }
2434 : /*
2435 : * Read the 10g AN autoc and LP ability registers and resolve
2436 : * local flow control settings accordingly
2437 : */
2438 0 : autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2439 0 : anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2440 :
2441 0 : ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2442 : anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2443 : IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2444 :
2445 : out:
2446 0 : return ret_val;
2447 : }
2448 :
2449 : /**
2450 : * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2451 : * @hw: pointer to hardware structure
2452 : *
2453 : * Enable flow control according to IEEE clause 37.
2454 : **/
2455 0 : int32_t ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2456 : {
2457 0 : uint16_t technology_ability_reg = 0;
2458 0 : uint16_t lp_technology_ability_reg = 0;
2459 :
2460 0 : hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2461 : IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2462 : &technology_ability_reg);
2463 0 : hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2464 : IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2465 : &lp_technology_ability_reg);
2466 :
2467 0 : return ixgbe_negotiate_fc(hw, (uint32_t)technology_ability_reg,
2468 0 : (uint32_t)lp_technology_ability_reg,
2469 : IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2470 : IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2471 0 : }
2472 :
2473 : /**
2474 : * ixgbe_fc_autoneg - Configure flow control
2475 : * @hw: pointer to hardware structure
2476 : *
2477 : * Compares our advertised flow control capabilities to those advertised by
2478 : * our link partner, and determines the proper flow control mode to use.
2479 : **/
2480 0 : void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2481 : {
2482 : int32_t ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2483 0 : ixgbe_link_speed speed;
2484 0 : bool link_up;
2485 :
2486 : DEBUGFUNC("ixgbe_fc_autoneg");
2487 :
2488 : /*
2489 : * AN should have completed when the cable was plugged in.
2490 : * Look for reasons to bail out. Bail out if:
2491 : * - FC autoneg is disabled, or if
2492 : * - link is not up.
2493 : */
2494 0 : if (hw->fc.disable_fc_autoneg) {
2495 : ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
2496 : "Flow control autoneg is disabled");
2497 : goto out;
2498 : }
2499 :
2500 0 : hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2501 0 : if (!link_up) {
2502 : ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
2503 : goto out;
2504 : }
2505 :
2506 0 : switch (hw->phy.media_type) {
2507 : /* Autoneg flow control on fiber adapters */
2508 : case ixgbe_media_type_fiber_fixed:
2509 : case ixgbe_media_type_fiber_qsfp:
2510 : case ixgbe_media_type_fiber:
2511 0 : if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2512 0 : ret_val = ixgbe_fc_autoneg_fiber(hw);
2513 : break;
2514 :
2515 : /* Autoneg flow control on backplane adapters */
2516 : case ixgbe_media_type_backplane:
2517 0 : ret_val = ixgbe_fc_autoneg_backplane(hw);
2518 0 : break;
2519 :
2520 : /* Autoneg flow control on copper adapters */
2521 : case ixgbe_media_type_copper:
2522 0 : if (ixgbe_device_supports_autoneg_fc(hw))
2523 0 : ret_val = ixgbe_fc_autoneg_copper(hw);
2524 : break;
2525 :
2526 : default:
2527 : break;
2528 : }
2529 :
2530 : out:
2531 0 : if (ret_val == IXGBE_SUCCESS) {
2532 0 : hw->fc.fc_was_autonegged = TRUE;
2533 0 : } else {
2534 0 : hw->fc.fc_was_autonegged = FALSE;
2535 0 : hw->fc.current_mode = hw->fc.requested_mode;
2536 : }
2537 0 : }
2538 :
2539 : /*
2540 : * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
2541 : * @hw: pointer to hardware structure
2542 : *
2543 : * System-wide timeout range is encoded in PCIe Device Control2 register.
2544 : *
2545 : * Add 10% to specified maximum and return the number of times to poll for
2546 : * completion timeout, in units of 100 microsec. Never return less than
2547 : * 800 = 80 millisec.
2548 : */
2549 0 : static uint32_t ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
2550 : {
2551 : int16_t devctl2;
2552 : uint32_t pollcnt;
2553 :
2554 0 : devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
2555 0 : devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
2556 :
2557 0 : switch (devctl2) {
2558 : case IXGBE_PCIDEVCTRL2_65_130ms:
2559 : pollcnt = 1300; /* 130 millisec */
2560 0 : break;
2561 : case IXGBE_PCIDEVCTRL2_260_520ms:
2562 : pollcnt = 5200; /* 520 millisec */
2563 0 : break;
2564 : case IXGBE_PCIDEVCTRL2_1_2s:
2565 : pollcnt = 20000; /* 2 sec */
2566 0 : break;
2567 : case IXGBE_PCIDEVCTRL2_4_8s:
2568 : pollcnt = 80000; /* 8 sec */
2569 0 : break;
2570 : case IXGBE_PCIDEVCTRL2_17_34s:
2571 : pollcnt = 34000; /* 34 sec */
2572 0 : break;
2573 : case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
2574 : case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
2575 : case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
2576 : case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
2577 : default:
2578 : pollcnt = 800; /* 80 millisec minimum */
2579 0 : break;
2580 : }
2581 :
2582 : /* add 10% to spec maximum */
2583 0 : return (pollcnt * 11) / 10;
2584 : }
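
/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * conversion above is "maximum timeout in 100 us units, floored at 80 ms,
 * plus a 10% margin".  For the 260-520 ms encoding, 520 ms gives 5200
 * polls, padded to 5720.
 */
#include <stdint.h>

static uint32_t
ex_timeout_to_polls(uint32_t max_timeout_ms)
{
	uint32_t pollcnt = max_timeout_ms * 10;	/* 100 us units */

	if (pollcnt < 800)			/* never below 80 ms */
		pollcnt = 800;
	return (pollcnt * 11) / 10;		/* add 10% to spec maximum */
}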
2585 :
2586 : /**
2587 : * ixgbe_disable_pcie_master - Disable PCI-express master access
2588 : * @hw: pointer to hardware structure
2589 : *
2590 : * Disables PCI-Express master access and verifies there are no pending
2591 : * requests. Returns IXGBE_ERR_MASTER_REQUESTS_PENDING if the master disable
2592 : * bit fails to stop all pending master requests, otherwise IXGBE_SUCCESS is
2593 : * returned to signify that master requests are disabled.
2594 : **/
2595 0 : int32_t ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2596 : {
2597 : int32_t status = IXGBE_SUCCESS;
2598 : uint32_t i, poll;
2599 :
2600 : DEBUGFUNC("ixgbe_disable_pcie_master");
2601 :
2602 : /* Always set this bit to ensure any future transactions are blocked */
2603 0 : IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
2604 :
2605 : /* Exit if master requests are blocked */
2606 0 : if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2607 : goto out;
2608 :
2609 : /* Poll for master request bit to clear */
2610 0 : for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2611 0 : usec_delay(100);
2612 0 : if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2613 : goto out;
2614 : }
2615 :
2616 : /*
2617 : * Two consecutive resets are required via CTRL.RST per datasheet
2618 : * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
2619 : * of this need. The first reset prevents new master requests from
2620 : * being issued by our device. We then must wait 1usec or more for any
2621 : * remaining completions from the PCIe bus to trickle in, and then reset
2622 : * again to clear out any effects they may have had on our device.
2623 : */
2624 : DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
2625 0 : hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2626 :
2627 0 : if (hw->mac.type >= ixgbe_mac_X550)
2628 : goto out;
2629 :
2630 : /*
2631 : * Before proceeding, make sure that the PCIe block does not have
2632 : * transactions pending.
2633 : */
2634 0 : poll = ixgbe_pcie_timeout_poll(hw);
2635 0 : for (i = 0; i < poll; i++) {
2636 0 : usec_delay(100);
2637 0 : if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
2638 : IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
2639 : goto out;
2640 : }
2641 :
2642 : ERROR_REPORT1(IXGBE_ERROR_POLLING,
2643 : "PCIe transaction pending bit also did not clear.\n");
2644 0 : status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
2645 :
2646 : out:
2647 0 : return status;
2648 : }
2649 :
2650 : /**
2651 : * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
2652 : * @hw: pointer to hardware structure
2653 : * @mask: Mask to specify which semaphore to acquire
2654 : *
2655 : * Acquires the SWFW semaphore through the GSSR register for the specified
2656 : * function (CSR, PHY0, PHY1, EEPROM, Flash)
2657 : **/
2658 0 : int32_t ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, uint32_t mask)
2659 : {
2660 : uint32_t gssr = 0;
2661 : uint32_t swmask = mask;
2662 0 : uint32_t fwmask = mask << 5;
2663 : uint32_t timeout = 200;
2664 : uint32_t i;
2665 :
2666 : DEBUGFUNC("ixgbe_acquire_swfw_sync");
2667 :
2668 0 : for (i = 0; i < timeout; i++) {
2669 : /*
2670 : * SW NVM semaphore bit is used for access to all
2671 : * SW_FW_SYNC bits (not just NVM)
2672 : */
2673 0 : if (ixgbe_get_eeprom_semaphore(hw))
2674 0 : return IXGBE_ERR_SWFW_SYNC;
2675 :
2676 0 : gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2677 0 : if (!(gssr & (fwmask | swmask))) {
2678 0 : gssr |= swmask;
2679 0 : IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2680 0 : ixgbe_release_eeprom_semaphore(hw);
2681 0 : return IXGBE_SUCCESS;
2682 : } else {
2683 : /* Resource is currently in use by FW or SW */
2684 0 : ixgbe_release_eeprom_semaphore(hw);
2685 0 : msec_delay(5);
2686 : }
2687 : }
2688 :
2689 : /* If time expired clear the bits holding the lock and retry */
2690 0 : if (gssr & (fwmask | swmask))
2691 0 : ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
2692 :
2693 0 : msec_delay(5);
2694 0 : return IXGBE_ERR_SWFW_SYNC;
2695 0 : }
2696 :
2697 : /**
2698 : * ixgbe_release_swfw_sync - Release SWFW semaphore
2699 : * @hw: pointer to hardware structure
2700 : * @mask: Mask to specify which semaphore to release
2701 : *
2702 : * Releases the SWFW semaphore through the GSSR register for the specified
2703 : * function (CSR, PHY0, PHY1, EEPROM, Flash)
2704 : **/
2705 0 : void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, uint32_t mask)
2706 : {
2707 : uint32_t gssr;
2708 : uint32_t swmask = mask;
2709 :
2710 : DEBUGFUNC("ixgbe_release_swfw_sync");
2711 :
2712 0 : ixgbe_get_eeprom_semaphore(hw);
2713 :
2714 0 : gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2715 0 : gssr &= ~swmask;
2716 0 : IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2717 :
2718 0 : ixgbe_release_eeprom_semaphore(hw);
2719 0 : }
2720 :
2721 : /**
2722 : * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
2723 : * @hw: pointer to hardware structure
2724 : *
2725 : * Stops the receive data path and waits for the HW to internally empty
2726 : * the Rx security block
2727 : **/
2728 0 : int32_t ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
2729 : {
2730 : #define IXGBE_MAX_SECRX_POLL 40
2731 :
2732 : int i;
2733 : int secrxreg;
2734 :
2735 : DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
2736 :
2737 :
2738 0 : secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2739 0 : secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
2740 0 : IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2741 0 : for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2742 0 : secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2743 0 : if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2744 : break;
2745 : else
2746 : /* Use interrupt-safe sleep just in case */
2747 0 : usec_delay(1000);
2748 : }
2749 :
2750 : /* For informational purposes only */
2751 : if (i >= IXGBE_MAX_SECRX_POLL)
2752 : DEBUGOUT("Rx unit being enabled before security "
2753 : "path fully disabled. Continuing with init.\n");
2754 :
2755 0 : return IXGBE_SUCCESS;
2756 : }
2757 :
2758 : /**
2759 : * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
2760 : * @hw: pointer to hardware structure
 * @locked: set to FALSE, no SW/FW lock is needed in the generic case
2761 : * @reg_val: value read from AUTOC
2762 : *
2763 : * The default case requires no protection, so just do the register read.
2764 : */
2765 0 : int32_t prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked,
2766 : uint32_t *reg_val)
2767 : {
2768 0 : *locked = FALSE;
2769 0 : *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2770 0 : return IXGBE_SUCCESS;
2771 : }
2772 :
2773 : /**
2774 : * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
2775 : * @hw: pointer to hardware structure
2776 : * @reg_val: value to write to AUTOC
2777 : * @locked: bool to indicate whether the SW/FW lock was already taken by
2778 : * previous read.
2779 : *
2780 : * The default case requires no protection, so just do the register write.
2781 : */
2782 0 : int32_t prot_autoc_write_generic(struct ixgbe_hw *hw, uint32_t reg_val,
2783 : bool locked)
2784 : {
2785 0 : IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
2786 0 : return IXGBE_SUCCESS;
2787 : }
2788 :
2789 : /**
2790 : * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
2791 : * @hw: pointer to hardware structure
2792 : *
2793 : * Enables the receive data path.
2794 : **/
2795 0 : int32_t ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
2796 : {
2797 : int secrxreg;
2798 :
2799 : DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
2800 :
2801 0 : secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2802 0 : secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2803 0 : IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2804 0 : IXGBE_WRITE_FLUSH(hw);
2805 :
2806 0 : return IXGBE_SUCCESS;
2807 : }
2808 :
2809 : /**
2810 : * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
2811 : * @hw: pointer to hardware structure
2812 : * @regval: register value to write to RXCTRL
2813 : *
2814 : * Enables the Rx DMA unit
2815 : **/
2816 0 : int32_t ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, uint32_t regval)
2817 : {
2818 : DEBUGFUNC("ixgbe_enable_rx_dma_generic");
2819 :
2820 0 : if (regval & IXGBE_RXCTRL_RXEN)
2821 0 : ixgbe_enable_rx(hw);
2822 : else
2823 0 : ixgbe_disable_rx(hw);
2824 :
2825 0 : return IXGBE_SUCCESS;
2826 : }
2827 :
2828 : /**
2829 : * ixgbe_blink_led_start_generic - Blink LED based on index.
2830 : * @hw: pointer to hardware structure
2831 : * @index: led number to blink
2832 : **/
2833 0 : int32_t ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, uint32_t index)
2834 : {
2835 0 : ixgbe_link_speed speed = 0;
2836 0 : bool link_up = 0;
2837 0 : uint32_t autoc_reg = 0;
2838 0 : uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2839 : int32_t ret_val = IXGBE_SUCCESS;
2840 0 : bool locked = FALSE;
2841 :
2842 : DEBUGFUNC("ixgbe_blink_led_start_generic");
2843 :
2844 : /*
2845 : * Link must be up to auto-blink the LEDs;
2846 : * Force it if link is down.
2847 : */
2848 0 : hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2849 :
2850 0 : if (!link_up) {
2851 0 : ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
2852 0 : if (ret_val != IXGBE_SUCCESS)
2853 : goto out;
2854 :
2855 0 : autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2856 0 : autoc_reg |= IXGBE_AUTOC_FLU;
2857 :
2858 0 : ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
2859 0 : if (ret_val != IXGBE_SUCCESS)
2860 : goto out;
2861 :
2862 0 : IXGBE_WRITE_FLUSH(hw);
2863 0 : msec_delay(10);
2864 0 : }
2865 :
2866 0 : led_reg &= ~IXGBE_LED_MODE_MASK(index);
2867 0 : led_reg |= IXGBE_LED_BLINK(index);
2868 0 : IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2869 0 : IXGBE_WRITE_FLUSH(hw);
2870 :
2871 : out:
2872 0 : return ret_val;
2873 0 : }
2874 :
2875 : /**
2876 : * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
2877 : * @hw: pointer to hardware structure
2878 : * @index: led number to stop blinking
2879 : **/
2880 0 : int32_t ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, uint32_t index)
2881 : {
2882 0 : uint32_t autoc_reg = 0;
2883 0 : uint32_t led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2884 : int32_t ret_val = IXGBE_SUCCESS;
2885 0 : bool locked = FALSE;
2886 :
2887 : DEBUGFUNC("ixgbe_blink_led_stop_generic");
2888 :
2889 0 : ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
2890 0 : if (ret_val != IXGBE_SUCCESS)
2891 : goto out;
2892 :
2893 0 : autoc_reg &= ~IXGBE_AUTOC_FLU;
2894 0 : autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2895 :
2896 0 : ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
2897 0 : if (ret_val != IXGBE_SUCCESS)
2898 : goto out;
2899 :
2900 0 : led_reg &= ~IXGBE_LED_MODE_MASK(index);
2901 0 : led_reg &= ~IXGBE_LED_BLINK(index);
2902 0 : led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
2903 0 : IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2904 0 : IXGBE_WRITE_FLUSH(hw);
2905 :
2906 : out:
2907 0 : return ret_val;
2908 0 : }
2909 :
2910 : /**
2911 : * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
2912 : * @hw: pointer to hardware structure
2913 : *
2914 : * Read PCIe configuration space, and get the MSI-X vector count from
2915 : * the capabilities table.
2916 : **/
2917 0 : uint16_t ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
2918 : {
2919 : uint16_t msix_count = 1;
2920 : uint16_t max_msix_count;
2921 : uint16_t pcie_offset;
2922 :
2923 0 : switch (hw->mac.type) {
2924 : case ixgbe_mac_82598EB:
2925 : pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
2926 : max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
2927 0 : break;
2928 : case ixgbe_mac_82599EB:
2929 : case ixgbe_mac_X540:
2930 : case ixgbe_mac_X550:
2931 : case ixgbe_mac_X550EM_x:
2932 : pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
2933 : max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
2934 0 : break;
2935 : default:
2936 0 : return msix_count;
2937 : }
2938 :
2939 : DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
2940 0 : msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
2941 0 : msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
2942 :
2943 : /* MSI-X count is zero-based in HW */
2944 0 : msix_count++;
2945 :
2946 0 : if (msix_count > max_msix_count)
2947 0 : msix_count = max_msix_count;
2948 :
2949 0 : return msix_count;
2950 0 : }
2951 :
2952 : /**
2953 : * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
2954 : * @hw: pointer to hardware structure
2955 : * @addr: Address to put into receive address register
2956 : * @vmdq: VMDq pool to assign
2957 : *
2958 : * Puts an ethernet address into a receive address register, or
2959 : * finds the rar that it is already in; adds to the pool list
2960 : **/
2961 0 : int32_t ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, uint8_t *addr, uint32_t vmdq)
2962 : {
2963 : static const uint32_t NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
2964 : uint32_t first_empty_rar = NO_EMPTY_RAR_FOUND;
2965 : uint32_t rar;
2966 : uint32_t rar_low, rar_high;
2967 : uint32_t addr_low, addr_high;
2968 :
2969 : DEBUGFUNC("ixgbe_insert_mac_addr_generic");
2970 :
2971 : /* swap bytes for HW little endian */
2972 0 : addr_low = addr[0] | (addr[1] << 8)
2973 0 : | (addr[2] << 16)
2974 0 : | (addr[3] << 24);
2975 0 : addr_high = addr[4] | (addr[5] << 8);
2976 :
2977 : /*
2978 : * Either find the mac_id in rar or find the first empty space.
2979 : * rar_highwater points to just after the highest currently used
2980 : * rar in order to shorten the search. It grows when we add a new
2981 : * rar to the top.
2982 : */
2983 0 : for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
2984 0 : rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
2985 :
2986 0 : if (((IXGBE_RAH_AV & rar_high) == 0)
2987 0 : && first_empty_rar == NO_EMPTY_RAR_FOUND) {
2988 : first_empty_rar = rar;
2989 0 : } else if ((rar_high & 0xFFFF) == addr_high) {
2990 0 : rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
2991 0 : if (rar_low == addr_low)
2992 : break; /* found it already in the rars */
2993 : }
2994 : }
2995 :
2996 0 : if (rar < hw->mac.rar_highwater) {
2997 : /* already there so just add to the pool bits */
2998 0 : ixgbe_set_vmdq(hw, rar, vmdq);
2999 0 : } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3000 : /* stick it into first empty RAR slot we found */
3001 : rar = first_empty_rar;
3002 0 : ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3003 0 : } else if (rar == hw->mac.rar_highwater) {
3004 : /* add it to the top of the list and inc the highwater mark */
3005 0 : ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3006 0 : hw->mac.rar_highwater++;
3007 0 : } else if (rar >= hw->mac.num_rar_entries) {
3008 0 : return IXGBE_ERR_INVALID_MAC_ADDR;
3009 : }
3010 :
3011 : /*
3012 : * If we found rar[0], make sure the default pool bit (we use pool 0)
3013 : * remains cleared to be sure default pool packets will get delivered
3014 : */
3015 0 : if (rar == 0)
3016 0 : ixgbe_clear_vmdq(hw, rar, 0);
3017 :
3018 0 : return rar;
3019 0 : }
3020 :
3021 : /**
3022 : * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3023 : * @hw: pointer to hardware struct
3024 : * @rar: receive address register index to disassociate
3025 : * @vmdq: VMDq pool index to remove from the rar
3026 : **/
3027 0 : int32_t ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
3028 : {
3029 : uint32_t mpsar_lo, mpsar_hi;
3030 0 : uint32_t rar_entries = hw->mac.num_rar_entries;
3031 :
3032 : DEBUGFUNC("ixgbe_clear_vmdq_generic");
3033 :
3034 : /* Make sure we are using a valid rar index range */
3035 0 : if (rar >= rar_entries) {
3036 : ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3037 : "RAR index %d is out of range.\n", rar);
3038 0 : return IXGBE_ERR_INVALID_ARGUMENT;
3039 : }
3040 :
3041 0 : mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3042 0 : mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3043 :
3044 0 : if (!mpsar_lo && !mpsar_hi)
3045 : goto done;
3046 :
3047 0 : if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3048 0 : if (mpsar_lo) {
3049 0 : IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3050 : mpsar_lo = 0;
3051 0 : }
3052 0 : if (mpsar_hi) {
3053 0 : IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3054 : mpsar_hi = 0;
3055 0 : }
3056 0 : } else if (vmdq < 32) {
3057 0 : mpsar_lo &= ~(1 << vmdq);
3058 0 : IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3059 0 : } else {
3060 0 : mpsar_hi &= ~(1 << (vmdq - 32));
3061 0 : IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3062 : }
3063 :
3064 : /* was that the last pool using this rar? */
3065 0 : if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
3066 0 : hw->mac.ops.clear_rar(hw, rar);
3067 : done:
3068 0 : return IXGBE_SUCCESS;
3069 0 : }
3070 :
3071 : /**
3072 : * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3073 : * @hw: pointer to hardware struct
3074 : * @rar: receive address register index to associate with a VMDq index
3075 : * @vmdq: VMDq pool index
3076 : **/
3077 0 : int32_t ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
3078 : {
3079 : uint32_t mpsar;
3080 0 : uint32_t rar_entries = hw->mac.num_rar_entries;
3081 :
3082 : DEBUGFUNC("ixgbe_set_vmdq_generic");
3083 :
3084 : /* Make sure we are using a valid rar index range */
3085 0 : if (rar >= rar_entries) {
3086 : ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3087 : "RAR index %d is out of range.\n", rar);
3088 0 : return IXGBE_ERR_INVALID_ARGUMENT;
3089 : }
3090 :
3091 0 : if (vmdq < 32) {
3092 0 : mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3093 0 : mpsar |= 1 << vmdq;
3094 0 : IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3095 0 : } else {
3096 0 : mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3097 0 : mpsar |= 1 << (vmdq - 32);
3098 0 : IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3099 : }
3100 0 : return IXGBE_SUCCESS;
3101 0 : }
3102 :
3103 : /**
3104 : * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3105 : * @hw: pointer to hardware structure
3106 : **/
3107 0 : int32_t ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3108 : {
3109 : int i;
3110 :
3111 : DEBUGFUNC("ixgbe_init_uta_tables_generic");
3112 : DEBUGOUT(" Clearing UTA\n");
3113 :
3114 0 : for (i = 0; i < 128; i++)
3115 0 : IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3116 :
3117 0 : return IXGBE_SUCCESS;
3118 : }
3119 :
3120 : /**
3121 : * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3122 : * @hw: pointer to hardware structure
3123 : * @vlan: VLAN id to write to VLAN filter
3124 : *
3125 : * return the VLVF index where this VLAN id should be placed
3126 : *
3127 : **/
3128 0 : int32_t ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, uint32_t vlan)
3129 : {
3130 : uint32_t bits = 0;
3131 : uint32_t first_empty_slot = 0;
3132 : int32_t regindex;
3133 :
3134 : /* short cut the special case */
3135 0 : if (vlan == 0)
3136 0 : return 0;
3137 :
3138 : /*
3139 : * Search for the vlan id in the VLVF entries. Save off the first empty
3140 : * slot found along the way
3141 : */
3142 0 : for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
3143 0 : bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3144 0 : if (!bits && !(first_empty_slot))
3145 0 : first_empty_slot = regindex;
3146 0 : else if ((bits & 0x0FFF) == vlan)
3147 : break;
3148 : }
3149 :
3150 : /*
3151 : * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
3152 : * in the VLVF. Else use the first empty VLVF register for this
3153 : * vlan id.
3154 : */
3155 0 : if (regindex >= IXGBE_VLVF_ENTRIES) {
3156 0 : if (first_empty_slot)
3157 0 : regindex = first_empty_slot;
3158 : else {
3159 : ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
3160 : "No space in VLVF.\n");
3161 : regindex = IXGBE_ERR_NO_SPACE;
3162 : }
3163 : }
3164 :
3165 0 : return regindex;
3166 0 : }
3167 :
3168 : /**
3169 : * ixgbe_set_vfta_generic - Set VLAN filter table
3170 : * @hw: pointer to hardware structure
3171 : * @vlan: VLAN id to write to VLAN filter
3172 : * @vind: VMDq output index that maps queue to VLAN id in VLVFB
3173 : * @vlan_on: boolean flag to turn on/off VLAN in VLVF
3174 : *
3175 : * Turn on/off specified VLAN in the VLAN filter table.
3176 : **/
3177 0 : int32_t ixgbe_set_vfta_generic(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind,
3178 : bool vlan_on)
3179 : {
3180 : int32_t regindex;
3181 : uint32_t bitindex;
3182 : uint32_t vfta;
3183 : uint32_t targetbit;
3184 : int32_t ret_val = IXGBE_SUCCESS;
3185 0 : bool vfta_changed = FALSE;
3186 :
3187 : DEBUGFUNC("ixgbe_set_vfta_generic");
3188 :
3189 0 : if (vlan > 4095)
3190 0 : return IXGBE_ERR_PARAM;
3191 :
3192 : /*
3193 : * this is a 2 part operation - first the VFTA, then the
3194 : * VLVF and VLVFB if VT Mode is set
3195 : * We don't write the VFTA until we know the VLVF part succeeded.
3196 : */
3197 :
3198 : /* Part 1
3199 : * The VFTA is a bitstring made up of 128 32-bit registers
3200 : * that enable the particular VLAN id, much like the MTA:
3201 : * bits[11-5]: which register
3202 : * bits[4-0]: which bit in the register
3203 : */
3204 0 : regindex = (vlan >> 5) & 0x7F;
3205 0 : bitindex = vlan & 0x1F;
3206 0 : targetbit = (1 << bitindex);
3207 0 : vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
3208 :
3209 0 : if (vlan_on) {
3210 0 : if (!(vfta & targetbit)) {
3211 0 : vfta |= targetbit;
3212 0 : vfta_changed = TRUE;
3213 0 : }
3214 : } else {
3215 0 : if ((vfta & targetbit)) {
3216 0 : vfta &= ~targetbit;
3217 0 : vfta_changed = TRUE;
3218 0 : }
3219 : }
3220 :
3221 : /* Part 2
3222 : * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
3223 : */
3224 0 : ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
3225 : &vfta_changed);
3226 0 : if (ret_val != IXGBE_SUCCESS)
3227 0 : return ret_val;
3228 :
3229 0 : if (vfta_changed)
3230 0 : IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
3231 :
3232 0 : return IXGBE_SUCCESS;
3233 0 : }
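
/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * VFTA is 128 x 32 bits = 4096 VLAN bits, so for example VLAN id 100
 * lands in register 100 >> 5 = 3, bit 100 & 0x1F = 4.
 */
#include <stdint.h>

static void
ex_vfta_index(uint16_t vlan, uint32_t *regindex, uint32_t *targetbit)
{
	*regindex = (vlan >> 5) & 0x7F;		/* bits [11:5]: which register */
	*targetbit = 1U << (vlan & 0x1F);	/* bits [4:0]: which bit */
}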
3234 :
3235 : /**
3236 : * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
3237 : * @hw: pointer to hardware structure
3238 : * @vlan: VLAN id to write to VLAN filter
3239 : * @vind: VMDq output index that maps queue to VLAN id in VLVFB
3240 : * @vlan_on: boolean flag to turn on/off VLAN in VLVF
3241 : * @vfta_changed: pointer to boolean flag which indicates whether VFTA
3242 : * should be changed
3243 : *
3244 : * Turn on/off specified bit in VLVF table.
3245 : **/
3246 0 : int32_t ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind,
3247 : bool vlan_on, bool *vfta_changed)
3248 : {
3249 : uint32_t vt;
3250 :
3251 : DEBUGFUNC("ixgbe_set_vlvf_generic");
3252 :
3253 0 : if (vlan > 4095)
3254 0 : return IXGBE_ERR_PARAM;
3255 :
3256 : /* If VT Mode is set
3257 : * Either vlan_on
3258 : * make sure the vlan is in VLVF
3259 : * set the vind bit in the matching VLVFB
3260 : * Or !vlan_on
3261 : * clear the pool bit and possibly the vind
3262 : */
3263 0 : vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3264 0 : if (vt & IXGBE_VT_CTL_VT_ENABLE) {
3265 : int32_t vlvf_index;
3266 : uint32_t bits;
3267 :
3268 0 : vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
3269 0 : if (vlvf_index < 0)
3270 0 : return vlvf_index;
3271 :
3272 0 : if (vlan_on) {
3273 : /* set the pool bit */
3274 0 : if (vind < 32) {
3275 0 : bits = IXGBE_READ_REG(hw,
3276 : IXGBE_VLVFB(vlvf_index * 2));
3277 0 : bits |= (1 << vind);
3278 0 : IXGBE_WRITE_REG(hw,
3279 : IXGBE_VLVFB(vlvf_index * 2),
3280 : bits);
3281 0 : } else {
3282 0 : bits = IXGBE_READ_REG(hw,
3283 : IXGBE_VLVFB((vlvf_index * 2) + 1));
3284 0 : bits |= (1 << (vind - 32));
3285 0 : IXGBE_WRITE_REG(hw,
3286 : IXGBE_VLVFB((vlvf_index * 2) + 1),
3287 : bits);
3288 : }
3289 : } else {
3290 : /* clear the pool bit */
3291 0 : if (vind < 32) {
3292 0 : bits = IXGBE_READ_REG(hw,
3293 : IXGBE_VLVFB(vlvf_index * 2));
3294 0 : bits &= ~(1 << vind);
3295 0 : IXGBE_WRITE_REG(hw,
3296 : IXGBE_VLVFB(vlvf_index * 2),
3297 : bits);
3298 0 : bits |= IXGBE_READ_REG(hw,
3299 : IXGBE_VLVFB((vlvf_index * 2) + 1));
3300 0 : } else {
3301 0 : bits = IXGBE_READ_REG(hw,
3302 : IXGBE_VLVFB((vlvf_index * 2) + 1));
3303 0 : bits &= ~(1 << (vind - 32));
3304 0 : IXGBE_WRITE_REG(hw,
3305 : IXGBE_VLVFB((vlvf_index * 2) + 1),
3306 : bits);
3307 0 : bits |= IXGBE_READ_REG(hw,
3308 : IXGBE_VLVFB(vlvf_index * 2));
3309 : }
3310 : }
3311 :
3312 : /*
3313 : * If there are still bits set in the VLVFB registers
3314 : * for the VLAN ID indicated we need to see if the
3315 : * caller is requesting that we clear the VFTA entry bit.
3316 : * If the caller has requested that we clear the VFTA
3317 : * entry bit but there are still pools/VFs using this VLAN
3318 : * ID entry then ignore the request. We're not worried
3319 : * about the case where we're turning the VFTA VLAN ID
3320 : * entry bit on, only when requested to turn it off as
3321 : * there may be multiple pools and/or VFs using the
3322 : * VLAN ID entry. In that case we cannot clear the
3323 : * VFTA bit until all pools/VFs using that VLAN ID have also
3324 : * been cleared. This will be indicated by "bits" being
3325 : * zero.
3326 : */
3327 0 : if (bits) {
3328 0 : IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
3329 : (IXGBE_VLVF_VIEN | vlan));
3330 0 : if ((!vlan_on) && (vfta_changed != NULL)) {
3331 : /* someone wants to clear the vfta entry
3332 : * but some pools/VFs are still using it.
3333 : * Ignore it. */
3334 0 : *vfta_changed = FALSE;
3335 0 : }
3336 : } else
3337 0 : IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
3338 0 : }
3339 :
3340 0 : return IXGBE_SUCCESS;
3341 0 : }
3342 :
3343 : /**
3344 : * ixgbe_clear_vfta_generic - Clear VLAN filter table
3345 : * @hw: pointer to hardware structure
3346 : *
3347 : * Clears the VLAN filter table, and the VMDq index associated with the filter
3348 : **/
3349 0 : int32_t ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3350 : {
3351 : uint32_t offset;
3352 :
3353 : DEBUGFUNC("ixgbe_clear_vfta_generic");
3354 :
3355 0 : for (offset = 0; offset < hw->mac.vft_size; offset++)
3356 0 : IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3357 :
3358 0 : for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3359 0 : IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3360 0 : IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
3361 0 : IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
3362 : }
3363 :
3364 0 : return IXGBE_SUCCESS;
3365 : }
3366 :
3367 : /**
3368 : * ixgbe_check_mac_link_generic - Determine link and speed status
3369 : * @hw: pointer to hardware structure
3370 : * @speed: pointer to link speed
3371 : * @link_up: TRUE when link is up
3372 : * @link_up_wait_to_complete: TRUE to poll and wait for the link to come up
3373 : *
3374 : * Reads the links register to determine if link is up and the current speed
3375 : **/
3376 0 : int32_t ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3377 : bool *link_up, bool link_up_wait_to_complete)
3378 : {
3379 : uint32_t links_reg, links_orig;
3380 : uint32_t i;
3381 :
3382 : DEBUGFUNC("ixgbe_check_mac_link_generic");
3383 :
3384 : /* clear the old state */
3385 0 : links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
3386 :
3387 0 : links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3388 :
3389 : if (links_orig != links_reg) {
3390 : DEBUGOUT2("LINKS changed from %08X to %08X\n",
3391 : links_orig, links_reg);
3392 : }
3393 :
3394 0 : if (link_up_wait_to_complete) {
3395 0 : for (i = 0; i < hw->mac.max_link_up_time; i++) {
3396 0 : if (links_reg & IXGBE_LINKS_UP) {
3397 0 : *link_up = TRUE;
3398 0 : break;
3399 : } else {
3400 0 : *link_up = FALSE;
3401 : }
3402 0 : msec_delay(100);
3403 0 : links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3404 : }
3405 : } else {
3406 0 : if (links_reg & IXGBE_LINKS_UP)
3407 0 : *link_up = TRUE;
3408 : else
3409 0 : *link_up = FALSE;
3410 : }
3411 :
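 : /* On X550 and newer MACs, the LINKS_SPEED_NON_STD bit remaps
 : * the reported 10G encoding to 2.5G and the 100M encoding to 5G.
 : */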
3412 0 : switch (links_reg & IXGBE_LINKS_SPEED_82599) {
3413 : case IXGBE_LINKS_SPEED_10G_82599:
3414 0 : *speed = IXGBE_LINK_SPEED_10GB_FULL;
3415 0 : if (hw->mac.type >= ixgbe_mac_X550) {
3416 0 : if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
3417 0 : *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
3418 : }
3419 : break;
3420 : case IXGBE_LINKS_SPEED_1G_82599:
3421 0 : *speed = IXGBE_LINK_SPEED_1GB_FULL;
3422 0 : break;
3423 : case IXGBE_LINKS_SPEED_100_82599:
3424 0 : *speed = IXGBE_LINK_SPEED_100_FULL;
3425 0 : if (hw->mac.type >= ixgbe_mac_X550) {
3426 0 : if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
3427 0 : *speed = IXGBE_LINK_SPEED_5GB_FULL;
3428 : }
3429 : break;
3430 : default:
3431 0 : *speed = IXGBE_LINK_SPEED_UNKNOWN;
3432 0 : }
3433 :
3434 0 : return IXGBE_SUCCESS;
3435 : }
3436 :
3437 : /**
3438 : * ixgbe_get_device_caps_generic - Get additional device capabilities
3439 : * @hw: pointer to hardware structure
3440 : * @device_caps: the EEPROM word with the extra device capabilities
3441 : *
3442 : * This function will read the EEPROM location for the device capabilities,
3443 : * and return the word through device_caps.
3444 : **/
3445 0 : int32_t ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, uint16_t *device_caps)
3446 : {
3447 : DEBUGFUNC("ixgbe_get_device_caps_generic");
3448 :
3449 0 : hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
3450 :
3451 0 : return IXGBE_SUCCESS;
3452 : }
3453 :
3454 : /**
3455 : * ixgbe_host_interface_command - Issue command to manageability block
3456 : * @hw: pointer to the HW structure
3457 : * @buffer: contains the command to write and where the return status will
3458 : * be placed
3459 : * @length: length of buffer, must be multiple of 4 bytes
3460 : * @timeout: time in ms to wait for command completion
3461 : * @return_data: read and return data from the buffer (TRUE) or not (FALSE).
3462 : * FW structures are big endian and their fields may be 8 or
3463 : * 16 bits wide depending on the command, so decoding them
3464 : * here would require a table of commands. For such commands
3465 : * the caller is expected to read the returned data back out
3466 : * of the buffer itself.
3467 : *
3468 : * Communicates with the manageability block. On success return IXGBE_SUCCESS
3469 : * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
3470 : **/
3471 0 : int32_t ixgbe_host_interface_command(struct ixgbe_hw *hw, uint32_t *buffer,
3472 : uint32_t length, uint32_t timeout,
3473 : bool return_data)
3474 : {
3475 : uint32_t hicr, i, bi, fwsts;
3476 : uint32_t hdr_size = sizeof(struct ixgbe_hic_hdr);
3477 : uint16_t buf_len;
3478 : uint16_t dword_len;
3479 :
3480 : DEBUGFUNC("ixgbe_host_interface_command");
3481 :
3482 0 : if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
3483 : DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
3484 0 : return IXGBE_ERR_HOST_INTERFACE_COMMAND;
3485 : }
3486 : /* Set bit 9 of FWSTS clearing FW reset indication */
3487 0 : fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
3488 0 : IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
3489 :
3490 : /* Check that the host interface is enabled. */
3491 0 : hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
3492 0 : if ((hicr & IXGBE_HICR_EN) == 0) {
3493 : DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
3494 0 : return IXGBE_ERR_HOST_INTERFACE_COMMAND;
3495 : }
3496 :
3497 : /* Calculate length in DWORDs. We must be DWORD aligned */
3498 0 : if ((length % (sizeof(uint32_t))) != 0) {
3499 : DEBUGOUT("Buffer length failure, not aligned to dword");
3500 0 : return IXGBE_ERR_INVALID_ARGUMENT;
3501 : }
3502 :
3503 0 : dword_len = length >> 2;
3504 :
3505 : /* The device driver writes the relevant command block
3506 : * into the ram area.
3507 : */
3508 0 : for (i = 0; i < dword_len; i++)
3509 0 : IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3510 : i, htole32(buffer[i]));
3511 :
3512 : /* Setting this bit tells the ARC that a new command is pending. */
3513 0 : IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
3514 :
3515 0 : for (i = 0; i < timeout; i++) {
3516 0 : hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
3517 0 : if (!(hicr & IXGBE_HICR_C))
3518 : break;
3519 0 : msec_delay(1);
3520 : }
3521 :
3522 : /* Check command completion */
3523 0 : if ((timeout != 0 && i == timeout) ||
3524 0 : !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
3525 : DEBUGOUT("Command has failed with no status valid.\n");
3526 0 : return IXGBE_ERR_HOST_INTERFACE_COMMAND;
3527 : }
3528 :
3529 0 : if (!return_data)
3530 0 : return 0;
3531 :
3532 : /* Calculate length in DWORDs */
3533 : dword_len = hdr_size >> 2;
3534 :
3535 : /* first pull in the header so we know the buffer length */
3536 0 : for (bi = 0; bi < dword_len; bi++) {
3537 0 : buffer[bi] = letoh32(IXGBE_READ_REG_ARRAY(hw,
3538 : IXGBE_FLEX_MNG, bi));
3539 : }
3540 :
3541 : /* If there is anything in the data position, pull it in */
3542 0 : buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
3543 0 : if (buf_len == 0)
3544 0 : return 0;
3545 :
3546 0 : if (length < buf_len + hdr_size) {
3547 : DEBUGOUT("Buffer not large enough for reply message.\n");
3548 0 : return IXGBE_ERR_HOST_INTERFACE_COMMAND;
3549 : }
3550 :
3551 : /* Calculate length in DWORDs, add 3 for odd lengths */
3552 0 : dword_len = (buf_len + 3) >> 2;
3553 :
3554 : /* Pull in the rest of the buffer (bi is where we left off) */
3555 0 : for (; bi <= dword_len; bi++) {
3556 0 : buffer[bi] = letoh32(IXGBE_READ_REG_ARRAY(hw,
3557 : IXGBE_FLEX_MNG, bi));
3558 : }
3559 :
3560 0 : return 0;
3561 0 : }
3562 :
3563 : /**
3564 : * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
3565 : * @hw: pointer to the hardware structure
3566 : *
3567 : * The 82599 and X540 MACs can experience issues if TX work is still pending
3568 : * when a reset occurs. This function prevents this by flushing the PCIe
3569 : * buffers on the system.
3570 : **/
3571 0 : void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
3572 : {
3573 : uint32_t gcr_ext, hlreg0, i, poll;
3574 : uint16_t value;
3575 :
3576 : /*
3577 : * If double reset is not requested then all transactions should
3578 : * already be clear and as such there is no work to do
3579 : */
3580 0 : if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
3581 0 : return;
3582 :
3583 : /*
3584 : * Set loopback enable to prevent any transmits from being sent
3585 : * should the link come up. This assumes that the RXCTRL.RXEN bit
3586 : * has already been cleared.
3587 : */
3588 0 : hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3589 0 : IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
3590 :
3591 : /* Wait for a last completion before clearing buffers */
3592 0 : IXGBE_WRITE_FLUSH(hw);
3593 0 : msec_delay(3);
3594 :
3595 : /*
3596 : * Before proceeding, make sure that the PCIe block does not have
3597 : * transactions pending.
3598 : */
3599 0 : poll = ixgbe_pcie_timeout_poll(hw);
3600 0 : for (i = 0; i < poll; i++) {
3601 0 : usec_delay(100);
3602 0 : value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
3603 0 : if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
3604 : goto out;
3605 : }
3606 :
3607 : out:
3608 : /* initiate cleaning flow for buffers in the PCIe transaction layer */
3609 0 : gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
3610 0 : IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
3611 : gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
3612 :
3613 : /* Flush all writes and allow 20usec for all transactions to clear */
3614 0 : IXGBE_WRITE_FLUSH(hw);
3615 0 : usec_delay(20);
3616 :
3617 : /* restore previous register values */
3618 0 : IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3619 0 : IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3620 0 : }
3621 :
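 : /**
 : * ixgbe_disable_rx_generic - Disable the receive unit
 : * @hw: pointer to hardware structure
 : *
 : * Clears RXCTRL.RXEN if the receiver is currently enabled.
 : **/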
3622 0 : void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
3623 : {
3624 : uint32_t rxctrl;
3625 :
3626 0 : rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3627 0 : if (rxctrl & IXGBE_RXCTRL_RXEN) {
3628 0 : rxctrl &= ~IXGBE_RXCTRL_RXEN;
3629 0 : IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
3630 0 : }
3631 0 : }
3632 :
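 : /**
 : * ixgbe_enable_rx_generic - Enable the receive unit
 : * @hw: pointer to hardware structure
 : *
 : * Sets RXCTRL.RXEN to start the receiver.
 : **/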
3633 0 : void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
3634 : {
3635 : uint32_t rxctrl;
3636 :
3637 0 : rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3638 0 : IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
3639 0 : }
3640 :
3641 : /**
3642 : * ixgbe_mng_present - returns TRUE when management capability is present
3643 : * @hw: pointer to hardware structure
3644 : */
3645 0 : bool ixgbe_mng_present(struct ixgbe_hw *hw)
3646 : {
3647 : uint32_t fwsm;
3648 :
3649 0 : if (hw->mac.type < ixgbe_mac_82599EB)
3650 0 : return FALSE;
3651 :
3652 0 : fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3653 0 : fwsm &= IXGBE_FWSM_MODE_MASK;
3654 0 : return fwsm == IXGBE_FWSM_FW_MODE_PT;
3655 0 : }
3656 :
3657 : /**
3658 : * ixgbe_mng_enabled - Is the manageability engine enabled?
3659 : * @hw: pointer to hardware structure
3660 : *
3661 : * Returns TRUE if the manageability engine is enabled.
3662 : **/
3663 0 : bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
3664 : {
3665 : uint32_t fwsm, manc, factps;
3666 :
3667 0 : fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3668 0 : if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
3669 0 : return FALSE;
3670 :
3671 0 : manc = IXGBE_READ_REG(hw, IXGBE_MANC);
3672 0 : if (!(manc & IXGBE_MANC_RCV_TCO_EN))
3673 0 : return FALSE;
3674 :
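 : /* On X540 and earlier parts, a set FACTPS.MNGCG (manageability
 : * clock gated) bit means the engine is not enabled.
 : */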
3675 0 : if (hw->mac.type <= ixgbe_mac_X540) {
3676 0 : factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
3677 0 : if (factps & IXGBE_FACTPS_MNGCG)
3678 0 : return FALSE;
3679 : }
3680 :
3681 0 : return TRUE;
3682 0 : }
3683 :
3684 : /**
3685 : * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
3686 : * @hw: pointer to hardware structure
3687 : * @speed: new link speed
3688 : * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
3689 : *
3690 : * Sets the link speed in the MAC and/or PHY register and restarts the link.
3691 : **/
3692 0 : int32_t ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
3693 : ixgbe_link_speed speed,
3694 : bool autoneg_wait_to_complete)
3695 : {
3696 0 : ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
3697 : ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
3698 : int32_t status = IXGBE_SUCCESS;
3699 : uint32_t speedcnt = 0;
3700 : uint32_t i = 0;
3701 0 : bool autoneg, link_up = FALSE;
3702 :
3703 : DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
3704 :
3705 : /* Mask off requested but non-supported speeds */
3706 0 : if (!hw->mac.ops.get_link_capabilities)
3707 0 : return IXGBE_NOT_IMPLEMENTED;
3708 0 : status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg);
3709 0 : if (status != IXGBE_SUCCESS)
3710 0 : return status;
3711 :
3712 0 : speed &= link_speed;
3713 :
3714 : /* Try each speed one by one, highest priority first. We do this in
3715 : * software because 10Gb fiber doesn't support speed autonegotiation.
3716 : */
3717 0 : if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
3718 : speedcnt++;
3719 : highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
3720 :
3721 : /* If we already have link at this speed, just jump out */
3722 0 : status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
3723 0 : if (status != IXGBE_SUCCESS)
3724 0 : return status;
3725 :
3726 0 : if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
3727 : goto out;
3728 :
3729 : /* Set the module link speed */
3730 0 : switch (hw->phy.media_type) {
3731 : case ixgbe_media_type_fiber_fixed:
3732 : case ixgbe_media_type_fiber:
3733 0 : if (hw->mac.ops.set_rate_select_speed)
3734 0 : hw->mac.ops.set_rate_select_speed(hw,
3735 : IXGBE_LINK_SPEED_10GB_FULL);
3736 : break;
3737 : case ixgbe_media_type_fiber_qsfp:
3738 : /* QSFP module automatically detects MAC link speed */
3739 : break;
3740 : default:
3741 : DEBUGOUT("Unexpected media type.\n");
3742 : break;
3743 : }
3744 :
3745 : /* Allow module to change analog characteristics (1G->10G) */
3746 0 : msec_delay(40);
3747 :
3748 0 : if (!hw->mac.ops.setup_mac_link)
3749 0 : return IXGBE_NOT_IMPLEMENTED;
3750 0 : status = hw->mac.ops.setup_mac_link(hw,
3751 : IXGBE_LINK_SPEED_10GB_FULL,
3752 : autoneg_wait_to_complete);
3753 0 : if (status != IXGBE_SUCCESS)
3754 0 : return status;
3755 :
3756 : /* Flap the Tx laser if it has not already been done */
3757 0 : ixgbe_flap_tx_laser(hw);
3758 :
3759 : /* Wait for the controller to acquire link. Per IEEE 802.3ap,
3760 : * Section 73.10.2, we may have to wait up to 500ms if KR is
3761 : * attempted. 82599 uses the same timing for 10g SFI.
3762 : */
3763 0 : for (i = 0; i < 5; i++) {
3764 : /* Wait for the link partner to also set speed */
3765 0 : msec_delay(100);
3766 :
3767 : /* If we have link, just jump out */
3768 0 : status = ixgbe_check_link(hw, &link_speed,
3769 : &link_up, FALSE);
3770 0 : if (status != IXGBE_SUCCESS)
3771 0 : return status;
3772 :
3773 0 : if (link_up)
3774 : goto out;
3775 : }
3776 : }
3777 :
3778 0 : if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
3779 0 : speedcnt++;
3780 0 : if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
3781 0 : highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
3782 :
3783 : /* If we already have link at this speed, just jump out */
3784 0 : status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
3785 0 : if (status != IXGBE_SUCCESS)
3786 0 : return status;
3787 :
3788 0 : if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
3789 : goto out;
3790 :
3791 : /* Set the module link speed */
3792 0 : switch (hw->phy.media_type) {
3793 : case ixgbe_media_type_fiber_fixed:
3794 : case ixgbe_media_type_fiber:
3795 0 : if (hw->mac.ops.set_rate_select_speed)
3796 0 : hw->mac.ops.set_rate_select_speed(hw,
3797 : IXGBE_LINK_SPEED_1GB_FULL);
3798 : break;
3799 : case ixgbe_media_type_fiber_qsfp:
3800 : /* QSFP module automatically detects link speed */
3801 : break;
3802 : default:
3803 : DEBUGOUT("Unexpected media type.\n");
3804 : break;
3805 : }
3806 :
3807 : /* Allow module to change analog characteristics (10G->1G) */
3808 0 : msec_delay(40);
3809 :
3810 0 : if (!hw->mac.ops.setup_mac_link)
3811 0 : return IXGBE_NOT_IMPLEMENTED;
3812 0 : status = hw->mac.ops.setup_mac_link(hw,
3813 : IXGBE_LINK_SPEED_1GB_FULL,
3814 : autoneg_wait_to_complete);
3815 0 : if (status != IXGBE_SUCCESS)
3816 0 : return status;
3817 :
3818 : /* Flap the Tx laser if it has not already been done */
3819 0 : ixgbe_flap_tx_laser(hw);
3820 :
3821 : /* Wait for the link partner to also set speed */
3822 0 : msec_delay(100);
3823 :
3824 : /* If we have link, just jump out */
3825 0 : status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
3826 0 : if (status != IXGBE_SUCCESS)
3827 0 : return status;
3828 :
3829 0 : if (link_up)
3830 : goto out;
3831 : }
3832 :
3833 : /* We didn't get link. Configure back to the highest speed we tried
3834 : * (if there was more than one) by calling ourselves again with just
3835 : * that single highest speed.
3836 : */
3837 0 : if (speedcnt > 1)
3838 0 : status = ixgbe_setup_mac_link_multispeed_fiber(hw,
3839 : highest_link_speed,
3840 : autoneg_wait_to_complete);
3841 :
3842 : out:
3843 : /* Set autoneg_advertised value based on input link speed */
3844 0 : hw->phy.autoneg_advertised = 0;
3845 :
3846 0 : if (speed & IXGBE_LINK_SPEED_10GB_FULL)
3847 0 : hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
3848 :
3849 0 : if (speed & IXGBE_LINK_SPEED_1GB_FULL)
3850 0 : hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
3851 :
3852 0 : return status;
3853 0 : }
3854 :
3855 : /**
3856 : * ixgbe_set_soft_rate_select_speed - Set module link speed
3857 : * @hw: pointer to hardware structure
3858 : * @speed: link speed to set
3859 : *
3860 : * Set module link speed via the soft rate select.
3861 : */
3862 0 : void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
3863 : ixgbe_link_speed speed)
3864 : {
3865 : int32_t status;
3866 0 : uint8_t rs, eeprom_data;
3867 :
3868 0 : switch (speed) {
3869 : case IXGBE_LINK_SPEED_10GB_FULL:
3870 : /* one bit mask same as setting on */
3871 : rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
3872 0 : break;
3873 : case IXGBE_LINK_SPEED_1GB_FULL:
3874 : rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
3875 0 : break;
3876 : default:
3877 : DEBUGOUT("Invalid fixed module speed\n");
3878 0 : return;
3879 : }
3880 :
3881 : /* Set RS0 */
3882 0 : status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
3883 : IXGBE_I2C_EEPROM_DEV_ADDR2,
3884 : &eeprom_data);
3885 0 : if (status) {
3886 : DEBUGOUT("Failed to read Rx Rate Select RS0\n");
3887 : goto out;
3888 : }
3889 :
3890 0 : eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
3891 :
3892 0 : status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
3893 : IXGBE_I2C_EEPROM_DEV_ADDR2,
3894 : eeprom_data);
3895 0 : if (status) {
3896 : DEBUGOUT("Failed to write Rx Rate Select RS0\n");
3897 : goto out;
3898 : }
3899 :
3900 : /* Set RS1 */
3901 0 : status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
3902 : IXGBE_I2C_EEPROM_DEV_ADDR2,
3903 : &eeprom_data);
3904 0 : if (status) {
3905 : DEBUGOUT("Failed to read Rx Rate Select RS1\n");
3906 : goto out;
3907 : }
3908 :
3909 0 : eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
3910 :
3911 0 : status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
3912 : IXGBE_I2C_EEPROM_DEV_ADDR2,
3913 : eeprom_data);
3914 : if (status) {
3915 : DEBUGOUT("Failed to write Rx Rate Select RS1\n");
3916 0 : goto out;
3917 : }
3918 : out:
3919 0 : return;
3920 0 : }
3921 :
3922 : /* MAC Operations */
3923 :
3924 : /**
3925 : * ixgbe_init_shared_code - Initialize the shared code
3926 : * @hw: pointer to hardware structure
3927 : *
3928 : * This will assign function pointers and assign the MAC type and PHY code.
3929 : * Does not touch the hardware. This function must be called prior to any
3930 : * other function in the shared code. The ixgbe_hw structure should be
3931 : * memset to 0 prior to calling this function. The following fields in
3932 : * hw structure should be filled in prior to calling this function:
3933 : * hw_addr, back, device_id, vendor_id, subsystem_device_id,
3934 : * subsystem_vendor_id, and revision_id
3935 : **/
3936 0 : int32_t ixgbe_init_shared_code(struct ixgbe_hw *hw)
3937 : {
3938 : int32_t status;
3939 :
3940 : DEBUGFUNC("ixgbe_init_shared_code");
3941 :
3942 : /*
3943 : * Set the mac type
3944 : */
3945 0 : ixgbe_set_mac_type(hw);
3946 :
3947 0 : switch (hw->mac.type) {
3948 : case ixgbe_mac_82598EB:
3949 0 : status = ixgbe_init_ops_82598(hw);
3950 0 : break;
3951 : case ixgbe_mac_82599EB:
3952 0 : status = ixgbe_init_ops_82599(hw);
3953 0 : break;
3954 : case ixgbe_mac_X540:
3955 0 : status = ixgbe_init_ops_X540(hw);
3956 0 : break;
3957 : case ixgbe_mac_X550:
3958 0 : status = ixgbe_init_ops_X550(hw);
3959 0 : break;
3960 : case ixgbe_mac_X550EM_x:
3961 0 : status = ixgbe_init_ops_X550EM(hw);
3962 0 : break;
3963 : default:
3964 : status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
3965 0 : break;
3966 : }
3967 0 : hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME;
3968 :
3969 0 : return status;
3970 : }
3971 :
3972 : /**
3973 : * ixgbe_set_mac_type - Sets MAC type
3974 : * @hw: pointer to the HW structure
3975 : *
3976 : * This function sets the mac type of the adapter based on the
3977 : * vendor ID and device ID stored in the hw structure.
3978 : **/
3979 0 : int32_t ixgbe_set_mac_type(struct ixgbe_hw *hw)
3980 : {
3981 : int32_t ret_val = IXGBE_SUCCESS;
3982 :
3983 : DEBUGFUNC("ixgbe_set_mac_type");
3984 :
3985 0 : if (hw->vendor_id != IXGBE_INTEL_VENDOR_ID)
3986 0 : return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
3987 :
3988 0 : switch (hw->device_id) {
3989 : case IXGBE_DEV_ID_82598:
3990 : case IXGBE_DEV_ID_82598_BX:
3991 : case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
3992 : case IXGBE_DEV_ID_82598AF_DUAL_PORT:
3993 : case IXGBE_DEV_ID_82598AT:
3994 : case IXGBE_DEV_ID_82598AT2:
3995 : case IXGBE_DEV_ID_82598AT_DUAL_PORT:
3996 : case IXGBE_DEV_ID_82598EB_CX4:
3997 : case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
3998 : case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
3999 : case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
4000 : case IXGBE_DEV_ID_82598EB_XF_LR:
4001 : case IXGBE_DEV_ID_82598EB_SFP_LOM:
4002 0 : hw->mac.type = ixgbe_mac_82598EB;
4003 0 : break;
4004 : case IXGBE_DEV_ID_82599_KX4:
4005 : case IXGBE_DEV_ID_82599_KX4_MEZZ:
4006 : case IXGBE_DEV_ID_82599_XAUI_LOM:
4007 : case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
4008 : case IXGBE_DEV_ID_82599_KR:
4009 : case IXGBE_DEV_ID_82599_SFP:
4010 : case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
4011 : case IXGBE_DEV_ID_82599_SFP_FCOE:
4012 : case IXGBE_DEV_ID_82599_SFP_EM:
4013 : case IXGBE_DEV_ID_82599_SFP_SF2:
4014 : case IXGBE_DEV_ID_82599_SFP_SF_QP:
4015 : case IXGBE_DEV_ID_82599_QSFP_SF_QP:
4016 : case IXGBE_DEV_ID_82599EN_SFP:
4017 : case IXGBE_DEV_ID_82599_CX4:
4018 : case IXGBE_DEV_ID_82599_BYPASS:
4019 : case IXGBE_DEV_ID_82599_T3_LOM:
4020 0 : hw->mac.type = ixgbe_mac_82599EB;
4021 0 : break;
4022 : case IXGBE_DEV_ID_82599_VF:
4023 : case IXGBE_DEV_ID_82599_VF_HV:
4024 0 : hw->mac.type = ixgbe_mac_82599_vf;
4025 0 : break;
4026 : case IXGBE_DEV_ID_X540_VF:
4027 : case IXGBE_DEV_ID_X540_VF_HV:
4028 0 : hw->mac.type = ixgbe_mac_X540_vf;
4029 0 : break;
4030 : case IXGBE_DEV_ID_X540T:
4031 : case IXGBE_DEV_ID_X540T1:
4032 : case IXGBE_DEV_ID_X540_BYPASS:
4033 0 : hw->mac.type = ixgbe_mac_X540;
4034 0 : break;
4035 : case IXGBE_DEV_ID_X550T:
4036 : case IXGBE_DEV_ID_X550T1:
4037 0 : hw->mac.type = ixgbe_mac_X550;
4038 0 : break;
4039 : case IXGBE_DEV_ID_X550EM_X_KX4:
4040 : case IXGBE_DEV_ID_X550EM_X_KR:
4041 : case IXGBE_DEV_ID_X550EM_X_10G_T:
4042 : case IXGBE_DEV_ID_X550EM_X_1G_T:
4043 : case IXGBE_DEV_ID_X550EM_X_SFP:
4044 0 : hw->mac.type = ixgbe_mac_X550EM_x;
4045 0 : break;
4046 : case IXGBE_DEV_ID_X550_VF:
4047 : case IXGBE_DEV_ID_X550_VF_HV:
4048 0 : hw->mac.type = ixgbe_mac_X550_vf;
4049 0 : break;
4050 : case IXGBE_DEV_ID_X550EM_X_VF:
4051 : case IXGBE_DEV_ID_X550EM_X_VF_HV:
4052 0 : hw->mac.type = ixgbe_mac_X550EM_x_vf;
4053 0 : break;
4054 : default:
4055 : ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
4056 0 : break;
4057 : }
4058 :
4059 0 : return ret_val;
4060 0 : }
4061 :
4062 : /**
4063 : * ixgbe_init_hw - Initialize the hardware
4064 : * @hw: pointer to hardware structure
4065 : *
4066 : * Initialize the hardware by resetting and then starting the hardware
4067 : **/
4068 0 : int32_t ixgbe_init_hw(struct ixgbe_hw *hw)
4069 : {
4070 0 : if (hw->mac.ops.init_hw)
4071 0 : return hw->mac.ops.init_hw(hw);
4072 : else
4073 0 : return IXGBE_NOT_IMPLEMENTED;
4074 0 : }
4075 :
4076 : /**
4077 : * ixgbe_get_media_type - Get media type
4078 : * @hw: pointer to hardware structure
4079 : *
4080 : * Returns the media type (fiber, copper, backplane)
4081 : **/
4082 0 : enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw)
4083 : {
4084 0 : if (hw->mac.ops.get_media_type)
4085 0 : return hw->mac.ops.get_media_type(hw);
4086 : else
4087 0 : return ixgbe_media_type_unknown;
4088 0 : }
4089 :
4090 : /**
4091 : * ixgbe_identify_phy - Get PHY type
4092 : * @hw: pointer to hardware structure
4093 : *
4094 : * Determines the physical layer module found on the current adapter.
4095 : **/
4096 0 : int32_t ixgbe_identify_phy(struct ixgbe_hw *hw)
4097 : {
4098 : int32_t status = IXGBE_SUCCESS;
4099 :
4100 0 : if (hw->phy.type == ixgbe_phy_unknown) {
4101 0 : if (hw->phy.ops.identify)
4102 0 : status = hw->phy.ops.identify(hw);
4103 : else
4104 : status = IXGBE_NOT_IMPLEMENTED;
4105 : }
4106 :
4107 0 : return status;
4108 : }
4109 :
4110 : /**
4111 : * ixgbe_check_link - Get link and speed status
4112 : * @hw: pointer to hardware structure
4113 : *
4114 : * Reads the links register to determine if link is up and the current speed
4115 : **/
4116 0 : int32_t ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4117 : bool *link_up, bool link_up_wait_to_complete)
4118 : {
4119 0 : if (hw->mac.ops.check_link)
4120 0 : return hw->mac.ops.check_link(hw, speed, link_up,
4121 : link_up_wait_to_complete);
4122 : else
4123 0 : return IXGBE_NOT_IMPLEMENTED;
4124 0 : }
4125 :
4126 : /**
4127 : * ixgbe_flap_tx_laser - flap Tx laser to start autotry process
4128 : * @hw: pointer to hardware structure
4129 : *
4130 : * When the driver changes the link speeds that it can support,
4131 : * flap the Tx laser to alert the link partner to restart the
4132 : * autotry process on its end.
4133 : **/
4134 0 : void ixgbe_flap_tx_laser(struct ixgbe_hw *hw)
4135 : {
4136 0 : if (hw->mac.ops.flap_tx_laser)
4137 0 : hw->mac.ops.flap_tx_laser(hw);
4138 0 : }
4139 :
4140 : /**
4141 : * ixgbe_set_rar - Set Rx address register
4142 : * @hw: pointer to hardware structure
4143 : * @index: Receive address register to write
4144 : * @addr: Address to put into receive address register
4145 : * @vmdq: VMDq "set"
4146 : * @enable_addr: set flag that address is active
4147 : *
4148 : * Puts an ethernet address into a receive address register.
4149 : **/
4150 0 : int32_t ixgbe_set_rar(struct ixgbe_hw *hw, uint32_t index, uint8_t *addr,
4151 : uint32_t vmdq, uint32_t enable_addr)
4152 : {
4153 0 : if (hw->mac.ops.set_rar)
4154 0 : return hw->mac.ops.set_rar(hw, index, addr, vmdq, enable_addr);
4155 : else
4156 0 : return IXGBE_NOT_IMPLEMENTED;
4157 0 : }
4158 :
4159 : /**
4160 : * ixgbe_set_vmdq - Associate a VMDq index with a receive address
4161 : * @hw: pointer to hardware structure
4162 : * @rar: receive address register index to associate with VMDq index
4163 : * @vmdq: VMDq set or pool index
4164 : **/
4165 0 : int32_t ixgbe_set_vmdq(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
4166 : {
4167 0 : if (hw->mac.ops.set_vmdq)
4168 0 : return hw->mac.ops.set_vmdq(hw, rar, vmdq);
4169 : else
4170 0 : return IXGBE_NOT_IMPLEMENTED;
4171 0 : }
4172 :
4173 : /**
4174 : * ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address
4175 : * @hw: pointer to hardware structure
4176 : * @rar: receive address register index to disassociate with VMDq index
4177 : * @vmdq: VMDq set or pool index
4178 : **/
4179 0 : int32_t ixgbe_clear_vmdq(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
4180 : {
4181 0 : if (hw->mac.ops.clear_vmdq)
4182 0 : return hw->mac.ops.clear_vmdq(hw, rar, vmdq);
4183 : else
4184 0 : return IXGBE_NOT_IMPLEMENTED;
4185 0 : }
4186 :
4187 : /**
4188 : * ixgbe_init_uta_tables - Initializes Unicast Table Arrays.
4189 : * @hw: pointer to hardware structure
4190 : *
4191 : * Initializes the Unicast Table Arrays to zero on device load. This
4192 : * is part of the Rx init addr execution path.
4193 : **/
4194 0 : int32_t ixgbe_init_uta_tables(struct ixgbe_hw *hw)
4195 : {
4196 0 : if (hw->mac.ops.init_uta_tables)
4197 0 : return hw->mac.ops.init_uta_tables(hw);
4198 : else
4199 0 : return IXGBE_NOT_IMPLEMENTED;
4200 0 : }
4201 :
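 : /**
 : * ixgbe_disable_rx - Disable the receive unit
 : * @hw: pointer to hardware structure
 : *
 : * Dispatches to the MAC-specific disable_rx operation, if one is set.
 : **/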
4202 0 : void ixgbe_disable_rx(struct ixgbe_hw *hw)
4203 : {
4204 0 : if (hw->mac.ops.disable_rx)
4205 0 : hw->mac.ops.disable_rx(hw);
4206 0 : }
4207 :
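 : /**
 : * ixgbe_enable_rx - Enable the receive unit
 : * @hw: pointer to hardware structure
 : *
 : * Dispatches to the MAC-specific enable_rx operation, if one is set.
 : **/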
4208 0 : void ixgbe_enable_rx(struct ixgbe_hw *hw)
4209 : {
4210 0 : if (hw->mac.ops.enable_rx)
4211 0 : hw->mac.ops.enable_rx(hw);
4212 0 : }
4213 :
4214 : /*
4215 : * MBX: Mailbox handling
4216 : */
4217 :
4218 : /**
4219 : * ixgbe_read_mbx - Reads a message from the mailbox
4220 : * @hw: pointer to the HW structure
4221 : * @msg: The message buffer
4222 : * @size: Length of buffer
4223 : * @mbx_id: id of mailbox to read
4224 : *
4225 : * returns SUCCESS if it successfully read a message from the buffer
4226 : **/
4227 0 : int32_t ixgbe_read_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
4228 : {
4229 0 : struct ixgbe_mbx_info *mbx = &hw->mbx;
4230 : int32_t ret_val = IXGBE_ERR_MBX;
4231 :
4232 : DEBUGFUNC("ixgbe_read_mbx");
4233 :
4234 : /* limit read to size of mailbox */
4235 0 : if (size > mbx->size)
4236 0 : size = mbx->size;
4237 :
4238 0 : if (mbx->ops.read)
4239 0 : ret_val = mbx->ops.read(hw, msg, size, mbx_id);
4240 :
4241 0 : return ret_val;
4242 : }
4243 :
4244 : /**
4245 : * ixgbe_write_mbx - Write a message to the mailbox
4246 : * @hw: pointer to the HW structure
4247 : * @msg: The message buffer
4248 : * @size: Length of buffer
4249 : * @mbx_id: id of mailbox to write
4250 : *
4251 : * returns SUCCESS if it successfully copied message into the buffer
4252 : **/
4253 0 : int32_t ixgbe_write_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
4254 : {
4255 0 : struct ixgbe_mbx_info *mbx = &hw->mbx;
4256 : int32_t ret_val = IXGBE_SUCCESS;
4257 :
4258 : DEBUGFUNC("ixgbe_write_mbx");
4259 :
4260 0 : if (size > mbx->size)
4261 0 : ret_val = IXGBE_ERR_MBX;
4262 :
4263 0 : else if (mbx->ops.write)
4264 0 : ret_val = mbx->ops.write(hw, msg, size, mbx_id);
4265 :
4266 0 : return ret_val;
4267 : }
4268 :
4269 : /**
4270 : * ixgbe_check_for_msg - checks to see if someone sent us mail
4271 : * @hw: pointer to the HW structure
4272 : * @mbx_id: id of mailbox to check
4273 : *
4274 : * returns SUCCESS if the Status bit was found or else ERR_MBX
4275 : **/
4276 0 : int32_t ixgbe_check_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id)
4277 : {
4278 0 : struct ixgbe_mbx_info *mbx = &hw->mbx;
4279 : int32_t ret_val = IXGBE_ERR_MBX;
4280 :
4281 : DEBUGFUNC("ixgbe_check_for_msg");
4282 :
4283 0 : if (mbx->ops.check_for_msg)
4284 0 : ret_val = mbx->ops.check_for_msg(hw, mbx_id);
4285 :
4286 0 : return ret_val;
4287 : }
4288 :
4289 : /**
4290 : * ixgbe_check_for_ack - checks to see if someone sent us ACK
4291 : * @hw: pointer to the HW structure
4292 : * @mbx_id: id of mailbox to check
4293 : *
4294 : * returns SUCCESS if the Status bit was found or else ERR_MBX
4295 : **/
4296 0 : int32_t ixgbe_check_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id)
4297 : {
4298 0 : struct ixgbe_mbx_info *mbx = &hw->mbx;
4299 : int32_t ret_val = IXGBE_ERR_MBX;
4300 :
4301 : DEBUGFUNC("ixgbe_check_for_ack");
4302 :
4303 0 : if (mbx->ops.check_for_ack)
4304 0 : ret_val = mbx->ops.check_for_ack(hw, mbx_id);
4305 :
4306 0 : return ret_val;
4307 : }
4308 :
4309 : /**
4310 : * ixgbe_check_for_rst - checks to see if other side has reset
4311 : * @hw: pointer to the HW structure
4312 : * @mbx_id: id of mailbox to check
4313 : *
4314 : * returns SUCCESS if the Status bit was found or else ERR_MBX
4315 : **/
4316 0 : int32_t ixgbe_check_for_rst(struct ixgbe_hw *hw, uint16_t mbx_id)
4317 : {
4318 0 : struct ixgbe_mbx_info *mbx = &hw->mbx;
4319 : int32_t ret_val = IXGBE_ERR_MBX;
4320 :
4321 : DEBUGFUNC("ixgbe_check_for_rst");
4322 :
4323 0 : if (mbx->ops.check_for_rst)
4324 0 : ret_val = mbx->ops.check_for_rst(hw, mbx_id);
4325 :
4326 0 : return ret_val;
4327 : }
4328 :
4329 : /**
4330 : * ixgbe_poll_for_msg - Wait for message notification
4331 : * @hw: pointer to the HW structure
4332 : * @mbx_id: id of mailbox to write
4333 : *
4334 : * returns SUCCESS if it successfully received a message notification
4335 : **/
4336 0 : int32_t ixgbe_poll_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id)
4337 : {
4338 0 : struct ixgbe_mbx_info *mbx = &hw->mbx;
4339 0 : int countdown = mbx->timeout;
4340 :
4341 : DEBUGFUNC("ixgbe_poll_for_msg");
4342 :
4343 0 : if (!countdown || !mbx->ops.check_for_msg)
4344 : goto out;
4345 :
4346 0 : while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
4347 0 : countdown--;
4348 0 : if (!countdown)
4349 : break;
4350 0 : usec_delay(mbx->usec_delay);
4351 : }
4352 :
4353 : out:
4354 0 : return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
4355 : }
4356 :
4357 : /**
4358 : * ixgbe_poll_for_ack - Wait for message acknowledgement
4359 : * @hw: pointer to the HW structure
4360 : * @mbx_id: id of mailbox to write
4361 : *
4362 : * returns SUCCESS if it successfully received a message acknowledgement
4363 : **/
4364 0 : int32_t ixgbe_poll_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id)
4365 : {
4366 0 : struct ixgbe_mbx_info *mbx = &hw->mbx;
4367 0 : int countdown = mbx->timeout;
4368 :
4369 : DEBUGFUNC("ixgbe_poll_for_ack");
4370 :
4371 0 : if (!countdown || !mbx->ops.check_for_ack)
4372 : goto out;
4373 :
4374 0 : while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
4375 0 : countdown--;
4376 0 : if (!countdown)
4377 : break;
4378 0 : usec_delay(mbx->usec_delay);
4379 : }
4380 :
4381 : out:
4382 0 : return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
4383 : }
4384 :
4385 : /**
4386 : * ixgbe_read_posted_mbx - Wait for message notification and receive message
4387 : * @hw: pointer to the HW structure
4388 : * @msg: The message buffer
4389 : * @size: Length of buffer
4390 : * @mbx_id: id of mailbox to write
4391 : *
4392 : * returns SUCCESS if it successfully received a message notification and
4393 : * copied it into the receive buffer.
4394 : **/
4395 0 : int32_t ixgbe_read_posted_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
4396 : {
4397 0 : struct ixgbe_mbx_info *mbx = &hw->mbx;
4398 : int32_t ret_val = IXGBE_ERR_MBX;
4399 :
4400 : DEBUGFUNC("ixgbe_read_posted_mbx");
4401 :
4402 0 : if (!mbx->ops.read)
4403 : goto out;
4404 :
4405 0 : ret_val = ixgbe_poll_for_msg(hw, mbx_id);
4406 :
4407 : /* if a message was received, read it; otherwise we timed out */
4408 0 : if (!ret_val)
4409 0 : ret_val = mbx->ops.read(hw, msg, size, mbx_id);
4410 : out:
4411 0 : return ret_val;
4412 : }
4413 :
4414 : /**
4415 : * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
4416 : * @hw: pointer to the HW structure
4417 : * @msg: The message buffer
4418 : * @size: Length of buffer
4419 : * @mbx_id: id of mailbox to write
4420 : *
4421 : * returns SUCCESS if it successfully copied message into the buffer and
4422 : * received an ack to that message within delay * timeout period
4423 : **/
4424 0 : int32_t ixgbe_write_posted_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
4425 : uint16_t mbx_id)
4426 : {
4427 0 : struct ixgbe_mbx_info *mbx = &hw->mbx;
4428 : int32_t ret_val = IXGBE_ERR_MBX;
4429 :
4430 : DEBUGFUNC("ixgbe_write_posted_mbx");
4431 :
4432 : /* exit if either we can't write or there isn't a defined timeout */
4433 0 : if (!mbx->ops.write || !mbx->timeout)
4434 : goto out;
4435 :
4436 : /* send msg */
4437 0 : ret_val = mbx->ops.write(hw, msg, size, mbx_id);
4438 :
4439 : /* if msg sent wait until we receive an ack */
4440 0 : if (!ret_val)
4441 0 : ret_val = ixgbe_poll_for_ack(hw, mbx_id);
4442 : out:
4443 0 : return ret_val;
4444 : }
4445 :
4446 : /**
4447 : * ixgbe_init_mbx_ops_generic - Initialize MB function pointers
4448 : * @hw: pointer to the HW structure
4449 : *
4450 : * Sets up the mailbox read and write message function pointers
4451 : **/
4452 0 : void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
4453 : {
4454 0 : struct ixgbe_mbx_info *mbx = &hw->mbx;
4455 :
4456 0 : mbx->ops.read_posted = ixgbe_read_posted_mbx;
4457 0 : mbx->ops.write_posted = ixgbe_write_posted_mbx;
4458 0 : }
4459 :
4460 : /**
4461 : * ixgbe_read_v2p_mailbox - read v2p mailbox
4462 : * @hw: pointer to the HW structure
4463 : *
4464 : * This function is used to read the v2p mailbox without losing the read to
4465 : * clear status bits.
4466 : **/
4467 0 : uint32_t ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw)
4468 : {
4469 0 : uint32_t v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
4470 :
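 : /* Merge in bits cached from earlier reads, then cache the
 : * read-to-clear bits from this read so they are not lost. */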
4471 0 : v2p_mailbox |= hw->mbx.v2p_mailbox;
4472 0 : hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
4473 :
4474 0 : return v2p_mailbox;
4475 : }
4476 :
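 : /**
 : * ixgbe_check_for_bit_pf - check for and acknowledge a MBVFICR bit
 : * @hw: pointer to the HW structure
 : * @mask: bit(s) to test in the MBVFICR register
 : * @index: MBVFICR register index
 : *
 : * returns SUCCESS and writes the mask back to the register to
 : * acknowledge it if any of the masked bits are set, else ERR_MBX
 : **/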
4477 0 : int32_t ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, uint32_t mask, int32_t index)
4478 : {
4479 0 : uint32_t mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
4480 : int32_t ret_val = IXGBE_ERR_MBX;
4481 :
4482 0 : if (mbvficr & mask) {
4483 : ret_val = IXGBE_SUCCESS;
4484 0 : IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
4485 0 : }
4486 :
4487 0 : return ret_val;
4488 : }
4489 :
4490 : /**
4491 : * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
4492 : * @hw: pointer to the HW structure
4493 : * @vf_number: the VF index
4494 : *
4495 : * returns SUCCESS if the VF has set the Request (VFREQ) bit or else ERR_MBX
4496 : **/
4497 0 : int32_t ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, uint16_t vf_number)
4498 : {
4499 : int32_t ret_val = IXGBE_ERR_MBX;
4500 0 : int32_t index = IXGBE_MBVFICR_INDEX(vf_number);
4501 0 : uint32_t vf_bit = vf_number % 16;
4502 :
4503 : DEBUGFUNC("ixgbe_check_for_msg_pf");
4504 :
4505 0 : if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
4506 : index)) {
4507 : ret_val = IXGBE_SUCCESS;
4508 0 : hw->mbx.stats.reqs++;
4509 0 : }
4510 :
4511 0 : return ret_val;
4512 : }
4513 :
4514 : /**
4515 : * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
4516 : * @hw: pointer to the HW structure
4517 : * @vf_number: the VF index
4518 : *
4519 : * returns SUCCESS if the VF has set the ACK (VFACK) bit or else ERR_MBX
4520 : **/
4521 0 : int32_t ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, uint16_t vf_number)
4522 : {
4523 : int32_t ret_val = IXGBE_ERR_MBX;
4524 0 : int32_t index = IXGBE_MBVFICR_INDEX(vf_number);
4525 0 : uint32_t vf_bit = vf_number % 16;
4526 :
4527 : DEBUGFUNC("ixgbe_check_for_ack_pf");
4528 :
4529 0 : if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
4530 : index)) {
4531 : ret_val = IXGBE_SUCCESS;
4532 0 : hw->mbx.stats.acks++;
4533 0 : }
4534 :
4535 0 : return ret_val;
4536 : }
4537 :
4538 : /**
4539 : * ixgbe_check_for_rst_pf - checks to see if the VF has reset
4540 : * @hw: pointer to the HW structure
4541 : * @vf_number: the VF index
4542 : *
4543 : * returns SUCCESS if the VF has been reset (VFLRE bit set) or else ERR_MBX
4544 : **/
4545 0 : int32_t ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, uint16_t vf_number)
4546 : {
4547 0 : uint32_t reg_offset = (vf_number < 32) ? 0 : 1;
4548 0 : uint32_t vf_shift = vf_number % 32;
4549 : uint32_t vflre = 0;
4550 : int32_t ret_val = IXGBE_ERR_MBX;
4551 :
4552 : DEBUGFUNC("ixgbe_check_for_rst_pf");
4553 :
4554 0 : switch (hw->mac.type) {
4555 : case ixgbe_mac_82599EB:
4556 0 : vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
4557 0 : break;
4558 : case ixgbe_mac_X550:
4559 : case ixgbe_mac_X550EM_x:
4560 : case ixgbe_mac_X540:
4561 0 : vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
4562 0 : break;
4563 : default:
4564 : break;
4565 : }
4566 :
4567 0 : if (vflre & (1 << vf_shift)) {
4568 : ret_val = IXGBE_SUCCESS;
4569 0 : IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
4570 0 : hw->mbx.stats.rsts++;
4571 0 : }
4572 :
4573 0 : return ret_val;
4574 : }
4575 :
4576 : /**
4577 : * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
4578 : * @hw: pointer to the HW structure
4579 : * @vf_number: the VF index
4580 : *
4581 : * return SUCCESS if we obtained the mailbox lock
4582 : **/
4583 0 : int32_t ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, uint16_t vf_number)
4584 : {
4585 : int32_t ret_val = IXGBE_ERR_MBX;
4586 : uint32_t p2v_mailbox;
4587 :
4588 : DEBUGFUNC("ixgbe_obtain_mbx_lock_pf");
4589 :
4590 : /* Take ownership of the buffer */
4591 0 : IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
4592 :
4593 : /* reserve mailbox for vf use */
4594 0 : p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
4595 0 : if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
4596 0 : ret_val = IXGBE_SUCCESS;
4597 :
4598 0 : return ret_val;
4599 : }
4600 :
4601 : /**
4602 : * ixgbe_write_mbx_pf - Places a message in the mailbox
4603 : * @hw: pointer to the HW structure
4604 : * @msg: The message buffer
4605 : * @size: Length of buffer
4606 : * @vf_number: the VF index
4607 : *
4608 : * returns SUCCESS if it successfully copied message into the buffer
4609 : **/
4610 0 : int32_t ixgbe_write_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
4611 : uint16_t vf_number)
4612 : {
4613 : int32_t ret_val;
4614 : uint16_t i;
4615 :
4616 : DEBUGFUNC("ixgbe_write_mbx_pf");
4617 :
4618 : /* lock the mailbox to prevent pf/vf race condition */
4619 0 : ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
4620 0 : if (ret_val)
4621 : goto out_no_write;
4622 :
4623 : /* flush msg and acks as we are overwriting the message buffer */
4624 0 : ixgbe_check_for_msg_pf(hw, vf_number);
4625 0 : ixgbe_check_for_ack_pf(hw, vf_number);
4626 :
4627 : /* copy the caller specified message to the mailbox memory buffer */
4628 0 : for (i = 0; i < size; i++)
4629 0 : IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
4630 :
4631 : /* Interrupt the VF to tell it a message has been sent and release the buffer */
4632 0 : IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
4633 :
4634 : /* update stats */
4635 0 : hw->mbx.stats.msgs_tx++;
4636 :
4637 : out_no_write:
4638 0 : return ret_val;
4639 :
4640 : }
4641 :
4642 : /**
4643 : * ixgbe_read_mbx_pf - Read a message from the mailbox
4644 : * @hw: pointer to the HW structure
4645 : * @msg: The message buffer
4646 : * @size: Length of buffer
4647 : * @vf_number: the VF index
4648 : *
4649 : * This function copies a message from the mailbox buffer to the caller's
4650 : * memory buffer. The presumption is that the caller knows that there was
4651 : * a message due to a VF request so no polling for message is needed.
4652 : **/
4653 0 : int32_t ixgbe_read_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
4654 : uint16_t vf_number)
4655 : {
4656 : int32_t ret_val;
4657 : uint16_t i;
4658 :
4659 : DEBUGFUNC("ixgbe_read_mbx_pf");
4660 :
4661 : /* lock the mailbox to prevent pf/vf race condition */
4662 0 : ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
4663 0 : if (ret_val)
4664 : goto out_no_read;
4665 :
4666 : /* copy the message from the mailbox memory buffer */
4667 0 : for (i = 0; i < size; i++)
4668 0 : msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
4669 :
4670 : /* Acknowledge the message and release buffer */
4671 0 : IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
4672 :
4673 : /* update stats */
4674 0 : hw->mbx.stats.msgs_rx++;
4675 :
4676 : out_no_read:
4677 0 : return ret_val;
4678 : }
4679 :
4680 : /**
4681 : * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
4682 : * @hw: pointer to the HW structure
4683 : *
4684 : * Initializes the hw->mbx struct to correct values for pf mailbox
4685 : */
4686 0 : void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
4687 : {
4688 0 : struct ixgbe_mbx_info *mbx = &hw->mbx;
4689 :
4690 0 : if (hw->mac.type != ixgbe_mac_82599EB &&
4691 0 : hw->mac.type != ixgbe_mac_X550 &&
4692 0 : hw->mac.type != ixgbe_mac_X550EM_x &&
4693 0 : hw->mac.type != ixgbe_mac_X540)
4694 0 : return;
4695 :
4696 0 : mbx->timeout = 0;
4697 0 : mbx->usec_delay = 0;
4698 :
4699 0 : mbx->size = IXGBE_VFMAILBOX_SIZE;
4700 :
4701 0 : mbx->ops.read = ixgbe_read_mbx_pf;
4702 0 : mbx->ops.write = ixgbe_write_mbx_pf;
4703 0 : mbx->ops.read_posted = ixgbe_read_posted_mbx;
4704 0 : mbx->ops.write_posted = ixgbe_write_posted_mbx;
4705 0 : mbx->ops.check_for_msg = ixgbe_check_for_msg_pf;
4706 0 : mbx->ops.check_for_ack = ixgbe_check_for_ack_pf;
4707 0 : mbx->ops.check_for_rst = ixgbe_check_for_rst_pf;
4708 :
4709 0 : mbx->stats.msgs_tx = 0;
4710 0 : mbx->stats.msgs_rx = 0;
4711 0 : mbx->stats.reqs = 0;
4712 0 : mbx->stats.acks = 0;
4713 0 : mbx->stats.rsts = 0;
4714 0 : }