Line data Source code
1 : /* $OpenBSD: if_iwm.c,v 1.231 2018/08/13 15:05:31 stsp Exp $ */
2 :
3 : /*
4 : * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
5 : * Author: Stefan Sperling <stsp@openbsd.org>
6 : * Copyright (c) 2014 Fixup Software Ltd.
7 : * Copyright (c) 2017 Stefan Sperling <stsp@openbsd.org>
8 : *
9 : * Permission to use, copy, modify, and distribute this software for any
10 : * purpose with or without fee is hereby granted, provided that the above
11 : * copyright notice and this permission notice appear in all copies.
12 : *
13 : * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 : * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 : * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 : * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 : * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 : * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 : * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 : */
21 :
22 : /*-
23 : * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24 : * which were used as the reference documentation for this implementation.
25 : *
26 : ***********************************************************************
27 : *
28 : * This file is provided under a dual BSD/GPLv2 license. When using or
29 : * redistributing this file, you may do so under either license.
30 : *
31 : * GPL LICENSE SUMMARY
32 : *
33 : * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
34 : * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
35 : * Copyright(c) 2016 Intel Deutschland GmbH
36 : *
37 : * This program is free software; you can redistribute it and/or modify
38 : * it under the terms of version 2 of the GNU General Public License as
39 : * published by the Free Software Foundation.
40 : *
41 : * This program is distributed in the hope that it will be useful, but
42 : * WITHOUT ANY WARRANTY; without even the implied warranty of
43 : * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
44 : * General Public License for more details.
45 : *
46 : * You should have received a copy of the GNU General Public License
47 : * along with this program; if not, write to the Free Software
48 : * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
49 : * USA
50 : *
51 : * The full GNU General Public License is included in this distribution
52 : * in the file called COPYING.
53 : *
54 : * Contact Information:
55 : * Intel Linux Wireless <ilw@linux.intel.com>
56 : * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
57 : *
58 : *
59 : * BSD LICENSE
60 : *
61 : * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
62 : * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
63 : * Copyright(c) 2016 Intel Deutschland GmbH
64 : * All rights reserved.
65 : *
66 : * Redistribution and use in source and binary forms, with or without
67 : * modification, are permitted provided that the following conditions
68 : * are met:
69 : *
70 : * * Redistributions of source code must retain the above copyright
71 : * notice, this list of conditions and the following disclaimer.
72 : * * Redistributions in binary form must reproduce the above copyright
73 : * notice, this list of conditions and the following disclaimer in
74 : * the documentation and/or other materials provided with the
75 : * distribution.
76 : * * Neither the name Intel Corporation nor the names of its
77 : * contributors may be used to endorse or promote products derived
78 : * from this software without specific prior written permission.
79 : *
80 : * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
81 : * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
82 : * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
83 : * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
84 : * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85 : * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
86 : * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
87 : * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
88 : * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
89 : * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
90 : * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91 : */
92 :
93 : /*-
94 : * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
95 : *
96 : * Permission to use, copy, modify, and distribute this software for any
97 : * purpose with or without fee is hereby granted, provided that the above
98 : * copyright notice and this permission notice appear in all copies.
99 : *
100 : * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
101 : * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
102 : * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
103 : * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
104 : * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
105 : * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
106 : * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
107 : */
108 :
109 : #include "bpfilter.h"
110 :
111 : #include <sys/param.h>
112 : #include <sys/conf.h>
113 : #include <sys/kernel.h>
114 : #include <sys/malloc.h>
115 : #include <sys/mbuf.h>
116 : #include <sys/mutex.h>
117 : #include <sys/proc.h>
118 : #include <sys/rwlock.h>
119 : #include <sys/socket.h>
120 : #include <sys/sockio.h>
121 : #include <sys/systm.h>
122 : #include <sys/endian.h>
123 :
124 : #include <sys/refcnt.h>
125 : #include <sys/task.h>
126 : #include <machine/bus.h>
127 : #include <machine/intr.h>
128 :
129 : #include <dev/pci/pcireg.h>
130 : #include <dev/pci/pcivar.h>
131 : #include <dev/pci/pcidevs.h>
132 :
133 : #if NBPFILTER > 0
134 : #include <net/bpf.h>
135 : #endif
136 : #include <net/if.h>
137 : #include <net/if_dl.h>
138 : #include <net/if_media.h>
139 :
140 : #include <netinet/in.h>
141 : #include <netinet/if_ether.h>
142 :
143 : #include <net80211/ieee80211_var.h>
144 : #include <net80211/ieee80211_amrr.h>
145 : #include <net80211/ieee80211_mira.h>
146 : #include <net80211/ieee80211_radiotap.h>
147 :
148 : #define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
149 :
150 : #define IC2IFP(_ic_) (&(_ic_)->ic_if)
151 :
152 : #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
153 : #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
154 :
155 : #ifdef IWM_DEBUG
156 : #define DPRINTF(x) do { if (iwm_debug > 0) printf x; } while (0)
157 : #define DPRINTFN(n, x) do { if (iwm_debug >= (n)) printf x; } while (0)
158 : int iwm_debug = 1;
159 : #else
160 : #define DPRINTF(x) do { ; } while (0)
161 : #define DPRINTFN(n, x) do { ; } while (0)
162 : #endif
163 :
164 : #include <dev/pci/if_iwmreg.h>
165 : #include <dev/pci/if_iwmvar.h>
166 :
167 : const uint8_t iwm_nvm_channels[] = {
168 : /* 2.4 GHz */
169 : 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
170 : /* 5 GHz */
171 : 36, 40, 44, 48, 52, 56, 60, 64,
172 : 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
173 : 149, 153, 157, 161, 165
174 : };
175 :
176 : const uint8_t iwm_nvm_channels_8000[] = {
177 : /* 2.4 GHz */
178 : 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
179 : /* 5 GHz */
180 : 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
181 : 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
182 : 149, 153, 157, 161, 165, 169, 173, 177, 181
183 : };
184 :
185 : #define IWM_NUM_2GHZ_CHANNELS 14
186 :
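/*
 * Rate table used for both legacy and HT transmissions.  The "rate"
 * column is in units of 500 kbit/s (e.g. 12 means 6 Mbit/s); "plcp" and
 * "ht_plcp" are the corresponding hardware PLCP rate codes.
 */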
187 : const struct iwm_rate {
188 : uint16_t rate;
189 : uint8_t plcp;
190 : uint8_t ht_plcp;
191 : } iwm_rates[] = {
192 : /* Legacy */ /* HT */
193 : { 2, IWM_RATE_1M_PLCP, IWM_RATE_HT_SISO_MCS_INV_PLCP },
194 : { 4, IWM_RATE_2M_PLCP, IWM_RATE_HT_SISO_MCS_INV_PLCP },
195 : { 11, IWM_RATE_5M_PLCP, IWM_RATE_HT_SISO_MCS_INV_PLCP },
196 : { 22, IWM_RATE_11M_PLCP, IWM_RATE_HT_SISO_MCS_INV_PLCP },
197 : { 12, IWM_RATE_6M_PLCP, IWM_RATE_HT_SISO_MCS_0_PLCP },
198 : { 18, IWM_RATE_9M_PLCP, IWM_RATE_HT_SISO_MCS_INV_PLCP },
199 : { 24, IWM_RATE_12M_PLCP, IWM_RATE_HT_SISO_MCS_1_PLCP },
200 : { 26, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_8_PLCP },
201 : { 36, IWM_RATE_18M_PLCP, IWM_RATE_HT_SISO_MCS_2_PLCP },
202 : { 48, IWM_RATE_24M_PLCP, IWM_RATE_HT_SISO_MCS_3_PLCP },
203 : { 52, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_9_PLCP },
204 : { 72, IWM_RATE_36M_PLCP, IWM_RATE_HT_SISO_MCS_4_PLCP },
205 : { 78, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_10_PLCP },
206 : { 96, IWM_RATE_48M_PLCP, IWM_RATE_HT_SISO_MCS_5_PLCP },
207 : { 104, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_11_PLCP },
208 : { 108, IWM_RATE_54M_PLCP, IWM_RATE_HT_SISO_MCS_6_PLCP },
209 : { 128, IWM_RATE_INVM_PLCP, IWM_RATE_HT_SISO_MCS_7_PLCP },
210 : { 156, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_12_PLCP },
211 : { 208, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_13_PLCP },
212 : { 234, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_14_PLCP },
213 : { 260, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_15_PLCP },
214 : };
215 : #define IWM_RIDX_CCK 0
216 : #define IWM_RIDX_OFDM 4
217 : #define IWM_RIDX_MAX (nitems(iwm_rates)-1)
218 : #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
219 : #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
220 :
221 : /* Convert an MCS index into an iwm_rates[] index. */
222 : const int iwm_mcs2ridx[] = {
223 : IWM_RATE_MCS_0_INDEX,
224 : IWM_RATE_MCS_1_INDEX,
225 : IWM_RATE_MCS_2_INDEX,
226 : IWM_RATE_MCS_3_INDEX,
227 : IWM_RATE_MCS_4_INDEX,
228 : IWM_RATE_MCS_5_INDEX,
229 : IWM_RATE_MCS_6_INDEX,
230 : IWM_RATE_MCS_7_INDEX,
231 : IWM_RATE_MCS_8_INDEX,
232 : IWM_RATE_MCS_9_INDEX,
233 : IWM_RATE_MCS_10_INDEX,
234 : IWM_RATE_MCS_11_INDEX,
235 : IWM_RATE_MCS_12_INDEX,
236 : IWM_RATE_MCS_13_INDEX,
237 : IWM_RATE_MCS_14_INDEX,
238 : IWM_RATE_MCS_15_INDEX,
239 : };
240 :
241 : struct iwm_nvm_section {
242 : uint16_t length;
243 : uint8_t *data;
244 : };
245 :
246 : int iwm_is_mimo_ht_plcp(uint8_t);
247 : int iwm_is_mimo_mcs(int);
248 : int iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
249 : int iwm_firmware_store_section(struct iwm_softc *, enum iwm_ucode_type,
250 : uint8_t *, size_t);
251 : int iwm_set_default_calib(struct iwm_softc *, const void *);
252 : void iwm_fw_info_free(struct iwm_fw_info *);
253 : int iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
254 : uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
255 : void iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
256 : int iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
257 : int iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
258 : int iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
259 : int iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
260 : int iwm_nic_lock(struct iwm_softc *);
261 : void iwm_nic_assert_locked(struct iwm_softc *);
262 : void iwm_nic_unlock(struct iwm_softc *);
263 : void iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
264 : uint32_t);
265 : void iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
266 : void iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
267 : int iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *, bus_size_t,
268 : bus_size_t);
269 : void iwm_dma_contig_free(struct iwm_dma_info *);
270 : int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
271 : void iwm_disable_rx_dma(struct iwm_softc *);
272 : void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
273 : void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
274 : int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, int);
275 : void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
276 : void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
277 : void iwm_enable_rfkill_int(struct iwm_softc *);
278 : int iwm_check_rfkill(struct iwm_softc *);
279 : void iwm_enable_interrupts(struct iwm_softc *);
280 : void iwm_restore_interrupts(struct iwm_softc *);
281 : void iwm_disable_interrupts(struct iwm_softc *);
282 : void iwm_ict_reset(struct iwm_softc *);
283 : int iwm_set_hw_ready(struct iwm_softc *);
284 : int iwm_prepare_card_hw(struct iwm_softc *);
285 : void iwm_apm_config(struct iwm_softc *);
286 : int iwm_apm_init(struct iwm_softc *);
287 : void iwm_apm_stop(struct iwm_softc *);
288 : int iwm_allow_mcast(struct iwm_softc *);
289 : int iwm_start_hw(struct iwm_softc *);
290 : void iwm_stop_device(struct iwm_softc *);
291 : void iwm_nic_config(struct iwm_softc *);
292 : int iwm_nic_rx_init(struct iwm_softc *);
293 : int iwm_nic_tx_init(struct iwm_softc *);
294 : int iwm_nic_init(struct iwm_softc *);
295 : int iwm_enable_txq(struct iwm_softc *, int, int, int);
296 : int iwm_post_alive(struct iwm_softc *);
297 : struct iwm_phy_db_entry *iwm_phy_db_get_section(struct iwm_softc *, uint16_t,
298 : uint16_t);
299 : int iwm_phy_db_set_section(struct iwm_softc *,
300 : struct iwm_calib_res_notif_phy_db *);
301 : int iwm_is_valid_channel(uint16_t);
302 : uint8_t iwm_ch_id_to_ch_index(uint16_t);
303 : uint16_t iwm_channel_id_to_papd(uint16_t);
304 : uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
305 : int iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t, uint8_t **,
306 : uint16_t *, uint16_t);
307 : int iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t, void *);
308 : int iwm_phy_db_send_all_channel_groups(struct iwm_softc *, uint16_t,
309 : uint8_t);
310 : int iwm_send_phy_db_data(struct iwm_softc *);
311 : void iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
312 : struct iwm_time_event_cmd_v1 *);
313 : int iwm_send_time_event_cmd(struct iwm_softc *,
314 : const struct iwm_time_event_cmd_v2 *);
315 : void iwm_protect_session(struct iwm_softc *, struct iwm_node *, uint32_t,
316 : uint32_t);
317 : void iwm_unprotect_session(struct iwm_softc *, struct iwm_node *);
318 : int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, uint16_t,
319 : uint8_t *, uint16_t *);
320 : int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
321 : uint16_t *, size_t);
322 : void iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
323 : const uint8_t *nvm_channels, size_t nchan);
324 : void iwm_setup_ht_rates(struct iwm_softc *);
325 : void iwm_htprot_task(void *);
326 : void iwm_update_htprot(struct ieee80211com *, struct ieee80211_node *);
327 : int iwm_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
328 : uint8_t);
329 : void iwm_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
330 : uint8_t);
331 : void iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *, uint8_t,
332 : uint16_t, int);
333 : #ifdef notyet
334 : int iwm_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
335 : uint8_t);
336 : void iwm_ampdu_tx_stop(struct ieee80211com *, struct ieee80211_node *,
337 : uint8_t);
338 : #endif
339 : void iwm_ba_task(void *);
340 :
341 : int iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
342 : const uint16_t *, const uint16_t *,
343 : const uint16_t *, const uint16_t *,
344 : const uint16_t *);
345 : void iwm_set_hw_address_8000(struct iwm_softc *, struct iwm_nvm_data *,
346 : const uint16_t *, const uint16_t *);
347 : int iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
348 : int iwm_nvm_init(struct iwm_softc *);
349 : int iwm_firmware_load_sect(struct iwm_softc *, uint32_t, const uint8_t *,
350 : uint32_t);
351 : int iwm_firmware_load_chunk(struct iwm_softc *, uint32_t, const uint8_t *,
352 : uint32_t);
353 : int iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
354 : int iwm_load_cpu_sections_8000(struct iwm_softc *, struct iwm_fw_sects *,
355 : int, int *);
356 : int iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
357 : int iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
358 : int iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
359 : int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
360 : int iwm_send_phy_cfg_cmd(struct iwm_softc *);
361 : int iwm_load_ucode_wait_alive(struct iwm_softc *, enum iwm_ucode_type);
362 : int iwm_run_init_mvm_ucode(struct iwm_softc *, int);
363 : int iwm_rx_addbuf(struct iwm_softc *, int, int);
364 : int iwm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
365 : int iwm_get_signal_strength(struct iwm_softc *, struct iwm_rx_phy_info *);
366 : void iwm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *,
367 : struct iwm_rx_data *);
368 : int iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
369 : int iwm_rx_frame(struct iwm_softc *, struct mbuf *);
370 : void iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
371 : struct iwm_node *);
372 : void iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
373 : struct iwm_rx_data *);
374 : void iwm_rx_bmiss(struct iwm_softc *, struct iwm_rx_packet *,
375 : struct iwm_rx_data *);
376 : int iwm_binding_cmd(struct iwm_softc *, struct iwm_node *, uint32_t);
377 : void iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
378 : struct iwm_phy_context_cmd *, uint32_t, uint32_t);
379 : void iwm_phy_ctxt_cmd_data(struct iwm_softc *, struct iwm_phy_context_cmd *,
380 : struct ieee80211_channel *, uint8_t, uint8_t);
381 : int iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *, uint8_t,
382 : uint8_t, uint32_t, uint32_t);
383 : int iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
384 : int iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t, uint16_t,
385 : const void *);
386 : int iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
387 : uint32_t *);
388 : int iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
389 : const void *, uint32_t *);
390 : void iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
391 : void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
392 : void iwm_update_sched(struct iwm_softc *, int, int, uint8_t, uint16_t);
393 : const struct iwm_rate *iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
394 : struct ieee80211_frame *, struct iwm_tx_cmd *);
395 : int iwm_tx(struct iwm_softc *, struct mbuf *, struct ieee80211_node *, int);
396 : int iwm_flush_tx_path(struct iwm_softc *, int);
397 : void iwm_led_enable(struct iwm_softc *);
398 : void iwm_led_disable(struct iwm_softc *);
399 : int iwm_led_is_enabled(struct iwm_softc *);
400 : void iwm_led_blink_timeout(void *);
401 : void iwm_led_blink_start(struct iwm_softc *);
402 : void iwm_led_blink_stop(struct iwm_softc *);
403 : int iwm_beacon_filter_send_cmd(struct iwm_softc *,
404 : struct iwm_beacon_filter_cmd *);
405 : void iwm_beacon_filter_set_cqm_params(struct iwm_softc *, struct iwm_node *,
406 : struct iwm_beacon_filter_cmd *);
407 : int iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *, int);
408 : void iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
409 : struct iwm_mac_power_cmd *);
410 : int iwm_power_mac_update_mode(struct iwm_softc *, struct iwm_node *);
411 : int iwm_power_update_device(struct iwm_softc *);
412 : int iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
413 : int iwm_disable_beacon_filter(struct iwm_softc *);
414 : int iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
415 : int iwm_add_aux_sta(struct iwm_softc *);
416 : int iwm_rm_sta_cmd(struct iwm_softc *, struct iwm_node *);
417 : uint16_t iwm_scan_rx_chain(struct iwm_softc *);
418 : uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
419 : uint8_t iwm_lmac_scan_fill_channels(struct iwm_softc *,
420 : struct iwm_scan_channel_cfg_lmac *, int, int);
421 : int iwm_fill_probe_req(struct iwm_softc *, struct iwm_scan_probe_req *);
422 : int iwm_lmac_scan(struct iwm_softc *, int);
423 : int iwm_config_umac_scan(struct iwm_softc *);
424 : int iwm_umac_scan(struct iwm_softc *, int);
425 : uint8_t iwm_ridx2rate(struct ieee80211_rateset *, int);
426 : int iwm_rval2ridx(int);
427 : void iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *, int *);
428 : void iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
429 : struct iwm_mac_ctx_cmd *, uint32_t);
430 : void iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
431 : struct iwm_mac_data_sta *, int);
432 : int iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *, uint32_t, int);
433 : int iwm_update_quotas(struct iwm_softc *, struct iwm_node *, int);
434 : void iwm_add_task(struct iwm_softc *, struct taskq *, struct task *);
435 : void iwm_del_task(struct iwm_softc *, struct taskq *, struct task *);
436 : int iwm_scan(struct iwm_softc *);
437 : int iwm_bgscan(struct ieee80211com *);
438 : int iwm_umac_scan_abort(struct iwm_softc *);
439 : int iwm_lmac_scan_abort(struct iwm_softc *);
440 : int iwm_scan_abort(struct iwm_softc *);
441 : int iwm_auth(struct iwm_softc *);
442 : int iwm_deauth(struct iwm_softc *);
443 : int iwm_assoc(struct iwm_softc *);
444 : int iwm_disassoc(struct iwm_softc *);
445 : int iwm_run(struct iwm_softc *);
446 : int iwm_run_stop(struct iwm_softc *);
447 : struct ieee80211_node *iwm_node_alloc(struct ieee80211com *);
448 : void iwm_calib_timeout(void *);
449 : void iwm_setrates(struct iwm_node *);
450 : int iwm_media_change(struct ifnet *);
451 : void iwm_newstate_task(void *);
452 : int iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
453 : void iwm_endscan(struct iwm_softc *);
454 : void iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
455 : struct ieee80211_node *);
456 : int iwm_sf_config(struct iwm_softc *, int);
457 : int iwm_send_bt_init_conf(struct iwm_softc *);
458 : int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
459 : void iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
460 : int iwm_init_hw(struct iwm_softc *);
461 : int iwm_init(struct ifnet *);
462 : void iwm_start(struct ifnet *);
463 : void iwm_stop(struct ifnet *);
464 : void iwm_watchdog(struct ifnet *);
465 : int iwm_ioctl(struct ifnet *, u_long, caddr_t);
466 : #ifdef IWM_DEBUG
467 : const char *iwm_desc_lookup(uint32_t);
468 : void iwm_nic_error(struct iwm_softc *);
469 : void iwm_nic_umac_error(struct iwm_softc *);
470 : #endif
471 : void iwm_rx_mpdu(struct iwm_softc *, struct mbuf *, size_t);
472 : void iwm_rx_pkt(struct iwm_softc *, struct iwm_rx_data *);
473 : void iwm_notif_intr(struct iwm_softc *);
474 : int iwm_intr(void *);
475 : int iwm_match(struct device *, void *, void *);
476 : int iwm_preinit(struct iwm_softc *);
477 : void iwm_attach_hook(struct device *);
478 : void iwm_attach(struct device *, struct device *, void *);
479 : void iwm_init_task(void *);
480 : int iwm_activate(struct device *, int);
481 : int iwm_resume(struct iwm_softc *);
482 :
483 : #if NBPFILTER > 0
484 : void iwm_radiotap_attach(struct iwm_softc *);
485 : #endif
486 :
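/*
 * An HT PLCP value denotes a MIMO rate if it is not the invalid-SISO
 * marker and has a non-zero number-of-spatial-streams (NSS) field.
 */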
487 : int
488 0 : iwm_is_mimo_ht_plcp(uint8_t ht_plcp)
489 : {
490 0 : return (ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP &&
491 0 : (ht_plcp & IWM_RATE_HT_MCS_NSS_MSK));
492 : }
493 :
494 : int
495 0 : iwm_is_mimo_mcs(int mcs)
496 : {
497 0 : int ridx = iwm_mcs2ridx[mcs];
498 0 : return iwm_is_mimo_ht_plcp(iwm_rates[ridx].ht_plcp);
499 :
500 : }
501 :
502 : int
503 0 : iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
504 : {
505 0 : struct iwm_fw_cscheme_list *l = (void *)data;
506 :
507 0 : if (dlen < sizeof(*l) ||
508 0 : dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
509 0 : return EINVAL;
510 :
511 : /* we don't actually store anything for now, always use s/w crypto */
512 :
513 0 : return 0;
514 0 : }
515 :
516 : int
517 0 : iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
518 : uint8_t *data, size_t dlen)
519 : {
520 : struct iwm_fw_sects *fws;
521 : struct iwm_fw_onesect *fwone;
522 :
523 0 : if (type >= IWM_UCODE_TYPE_MAX)
524 0 : return EINVAL;
525 0 : if (dlen < sizeof(uint32_t))
526 0 : return EINVAL;
527 :
528 0 : fws = &sc->sc_fw.fw_sects[type];
529 0 : if (fws->fw_count >= IWM_UCODE_SECT_MAX)
530 0 : return EINVAL;
531 :
532 0 : fwone = &fws->fw_sect[fws->fw_count];
533 :
534 : /* first 32bit are device load offset */
535 0 : memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
536 :
537 : /* rest is data */
538 0 : fwone->fws_data = data + sizeof(uint32_t);
539 0 : fwone->fws_len = dlen - sizeof(uint32_t);
540 :
541 0 : fws->fw_count++;
542 0 : fws->fw_totlen += fwone->fws_len;
543 :
544 0 : return 0;
545 0 : }
546 :
547 : #define IWM_DEFAULT_SCAN_CHANNELS 40
548 :
549 : struct iwm_tlv_calib_data {
550 : uint32_t ucode_type;
551 : struct iwm_tlv_calib_ctrl calib;
552 : } __packed;
553 :
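/*
 * Record the default calibration flow/event triggers carried in the
 * firmware's IWM_UCODE_TLV_DEF_CALIB record for the given uCode type.
 */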
554 : int
555 0 : iwm_set_default_calib(struct iwm_softc *sc, const void *data)
556 : {
557 0 : const struct iwm_tlv_calib_data *def_calib = data;
558 0 : uint32_t ucode_type = le32toh(def_calib->ucode_type);
559 :
560 0 : if (ucode_type >= IWM_UCODE_TYPE_MAX)
561 0 : return EINVAL;
562 :
563 0 : sc->sc_default_calib[ucode_type].flow_trigger =
564 0 : def_calib->calib.flow_trigger;
565 0 : sc->sc_default_calib[ucode_type].event_trigger =
566 0 : def_calib->calib.event_trigger;
567 :
568 0 : return 0;
569 0 : }
570 :
571 : void
572 0 : iwm_fw_info_free(struct iwm_fw_info *fw)
573 : {
574 0 : free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
575 0 : fw->fw_rawdata = NULL;
576 0 : fw->fw_rawsize = 0;
577 : /* don't touch fw->fw_status */
578 0 : memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
579 0 : }
580 :
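/*
 * Load the firmware image with loadfirmware(9) and parse its TLV
 * records, storing uCode sections and capability flags in the softc.
 * Sleeps while another thread is already loading the firmware.
 */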
581 : int
582 0 : iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
583 : {
584 0 : struct iwm_fw_info *fw = &sc->sc_fw;
585 : struct iwm_tlv_ucode_header *uhdr;
586 : struct iwm_ucode_tlv tlv;
587 : uint32_t tlv_type;
588 : uint8_t *data;
589 : int err;
590 : size_t len;
591 :
592 0 : if (fw->fw_status == IWM_FW_STATUS_DONE &&
593 0 : ucode_type != IWM_UCODE_TYPE_INIT)
594 0 : return 0;
595 :
596 0 : while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
597 0 : tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
598 0 : fw->fw_status = IWM_FW_STATUS_INPROGRESS;
599 :
600 0 : if (fw->fw_rawdata != NULL)
601 0 : iwm_fw_info_free(fw);
602 :
603 0 : err = loadfirmware(sc->sc_fwname,
604 0 : (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
605 0 : if (err) {
606 0 : printf("%s: could not read firmware %s (error %d)\n",
607 0 : DEVNAME(sc), sc->sc_fwname, err);
608 0 : goto out;
609 : }
610 :
611 0 : sc->sc_capaflags = 0;
612 0 : sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
613 0 : memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
614 0 : memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
615 :
616 0 : uhdr = (void *)fw->fw_rawdata;
617 0 : if (*(uint32_t *)fw->fw_rawdata != 0
618 0 : || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
619 0 : printf("%s: invalid firmware %s\n",
620 0 : DEVNAME(sc), sc->sc_fwname);
621 : err = EINVAL;
622 0 : goto out;
623 : }
624 :
625 0 : snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
626 0 : IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
627 0 : IWM_UCODE_MINOR(le32toh(uhdr->ver)),
628 0 : IWM_UCODE_API(le32toh(uhdr->ver)));
629 0 : data = uhdr->data;
630 0 : len = fw->fw_rawsize - sizeof(*uhdr);
631 :
632 0 : while (len >= sizeof(tlv)) {
633 : size_t tlv_len;
634 : void *tlv_data;
635 :
636 0 : memcpy(&tlv, data, sizeof(tlv));
637 0 : tlv_len = le32toh(tlv.length);
638 : tlv_type = le32toh(tlv.type);
639 :
640 0 : len -= sizeof(tlv);
641 0 : data += sizeof(tlv);
642 : tlv_data = data;
643 :
644 0 : if (len < tlv_len) {
645 0 : printf("%s: firmware too short: %zu bytes\n",
646 0 : DEVNAME(sc), len);
647 : err = EINVAL;
648 0 : goto parse_out;
649 : }
650 :
651 0 : switch (tlv_type) {
652 : case IWM_UCODE_TLV_PROBE_MAX_LEN:
653 0 : if (tlv_len < sizeof(uint32_t)) {
654 : err = EINVAL;
655 0 : goto parse_out;
656 : }
657 0 : sc->sc_capa_max_probe_len
658 0 : = le32toh(*(uint32_t *)tlv_data);
659 0 : if (sc->sc_capa_max_probe_len >
660 : IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
661 : err = EINVAL;
662 0 : goto parse_out;
663 : }
664 : break;
665 : case IWM_UCODE_TLV_PAN:
666 0 : if (tlv_len) {
667 : err = EINVAL;
668 0 : goto parse_out;
669 : }
670 0 : sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
671 0 : break;
672 : case IWM_UCODE_TLV_FLAGS:
673 0 : if (tlv_len < sizeof(uint32_t)) {
674 : err = EINVAL;
675 0 : goto parse_out;
676 : }
677 : /*
678 : * Apparently there can be many flags, but the Linux driver
679 : * parses only the first one, and so do we.
680 : *
681 : * XXX: why does this override IWM_UCODE_TLV_PAN?
682 : * Intentional or a bug? Observations from the
683 : * current firmware file:
684 : * 1) TLV_PAN is parsed first
685 : * 2) TLV_FLAGS contains TLV_FLAGS_PAN
686 : * ==> this merely resets TLV_PAN to itself, which is redundant.
687 : */
688 0 : sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
689 0 : break;
690 : case IWM_UCODE_TLV_CSCHEME:
691 0 : err = iwm_store_cscheme(sc, tlv_data, tlv_len);
692 0 : if (err)
693 0 : goto parse_out;
694 : break;
695 : case IWM_UCODE_TLV_NUM_OF_CPU: {
696 : uint32_t num_cpu;
697 0 : if (tlv_len != sizeof(uint32_t)) {
698 : err = EINVAL;
699 0 : goto parse_out;
700 : }
701 0 : num_cpu = le32toh(*(uint32_t *)tlv_data);
702 0 : if (num_cpu < 1 || num_cpu > 2) {
703 : err = EINVAL;
704 0 : goto parse_out;
705 : }
706 0 : break;
707 : }
708 : case IWM_UCODE_TLV_SEC_RT:
709 0 : err = iwm_firmware_store_section(sc,
710 : IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
711 0 : if (err)
712 0 : goto parse_out;
713 : break;
714 : case IWM_UCODE_TLV_SEC_INIT:
715 0 : err = iwm_firmware_store_section(sc,
716 : IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
717 0 : if (err)
718 0 : goto parse_out;
719 : break;
720 : case IWM_UCODE_TLV_SEC_WOWLAN:
721 0 : err = iwm_firmware_store_section(sc,
722 : IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
723 0 : if (err)
724 0 : goto parse_out;
725 : break;
726 : case IWM_UCODE_TLV_DEF_CALIB:
727 0 : if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
728 : err = EINVAL;
729 0 : goto parse_out;
730 : }
731 0 : err = iwm_set_default_calib(sc, tlv_data);
732 0 : if (err)
733 0 : goto parse_out;
734 : break;
735 : case IWM_UCODE_TLV_PHY_SKU:
736 0 : if (tlv_len != sizeof(uint32_t)) {
737 : err = EINVAL;
738 0 : goto parse_out;
739 : }
740 0 : sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
741 0 : break;
742 :
743 : case IWM_UCODE_TLV_API_CHANGES_SET: {
744 : struct iwm_ucode_api *api;
745 0 : if (tlv_len != sizeof(*api)) {
746 : err = EINVAL;
747 0 : goto parse_out;
748 : }
749 0 : api = (struct iwm_ucode_api *)tlv_data;
750 : /* Flags may exceed 32 bits in future firmware. */
751 0 : if (le32toh(api->api_index) > 0) {
752 0 : goto parse_out;
753 : }
754 0 : sc->sc_ucode_api = le32toh(api->api_flags);
755 0 : break;
756 : }
757 :
758 : case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
759 : struct iwm_ucode_capa *capa;
760 : int idx, i;
761 0 : if (tlv_len != sizeof(*capa)) {
762 : err = EINVAL;
763 0 : goto parse_out;
764 : }
765 0 : capa = (struct iwm_ucode_capa *)tlv_data;
766 0 : idx = le32toh(capa->api_index);
767 0 : if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
768 0 : goto parse_out;
769 : }
770 0 : for (i = 0; i < 32; i++) {
771 0 : if ((le32toh(capa->api_capa) & (1 << i)) == 0)
772 : continue;
773 0 : setbit(sc->sc_enabled_capa, i + (32 * idx));
774 0 : }
775 0 : break;
776 : }
777 :
778 : case 48: /* undocumented TLV */
779 : case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
780 : case IWM_UCODE_TLV_FW_GSCAN_CAPA:
781 : /* ignore, not used by current driver */
782 : break;
783 :
784 : case IWM_UCODE_TLV_SEC_RT_USNIFFER:
785 0 : err = iwm_firmware_store_section(sc,
786 : IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
787 : tlv_len);
788 0 : if (err)
789 0 : goto parse_out;
790 : break;
791 :
792 : case IWM_UCODE_TLV_N_SCAN_CHANNELS:
793 0 : if (tlv_len != sizeof(uint32_t)) {
794 : err = EINVAL;
795 0 : goto parse_out;
796 : }
797 0 : sc->sc_capa_n_scan_channels =
798 0 : le32toh(*(uint32_t *)tlv_data);
799 0 : break;
800 :
801 : case IWM_UCODE_TLV_FW_VERSION:
802 0 : if (tlv_len != sizeof(uint32_t) * 3) {
803 : err = EINVAL;
804 0 : goto parse_out;
805 : }
806 0 : snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
807 : "%d.%d.%d",
808 0 : le32toh(((uint32_t *)tlv_data)[0]),
809 0 : le32toh(((uint32_t *)tlv_data)[1]),
810 0 : le32toh(((uint32_t *)tlv_data)[2]));
811 0 : break;
812 :
813 : case IWM_UCODE_TLV_FW_MEM_SEG:
814 : break;
815 :
816 : default:
817 : err = EINVAL;
818 0 : goto parse_out;
819 : }
820 :
821 0 : len -= roundup(tlv_len, 4);
822 0 : data += roundup(tlv_len, 4);
823 0 : }
824 :
825 0 : KASSERT(err == 0);
826 :
827 : parse_out:
828 0 : if (err) {
829 0 : printf("%s: firmware parse error %d, "
830 0 : "section type %d\n", DEVNAME(sc), err, tlv_type);
831 0 : }
832 :
833 0 : if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
834 0 : printf("%s: device uses unsupported power ops\n", DEVNAME(sc));
835 : err = ENOTSUP;
836 0 : }
837 :
838 : out:
839 0 : if (err) {
840 0 : fw->fw_status = IWM_FW_STATUS_NONE;
841 0 : if (fw->fw_rawdata != NULL)
842 0 : iwm_fw_info_free(fw);
843 : } else
844 0 : fw->fw_status = IWM_FW_STATUS_DONE;
845 0 : wakeup(&sc->sc_fw);
846 :
847 0 : return err;
848 0 : }
849 :
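/*
 * Indirect access to the device's periphery (PRPH) register space,
 * via the HBUS target address/data registers.  The NIC must be locked.
 */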
850 : uint32_t
851 0 : iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
852 : {
853 0 : iwm_nic_assert_locked(sc);
854 0 : IWM_WRITE(sc,
855 : IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
856 0 : IWM_BARRIER_READ_WRITE(sc);
857 0 : return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
858 : }
859 :
860 : void
861 0 : iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
862 : {
863 0 : iwm_nic_assert_locked(sc);
864 0 : IWM_WRITE(sc,
865 : IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
866 0 : IWM_BARRIER_WRITE(sc);
867 0 : IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
868 0 : }
869 :
870 : int
871 0 : iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
872 : {
873 : int offs, err = 0;
874 0 : uint32_t *vals = buf;
875 :
876 0 : if (iwm_nic_lock(sc)) {
877 0 : IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
878 0 : for (offs = 0; offs < dwords; offs++)
879 0 : vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
880 0 : iwm_nic_unlock(sc);
881 0 : } else {
882 : err = EBUSY;
883 : }
884 0 : return err;
885 : }
886 :
887 : int
888 0 : iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
889 : {
890 : int offs;
891 0 : const uint32_t *vals = buf;
892 :
893 0 : if (iwm_nic_lock(sc)) {
894 0 : IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
895 : /* WADDR auto-increments */
896 0 : for (offs = 0; offs < dwords; offs++) {
897 0 : uint32_t val = vals ? vals[offs] : 0;
898 0 : IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
899 : }
900 0 : iwm_nic_unlock(sc);
901 : } else {
902 0 : return EBUSY;
903 : }
904 0 : return 0;
905 0 : }
906 :
907 : int
908 0 : iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
909 : {
910 0 : return iwm_write_mem(sc, addr, &val, 1);
911 : }
912 :
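/*
 * Poll a CSR register until the masked bits match, checking every 10
 * microseconds.  Returns 1 on success, 0 once 'timo' microseconds expire.
 */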
913 : int
914 0 : iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
915 : int timo)
916 : {
917 0 : for (;;) {
918 0 : if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
919 0 : return 1;
920 : }
921 0 : if (timo < 10) {
922 0 : return 0;
923 : }
924 0 : timo -= 10;
925 0 : DELAY(10);
926 : }
927 0 : }
928 :
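/*
 * Request MAC access (keep the NIC awake) and take a reference-counted
 * lock so nested callers share one grab.  Returns 1 when the MAC clock
 * is ready, 0 on timeout.
 */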
929 : int
930 0 : iwm_nic_lock(struct iwm_softc *sc)
931 : {
932 0 : if (sc->sc_nic_locks > 0) {
933 0 : iwm_nic_assert_locked(sc);
934 0 : sc->sc_nic_locks++;
935 0 : return 1; /* already locked */
936 : }
937 :
938 0 : IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
939 : IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
940 :
941 0 : if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
942 0 : DELAY(2);
943 :
944 0 : if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
945 : IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
946 : IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
947 : | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
948 0 : sc->sc_nic_locks++;
949 0 : return 1;
950 : }
951 :
952 0 : printf("%s: acquiring device failed\n", DEVNAME(sc));
953 0 : return 0;
954 0 : }
955 :
956 : void
957 0 : iwm_nic_assert_locked(struct iwm_softc *sc)
958 : {
959 0 : uint32_t reg = IWM_READ(sc, IWM_CSR_GP_CNTRL);
960 0 : if ((reg & IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) == 0)
961 0 : panic("%s: mac clock not ready", DEVNAME(sc));
962 0 : if (reg & IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)
963 0 : panic("%s: mac gone to sleep", DEVNAME(sc));
964 0 : if (sc->sc_nic_locks <= 0)
965 0 : panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
966 0 : }
967 :
968 : void
969 0 : iwm_nic_unlock(struct iwm_softc *sc)
970 : {
971 0 : if (sc->sc_nic_locks > 0) {
972 0 : if (--sc->sc_nic_locks == 0)
973 0 : IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
974 : IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
975 : } else
976 0 : printf("%s: NIC already unlocked\n", DEVNAME(sc));
977 0 : }
978 :
979 : void
980 0 : iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
981 : uint32_t mask)
982 : {
983 : uint32_t val;
984 :
985 : /* XXX: no error path? */
986 0 : if (iwm_nic_lock(sc)) {
987 0 : val = iwm_read_prph(sc, reg) & mask;
988 0 : val |= bits;
989 0 : iwm_write_prph(sc, reg, val);
990 0 : iwm_nic_unlock(sc);
991 0 : }
992 0 : }
993 :
994 : void
995 0 : iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
996 : {
997 0 : iwm_set_bits_mask_prph(sc, reg, bits, ~0);
998 0 : }
999 :
1000 : void
1001 0 : iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
1002 : {
1003 0 : iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
1004 0 : }
1005 :
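/*
 * Allocate a physically contiguous, aligned DMA buffer, map it into
 * kernel virtual memory, zero it, and record its bus address in
 * dma->paddr.
 */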
1006 : int
1007 0 : iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
1008 : bus_size_t size, bus_size_t alignment)
1009 : {
1010 0 : int nsegs, err;
1011 0 : caddr_t va;
1012 :
1013 0 : dma->tag = tag;
1014 0 : dma->size = size;
1015 :
1016 0 : err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1017 : &dma->map);
1018 0 : if (err)
1019 : goto fail;
1020 :
1021 0 : err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1022 : BUS_DMA_NOWAIT);
1023 0 : if (err)
1024 : goto fail;
1025 :
1026 0 : err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
1027 : BUS_DMA_NOWAIT);
1028 0 : if (err)
1029 : goto fail;
1030 0 : dma->vaddr = va;
1031 :
1032 0 : err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1033 : BUS_DMA_NOWAIT);
1034 0 : if (err)
1035 : goto fail;
1036 :
1037 0 : memset(dma->vaddr, 0, size);
1038 0 : bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1039 0 : dma->paddr = dma->map->dm_segs[0].ds_addr;
1040 :
1041 0 : return 0;
1042 :
1043 0 : fail: iwm_dma_contig_free(dma);
1044 0 : return err;
1045 0 : }
1046 :
1047 : void
1048 0 : iwm_dma_contig_free(struct iwm_dma_info *dma)
1049 : {
1050 0 : if (dma->map != NULL) {
1051 0 : if (dma->vaddr != NULL) {
1052 0 : bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1053 : BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1054 0 : bus_dmamap_unload(dma->tag, dma->map);
1055 0 : bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1056 0 : bus_dmamem_free(dma->tag, &dma->seg, 1);
1057 0 : dma->vaddr = NULL;
1058 0 : }
1059 0 : bus_dmamap_destroy(dma->tag, dma->map);
1060 0 : dma->map = NULL;
1061 0 : }
1062 0 : }
1063 :
1064 : int
1065 0 : iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1066 : {
1067 : bus_size_t size;
1068 : int i, err;
1069 :
1070 0 : ring->cur = 0;
1071 :
1072 : /* Allocate RX descriptors (256-byte aligned). */
1073 : size = IWM_RX_RING_COUNT * sizeof(uint32_t);
1074 0 : err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1075 0 : if (err) {
1076 0 : printf("%s: could not allocate RX ring DMA memory\n",
1077 0 : DEVNAME(sc));
1078 0 : goto fail;
1079 : }
1080 0 : ring->desc = ring->desc_dma.vaddr;
1081 :
1082 : /* Allocate RX status area (16-byte aligned). */
1083 0 : err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1084 : sizeof(*ring->stat), 16);
1085 0 : if (err) {
1086 0 : printf("%s: could not allocate RX status DMA memory\n",
1087 0 : DEVNAME(sc));
1088 0 : goto fail;
1089 : }
1090 0 : ring->stat = ring->stat_dma.vaddr;
1091 :
1092 0 : for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1093 0 : struct iwm_rx_data *data = &ring->data[i];
1094 :
1095 0 : memset(data, 0, sizeof(*data));
1096 0 : err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
1097 : IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1098 : &data->map);
1099 0 : if (err) {
1100 0 : printf("%s: could not create RX buf DMA map\n",
1101 0 : DEVNAME(sc));
1102 0 : goto fail;
1103 : }
1104 :
1105 0 : err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
1106 0 : if (err)
1107 0 : goto fail;
1108 0 : }
1109 0 : return 0;
1110 :
1111 0 : fail: iwm_free_rx_ring(sc, ring);
1112 0 : return err;
1113 0 : }
1114 :
1115 : void
1116 0 : iwm_disable_rx_dma(struct iwm_softc *sc)
1117 : {
1118 : int ntries;
1119 :
1120 0 : if (iwm_nic_lock(sc)) {
1121 0 : IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1122 0 : for (ntries = 0; ntries < 1000; ntries++) {
1123 0 : if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
1124 : IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
1125 : break;
1126 0 : DELAY(10);
1127 : }
1128 0 : iwm_nic_unlock(sc);
1129 0 : }
1130 0 : }
1131 :
1132 : void
1133 0 : iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1134 : {
1135 0 : ring->cur = 0;
1136 0 : bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1137 : ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1138 0 : memset(ring->stat, 0, sizeof(*ring->stat));
1139 0 : bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1140 : ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
1141 :
1142 0 : }
1143 :
1144 : void
1145 0 : iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1146 : {
1147 : int i;
1148 :
1149 0 : iwm_dma_contig_free(&ring->desc_dma);
1150 0 : iwm_dma_contig_free(&ring->stat_dma);
1151 :
1152 0 : for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1153 0 : struct iwm_rx_data *data = &ring->data[i];
1154 :
1155 0 : if (data->m != NULL) {
1156 0 : bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1157 : data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1158 0 : bus_dmamap_unload(sc->sc_dmat, data->map);
1159 0 : m_freem(data->m);
1160 0 : data->m = NULL;
1161 0 : }
1162 0 : if (data->map != NULL)
1163 0 : bus_dmamap_destroy(sc->sc_dmat, data->map);
1164 : }
1165 0 : }
1166 :
1167 : int
1168 0 : iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1169 : {
1170 : bus_addr_t paddr;
1171 : bus_size_t size;
1172 : int i, err;
1173 :
1174 0 : ring->qid = qid;
1175 0 : ring->queued = 0;
1176 0 : ring->cur = 0;
1177 :
1178 : /* Allocate TX descriptors (256-byte aligned). */
1179 : size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1180 0 : err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1181 0 : if (err) {
1182 0 : printf("%s: could not allocate TX ring DMA memory\n",
1183 0 : DEVNAME(sc));
1184 0 : goto fail;
1185 : }
1186 0 : ring->desc = ring->desc_dma.vaddr;
1187 :
1188 : /*
1189 : * We only use rings 0 through 9 (4 EDCA + cmd), so there is no need
1190 : * to allocate command space for the other rings.
1191 : */
1192 0 : if (qid > IWM_CMD_QUEUE)
1193 0 : return 0;
1194 :
1195 : size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1196 0 : err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1197 0 : if (err) {
1198 0 : printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
1199 0 : goto fail;
1200 : }
1201 0 : ring->cmd = ring->cmd_dma.vaddr;
1202 :
1203 0 : paddr = ring->cmd_dma.paddr;
1204 0 : for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1205 0 : struct iwm_tx_data *data = &ring->data[i];
1206 : size_t mapsize;
1207 :
1208 0 : data->cmd_paddr = paddr;
1209 0 : data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1210 0 : + offsetof(struct iwm_tx_cmd, scratch);
1211 0 : paddr += sizeof(struct iwm_device_cmd);
1212 :
1213 : /* FW commands may require more mapped space than packets. */
1214 0 : if (qid == IWM_CMD_QUEUE)
1215 0 : mapsize = (sizeof(struct iwm_cmd_header) +
1216 : IWM_MAX_CMD_PAYLOAD_SIZE);
1217 : else
1218 : mapsize = MCLBYTES;
1219 0 : err = bus_dmamap_create(sc->sc_dmat, mapsize,
1220 : IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
1221 : &data->map);
1222 0 : if (err) {
1223 0 : printf("%s: could not create TX buf DMA map\n",
1224 0 : DEVNAME(sc));
1225 0 : goto fail;
1226 : }
1227 0 : }
1228 0 : KASSERT(paddr == ring->cmd_dma.paddr + size);
1229 0 : return 0;
1230 :
1231 0 : fail: iwm_free_tx_ring(sc, ring);
1232 0 : return err;
1233 0 : }
1234 :
1235 : void
1236 0 : iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1237 : {
1238 : int i;
1239 :
1240 0 : for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1241 0 : struct iwm_tx_data *data = &ring->data[i];
1242 :
1243 0 : if (data->m != NULL) {
1244 0 : bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1245 : data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1246 0 : bus_dmamap_unload(sc->sc_dmat, data->map);
1247 0 : m_freem(data->m);
1248 0 : data->m = NULL;
1249 0 : }
1250 : }
1251 : /* Clear TX descriptors. */
1252 0 : memset(ring->desc, 0, ring->desc_dma.size);
1253 0 : bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1254 : ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1255 0 : sc->qfullmsk &= ~(1 << ring->qid);
1256 : /* 7000 family NICs are locked while commands are in progress. */
1257 0 : if (ring->qid == IWM_CMD_QUEUE && ring->queued > 0) {
1258 0 : if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1259 0 : iwm_nic_unlock(sc);
1260 : }
1261 0 : ring->queued = 0;
1262 0 : ring->cur = 0;
1263 0 : }
1264 :
1265 : void
1266 0 : iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1267 : {
1268 : int i;
1269 :
1270 0 : iwm_dma_contig_free(&ring->desc_dma);
1271 0 : iwm_dma_contig_free(&ring->cmd_dma);
1272 :
1273 0 : for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1274 0 : struct iwm_tx_data *data = &ring->data[i];
1275 :
1276 0 : if (data->m != NULL) {
1277 0 : bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1278 : data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1279 0 : bus_dmamap_unload(sc->sc_dmat, data->map);
1280 0 : m_freem(data->m);
1281 0 : data->m = NULL;
1282 0 : }
1283 0 : if (data->map != NULL)
1284 0 : bus_dmamap_destroy(sc->sc_dmat, data->map);
1285 : }
1286 0 : }
1287 :
1288 : void
1289 0 : iwm_enable_rfkill_int(struct iwm_softc *sc)
1290 : {
1291 0 : sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
1292 0 : IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1293 0 : }
1294 :
1295 : int
1296 0 : iwm_check_rfkill(struct iwm_softc *sc)
1297 : {
1298 : uint32_t v;
1299 : int s;
1300 : int rv;
1301 :
1302 0 : s = splnet();
1303 :
1304 : /*
1305 : * "documentation" is not really helpful here:
1306 : * 27: HW_RF_KILL_SW
1307 : * Indicates state of (platform's) hardware RF-Kill switch
1308 : *
1309 : * But apparently the sense is inverted: when the switch is off, the bit is on ...
1310 : */
1311 0 : v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1312 0 : rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1313 0 : if (rv) {
1314 0 : sc->sc_flags |= IWM_FLAG_RFKILL;
1315 0 : } else {
1316 0 : sc->sc_flags &= ~IWM_FLAG_RFKILL;
1317 : }
1318 :
1319 0 : splx(s);
1320 0 : return rv;
1321 : }
1322 :
1323 : void
1324 0 : iwm_enable_interrupts(struct iwm_softc *sc)
1325 : {
1326 0 : sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1327 0 : IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1328 0 : }
1329 :
1330 : void
1331 0 : iwm_restore_interrupts(struct iwm_softc *sc)
1332 : {
1333 0 : IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1334 0 : }
1335 :
1336 : void
1337 0 : iwm_disable_interrupts(struct iwm_softc *sc)
1338 : {
1339 0 : int s = splnet();
1340 :
1341 0 : IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1342 :
1343 : /* acknowledge all interrupts */
1344 0 : IWM_WRITE(sc, IWM_CSR_INT, ~0);
1345 0 : IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1346 :
1347 0 : splx(s);
1348 0 : }
1349 :
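/*
 * Reset the interrupt cause table (ICT) and switch the driver into
 * ICT interrupt mode.
 */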
1350 : void
1351 0 : iwm_ict_reset(struct iwm_softc *sc)
1352 : {
1353 0 : iwm_disable_interrupts(sc);
1354 :
1355 0 : memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1356 0 : sc->ict_cur = 0;
1357 :
1358 : /* Set physical address of ICT (4KB aligned). */
1359 0 : IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1360 : IWM_CSR_DRAM_INT_TBL_ENABLE
1361 : | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1362 : | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1363 : | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1364 :
1365 : /* Switch to ICT interrupt mode in driver. */
1366 0 : sc->sc_flags |= IWM_FLAG_USE_ICT;
1367 :
1368 0 : IWM_WRITE(sc, IWM_CSR_INT, ~0);
1369 0 : iwm_enable_interrupts(sc);
1370 0 : }
1371 :
1372 : #define IWM_HW_READY_TIMEOUT 50
1373 : int
1374 0 : iwm_set_hw_ready(struct iwm_softc *sc)
1375 : {
1376 : int ready;
1377 :
1378 0 : IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1379 : IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1380 :
1381 0 : ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
1382 : IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1383 : IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1384 : IWM_HW_READY_TIMEOUT);
1385 0 : if (ready)
1386 0 : IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
1387 : IWM_CSR_MBOX_SET_REG_OS_ALIVE);
1388 :
1389 0 : return ready;
1390 : }
1391 : #undef IWM_HW_READY_TIMEOUT
1392 :
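/*
 * Bring the NIC out of its "prepare" state by polling for hardware
 * readiness, retrying for up to 150 milliseconds before giving up.
 */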
1393 : int
1394 0 : iwm_prepare_card_hw(struct iwm_softc *sc)
1395 : {
1396 : int t = 0;
1397 :
1398 0 : if (iwm_set_hw_ready(sc))
1399 0 : return 0;
1400 :
1401 0 : DELAY(100);
1402 :
1403 : /* If HW is not ready, prepare the conditions to check again */
1404 0 : IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1405 : IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1406 :
1407 0 : do {
1408 0 : if (iwm_set_hw_ready(sc))
1409 0 : return 0;
1410 0 : DELAY(200);
1411 0 : t += 200;
1412 0 : } while (t < 150000);
1413 :
1414 0 : return ETIMEDOUT;
1415 0 : }
1416 :
1417 : void
1418 0 : iwm_apm_config(struct iwm_softc *sc)
1419 : {
1420 : pcireg_t reg;
1421 :
1422 0 : reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1423 0 : sc->sc_cap_off + PCI_PCIE_LCSR);
1424 0 : if (reg & PCI_PCIE_LCSR_ASPM_L1) {
1425 : /* Oddly, the Linux driver prints "Disabling L0S" for this case ... */
1426 0 : IWM_SETBITS(sc, IWM_CSR_GIO_REG,
1427 : IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1428 0 : } else {
1429 : /* ... and "Enabling" here */
1430 0 : IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
1431 : IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1432 : }
1433 0 : }
1434 :
1435 : /*
1436 : * Start up the NIC's basic functionality after it has been reset,
1437 : * e.g. after platform boot or shutdown.
1438 : * NOTE: This does not load uCode nor start the embedded processor.
1439 : */
1440 : int
1441 0 : iwm_apm_init(struct iwm_softc *sc)
1442 : {
1443 : int err = 0;
1444 :
1445 : /* Disable L0S exit timer (platform NMI workaround) */
1446 0 : if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
1447 0 : IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1448 : IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1449 :
1450 : /*
1451 : * Disable L0s without affecting L1;
1452 : * don't wait for ICH L0s (ICH bug W/A)
1453 : */
1454 0 : IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1455 : IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1456 :
1457 : /* Set FH wait threshold to maximum (HW error during stress W/A) */
1458 0 : IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1459 :
1460 : /*
1461 : * Enable HAP INTA (interrupt from management bus) to
1462 : * wake device's PCI Express link L1a -> L0s
1463 : */
1464 0 : IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1465 : IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1466 :
1467 0 : iwm_apm_config(sc);
1468 :
1469 : #if 0 /* not for 7k/8k */
1470 : /* Configure analog phase-lock-loop before activating to D0A */
1471 : if (trans->cfg->base_params->pll_cfg_val)
1472 : IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1473 : trans->cfg->base_params->pll_cfg_val);
1474 : #endif
1475 :
1476 : /*
1477 : * Set "initialization complete" bit to move adapter from
1478 : * D0U* --> D0A* (powered-up active) state.
1479 : */
1480 0 : IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1481 :
1482 : /*
1483 : * Wait for clock stabilization; once stabilized, access to
1484 : * device-internal resources is supported, e.g. iwm_write_prph()
1485 : * and accesses to uCode SRAM.
1486 : */
1487 0 : if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1488 : IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1489 : IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1490 0 : printf("%s: timeout waiting for clock stabilization\n",
1491 0 : DEVNAME(sc));
1492 : err = ETIMEDOUT;
1493 0 : goto out;
1494 : }
1495 :
1496 0 : if (sc->host_interrupt_operation_mode) {
1497 : /*
1498 : * This is a bit of an abuse: this workaround is needed for the
1499 : * 7260 / 3160 only, so we key it off host_interrupt_operation_mode
1500 : * even though it is not really related to that flag.
1501 : *
1502 : * Enable the oscillator to count wake up time for L1 exit. This
1503 : * consumes slightly more power (100uA), but allows us to be sure
1504 : * that we wake up from L1 on time.
1505 : *
1506 : * This looks weird: read the same register twice, discard the
1507 : * value, set a bit, and yet again, read that same register
1508 : * just to discard the value. But that's the way the hardware
1509 : * seems to like it.
1510 : */
1511 0 : if (iwm_nic_lock(sc)) {
1512 0 : iwm_read_prph(sc, IWM_OSC_CLK);
1513 0 : iwm_read_prph(sc, IWM_OSC_CLK);
1514 0 : iwm_nic_unlock(sc);
1515 0 : }
1516 0 : iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
1517 0 : if (iwm_nic_lock(sc)) {
1518 0 : iwm_read_prph(sc, IWM_OSC_CLK);
1519 0 : iwm_read_prph(sc, IWM_OSC_CLK);
1520 0 : iwm_nic_unlock(sc);
1521 0 : }
1522 : }
1523 :
1524 : /*
1525 : * Enable DMA clock and wait for it to stabilize.
1526 : *
1527 : * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1528 : * do not disable clocks. This preserves any hardware bits already
1529 : * set by default in "CLK_CTRL_REG" after reset.
1530 : */
1531 0 : if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1532 0 : if (iwm_nic_lock(sc)) {
1533 0 : iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1534 : IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1535 0 : iwm_nic_unlock(sc);
1536 0 : }
1537 0 : DELAY(20);
1538 :
1539 : /* Disable L1-Active */
1540 0 : iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1541 : IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1542 :
1543 : /* Clear the interrupt in APMG if the NIC is in RFKILL */
1544 0 : if (iwm_nic_lock(sc)) {
1545 0 : iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1546 : IWM_APMG_RTC_INT_STT_RFKILL);
1547 0 : iwm_nic_unlock(sc);
1548 0 : }
1549 : }
1550 : out:
1551 0 : if (err)
1552 0 : printf("%s: apm init error %d\n", DEVNAME(sc), err);
1553 0 : return err;
1554 : }
1555 :
1556 : void
1557 0 : iwm_apm_stop(struct iwm_softc *sc)
1558 : {
1559 : /* stop device's busmaster DMA activity */
1560 0 : IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
1561 :
1562 0 : if (!iwm_poll_bit(sc, IWM_CSR_RESET,
1563 : IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
1564 : IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
1565 0 : printf("%s: timeout waiting for master\n", DEVNAME(sc));
1566 0 : }
1567 :
1568 : int
1569 0 : iwm_start_hw(struct iwm_softc *sc)
1570 : {
1571 : int err;
1572 :
1573 0 : err = iwm_prepare_card_hw(sc);
1574 0 : if (err)
1575 0 : return err;
1576 :
1577 : /* Reset the entire device */
1578 0 : IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1579 0 : DELAY(10);
1580 :
1581 0 : err = iwm_apm_init(sc);
1582 0 : if (err)
1583 0 : return err;
1584 :
1585 0 : iwm_enable_rfkill_int(sc);
1586 0 : iwm_check_rfkill(sc);
1587 :
1588 0 : return 0;
1589 0 : }
1590 :
1591 :
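/*
 * Halt all DMA channels, reset the RX and TX rings, drop any stale NIC
 * locks, and put the device into a low power state.  RF-kill interrupts
 * remain enabled.
 */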
1592 : void
1593 0 : iwm_stop_device(struct iwm_softc *sc)
1594 : {
1595 : int chnl, ntries;
1596 : int qid;
1597 :
1598 0 : iwm_disable_interrupts(sc);
1599 0 : sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1600 :
1601 : /* Stop all DMA channels. */
1602 0 : if (iwm_nic_lock(sc)) {
1603 : /* Deactivate TX scheduler. */
1604 0 : iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1605 :
1606 0 : for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1607 0 : IWM_WRITE(sc,
1608 : IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1609 0 : for (ntries = 0; ntries < 200; ntries++) {
1610 : uint32_t r;
1611 :
1612 0 : r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
1613 0 : if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
1614 : chnl))
1615 0 : break;
1616 0 : DELAY(20);
1617 0 : }
1618 : }
1619 0 : iwm_nic_unlock(sc);
1620 0 : }
1621 0 : iwm_disable_rx_dma(sc);
1622 :
1623 0 : iwm_reset_rx_ring(sc, &sc->rxq);
1624 :
1625 0 : for (qid = 0; qid < nitems(sc->txq); qid++)
1626 0 : iwm_reset_tx_ring(sc, &sc->txq[qid]);
1627 :
1628 0 : if (iwm_nic_lock(sc)) {
1629 : /* Power-down device's busmaster DMA clocks */
1630 0 : iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1631 : IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1632 0 : iwm_nic_unlock(sc);
1633 0 : }
1634 0 : DELAY(5);
1635 :
1636 : /* Make sure (redundant) we've released our request to stay awake */
1637 0 : IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1638 : IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1639 0 : if (sc->sc_nic_locks > 0)
1640 0 : printf("%s: %d active NIC locks forcefully cleared\n",
1641 0 : DEVNAME(sc), sc->sc_nic_locks);
1642 0 : sc->sc_nic_locks = 0;
1643 :
1644 : /* Stop the device, and put it in low power state */
1645 0 : iwm_apm_stop(sc);
1646 :
1647 : /*
1648 : * Upon stop, the APM issues an interrupt if HW RF kill is set.
1649 : * Clear the interrupt again.
1650 : */
1651 0 : iwm_disable_interrupts(sc);
1652 :
1653 : /* Reset the on-board processor. */
1654 0 : IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1655 :
1656 : /* Even though we stop the HW we still want the RF kill interrupt. */
1657 0 : iwm_enable_rfkill_int(sc);
1658 0 : iwm_check_rfkill(sc);
1659 0 : }
1660 :
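 : /*
 : * Program the hardware interface configuration register with the MAC
 : * step/dash taken from the hardware revision and the radio
 : * type/step/dash taken from the firmware PHY configuration.
 : */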
1661 : void
1662 0 : iwm_nic_config(struct iwm_softc *sc)
1663 : {
1664 : uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1665 : uint32_t reg_val = 0;
1666 :
1667 0 : radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1668 : IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1669 0 : radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1670 : IWM_FW_PHY_CFG_RADIO_STEP_POS;
1671 0 : radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1672 : IWM_FW_PHY_CFG_RADIO_DASH_POS;
1673 :
1674 0 : reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1675 : IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1676 0 : reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1677 : IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1678 :
1679 : /* radio configuration */
1680 0 : reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1681 0 : reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1682 0 : reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1683 :
1684 0 : IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1685 :
1686 : /*
1687 : * W/A : NIC is stuck in a reset state after Early PCIe power off
1688 : * (PCIe power is lost before PERST# is asserted), causing the ME FW
1689 : * to lose ownership and not be able to obtain it back.
1690 : */
1691 0 : if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1692 0 : iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1693 : IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1694 : ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1695 0 : }
1696 :
1697 : int
1698 0 : iwm_nic_rx_init(struct iwm_softc *sc)
1699 : {
1700 0 : memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1701 :
1702 0 : iwm_disable_rx_dma(sc);
1703 :
1704 0 : if (!iwm_nic_lock(sc))
1705 0 : return EBUSY;
1706 :
1707 : /* reset and flush pointers */
1708 0 : IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1709 0 : IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1710 0 : IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1711 0 : IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1712 :
1713 : /* Set physical address of RX ring (256-byte aligned). */
1714 0 : IWM_WRITE(sc,
1715 : IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1716 :
1717 : /* Set physical address of RX status (16-byte aligned). */
1718 0 : IWM_WRITE(sc,
1719 : IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1720 :
1721 : /* Enable RX. */
1722 0 : IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1723 : IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
1724 : IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
1725 : IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
1726 : (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1727 : IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
1728 : IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1729 :
1730 0 : IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1731 :
1732 : /* W/A for interrupt coalescing bug in 7260 and 3160 */
1733 0 : if (sc->host_interrupt_operation_mode)
1734 0 : IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1735 :
1736 : /*
1737 : * This value should initially be 0 (before preparing any RBs),
1738 : * and should be 8 after preparing the first 8 RBs (for example).
1739 : */
1740 0 : IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1741 :
1742 0 : iwm_nic_unlock(sc);
1743 :
1744 0 : return 0;
1745 0 : }
1746 :
1747 : int
1748 0 : iwm_nic_tx_init(struct iwm_softc *sc)
1749 : {
1750 : int qid;
1751 :
1752 0 : if (!iwm_nic_lock(sc))
1753 0 : return EBUSY;
1754 :
1755 : /* Deactivate TX scheduler. */
1756 0 : iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1757 :
1758 : /* Set physical address of "keep warm" page (16-byte aligned). */
1759 0 : IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1760 :
1761 0 : for (qid = 0; qid < nitems(sc->txq); qid++) {
1762 0 : struct iwm_tx_ring *txq = &sc->txq[qid];
1763 :
1764 : /* Set physical address of TX ring (256-byte aligned). */
1765 0 : IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1766 : txq->desc_dma.paddr >> 8);
1767 : }
1768 :
1769 0 : iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1770 :
1771 0 : iwm_nic_unlock(sc);
1772 :
1773 0 : return 0;
1774 0 : }
1775 :
1776 : int
1777 0 : iwm_nic_init(struct iwm_softc *sc)
1778 : {
1779 : int err;
1780 :
1781 0 : iwm_apm_init(sc);
1782 0 : if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1783 0 : iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1784 : IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
1785 : ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
1786 :
1787 0 : iwm_nic_config(sc);
1788 :
1789 0 : err = iwm_nic_rx_init(sc);
1790 0 : if (err)
1791 0 : return err;
1792 :
1793 0 : err = iwm_nic_tx_init(sc);
1794 0 : if (err)
1795 0 : return err;
1796 :
1797 0 : IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1798 :
1799 0 : return 0;
1800 0 : }
1801 :
1802 : const uint8_t iwm_ac_to_tx_fifo[] = {
1803 : IWM_TX_FIFO_VO,
1804 : IWM_TX_FIFO_VI,
1805 : IWM_TX_FIFO_BE,
1806 : IWM_TX_FIFO_BK,
1807 : };
1808 :
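 : /*
 : * Enable a TX queue. The command queue is configured directly via
 : * scheduler registers; other queues are configured by the firmware
 : * through an IWM_SCD_QUEUE_CFG command.
 : */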
1809 : int
1810 0 : iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1811 : {
1812 0 : iwm_nic_assert_locked(sc);
1813 :
1814 0 : IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1815 :
1816 0 : if (qid == IWM_CMD_QUEUE) {
1817 0 : iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1818 : (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1819 : | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1820 :
1821 0 : iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1822 :
1823 0 : iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1824 :
1825 0 : iwm_write_mem32(sc,
1826 0 : sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1827 :
1828 : /* Set scheduler window size and frame limit. */
1829 0 : iwm_write_mem32(sc,
1830 0 : sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1831 : sizeof(uint32_t),
1832 : ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1833 : IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1834 : ((IWM_FRAME_LIMIT
1835 : << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1836 : IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1837 :
1838 0 : iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1839 0 : (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1840 0 : (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1841 0 : (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1842 : IWM_SCD_QUEUE_STTS_REG_MSK);
1843 0 : } else {
1844 0 : struct iwm_scd_txq_cfg_cmd cmd;
1845 : int err;
1846 :
1847 0 : memset(&cmd, 0, sizeof(cmd));
1848 0 : cmd.scd_queue = qid;
1849 0 : cmd.enable = 1;
1850 0 : cmd.sta_id = sta_id;
1851 0 : cmd.tx_fifo = fifo;
1852 0 : cmd.aggregate = 0;
1853 0 : cmd.window = IWM_FRAME_LIMIT;
1854 :
1855 0 : err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0,
1856 : sizeof(cmd), &cmd);
1857 0 : if (err)
1858 0 : return err;
1859 0 : }
1860 :
1861 0 : iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1862 0 : iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1863 :
1864 0 : return 0;
1865 0 : }
1866 :
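 : /*
 : * Finish hardware setup once the firmware has reported "alive":
 : * clear the scheduler's SRAM context, set up the command queue,
 : * and enable the TX DMA channels.
 : */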
1867 : int
1868 0 : iwm_post_alive(struct iwm_softc *sc)
1869 : {
1870 : int nwords;
1871 : int err, chnl;
1872 : uint32_t base;
1873 :
1874 0 : if (!iwm_nic_lock(sc))
1875 0 : return EBUSY;
1876 :
1877 0 : base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1878 :
1879 0 : iwm_ict_reset(sc);
1880 :
1881 : /* Clear TX scheduler state in SRAM. */
1882 : nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1883 : IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
1884 : / sizeof(uint32_t);
1885 0 : err = iwm_write_mem(sc,
1886 0 : sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1887 : NULL, nwords);
1888 0 : if (err)
1889 : goto out;
1890 :
1891 : /* Set physical address of TX scheduler rings (1KB aligned). */
1892 0 : iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1893 :
1894 0 : iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1895 :
1896 : /* enable command channel */
1897 0 : err = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
1898 0 : if (err)
1899 : goto out;
1900 :
1901 : /* Activate TX scheduler. */
1902 0 : iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1903 :
1904 : /* Enable DMA channels. */
1905 0 : for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1906 0 : IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1907 : IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1908 : IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1909 : }
1910 :
1911 0 : IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1912 : IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1913 :
1914 : /* Enable L1-Active */
1915 0 : if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
1916 0 : iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1917 : IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1918 :
1919 : out:
1920 0 : iwm_nic_unlock(sc);
1921 0 : return err;
1922 0 : }
1923 :
1924 : struct iwm_phy_db_entry *
1925 0 : iwm_phy_db_get_section(struct iwm_softc *sc, uint16_t type, uint16_t chg_id)
1926 : {
1927 0 : struct iwm_phy_db *phy_db = &sc->sc_phy_db;
1928 :
1929 0 : if (type >= IWM_PHY_DB_MAX)
1930 0 : return NULL;
1931 :
1932 0 : switch (type) {
1933 : case IWM_PHY_DB_CFG:
1934 0 : return &phy_db->cfg;
1935 : case IWM_PHY_DB_CALIB_NCH:
1936 0 : return &phy_db->calib_nch;
1937 : case IWM_PHY_DB_CALIB_CHG_PAPD:
1938 0 : if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
1939 0 : return NULL;
1940 0 : return &phy_db->calib_ch_group_papd[chg_id];
1941 : case IWM_PHY_DB_CALIB_CHG_TXP:
1942 0 : if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
1943 0 : return NULL;
1944 0 : return &phy_db->calib_ch_group_txp[chg_id];
1945 : default:
1946 0 : return NULL;
1947 : }
1948 : return NULL;
1949 0 : }
1950 :
1951 : int
1952 0 : iwm_phy_db_set_section(struct iwm_softc *sc,
1953 : struct iwm_calib_res_notif_phy_db *phy_db_notif)
1954 : {
1955 0 : uint16_t type = le16toh(phy_db_notif->type);
1956 0 : uint16_t size = le16toh(phy_db_notif->length);
1957 : struct iwm_phy_db_entry *entry;
1958 : uint16_t chg_id = 0;
1959 :
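 : 	/*
 : 	 * Per-channel-group calibration sections carry their channel
 : 	 * group id in the first 16-bit word of the payload.
 : 	 */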
1960 0 : if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
1961 0 : type == IWM_PHY_DB_CALIB_CHG_TXP)
1962 0 : chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
1963 :
1964 0 : entry = iwm_phy_db_get_section(sc, type, chg_id);
1965 0 : if (!entry)
1966 0 : return EINVAL;
1967 :
1968 0 : if (entry->data)
1969 0 : free(entry->data, M_DEVBUF, entry->size);
1970 0 : entry->data = malloc(size, M_DEVBUF, M_NOWAIT);
1971 0 : if (!entry->data) {
1972 0 : entry->size = 0;
1973 0 : return ENOMEM;
1974 : }
1975 0 : memcpy(entry->data, phy_db_notif->data, size);
1976 0 : entry->size = size;
1977 :
1978 0 : return 0;
1979 0 : }
1980 :
1981 : int
1982 0 : iwm_is_valid_channel(uint16_t ch_id)
1983 : {
1984 0 : if (ch_id <= 14 ||
1985 0 : (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
1986 0 : (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
1987 0 : (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
1988 0 : return 1;
1989 0 : return 0;
1990 0 : }
1991 :
1992 : uint8_t
1993 0 : iwm_ch_id_to_ch_index(uint16_t ch_id)
1994 : {
1995 0 : if (!iwm_is_valid_channel(ch_id))
1996 0 : return 0xff;
1997 :
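 : 	/*
 : 	 * Map the channel number to its index in the NVM channel list:
 : 	 * 2GHz channels 1-14 occupy indices 0-13, followed by the
 : 	 * valid 5GHz channels.
 : 	 */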
1998 0 : if (ch_id <= 14)
1999 0 : return ch_id - 1;
2000 0 : if (ch_id <= 64)
2001 0 : return (ch_id + 20) / 4;
2002 0 : if (ch_id <= 140)
2003 0 : return (ch_id - 12) / 4;
2004 0 : return (ch_id - 13) / 4;
2005 0 : }
2006 :
2007 :
2008 : uint16_t
2009 0 : iwm_channel_id_to_papd(uint16_t ch_id)
2010 : {
2011 0 : if (!iwm_is_valid_channel(ch_id))
2012 0 : return 0xff;
2013 :
2014 0 : if (1 <= ch_id && ch_id <= 14)
2015 0 : return 0;
2016 0 : if (36 <= ch_id && ch_id <= 64)
2017 0 : return 1;
2018 0 : if (100 <= ch_id && ch_id <= 140)
2019 0 : return 2;
2020 0 : return 3;
2021 0 : }
2022 :
2023 : uint16_t
2024 0 : iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2025 : {
2026 0 : struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2027 : struct iwm_phy_db_chg_txp *txp_chg;
2028 : int i;
2029 0 : uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2030 :
2031 0 : if (ch_index == 0xff)
2032 0 : return 0xff;
2033 :
2034 0 : for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2035 0 : txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2036 0 : if (!txp_chg)
2037 0 : return 0xff;
2038 : /*
2039 : * Look for the first channel group whose maximum channel index
2040 : * is not lower than the index of the requested channel.
2041 : */
2042 0 : if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2043 0 : return i;
2044 : }
2045 0 : return 0xff;
2046 0 : }
2047 :
2048 : int
2049 0 : iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2050 : uint16_t *size, uint16_t ch_id)
2051 : {
2052 : struct iwm_phy_db_entry *entry;
2053 : uint16_t ch_group_id = 0;
2054 :
2055 0 : if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2056 0 : ch_group_id = iwm_channel_id_to_papd(ch_id);
2057 0 : else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2058 0 : ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2059 :
2060 0 : entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2061 0 : if (!entry)
2062 0 : return EINVAL;
2063 :
2064 0 : *data = entry->data;
2065 0 : *size = entry->size;
2066 :
2067 0 : return 0;
2068 0 : }
2069 :
2070 : int
2071 0 : iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2072 : void *data)
2073 : {
2074 0 : struct iwm_phy_db_cmd phy_db_cmd;
2075 0 : struct iwm_host_cmd cmd = {
2076 : .id = IWM_PHY_DB_CMD,
2077 : .flags = IWM_CMD_ASYNC,
2078 : };
2079 :
2080 0 : phy_db_cmd.type = le16toh(type);
2081 0 : phy_db_cmd.length = le16toh(length);
2082 :
2083 0 : cmd.data[0] = &phy_db_cmd;
2084 0 : cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2085 0 : cmd.data[1] = data;
2086 0 : cmd.len[1] = length;
2087 :
2088 0 : return iwm_send_cmd(sc, &cmd);
2089 0 : }
2090 :
2091 : int
2092 0 : iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc, uint16_t type,
2093 : uint8_t max_ch_groups)
2094 : {
2095 : uint16_t i;
2096 : int err;
2097 : struct iwm_phy_db_entry *entry;
2098 :
2099 0 : for (i = 0; i < max_ch_groups; i++) {
2100 0 : entry = iwm_phy_db_get_section(sc, type, i);
2101 0 : if (!entry)
2102 0 : return EINVAL;
2103 :
2104 0 : if (!entry->size)
2105 : continue;
2106 :
2107 0 : err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2108 0 : if (err)
2109 0 : return err;
2110 :
2111 0 : DELAY(1000);
2112 0 : }
2113 :
2114 0 : return 0;
2115 0 : }
2116 :
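 : /*
 : * Upload the PHY DB (configuration, non-channel calibration, and the
 : * per-channel-group PAPD and TX power calibration results collected
 : * from the init firmware) to the running firmware.
 : */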
2117 : int
2118 0 : iwm_send_phy_db_data(struct iwm_softc *sc)
2119 : {
2120 0 : uint8_t *data = NULL;
2121 0 : uint16_t size = 0;
2122 : int err;
2123 :
2124 0 : err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2125 0 : if (err)
2126 0 : return err;
2127 :
2128 0 : err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2129 0 : if (err)
2130 0 : return err;
2131 :
2132 0 : err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2133 : &data, &size, 0);
2134 0 : if (err)
2135 0 : return err;
2136 :
2137 0 : err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2138 0 : if (err)
2139 0 : return err;
2140 :
2141 0 : err = iwm_phy_db_send_all_channel_groups(sc,
2142 : IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2143 0 : if (err)
2144 0 : return err;
2145 :
2146 0 : err = iwm_phy_db_send_all_channel_groups(sc,
2147 : IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2148 0 : if (err)
2149 0 : return err;
2150 :
2151 0 : return 0;
2152 0 : }
2153 :
2154 : /*
2155 : * For the high priority TE use a time event type that has similar priority to
2156 : * the FW's action scan priority.
2157 : */
2158 : #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2159 : #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2160 :
2161 : /* used to convert from time event API v2 to v1 */
2162 : #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
2163 : IWM_TE_V2_EVENT_SOCIOPATHIC)
2164 : static inline uint16_t
2165 0 : iwm_te_v2_get_notify(uint16_t policy)
2166 : {
2167 0 : return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
2168 : }
2169 :
2170 : static inline uint16_t
2171 0 : iwm_te_v2_get_dep_policy(uint16_t policy)
2172 : {
2173 0 : return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
2174 : IWM_TE_V2_PLACEMENT_POS;
2175 : }
2176 :
2177 : static inline uint16_t
2178 0 : iwm_te_v2_get_absence(uint16_t policy)
2179 : {
2180 0 : return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
2181 : }
2182 :
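 : /*
 : * Convert a version-2 time event command to the older version-1
 : * layout, for firmware which lacks the v2 time event API.
 : */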
2183 : void
2184 0 : iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
2185 : struct iwm_time_event_cmd_v1 *cmd_v1)
2186 : {
2187 0 : cmd_v1->id_and_color = cmd_v2->id_and_color;
2188 0 : cmd_v1->action = cmd_v2->action;
2189 0 : cmd_v1->id = cmd_v2->id;
2190 0 : cmd_v1->apply_time = cmd_v2->apply_time;
2191 0 : cmd_v1->max_delay = cmd_v2->max_delay;
2192 0 : cmd_v1->depends_on = cmd_v2->depends_on;
2193 0 : cmd_v1->interval = cmd_v2->interval;
2194 0 : cmd_v1->duration = cmd_v2->duration;
2195 0 : if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
2196 0 : cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
2197 : else
2198 0 : cmd_v1->repeat = htole32(cmd_v2->repeat);
2199 0 : cmd_v1->max_frags = htole32(cmd_v2->max_frags);
2200 0 : cmd_v1->interval_reciprocal = 0; /* unused */
2201 :
2202 0 : cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
2203 0 : cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
2204 0 : cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
2205 0 : }
2206 :
2207 : int
2208 0 : iwm_send_time_event_cmd(struct iwm_softc *sc,
2209 : const struct iwm_time_event_cmd_v2 *cmd)
2210 : {
2211 0 : struct iwm_time_event_cmd_v1 cmd_v1;
2212 : struct iwm_rx_packet *pkt;
2213 : struct iwm_time_event_resp *resp;
2214 0 : struct iwm_host_cmd hcmd = {
2215 : .id = IWM_TIME_EVENT_CMD,
2216 : .flags = IWM_CMD_WANT_RESP,
2217 : .resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2218 : };
2219 : uint32_t resp_len;
2220 : int err;
2221 :
2222 0 : if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2) {
2223 0 : hcmd.data[0] = cmd;
2224 0 : hcmd.len[0] = sizeof(*cmd);
2225 0 : } else {
2226 0 : iwm_te_v2_to_v1(cmd, &cmd_v1);
2227 0 : hcmd.data[0] = &cmd_v1;
2228 0 : hcmd.len[0] = sizeof(cmd_v1);
2229 : }
2230 0 : err = iwm_send_cmd(sc, &hcmd);
2231 0 : if (err)
2232 0 : return err;
2233 :
2234 0 : pkt = hcmd.resp_pkt;
2235 0 : if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
2236 : err = EIO;
2237 0 : goto out;
2238 : }
2239 :
2240 0 : resp_len = iwm_rx_packet_payload_len(pkt);
2241 0 : if (resp_len != sizeof(*resp)) {
2242 : err = EIO;
2243 0 : goto out;
2244 : }
2245 :
2246 0 : resp = (void *)pkt->data;
2247 0 : if (le32toh(resp->status) == 0)
2248 0 : sc->sc_time_event_uid = le32toh(resp->unique_id);
2249 : else
2250 : err = EIO;
2251 : out:
2252 0 : iwm_free_resp(sc, &hcmd);
2253 0 : return err;
2254 0 : }
2255 :
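 : /*
 : * Schedule a time event to have the firmware protect the session,
 : * e.g. to stay on the channel while association is in progress.
 : */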
2256 : void
2257 0 : iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2258 : uint32_t duration, uint32_t max_delay)
2259 : {
2260 0 : struct iwm_time_event_cmd_v2 time_cmd;
2261 :
2262 : /* Do nothing if a time event is already scheduled. */
2263 0 : if (sc->sc_flags & IWM_FLAG_TE_ACTIVE)
2264 0 : return;
2265 :
2266 0 : memset(&time_cmd, 0, sizeof(time_cmd));
2267 :
2268 0 : time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2269 0 : time_cmd.id_and_color =
2270 0 : htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2271 0 : time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2272 :
2273 0 : time_cmd.apply_time = htole32(0);
2274 :
2275 0 : time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2276 0 : time_cmd.max_delay = htole32(max_delay);
2277 : /* TODO: why do we need to interval = bi if it is not periodic? */
2278 0 : time_cmd.interval = htole32(1);
2279 0 : time_cmd.duration = htole32(duration);
2280 0 : time_cmd.repeat = 1;
2281 0 : time_cmd.policy
2282 0 : = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2283 : IWM_TE_V2_NOTIF_HOST_EVENT_END |
2284 : IWM_T2_V2_START_IMMEDIATELY);
2285 :
2286 0 : if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2287 0 : sc->sc_flags |= IWM_FLAG_TE_ACTIVE;
2288 :
2289 0 : DELAY(100);
2290 0 : }
2291 :
2292 : void
2293 0 : iwm_unprotect_session(struct iwm_softc *sc, struct iwm_node *in)
2294 : {
2295 0 : struct iwm_time_event_cmd_v2 time_cmd;
2296 :
2297 : /* Do nothing if the time event has already ended. */
2298 0 : if ((sc->sc_flags & IWM_FLAG_TE_ACTIVE) == 0)
2299 0 : return;
2300 :
2301 0 : memset(&time_cmd, 0, sizeof(time_cmd));
2302 :
2303 0 : time_cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);
2304 0 : time_cmd.id_and_color =
2305 0 : htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2306 0 : time_cmd.id = htole32(sc->sc_time_event_uid);
2307 :
2308 0 : if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2309 0 : sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
2310 :
2311 0 : DELAY(100);
2312 0 : }
2313 :
2314 : /*
2315 : * NVM read access and content parsing. We do not support
2316 : * external NVM or writing NVM.
2317 : */
2318 :
2319 : /* list of NVM sections we are allowed/need to read */
2320 : const int iwm_nvm_to_read[] = {
2321 : IWM_NVM_SECTION_TYPE_HW,
2322 : IWM_NVM_SECTION_TYPE_SW,
2323 : IWM_NVM_SECTION_TYPE_REGULATORY,
2324 : IWM_NVM_SECTION_TYPE_CALIBRATION,
2325 : IWM_NVM_SECTION_TYPE_PRODUCTION,
2326 : IWM_NVM_SECTION_TYPE_HW_8000,
2327 : IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
2328 : IWM_NVM_SECTION_TYPE_PHY_SKU,
2329 : };
2330 :
2331 : #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
2332 : #define IWM_MAX_NVM_SECTION_SIZE 8192
2333 :
2334 : #define IWM_NVM_WRITE_OPCODE 1
2335 : #define IWM_NVM_READ_OPCODE 0
2336 :
2337 : int
2338 0 : iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2339 : uint16_t length, uint8_t *data, uint16_t *len)
2340 : {
2341 : offset = 0;
2342 0 : struct iwm_nvm_access_cmd nvm_access_cmd = {
2343 : .offset = htole16(offset),
2344 : .length = htole16(length),
2345 : .type = htole16(section),
2346 : .op_code = IWM_NVM_READ_OPCODE,
2347 : };
2348 : struct iwm_nvm_access_resp *nvm_resp;
2349 : struct iwm_rx_packet *pkt;
2350 0 : struct iwm_host_cmd cmd = {
2351 : .id = IWM_NVM_ACCESS_CMD,
2352 : .flags = (IWM_CMD_WANT_RESP | IWM_CMD_SEND_IN_RFKILL),
2353 : .resp_pkt_len = IWM_CMD_RESP_MAX,
2354 0 : .data = { &nvm_access_cmd, },
2355 : };
2356 : int err, offset_read;
2357 : size_t bytes_read;
2358 : uint8_t *resp_data;
2359 :
2360 0 : cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2361 :
2362 0 : err = iwm_send_cmd(sc, &cmd);
2363 0 : if (err)
2364 0 : return err;
2365 :
2366 0 : pkt = cmd.resp_pkt;
2367 0 : if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2368 : err = EIO;
2369 0 : goto exit;
2370 : }
2371 :
2372 : /* Extract NVM response */
2373 0 : nvm_resp = (void *)pkt->data;
2374 0 : if (nvm_resp == NULL)
2375 0 : return EIO;
2376 :
2377 0 : err = le16toh(nvm_resp->status);
2378 0 : bytes_read = le16toh(nvm_resp->length);
2379 0 : offset_read = le16toh(nvm_resp->offset);
2380 0 : resp_data = nvm_resp->data;
2381 0 : if (err) {
2382 : err = EINVAL;
2383 0 : goto exit;
2384 : }
2385 :
2386 0 : if (offset_read != offset) {
2387 : err = EINVAL;
2388 0 : goto exit;
2389 : }
2390 :
2391 0 : if (bytes_read > length) {
2392 : err = EINVAL;
2393 0 : goto exit;
2394 : }
2395 :
2396 0 : memcpy(data + offset, resp_data, bytes_read);
2397 0 : *len = bytes_read;
2398 :
2399 : exit:
2400 0 : iwm_free_resp(sc, &cmd);
2401 0 : return err;
2402 0 : }
2403 :
2404 : /*
2405 : * Reads an NVM section completely.
2406 : * NICs prior to 7000 family doesn't have a real NVM, but just read
2407 : * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
2408 : * by uCode, we need to manually check in this case that we don't
2409 : * overflow and try to read more than the EEPROM size.
2410 : */
2411 : int
2412 0 : iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2413 : uint16_t *len, size_t max_len)
2414 : {
2415 0 : uint16_t chunklen, seglen;
2416 : int err = 0;
2417 :
2418 0 : chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2419 0 : *len = 0;
2420 :
2421 : /* Read NVM chunks until exhausted (reading less than requested) */
2422 0 : while (seglen == chunklen && *len < max_len) {
2423 0 : err = iwm_nvm_read_chunk(sc,
2424 : section, *len, chunklen, data, &seglen);
2425 0 : if (err)
2426 0 : return err;
2427 :
2428 0 : *len += seglen;
2429 : }
2430 :
2431 0 : return err;
2432 0 : }
2433 :
2434 : uint8_t
2435 0 : iwm_fw_valid_tx_ant(struct iwm_softc *sc)
2436 : {
2437 : uint8_t tx_ant;
2438 :
2439 0 : tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2440 0 : >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
2441 :
2442 0 : if (sc->sc_nvm.valid_tx_ant)
2443 0 : tx_ant &= sc->sc_nvm.valid_tx_ant;
2444 :
2445 0 : return tx_ant;
2446 : }
2447 :
2448 : uint8_t
2449 0 : iwm_fw_valid_rx_ant(struct iwm_softc *sc)
2450 : {
2451 : uint8_t rx_ant;
2452 :
2453 0 : rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
2454 0 : >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
2455 :
2456 0 : if (sc->sc_nvm.valid_rx_ant)
2457 0 : rx_ant &= sc->sc_nvm.valid_rx_ant;
2458 :
2459 0 : return rx_ant;
2460 : }
2461 :
2462 : void
2463 0 : iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
2464 : const uint8_t *nvm_channels, size_t nchan)
2465 : {
2466 0 : struct ieee80211com *ic = &sc->sc_ic;
2467 0 : struct iwm_nvm_data *data = &sc->sc_nvm;
2468 : int ch_idx;
2469 : struct ieee80211_channel *channel;
2470 : uint16_t ch_flags;
2471 : int is_5ghz;
2472 : int flags, hw_value;
2473 :
2474 0 : for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
2475 0 : ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2476 :
2477 0 : if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
2478 0 : !data->sku_cap_band_52GHz_enable)
2479 0 : ch_flags &= ~IWM_NVM_CHANNEL_VALID;
2480 :
2481 0 : if (!(ch_flags & IWM_NVM_CHANNEL_VALID))
2482 : continue;
2483 :
2484 0 : hw_value = nvm_channels[ch_idx];
2485 0 : channel = &ic->ic_channels[hw_value];
2486 :
2487 0 : is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
2488 0 : if (!is_5ghz) {
2489 : flags = IEEE80211_CHAN_2GHZ;
2490 0 : channel->ic_flags
2491 0 : = IEEE80211_CHAN_CCK
2492 : | IEEE80211_CHAN_OFDM
2493 : | IEEE80211_CHAN_DYN
2494 : | IEEE80211_CHAN_2GHZ;
2495 0 : } else {
2496 : flags = IEEE80211_CHAN_5GHZ;
2497 0 : channel->ic_flags =
2498 : IEEE80211_CHAN_A;
2499 : }
2500 0 : channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2501 :
2502 0 : if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
2503 0 : channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2504 :
2505 0 : if (data->sku_cap_11n_enable)
2506 0 : channel->ic_flags |= IEEE80211_CHAN_HT;
2507 : }
2508 0 : }
2509 :
2510 : void
2511 0 : iwm_setup_ht_rates(struct iwm_softc *sc)
2512 : {
2513 0 : struct ieee80211com *ic = &sc->sc_ic;
2514 : uint8_t rx_ant;
2515 :
2516 : /* TX is supported with the same MCS as RX. */
2517 0 : ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
2518 :
2519 0 : ic->ic_sup_mcs[0] = 0xff; /* MCS 0-7 */
2520 :
2521 0 : if (sc->sc_nvm.sku_cap_mimo_disable)
2522 0 : return;
2523 :
2524 0 : rx_ant = iwm_fw_valid_rx_ant(sc);
2525 0 : if ((rx_ant & IWM_ANT_AB) == IWM_ANT_AB ||
2526 0 : (rx_ant & IWM_ANT_BC) == IWM_ANT_BC)
2527 0 : ic->ic_sup_mcs[1] = 0xff; /* MCS 8-15 */
2528 0 : }
2529 :
2530 : #define IWM_MAX_RX_BA_SESSIONS 16
2531 :
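 : /*
 : * Update the firmware's station table to start or stop an RX block
 : * ack session for the given TID; on start, accept or refuse the
 : * ADDBA request based on the result.
 : */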
2532 : void
2533 0 : iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
2534 : uint16_t ssn, int start)
2535 : {
2536 0 : struct ieee80211com *ic = &sc->sc_ic;
2537 0 : struct iwm_add_sta_cmd_v7 cmd;
2538 0 : struct iwm_node *in = (void *)ni;
2539 : int err, s;
2540 0 : uint32_t status;
2541 :
2542 0 : if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
2543 0 : ieee80211_addba_req_refuse(ic, ni, tid);
2544 0 : return;
2545 : }
2546 :
2547 0 : memset(&cmd, 0, sizeof(cmd));
2548 :
2549 0 : cmd.sta_id = IWM_STATION_ID;
2550 0 : cmd.mac_id_n_color
2551 0 : = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2552 0 : cmd.add_modify = IWM_STA_MODE_MODIFY;
2553 :
2554 0 : if (start) {
2555 0 : cmd.add_immediate_ba_tid = (uint8_t)tid;
2556 0 : cmd.add_immediate_ba_ssn = ssn;
2557 0 : } else {
2558 0 : cmd.remove_immediate_ba_tid = (uint8_t)tid;
2559 : }
2560 0 : cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
2561 : IWM_STA_MODIFY_REMOVE_BA_TID;
2562 :
2563 0 : status = IWM_ADD_STA_SUCCESS;
2564 0 : err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
2565 : &status);
2566 :
2567 0 : s = splnet();
2568 0 : if (err == 0 && status == IWM_ADD_STA_SUCCESS) {
2569 0 : if (start) {
2570 0 : sc->sc_rx_ba_sessions++;
2571 0 : ieee80211_addba_req_accept(ic, ni, tid);
2572 0 : } else if (sc->sc_rx_ba_sessions > 0)
2573 0 : sc->sc_rx_ba_sessions--;
2574 0 : } else if (start)
2575 0 : ieee80211_addba_req_refuse(ic, ni, tid);
2576 :
2577 0 : splx(s);
2578 0 : }
2579 :
2580 : void
2581 0 : iwm_htprot_task(void *arg)
2582 : {
2583 0 : struct iwm_softc *sc = arg;
2584 0 : struct ieee80211com *ic = &sc->sc_ic;
2585 0 : struct iwm_node *in = (void *)ic->ic_bss;
2586 0 : int err, s = splnet();
2587 :
2588 0 : if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
2589 0 : refcnt_rele_wake(&sc->task_refs);
2590 0 : splx(s);
2591 0 : return;
2592 : }
2593 :
2594 : /* This call updates HT protection based on in->in_ni.ni_htop1. */
2595 0 : err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
2596 0 : if (err)
2597 0 : printf("%s: could not change HT protection: error %d\n",
2598 0 : DEVNAME(sc), err);
2599 :
2600 0 : refcnt_rele_wake(&sc->task_refs);
2601 0 : splx(s);
2602 0 : }
2603 :
2604 : /*
2605 : * This function is called by upper layer when HT protection settings in
2606 : * beacons have changed.
2607 : */
2608 : void
2609 0 : iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
2610 : {
2611 0 : struct iwm_softc *sc = ic->ic_softc;
2612 :
2613 : /* assumes that ni == ic->ic_bss */
2614 0 : iwm_add_task(sc, systq, &sc->htprot_task);
2615 0 : }
2616 :
2617 : void
2618 0 : iwm_ba_task(void *arg)
2619 : {
2620 0 : struct iwm_softc *sc = arg;
2621 0 : struct ieee80211com *ic = &sc->sc_ic;
2622 0 : struct ieee80211_node *ni = ic->ic_bss;
2623 0 : int s = splnet();
2624 :
2625 0 : if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
2626 0 : refcnt_rele_wake(&sc->task_refs);
2627 0 : splx(s);
2628 0 : return;
2629 : }
2630 :
2631 0 : if (sc->ba_start)
2632 0 : iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn, 1);
2633 : else
2634 0 : iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0);
2635 :
2636 0 : refcnt_rele_wake(&sc->task_refs);
2637 0 : splx(s);
2638 0 : }
2639 :
2640 : /*
2641 : * This function is called by upper layer when an ADDBA request is received
2642 : * from another STA and before the ADDBA response is sent.
2643 : */
2644 : int
2645 0 : iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
2646 : uint8_t tid)
2647 : {
2648 0 : struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
2649 0 : struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2650 :
2651 0 : if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
2652 0 : return ENOSPC;
2653 :
2654 0 : sc->ba_start = 1;
2655 0 : sc->ba_tid = tid;
2656 0 : sc->ba_ssn = htole16(ba->ba_winstart);
2657 0 : iwm_add_task(sc, systq, &sc->ba_task);
2658 :
2659 0 : return EBUSY;
2660 0 : }
2661 :
2662 : /*
2663 : * This function is called by upper layer on teardown of an HT-immediate
2664 : * Block Ack agreement (eg. upon receipt of a DELBA frame).
2665 : */
2666 : void
2667 0 : iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
2668 : uint8_t tid)
2669 : {
2670 0 : struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2671 :
2672 0 : sc->ba_start = 0;
2673 0 : sc->ba_tid = tid;
2674 0 : iwm_add_task(sc, systq, &sc->ba_task);
2675 0 : }
2676 :
2677 : void
2678 0 : iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2679 : const uint16_t *mac_override, const uint16_t *nvm_hw)
2680 : {
2681 : const uint8_t *hw_addr;
2682 :
2683 0 : if (mac_override) {
2684 : static const uint8_t reserved_mac[] = {
2685 : 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2686 : };
2687 :
2688 0 : hw_addr = (const uint8_t *)(mac_override +
2689 : IWM_MAC_ADDRESS_OVERRIDE_8000);
2690 :
2691 : /*
2692 : * Store the MAC address from MAO section.
2693 : * No byte swapping is required in MAO section
2694 : */
2695 0 : memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
2696 :
2697 : /*
2698 : * Force the use of the OTP MAC address in case of reserved MAC
2699 : * address in the NVM, or if address is given but invalid.
2700 : */
2701 0 : if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
2702 0 : (memcmp(etherbroadcastaddr, data->hw_addr,
2703 0 : sizeof(etherbroadcastaddr)) != 0) &&
2704 0 : (memcmp(etheranyaddr, data->hw_addr,
2705 0 : sizeof(etheranyaddr)) != 0) &&
2706 0 : !ETHER_IS_MULTICAST(data->hw_addr))
2707 0 : return;
2708 : }
2709 :
2710 0 : if (nvm_hw) {
2711 : /* Read the mac address from WFMP registers. */
2712 : uint32_t mac_addr0, mac_addr1;
2713 :
2714 0 : if (!iwm_nic_lock(sc))
2715 0 : goto out;
2716 0 : mac_addr0 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2717 0 : mac_addr1 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2718 0 : iwm_nic_unlock(sc);
2719 :
2720 : hw_addr = (const uint8_t *)&mac_addr0;
2721 0 : data->hw_addr[0] = hw_addr[3];
2722 0 : data->hw_addr[1] = hw_addr[2];
2723 0 : data->hw_addr[2] = hw_addr[1];
2724 0 : data->hw_addr[3] = hw_addr[0];
2725 :
2726 : hw_addr = (const uint8_t *)&mac_addr1;
2727 0 : data->hw_addr[4] = hw_addr[1];
2728 0 : data->hw_addr[5] = hw_addr[0];
2729 :
2730 0 : return;
2731 : }
2732 : out:
2733 0 : printf("%s: mac address not found\n", DEVNAME(sc));
2734 0 : memset(data->hw_addr, 0, sizeof(data->hw_addr));
2735 0 : }
2736 :
2737 : int
2738 0 : iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
2739 : const uint16_t *nvm_sw, const uint16_t *nvm_calib,
2740 : const uint16_t *mac_override, const uint16_t *phy_sku,
2741 : const uint16_t *regulatory)
2742 : {
2743 0 : struct iwm_nvm_data *data = &sc->sc_nvm;
2744 : uint8_t hw_addr[ETHER_ADDR_LEN];
2745 : uint32_t sku;
2746 :
2747 0 : data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2748 :
2749 0 : if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2750 0 : uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2751 0 : data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2752 0 : data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2753 0 : data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2754 0 : data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2755 :
2756 0 : sku = le16_to_cpup(nvm_sw + IWM_SKU);
2757 0 : } else {
2758 : uint32_t radio_cfg =
2759 0 : le32_to_cpup((uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2760 0 : data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2761 0 : data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2762 0 : data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2763 0 : data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
2764 0 : data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2765 0 : data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2766 :
2767 0 : sku = le32_to_cpup((uint32_t *)(phy_sku + IWM_SKU_8000));
2768 : }
2769 :
2770 0 : data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2771 0 : data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2772 0 : data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
2773 0 : data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
2774 :
2775 0 : data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2776 :
2777 : /* The byte order is little endian 16 bit, meaning 214365 */
2778 0 : if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2779 0 : memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
2780 0 : data->hw_addr[0] = hw_addr[1];
2781 0 : data->hw_addr[1] = hw_addr[0];
2782 0 : data->hw_addr[2] = hw_addr[3];
2783 0 : data->hw_addr[3] = hw_addr[2];
2784 0 : data->hw_addr[4] = hw_addr[5];
2785 0 : data->hw_addr[5] = hw_addr[4];
2786 0 : } else
2787 0 : iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
2788 :
2789 0 : if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2790 0 : iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
2791 : iwm_nvm_channels, nitems(iwm_nvm_channels));
2792 : else
2793 0 : iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
2794 : iwm_nvm_channels_8000, nitems(iwm_nvm_channels_8000));
2795 :
2796 0 : data->calib_version = 255; /* TODO:
2797 : this value will prevent some checks from
2798 : failing; we need to check whether this
2799 : field is still needed, and if it is,
2800 : where it is in the NVM. */
2801 :
2802 0 : return 0;
2803 : }
2804 :
2805 : int
2806 0 : iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2807 : {
2808 : const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
2809 : const uint16_t *regulatory = NULL;
2810 :
2811 : /* Checking for required sections */
2812 0 : if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2813 0 : if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2814 0 : !sections[IWM_NVM_SECTION_TYPE_HW].data) {
2815 0 : return ENOENT;
2816 : }
2817 :
2818 0 : hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
2819 0 : } else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2820 : /* SW and REGULATORY sections are mandatory */
2821 0 : if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2822 0 : !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2823 0 : return ENOENT;
2824 : }
2825 : /* MAC_OVERRIDE or at least HW section must exist */
2826 0 : if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
2827 0 : !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2828 0 : return ENOENT;
2829 : }
2830 :
2831 : /* PHY_SKU section is mandatory in B0 */
2832 0 : if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2833 0 : return ENOENT;
2834 : }
2835 :
2836 0 : regulatory = (const uint16_t *)
2837 0 : sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2838 0 : hw = (const uint16_t *)
2839 0 : sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
2840 : mac_override =
2841 0 : (const uint16_t *)
2842 0 : sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2843 0 : phy_sku = (const uint16_t *)
2844 : sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2845 : } else {
2846 0 : panic("unknown device family %d\n", sc->sc_device_family);
2847 : }
2848 :
2849 0 : sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2850 0 : calib = (const uint16_t *)
2851 0 : sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2852 :
2853 0 : return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2854 : phy_sku, regulatory);
2855 0 : }
2856 :
2857 : int
2858 0 : iwm_nvm_init(struct iwm_softc *sc)
2859 : {
2860 0 : struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2861 : int i, section, err;
2862 0 : uint16_t len;
2863 : uint8_t *buf;
2864 : const size_t bufsz = IWM_MAX_NVM_SECTION_SIZE;
2865 :
2866 0 : memset(nvm_sections, 0, sizeof(nvm_sections));
2867 :
2868 0 : buf = malloc(bufsz, M_DEVBUF, M_WAIT);
2869 0 : if (buf == NULL)
2870 0 : return ENOMEM;
2871 :
2872 0 : for (i = 0; i < nitems(iwm_nvm_to_read); i++) {
2873 0 : section = iwm_nvm_to_read[i];
2874 0 : KASSERT(section <= nitems(nvm_sections));
2875 :
2876 0 : err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
2877 0 : if (err) {
2878 : err = 0;
2879 0 : continue;
2880 : }
2881 0 : nvm_sections[section].data = malloc(len, M_DEVBUF, M_WAIT);
2882 0 : if (nvm_sections[section].data == NULL) {
2883 : err = ENOMEM;
2884 0 : break;
2885 : }
2886 0 : memcpy(nvm_sections[section].data, buf, len);
2887 0 : nvm_sections[section].length = len;
2888 0 : }
2889 0 : free(buf, M_DEVBUF, bufsz);
2890 0 : if (err == 0)
2891 0 : err = iwm_parse_nvm_sections(sc, nvm_sections);
2892 :
2893 0 : for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
2894 0 : if (nvm_sections[i].data != NULL)
2895 0 : free(nvm_sections[i].data, M_DEVBUF,
2896 0 : nvm_sections[i].length);
2897 : }
2898 :
2899 0 : return err;
2900 0 : }
2901 :
2902 : int
2903 0 : iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2904 : const uint8_t *section, uint32_t byte_cnt)
2905 : {
2906 : int err = EINVAL;
2907 : uint32_t chunk_sz, offset;
2908 :
2909 0 : chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2910 :
2911 0 : for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2912 : uint32_t addr, len;
2913 : const uint8_t *data;
2914 :
2915 0 : addr = dst_addr + offset;
2916 0 : len = MIN(chunk_sz, byte_cnt - offset);
2917 0 : data = section + offset;
2918 :
2919 0 : err = iwm_firmware_load_chunk(sc, addr, data, len);
2920 0 : if (err)
2921 0 : break;
2922 0 : }
2923 :
2924 0 : return err;
2925 : }
2926 :
2927 : int
2928 0 : iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2929 : const uint8_t *chunk, uint32_t byte_cnt)
2930 : {
2931 0 : struct iwm_dma_info *dma = &sc->fw_dma;
2932 : int err;
2933 :
2934 : /* Copy firmware chunk into pre-allocated DMA-safe memory. */
2935 0 : memcpy(dma->vaddr, chunk, byte_cnt);
2936 0 : bus_dmamap_sync(sc->sc_dmat,
2937 : dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);
2938 :
2939 0 : if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2940 0 : dst_addr <= IWM_FW_MEM_EXTENDED_END)
2941 0 : iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2942 : IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2943 :
2944 0 : sc->sc_fw_chunk_done = 0;
2945 :
2946 0 : if (!iwm_nic_lock(sc))
2947 0 : return EBUSY;
2948 :
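 : 	/*
 : 	 * Point the service DMA channel at the chunk and kick off the
 : 	 * transfer; the interrupt handler wakes us up by setting
 : 	 * sc_fw_chunk_done once the channel completes.
 : 	 */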
2949 0 : IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2950 : IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2951 0 : IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2952 : dst_addr);
2953 0 : IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2954 : dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2955 0 : IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2956 : (iwm_get_dma_hi_addr(dma->paddr)
2957 : << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2958 0 : IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2959 : 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2960 : 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2961 : IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2962 0 : IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2963 : IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2964 : IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2965 : IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2966 :
2967 0 : iwm_nic_unlock(sc);
2968 :
2969 : /* Wait for this segment to load. */
2970 : err = 0;
2971 0 : while (!sc->sc_fw_chunk_done) {
2972 0 : err = tsleep(&sc->sc_fw, 0, "iwmfw", hz);
2973 0 : if (err)
2974 : break;
2975 : }
2976 :
2977 0 : if (!sc->sc_fw_chunk_done)
2978 0 : printf("%s: fw chunk addr 0x%x len %d failed to load\n",
2979 0 : DEVNAME(sc), dst_addr, byte_cnt);
2980 :
2981 0 : if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2982 : dst_addr <= IWM_FW_MEM_EXTENDED_END) {
2983 0 : iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2984 : IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2985 0 : }
2986 :
2987 0 : return err;
2988 0 : }
2989 :
2990 : int
2991 0 : iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2992 : {
2993 : struct iwm_fw_sects *fws;
2994 : int err, i;
2995 : void *data;
2996 : uint32_t dlen;
2997 : uint32_t offset;
2998 :
2999 0 : fws = &sc->sc_fw.fw_sects[ucode_type];
3000 0 : for (i = 0; i < fws->fw_count; i++) {
3001 0 : data = fws->fw_sect[i].fws_data;
3002 0 : dlen = fws->fw_sect[i].fws_len;
3003 0 : offset = fws->fw_sect[i].fws_devoff;
3004 0 : if (dlen > sc->sc_fwdmasegsz) {
3005 : err = EFBIG;
3006 0 : } else
3007 0 : err = iwm_firmware_load_sect(sc, offset, data, dlen);
3008 0 : if (err) {
3009 0 : printf("%s: could not load firmware chunk %u of %u\n",
3010 0 : DEVNAME(sc), i, fws->fw_count);
3011 0 : return err;
3012 : }
3013 : }
3014 :
3015 0 : IWM_WRITE(sc, IWM_CSR_RESET, 0);
3016 :
3017 0 : return 0;
3018 0 : }
3019 :
3020 : int
3021 0 : iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
3022 : int cpu, int *first_ucode_section)
3023 : {
3024 : int shift_param;
3025 : int i, err = 0, sec_num = 0x1;
3026 : uint32_t val, last_read_idx = 0;
3027 : void *data;
3028 : uint32_t dlen;
3029 : uint32_t offset;
3030 :
3031 0 : if (cpu == 1) {
3032 : shift_param = 0;
3033 0 : *first_ucode_section = 0;
3034 0 : } else {
3035 : shift_param = 16;
3036 0 : (*first_ucode_section)++;
3037 : }
3038 :
3039 0 : for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
3040 : last_read_idx = i;
3041 0 : data = fws->fw_sect[i].fws_data;
3042 0 : dlen = fws->fw_sect[i].fws_len;
3043 0 : offset = fws->fw_sect[i].fws_devoff;
3044 :
3045 : /*
3046 : * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
3047 : * CPU1 to CPU2.
3048 : * PAGING_SEPARATOR_SECTION delimiter - separate between
3049 : * CPU2 non paged to CPU2 paging sec.
3050 : */
3051 0 : if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
3052 0 : offset == IWM_PAGING_SEPARATOR_SECTION)
3053 : break;
3054 :
3055 0 : if (dlen > sc->sc_fwdmasegsz) {
3056 : err = EFBIG;
3057 0 : } else
3058 0 : err = iwm_firmware_load_sect(sc, offset, data, dlen);
3059 0 : if (err) {
3060 0 : printf("%s: could not load firmware chunk %d "
3061 0 : "(error %d)\n", DEVNAME(sc), i, err);
3062 0 : return err;
3063 : }
3064 :
3065 : /* Notify the ucode of the loaded section number and status */
3066 0 : if (iwm_nic_lock(sc)) {
3067 0 : val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
3068 0 : val = val | (sec_num << shift_param);
3069 0 : IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
3070 0 : sec_num = (sec_num << 1) | 0x1;
3071 0 : iwm_nic_unlock(sc);
3072 : } else {
3073 : err = EBUSY;
3074 0 : printf("%s: could not load firmware chunk %d "
3075 0 : "(error %d)\n", DEVNAME(sc), i, err);
3076 0 : return err;
3077 : }
3078 : }
3079 :
3080 0 : *first_ucode_section = last_read_idx;
3081 :
3082 0 : if (iwm_nic_lock(sc)) {
3083 0 : if (cpu == 1)
3084 0 : IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
3085 : else
3086 0 : IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
3087 0 : iwm_nic_unlock(sc);
3088 : } else {
3089 : err = EBUSY;
3090 0 : printf("%s: could not finalize firmware loading (error %d)\n",
3091 0 : DEVNAME(sc), err);
3092 0 : return err;
3093 : }
3094 :
3095 0 : return 0;
3096 0 : }
3097 :
3098 : int
3099 0 : iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3100 : {
3101 : struct iwm_fw_sects *fws;
3102 : int err = 0;
3103 0 : int first_ucode_section;
3104 :
3105 0 : fws = &sc->sc_fw.fw_sects[ucode_type];
3106 :
3107 : /* configure the ucode to be ready to get the secured image */
3108 : /* release CPU reset */
3109 0 : if (iwm_nic_lock(sc)) {
3110 0 : iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
3111 : IWM_RELEASE_CPU_RESET_BIT);
3112 0 : iwm_nic_unlock(sc);
3113 0 : }
3114 :
3115 : /* load to FW the binary Secured sections of CPU1 */
3116 0 : err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
3117 0 : if (err)
3118 0 : return err;
3119 :
3120 : /* load to FW the binary sections of CPU2 */
3121 0 : return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
3122 0 : }
3123 :
3124 : int
3125 0 : iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3126 : {
3127 : int err, w;
3128 :
3129 0 : sc->sc_uc.uc_intr = 0;
3130 :
3131 0 : if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
3132 0 : err = iwm_load_firmware_8000(sc, ucode_type);
3133 : else
3134 0 : err = iwm_load_firmware_7000(sc, ucode_type);
3135 :
3136 0 : if (err)
3137 0 : return err;
3138 :
3139 : /* wait for the firmware to load */
3140 0 : for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
3141 0 : err = tsleep(&sc->sc_uc, 0, "iwmuc", hz/10);
3142 : }
3143 0 : if (err || !sc->sc_uc.uc_ok)
3144 0 : printf("%s: could not load firmware\n", DEVNAME(sc));
3145 :
3146 0 : return err;
3147 0 : }
3148 :
3149 : int
3150 0 : iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3151 : {
3152 : int err;
3153 :
3154 0 : IWM_WRITE(sc, IWM_CSR_INT, ~0);
3155 :
3156 0 : err = iwm_nic_init(sc);
3157 0 : if (err) {
3158 0 : printf("%s: unable to init nic\n", DEVNAME(sc));
3159 0 : return err;
3160 : }
3161 :
3162 : /* make sure rfkill handshake bits are cleared */
3163 0 : IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3164 0 : IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
3165 : IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3166 :
3167 : /* clear (again), then enable host interrupts */
3168 0 : IWM_WRITE(sc, IWM_CSR_INT, ~0);
3169 0 : iwm_enable_interrupts(sc);
3170 :
3171 : /* really make sure rfkill handshake bits are cleared */
3172 : /* maybe we should write a few times more? just to make sure */
3173 0 : IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3174 0 : IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3175 :
3176 0 : return iwm_load_firmware(sc, ucode_type);
3177 0 : }
3178 :
3179 : int
3180 0 : iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
3181 : {
3182 0 : struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
3183 0 : .valid = htole32(valid_tx_ant),
3184 : };
3185 :
3186 0 : return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
3187 : 0, sizeof(tx_ant_cmd), &tx_ant_cmd);
3188 0 : }
3189 :
3190 : int
3191 0 : iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
3192 : {
3193 0 : struct iwm_phy_cfg_cmd phy_cfg_cmd;
3194 0 : enum iwm_ucode_type ucode_type = sc->sc_uc_current;
3195 :
3196 0 : phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3197 0 : phy_cfg_cmd.calib_control.event_trigger =
3198 0 : sc->sc_default_calib[ucode_type].event_trigger;
3199 0 : phy_cfg_cmd.calib_control.flow_trigger =
3200 0 : sc->sc_default_calib[ucode_type].flow_trigger;
3201 :
3202 0 : return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
3203 : sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3204 0 : }
3205 :
3206 : int
3207 0 : iwm_load_ucode_wait_alive(struct iwm_softc *sc,
3208 : enum iwm_ucode_type ucode_type)
3209 : {
3210 0 : enum iwm_ucode_type old_type = sc->sc_uc_current;
3211 : int err;
3212 :
3213 0 : err = iwm_read_firmware(sc, ucode_type);
3214 0 : if (err)
3215 0 : return err;
3216 :
3217 0 : sc->sc_uc_current = ucode_type;
3218 0 : err = iwm_start_fw(sc, ucode_type);
3219 0 : if (err) {
3220 0 : sc->sc_uc_current = old_type;
3221 0 : return err;
3222 : }
3223 :
3224 0 : return iwm_post_alive(sc);
3225 0 : }
3226 :
3227 : int
3228 0 : iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
3229 : {
3230 : const int wait_flags = (IWM_INIT_COMPLETE | IWM_CALIB_COMPLETE);
3231 : int err;
3232 :
3233 0 : if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
3234 0 : printf("%s: radio is disabled by hardware switch\n",
3235 0 : DEVNAME(sc));
3236 0 : return EPERM;
3237 : }
3238 :
3239 0 : sc->sc_init_complete = 0;
3240 0 : err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
3241 0 : if (err) {
3242 0 : printf("%s: failed to load init firmware\n", DEVNAME(sc));
3243 0 : return err;
3244 : }
3245 :
3246 0 : if (justnvm) {
3247 0 : err = iwm_nvm_init(sc);
3248 0 : if (err) {
3249 0 : printf("%s: failed to read nvm\n", DEVNAME(sc));
3250 0 : return err;
3251 : }
3252 :
3253 0 : if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
3254 0 : IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
3255 : sc->sc_nvm.hw_addr);
3256 :
3257 0 : return 0;
3258 : }
3259 :
3260 0 : err = iwm_send_bt_init_conf(sc);
3261 0 : if (err)
3262 0 : return err;
3263 :
3264 0 : err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
3265 0 : if (err)
3266 0 : return err;
3267 :
3268 : /* Send TX valid antennas before triggering calibrations */
3269 0 : err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
3270 0 : if (err)
3271 0 : return err;
3272 :
3273 : /*
3274 : * Send phy configurations command to init uCode
3275 : * to start the 16.0 uCode init image internal calibrations.
3276 : */
3277 0 : err = iwm_send_phy_cfg_cmd(sc);
3278 0 : if (err)
3279 0 : return err;
3280 :
3281 : /*
3282 : * Nothing to do but wait for the init complete and phy DB
3283 : * notifications from the firmware.
3284 : */
3285 0 : while ((sc->sc_init_complete & wait_flags) != wait_flags) {
3286 0 : err = tsleep(&sc->sc_init_complete, 0, "iwminit", 2*hz);
3287 0 : if (err)
3288 : break;
3289 : }
3290 :
3291 0 : return err;
3292 0 : }
3293 :
3294 : int
3295 0 : iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3296 : {
3297 0 : struct iwm_rx_ring *ring = &sc->rxq;
3298 0 : struct iwm_rx_data *data = &ring->data[idx];
3299 : struct mbuf *m;
3300 : int err;
3301 : int fatal = 0;
3302 :
3303 0 : m = m_gethdr(M_DONTWAIT, MT_DATA);
3304 0 : if (m == NULL)
3305 0 : return ENOBUFS;
3306 :
3307 0 : if (size <= MCLBYTES) {
3308 0 : MCLGET(m, M_DONTWAIT);
3309 0 : } else {
3310 0 : MCLGETI(m, M_DONTWAIT, NULL, IWM_RBUF_SIZE);
3311 : }
3312 0 : if ((m->m_flags & M_EXT) == 0) {
3313 0 : m_freem(m);
3314 0 : return ENOBUFS;
3315 : }
3316 :
3317 0 : if (data->m != NULL) {
3318 0 : bus_dmamap_unload(sc->sc_dmat, data->map);
3319 : fatal = 1;
3320 0 : }
3321 :
3322 0 : m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3323 0 : err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3324 : BUS_DMA_READ|BUS_DMA_NOWAIT);
3325 0 : if (err) {
3326 : /* XXX */
3327 0 : if (fatal)
3328 0 : panic("iwm: could not load RX mbuf");
3329 0 : m_freem(m);
3330 0 : return err;
3331 : }
3332 0 : data->m = m;
3333 0 : bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
3334 :
3335 : /* Update RX descriptor. */
3336 0 : ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
3337 0 : bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3338 : idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
3339 :
3340 0 : return 0;
3341 0 : }
3342 :
3343 : #define IWM_RSSI_OFFSET 50
3344 : int
3345 0 : iwm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3346 : {
3347 : int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3348 : uint32_t agc_a, agc_b;
3349 : uint32_t val;
3350 :
3351 0 : val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3352 0 : agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3353 0 : agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3354 :
3355 0 : val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3356 0 : rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3357 0 : rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3358 :
3359 : /*
3360 : * dBm = rssi dB - agc dB - constant.
3361 : * Higher AGC (higher radio gain) means lower signal.
3362 : */
3363 0 : rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3364 0 : rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3365 0 : max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3366 :
3367 0 : return max_rssi_dbm;
3368 : }
3369 :
3370 : /*
3371 : * RSSI values are reported by the FW as positive values - need to negate
3372 : * to obtain their dBm. Account for missing antennas by replacing 0
3373 : * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3374 : */
3375 : int
3376 0 : iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3377 : {
3378 : int energy_a, energy_b, energy_c, max_energy;
3379 : uint32_t val;
3380 :
3381 0 : val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3382 0 : energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3383 : IWM_RX_INFO_ENERGY_ANT_A_POS;
3384 0 : energy_a = energy_a ? -energy_a : -256;
3385 0 : energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3386 : IWM_RX_INFO_ENERGY_ANT_B_POS;
3387 0 : energy_b = energy_b ? -energy_b : -256;
3388 0 : energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3389 : IWM_RX_INFO_ENERGY_ANT_C_POS;
3390 0 : energy_c = energy_c ? -energy_c : -256;
3391 0 : max_energy = MAX(energy_a, energy_b);
3392 0 : max_energy = MAX(max_energy, energy_c);
3393 :
3394 0 : return max_energy;
3395 : }
3396 :
3397 : void
3398 0 : iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3399 : struct iwm_rx_data *data)
3400 : {
3401 0 : struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3402 :
3403 0 : bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3404 : sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3405 :
3406 0 : memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3407 0 : }
3408 :
3409 : /*
3410 : * Retrieve the average noise (in dBm) among receivers.
3411 : */
3412 : int
3413 0 : iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
3414 : {
3415 : int i, total, nbant, noise;
3416 :
3417 : total = nbant = noise = 0;
3418 0 : for (i = 0; i < 3; i++) {
3419 0 : noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
3420 0 : if (noise) {
3421 0 : total += noise;
3422 0 : nbant++;
3423 0 : }
3424 : }
3425 :
3426 : /* There should be at least one antenna but check anyway. */
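     :	/*
     :	 * For illustration, made-up silence values of 35, 40 and 0 average
     :	 * to 37 over the two reporting antennas, for a noise floor of
     :	 * 37 - 107 = -70 dBm.
     :	 */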
3427 0 : return (nbant == 0) ? -127 : (total / nbant) - 107;
3428 : }
3429 :
3430 : int
3431 0 : iwm_rx_frame(struct iwm_softc *sc, struct mbuf *m)
3432 : {
3433 0 : struct ieee80211com *ic = &sc->sc_ic;
3434 : struct ieee80211_frame *wh;
3435 : struct ieee80211_node *ni;
3436 0 : struct ieee80211_rxinfo rxi;
3437 : struct ieee80211_channel *bss_chan;
3438 : struct iwm_rx_phy_info *phy_info;
3439 : int device_timestamp;
3440 : int rssi, chanidx;
3441 0 : uint8_t saved_bssid[IEEE80211_ADDR_LEN] = { 0 };
3442 :
3443 0 : phy_info = &sc->sc_last_phy_info;
3444 0 : if (__predict_false(phy_info->cfg_phy_cnt > 20))
3445 0 : return EINVAL;
3446 :
3447 0 : device_timestamp = le32toh(phy_info->system_timestamp);
3448 :
3449 0 : if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3450 0 : rssi = iwm_get_signal_strength(sc, phy_info);
3451 0 : } else {
3452 0 : rssi = iwm_calc_rssi(sc, phy_info);
3453 : }
3454 0 : rssi = (0 - IWM_MIN_DBM) + rssi; /* normalize */
3455 0 : rssi = MIN(rssi, ic->ic_max_rssi); /* clip to max. 100% */
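     :	/*
     :	 * Assuming IWM_MIN_DBM is -100, a frame received at -55 dBm maps
     :	 * to 45 on the 0..ic_max_rssi scale that net80211 expects.
     :	 */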
3456 :
3457 0 : chanidx = letoh32(phy_info->channel);
3458 0 : if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
3459 0 : chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
3460 :
3461 0 : wh = mtod(m, struct ieee80211_frame *);
3462 0 : ni = ieee80211_find_rxnode(ic, wh);
3463 0 : if (ni == ic->ic_bss) {
3464 : /*
3465 : * We may switch ic_bss's channel during scans.
3466 : * Record the current channel so we can restore it later.
3467 : */
3468 0 : bss_chan = ni->ni_chan;
3469 0 : IEEE80211_ADDR_COPY(&saved_bssid, ni->ni_macaddr);
3470 0 : }
3471 0 : ni->ni_chan = &ic->ic_channels[chanidx];
3472 :
3473 0 : memset(&rxi, 0, sizeof(rxi));
3474 0 : rxi.rxi_rssi = rssi;
3475 0 : rxi.rxi_tstamp = device_timestamp;
3476 :
3477 : #if NBPFILTER > 0
3478 0 : if (sc->sc_drvbpf != NULL) {
3479 0 : struct mbuf mb;
3480 0 : struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3481 : uint16_t chan_flags;
3482 :
3483 0 : tap->wr_flags = 0;
3484 0 : if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3485 0 : tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3486 0 : tap->wr_chan_freq =
3487 0 : htole16(ic->ic_channels[chanidx].ic_freq);
3488 0 : chan_flags = ic->ic_channels[chanidx].ic_flags;
3489 0 : if (ic->ic_curmode != IEEE80211_MODE_11N)
3490 0 : chan_flags &= ~IEEE80211_CHAN_HT;
3491 0 : tap->wr_chan_flags = htole16(chan_flags);
3492 0 : tap->wr_dbm_antsignal = (int8_t)rssi;
3493 0 : tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3494 0 : tap->wr_tsft = phy_info->system_timestamp;
3495 0 : if (phy_info->phy_flags &
3496 : htole16(IWM_RX_RES_PHY_FLAGS_OFDM_HT)) {
3497 0 : uint8_t mcs = (phy_info->rate_n_flags &
3498 : htole32(IWM_RATE_HT_MCS_RATE_CODE_MSK |
3499 : IWM_RATE_HT_MCS_NSS_MSK));
3500 0 : tap->wr_rate = (0x80 | mcs);
3501 0 : } else {
3502 0 : uint8_t rate = (phy_info->rate_n_flags &
3503 : htole32(IWM_RATE_LEGACY_RATE_MSK));
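     :			/*
     :			 * Map the firmware's legacy PLCP rate codes to
     :			 * radiotap's 500 kb/s units; e.g. CCK code 10 means
     :			 * 1 Mb/s and is reported as wr_rate = 2.
     :			 */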
3504 0 : switch (rate) {
3505 : /* CCK rates. */
3506 0 : case 10: tap->wr_rate = 2; break;
3507 0 : case 20: tap->wr_rate = 4; break;
3508 0 : case 55: tap->wr_rate = 11; break;
3509 0 : case 110: tap->wr_rate = 22; break;
3510 : /* OFDM rates. */
3511 0 : case 0xd: tap->wr_rate = 12; break;
3512 0 : case 0xf: tap->wr_rate = 18; break;
3513 0 : case 0x5: tap->wr_rate = 24; break;
3514 0 : case 0x7: tap->wr_rate = 36; break;
3515 0 : case 0x9: tap->wr_rate = 48; break;
3516 0 : case 0xb: tap->wr_rate = 72; break;
3517 0 : case 0x1: tap->wr_rate = 96; break;
3518 0 : case 0x3: tap->wr_rate = 108; break;
3519 : /* Unknown rate: should not happen. */
3520 0 : default: tap->wr_rate = 0;
3521 0 : }
3522 : }
3523 :
3524 0 : mb.m_data = (caddr_t)tap;
3525 0 : mb.m_len = sc->sc_rxtap_len;
3526 0 : mb.m_next = m;
3527 0 : mb.m_nextpkt = NULL;
3528 0 : mb.m_type = 0;
3529 0 : mb.m_flags = 0;
3530 0 : bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_IN);
3531 0 : }
3532 : #endif
3533 0 : ieee80211_input(IC2IFP(ic), m, ni, &rxi);
3534 : /*
3535 : * ieee80211_input() might have changed our BSS.
3536 : * Restore ic_bss's channel if we are still in the same BSS.
3537 : */
3538 0 : if (ni == ic->ic_bss && IEEE80211_ADDR_EQ(saved_bssid, ni->ni_macaddr))
3539 0 : ni->ni_chan = bss_chan;
3540 0 : ieee80211_release_node(ic, ni);
3541 :
3542 0 : return 0;
3543 0 : }
3544 :
3545 : void
3546 0 : iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3547 : struct iwm_node *in)
3548 : {
3549 0 : struct ieee80211com *ic = &sc->sc_ic;
3550 0 : struct ieee80211_node *ni = &in->in_ni;
3551 0 : struct ifnet *ifp = IC2IFP(ic);
3552 0 : struct iwm_tx_resp *tx_resp = (void *)pkt->data;
3553 0 : int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3554 : int txfail;
3555 :
3556 0 : KASSERT(tx_resp->frame_count == 1);
3557 :
3558 0 : txfail = (status != IWM_TX_STATUS_SUCCESS &&
3559 0 : status != IWM_TX_STATUS_DIRECT_DONE);
3560 :
3561 : /* Update rate control statistics. */
3562 0 : if ((ni->ni_flags & IEEE80211_NODE_HT) == 0) {
3563 0 : in->in_amn.amn_txcnt++;
3564 0 : if (tx_resp->failure_frame > 0)
3565 0 : in->in_amn.amn_retrycnt++;
3566 0 : } else if (ic->ic_fixed_mcs == -1) {
3567 0 : int omcs = ni->ni_txmcs;
3568 0 : in->in_mn.frames += tx_resp->frame_count;
3569 0 : in->in_mn.ampdu_size = le16toh(tx_resp->byte_cnt);
3570 0 : in->in_mn.agglen = tx_resp->frame_count;
3571 0 : if (tx_resp->failure_frame > 0)
3572 0 : in->in_mn.retries += tx_resp->failure_frame;
3573 0 : if (txfail)
3574 0 : in->in_mn.txfail += tx_resp->frame_count;
3575 0 : if (ic->ic_state == IEEE80211_S_RUN)
3576 0 : ieee80211_mira_choose(&in->in_mn, ic, &in->in_ni);
3577 : /*
3578 : * If MiRA has chosen a new TX rate we must update
3579 :			 * the firmware's LQ rate table from process context.
3580 : */
3581 0 : if (omcs != ni->ni_txmcs)
3582 0 : iwm_add_task(sc, systq, &sc->setrates_task);
3583 0 : }
3584 :
3585 0 : if (txfail)
3586 0 : ifp->if_oerrors++;
3587 0 : }
3588 :
3589 : void
3590 0 : iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3591 : struct iwm_rx_data *data)
3592 : {
3593 0 : struct ieee80211com *ic = &sc->sc_ic;
3594 0 : struct ifnet *ifp = IC2IFP(ic);
3595 0 : struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3596 0 : int idx = cmd_hdr->idx;
3597 0 : int qid = cmd_hdr->qid;
3598 0 : struct iwm_tx_ring *ring = &sc->txq[qid];
3599 0 : struct iwm_tx_data *txd = &ring->data[idx];
3600 0 : struct iwm_node *in = txd->in;
3601 :
3602 0 : if (txd->done)
3603 0 : return;
3604 :
3605 0 : bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
3606 : BUS_DMASYNC_POSTREAD);
3607 :
3608 0 : sc->sc_tx_timer = 0;
3609 :
3610 0 : iwm_rx_tx_cmd_single(sc, pkt, in);
3611 :
3612 0 : bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
3613 : BUS_DMASYNC_POSTWRITE);
3614 0 : bus_dmamap_unload(sc->sc_dmat, txd->map);
3615 0 : m_freem(txd->m);
3616 :
3617 0 : KASSERT(txd->done == 0);
3618 0 : txd->done = 1;
3619 0 : KASSERT(txd->in);
3620 :
3621 0 : txd->m = NULL;
3622 0 : txd->in = NULL;
3623 0 : ieee80211_release_node(ic, &in->in_ni);
3624 :
3625 0 : if (--ring->queued < IWM_TX_RING_LOMARK) {
3626 0 : sc->qfullmsk &= ~(1 << ring->qid);
3627 0 : if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
3628 0 : ifq_clr_oactive(&ifp->if_snd);
3629 : /*
3630 :				 * We are in interrupt context, but net80211 itself
3631 :				 * does plenty of work in interrupt context, so
3632 :				 * restarting output here should be acceptable.
3633 : */
3634 0 : (*ifp->if_start)(ifp);
3635 0 : }
3636 : }
3637 0 : }
3638 :
3639 : void
3640 0 : iwm_rx_bmiss(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3641 : struct iwm_rx_data *data)
3642 : {
3643 0 : struct ieee80211com *ic = &sc->sc_ic;
3644 0 : struct iwm_missed_beacons_notif *mbn = (void *)pkt->data;
3645 : uint32_t missed;
3646 :
3647 0 : if ((ic->ic_opmode != IEEE80211_M_STA) ||
3648 0 : (ic->ic_state != IEEE80211_S_RUN))
3649 0 : return;
3650 :
3651 0 : bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3652 : sizeof(*mbn), BUS_DMASYNC_POSTREAD);
3653 :
3654 0 : missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
3655 0 : if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
3656 : /*
3657 : * Rather than go directly to scan state, try to send a
3658 : * directed probe request first. If that fails then the
3659 : * state machine will drop us into scanning after timing
3660 : * out waiting for a probe response.
3661 : */
3662 0 : IEEE80211_SEND_MGMT(ic, ic->ic_bss,
3663 : IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
3664 0 : }
3665 :
3666 0 : }
3667 :
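     : /*
     :  * Bind (or unbind) our MAC context to the PHY context which carries
     :  * the channel configuration, so the firmware knows which channel
     :  * this interface operates on.
     :  */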
3668 : int
3669 0 : iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
3670 : {
3671 0 : struct iwm_binding_cmd cmd;
3672 0 : struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
3673 0 : uint32_t mac_id = IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
3674 0 : int i, err, active = (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE);
3675 0 : uint32_t status;
3676 :
3677 0 : if (action == IWM_FW_CTXT_ACTION_ADD && active)
3678 0 : panic("binding already added");
3679 0 : if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
3680 0 : panic("binding already removed");
3681 :
3682 0 : if (phyctxt == NULL) /* XXX race with iwm_stop() */
3683 0 : return EINVAL;
3684 :
3685 0 : memset(&cmd, 0, sizeof(cmd));
3686 :
3687 0 : cmd.id_and_color
3688 0 : = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3689 0 : cmd.action = htole32(action);
3690 0 : cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3691 :
3692 0 : cmd.macs[0] = htole32(mac_id);
3693 0 : for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
3694 0 : cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
3695 :
3696 0 : status = 0;
3697 0 : err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
3698 : sizeof(cmd), &cmd, &status);
3699 0 : if (err == 0 && status != 0)
3700 : err = EIO;
3701 :
3702 0 : return err;
3703 0 : }
3704 :
3705 : void
3706 0 : iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
3707 : struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
3708 : {
3709 0 : memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
3710 :
3711 0 : cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
3712 : ctxt->color));
3713 0 : cmd->action = htole32(action);
3714 0 : cmd->apply_time = htole32(apply_time);
3715 0 : }
3716 :
3717 : void
3718 0 : iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
3719 : struct ieee80211_channel *chan, uint8_t chains_static,
3720 : uint8_t chains_dynamic)
3721 : {
3722 0 : struct ieee80211com *ic = &sc->sc_ic;
3723 : uint8_t active_cnt, idle_cnt;
3724 :
3725 0 : cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
3726 : IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
3727 0 : cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
3728 0 : cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
3729 0 : cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
3730 :
3731 :	/* Set the RX chains. */
3732 : idle_cnt = chains_static;
3733 : active_cnt = chains_dynamic;
3734 :
3735 0 : cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
3736 : IWM_PHY_RX_CHAIN_VALID_POS);
3737 0 : cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
3738 0 : cmd->rxchain_info |= htole32(active_cnt <<
3739 : IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
3740 :
3741 0 : cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
3742 0 : }
3743 :
3744 : int
3745 0 : iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
3746 : uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
3747 : uint32_t apply_time)
3748 : {
3749 0 : struct iwm_phy_context_cmd cmd;
3750 :
3751 0 : iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
3752 :
3753 0 : iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
3754 : chains_static, chains_dynamic);
3755 :
3756 0 : return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
3757 : sizeof(struct iwm_phy_context_cmd), &cmd);
3758 0 : }
3759 :
3760 : int
3761 0 : iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
3762 : {
3763 0 : struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
3764 : struct iwm_tfd *desc;
3765 : struct iwm_tx_data *txdata;
3766 : struct iwm_device_cmd *cmd;
3767 : struct mbuf *m;
3768 : bus_addr_t paddr;
3769 : uint32_t addr_lo;
3770 : int err = 0, i, paylen, off, s;
3771 : int idx, code, async, group_id;
3772 : size_t hdrlen, datasz;
3773 : uint8_t *data;
3774 0 : int generation = sc->sc_generation;
3775 :
3776 0 : code = hcmd->id;
3777 0 : async = hcmd->flags & IWM_CMD_ASYNC;
3778 0 : idx = ring->cur;
3779 :
3780 0 : for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
3781 0 : paylen += hcmd->len[i];
3782 : }
3783 :
3784 : /* If this command waits for a response, allocate response buffer. */
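     :	/*
     :	 * The RX interrupt path copies the firmware's reply into this
     :	 * buffer; callers release it with iwm_free_resp(), as
     :	 * iwm_send_cmd_status() below does.
     :	 */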
3785 0 : hcmd->resp_pkt = NULL;
3786 0 : if (hcmd->flags & IWM_CMD_WANT_RESP) {
3787 : uint8_t *resp_buf;
3788 0 : KASSERT(!async);
3789 0 : KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwm_rx_packet));
3790 0 : KASSERT(hcmd->resp_pkt_len <= IWM_CMD_RESP_MAX);
3791 0 : if (sc->sc_cmd_resp_pkt[idx] != NULL)
3792 0 : return ENOSPC;
3793 0 : resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
3794 : M_NOWAIT | M_ZERO);
3795 0 : if (resp_buf == NULL)
3796 0 : return ENOMEM;
3797 0 : sc->sc_cmd_resp_pkt[idx] = resp_buf;
3798 0 : sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
3799 0 : } else {
3800 0 : sc->sc_cmd_resp_pkt[idx] = NULL;
3801 : }
3802 :
3803 0 : s = splnet();
3804 :
3805 0 : desc = &ring->desc[idx];
3806 0 : txdata = &ring->data[idx];
3807 :
3808 0 : group_id = iwm_cmd_groupid(code);
3809 0 : if (group_id != 0) {
3810 : hdrlen = sizeof(cmd->hdr_wide);
3811 : datasz = sizeof(cmd->data_wide);
3812 0 : } else {
3813 : hdrlen = sizeof(cmd->hdr);
3814 : datasz = sizeof(cmd->data);
3815 : }
3816 :
3817 0 : if (paylen > datasz) {
3818 : /* Command is too large to fit in pre-allocated space. */
3819 0 : size_t totlen = hdrlen + paylen;
3820 0 : if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
3821 0 : printf("%s: firmware command too long (%zd bytes)\n",
3822 0 : DEVNAME(sc), totlen);
3823 : err = EINVAL;
3824 0 : goto out;
3825 : }
3826 0 : m = MCLGETI(NULL, M_DONTWAIT, NULL, totlen);
3827 0 : if (m == NULL) {
3828 0 : printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
3829 0 : DEVNAME(sc), totlen);
3830 : err = ENOMEM;
3831 0 : goto out;
3832 : }
3833 0 : cmd = mtod(m, struct iwm_device_cmd *);
3834 0 : err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
3835 : totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
3836 0 : if (err) {
3837 0 : printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
3838 0 : DEVNAME(sc), totlen);
3839 0 : m_freem(m);
3840 0 : goto out;
3841 : }
3842 0 : txdata->m = m; /* mbuf will be freed in iwm_cmd_done() */
3843 0 : paddr = txdata->map->dm_segs[0].ds_addr;
3844 0 : } else {
3845 0 : cmd = &ring->cmd[idx];
3846 0 : paddr = txdata->cmd_paddr;
3847 : }
3848 :
3849 0 : if (group_id != 0) {
3850 0 : cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
3851 0 : cmd->hdr_wide.group_id = group_id;
3852 0 : cmd->hdr_wide.qid = ring->qid;
3853 0 : cmd->hdr_wide.idx = idx;
3854 0 : cmd->hdr_wide.length = htole16(paylen);
3855 0 : cmd->hdr_wide.version = iwm_cmd_version(code);
3856 0 : data = cmd->data_wide;
3857 0 : } else {
3858 0 : cmd->hdr.code = code;
3859 0 : cmd->hdr.flags = 0;
3860 0 : cmd->hdr.qid = ring->qid;
3861 0 : cmd->hdr.idx = idx;
3862 0 : data = cmd->data;
3863 : }
3864 :
3865 0 : for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
3866 0 : if (hcmd->len[i] == 0)
3867 : continue;
3868 0 : memcpy(data + off, hcmd->data[i], hcmd->len[i]);
3869 0 : off += hcmd->len[i];
3870 0 : }
3871 0 : KASSERT(off == paylen);
3872 :
3873 :	/* The 'lo' address field is not naturally aligned; copy it byte-wise. */
3874 0 : addr_lo = htole32((uint32_t)paddr);
3875 0 : memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
3876 0 : desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(paddr)
3877 : | ((hdrlen + paylen) << 4));
3878 0 : desc->num_tbs = 1;
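     :	/*
     :	 * hi_n_len packs the upper DMA address bits into its low nibble
     :	 * and the buffer length into the remaining bits, hence the
     :	 * shift by four above.
     :	 */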
3879 :
3880 0 : if (paylen > datasz) {
3881 0 : bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
3882 : hdrlen + paylen, BUS_DMASYNC_PREWRITE);
3883 0 : } else {
3884 0 : bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
3885 : (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
3886 : hdrlen + paylen, BUS_DMASYNC_PREWRITE);
3887 : }
3888 0 : bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3889 : (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
3890 : sizeof (*desc), BUS_DMASYNC_PREWRITE);
3891 :
3892 : /*
3893 : * Wake up the NIC to make sure that the firmware will see the host
3894 : * command - we will let the NIC sleep once all the host commands
3895 : * returned. This needs to be done only on 7000 family NICs.
3896 : */
3897 0 : if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3898 0 : if (ring->queued == 0 && !iwm_nic_lock(sc)) {
3899 : err = EBUSY;
3900 0 : goto out;
3901 : }
3902 : }
3903 :
3904 : #if 0
3905 : iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
3906 : #endif
3907 : /* Kick command ring. */
3908 0 : ring->queued++;
3909 0 : ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3910 0 : IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3911 :
3912 0 : if (!async) {
3913 0 : err = tsleep(desc, PCATCH, "iwmcmd", hz);
3914 0 : if (err == 0) {
3915 : /* if hardware is no longer up, return error */
3916 0 : if (generation != sc->sc_generation) {
3917 : err = ENXIO;
3918 0 : goto out;
3919 : }
3920 :
3921 : /* Response buffer will be freed in iwm_free_resp(). */
3922 0 : hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
3923 0 : sc->sc_cmd_resp_pkt[idx] = NULL;
3924 0 : } else if (generation == sc->sc_generation) {
3925 0 : free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
3926 0 : sc->sc_cmd_resp_len[idx]);
3927 0 : sc->sc_cmd_resp_pkt[idx] = NULL;
3928 0 : }
3929 : }
3930 : out:
3931 0 : splx(s);
3932 :
3933 0 : return err;
3934 0 : }
3935 :
3936 : int
3937 0 : iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
3938 : uint16_t len, const void *data)
3939 : {
3940 0 : struct iwm_host_cmd cmd = {
3941 : .id = id,
3942 0 : .len = { len, },
3943 0 : .data = { data, },
3944 : .flags = flags,
3945 : };
3946 :
3947 0 : return iwm_send_cmd(sc, &cmd);
3948 0 : }
3949 :
3950 : int
3951 0 : iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
3952 : uint32_t *status)
3953 : {
3954 : struct iwm_rx_packet *pkt;
3955 : struct iwm_cmd_response *resp;
3956 : int err, resp_len;
3957 :
3958 0 : KASSERT((cmd->flags & IWM_CMD_WANT_RESP) == 0);
3959 0 : cmd->flags |= IWM_CMD_WANT_RESP;
3960 0 : cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
3961 :
3962 0 : err = iwm_send_cmd(sc, cmd);
3963 0 : if (err)
3964 0 : return err;
3965 :
3966 0 : pkt = cmd->resp_pkt;
3967 0 : if (pkt == NULL || (pkt->hdr.flags & IWM_CMD_FAILED_MSK))
3968 0 : return EIO;
3969 :
3970 0 : resp_len = iwm_rx_packet_payload_len(pkt);
3971 0 : if (resp_len != sizeof(*resp)) {
3972 0 : iwm_free_resp(sc, cmd);
3973 0 : return EIO;
3974 : }
3975 :
3976 0 : resp = (void *)pkt->data;
3977 0 : *status = le32toh(resp->status);
3978 0 : iwm_free_resp(sc, cmd);
3979 0 : return err;
3980 0 : }
3981 :
3982 : int
3983 0 : iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
3984 : const void *data, uint32_t *status)
3985 : {
3986 0 : struct iwm_host_cmd cmd = {
3987 : .id = id,
3988 0 : .len = { len, },
3989 0 : .data = { data, },
3990 : };
3991 :
3992 0 : return iwm_send_cmd_status(sc, &cmd, status);
3993 0 : }
3994 :
3995 : void
3996 0 : iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
3997 : {
3998 0 : KASSERT((hcmd->flags & (IWM_CMD_WANT_RESP)) == IWM_CMD_WANT_RESP);
3999 0 : free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
4000 0 : hcmd->resp_pkt = NULL;
4001 0 : }
4002 :
4003 : void
4004 0 : iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
4005 : {
4006 0 : struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
4007 : struct iwm_tx_data *data;
4008 :
4009 0 : if (pkt->hdr.qid != IWM_CMD_QUEUE) {
4010 0 : return; /* Not a command ack. */
4011 : }
4012 :
4013 0 : data = &ring->data[pkt->hdr.idx];
4014 :
4015 0 : if (data->m != NULL) {
4016 0 : bus_dmamap_sync(sc->sc_dmat, data->map, 0,
4017 : data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4018 0 : bus_dmamap_unload(sc->sc_dmat, data->map);
4019 0 : m_freem(data->m);
4020 0 : data->m = NULL;
4021 0 : }
4022 0 : wakeup(&ring->desc[pkt->hdr.idx]);
4023 :
4024 0 : if (ring->queued == 0) {
4025 : DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
4026 : DEVNAME(sc), IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code)));
4027 0 : } else if (--ring->queued == 0) {
4028 : /*
4029 : * 7000 family NICs are locked while commands are in progress.
4030 : * All commands are now done so we may unlock the NIC again.
4031 : */
4032 0 : if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
4033 0 : iwm_nic_unlock(sc);
4034 : }
4035 0 : }
4036 :
4037 : #if 0
4038 : /*
4039 : * necessary only for block ack mode
4040 : */
4041 : void
4042 : iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
4043 : uint16_t len)
4044 : {
4045 : struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
4046 : uint16_t w_val;
4047 :
4048 : scd_bc_tbl = sc->sched_dma.vaddr;
4049 :
4050 : len += 8; /* magic numbers came naturally from paris */
4051 : if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
4052 : len = roundup(len, 4) / 4;
4053 :
4054 : w_val = htole16(sta_id << 12 | len);
4055 :
4056 : /* Update TX scheduler. */
4057 : scd_bc_tbl[qid].tfd_offset[idx] = w_val;
4058 : bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
4059 :	    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[idx] - (char *)(void *)sc->sched_dma.vaddr,
4060 : sizeof(uint16_t), BUS_DMASYNC_PREWRITE);
4061 :
4062 : /* I really wonder what this is ?!? */
4063 :	/*
     :	 * The first IWM_TFD_QUEUE_SIZE_BC_DUP entries of the byte-count
     :	 * table are duplicated beyond IWM_TFD_QUEUE_SIZE_MAX so the
     :	 * firmware can prefetch past the ring wrap-around.
     :	 */
4064 : scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
4065 : bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
4066 :		    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] -
4067 : (char *)(void *)sc->sched_dma.vaddr,
4068 : sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
4069 : }
4070 : }
4071 : #endif
4072 :
4073 : /*
4074 :  * Fill in various bits for management frames, and leave them
4075 : * unfilled for data frames (firmware takes care of that).
4076 : * Return the selected TX rate.
4077 : */
4078 : const struct iwm_rate *
4079 0 : iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
4080 : struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
4081 : {
4082 0 : struct ieee80211com *ic = &sc->sc_ic;
4083 0 : struct ieee80211_node *ni = &in->in_ni;
4084 : const struct iwm_rate *rinfo;
4085 0 : int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4086 : int ridx, rate_flags, i;
4087 0 : int nrates = ni->ni_rates.rs_nrates;
4088 :
4089 0 : tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
4090 0 : tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
4091 :
4092 0 : if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4093 0 : type != IEEE80211_FC0_TYPE_DATA) {
4094 : /* for non-data, use the lowest supported rate */
4095 0 : ridx = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
4096 0 : tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
4097 0 : } else if (ic->ic_fixed_mcs != -1) {
4098 0 : ridx = sc->sc_fixed_ridx;
4099 0 : } else if (ic->ic_fixed_rate != -1) {
4100 0 : ridx = sc->sc_fixed_ridx;
4101 : } else {
4102 : /* for data frames, use RS table */
4103 0 : tx->initial_rate_index = 0;
4104 0 : tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
4105 0 : if (ni->ni_flags & IEEE80211_NODE_HT) {
4106 0 : ridx = iwm_mcs2ridx[ni->ni_txmcs];
4107 0 : return &iwm_rates[ridx];
4108 : }
4109 0 : ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4110 : IWM_RIDX_OFDM : IWM_RIDX_CCK;
4111 0 : for (i = 0; i < nrates; i++) {
4112 0 : if (iwm_rates[i].rate == (ni->ni_txrate &
4113 : IEEE80211_RATE_VAL)) {
4114 : ridx = i;
4115 0 : break;
4116 : }
4117 : }
4118 0 : return &iwm_rates[ridx];
4119 : }
4120 :
4121 0 : rinfo = &iwm_rates[ridx];
4122 0 : if (iwm_is_mimo_ht_plcp(rinfo->ht_plcp))
4123 0 : rate_flags = IWM_RATE_MCS_ANT_AB_MSK;
4124 : else
4125 : rate_flags = IWM_RATE_MCS_ANT_A_MSK;
4126 0 : if (IWM_RIDX_IS_CCK(ridx))
4127 0 : rate_flags |= IWM_RATE_MCS_CCK_MSK;
4128 0 : if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4129 0 : rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
4130 0 : rate_flags |= IWM_RATE_MCS_HT_MSK;
4131 0 : tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
4132 0 : } else
4133 0 : tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
4134 :
4135 0 : return rinfo;
4136 0 : }
4137 :
4138 : #define TB0_SIZE 16
4139 : int
4140 0 : iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
4141 : {
4142 0 : struct ieee80211com *ic = &sc->sc_ic;
4143 0 : struct iwm_node *in = (void *)ni;
4144 : struct iwm_tx_ring *ring;
4145 : struct iwm_tx_data *data;
4146 : struct iwm_tfd *desc;
4147 : struct iwm_device_cmd *cmd;
4148 : struct iwm_tx_cmd *tx;
4149 : struct ieee80211_frame *wh;
4150 : struct ieee80211_key *k = NULL;
4151 : const struct iwm_rate *rinfo;
4152 : uint32_t flags;
4153 : u_int hdrlen;
4154 : bus_dma_segment_t *seg;
4155 : uint8_t tid, type;
4156 : int i, totlen, err, pad;
4157 : int hdrlen2;
4158 :
4159 0 : wh = mtod(m, struct ieee80211_frame *);
4160 0 : hdrlen = ieee80211_get_hdrlen(wh);
4161 0 : type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4162 :
4163 0 : hdrlen2 = (ieee80211_has_qos(wh)) ?
4164 : sizeof (struct ieee80211_qosframe) :
4165 : sizeof (struct ieee80211_frame);
4166 :
4167 : tid = 0;
4168 :
4169 0 : ring = &sc->txq[ac];
4170 0 : desc = &ring->desc[ring->cur];
4171 0 : memset(desc, 0, sizeof(*desc));
4172 0 : data = &ring->data[ring->cur];
4173 :
4174 0 : cmd = &ring->cmd[ring->cur];
4175 0 : cmd->hdr.code = IWM_TX_CMD;
4176 0 : cmd->hdr.flags = 0;
4177 0 : cmd->hdr.qid = ring->qid;
4178 0 : cmd->hdr.idx = ring->cur;
4179 :
4180 0 : tx = (void *)cmd->data;
4181 0 : memset(tx, 0, sizeof(*tx));
4182 :
4183 0 : rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
4184 :
4185 : #if NBPFILTER > 0
4186 0 : if (sc->sc_drvbpf != NULL) {
4187 0 : struct mbuf mb;
4188 0 : struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
4189 : uint16_t chan_flags;
4190 :
4191 0 : tap->wt_flags = 0;
4192 0 : tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
4193 0 : chan_flags = ni->ni_chan->ic_flags;
4194 0 : if (ic->ic_curmode != IEEE80211_MODE_11N)
4195 0 : chan_flags &= ~IEEE80211_CHAN_HT;
4196 0 : tap->wt_chan_flags = htole16(chan_flags);
4197 0 : if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4198 0 : !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4199 0 : type == IEEE80211_FC0_TYPE_DATA &&
4200 0 : rinfo->plcp == IWM_RATE_INVM_PLCP) {
4201 0 : tap->wt_rate = (0x80 | rinfo->ht_plcp);
4202 0 : } else
4203 0 : tap->wt_rate = rinfo->rate;
4204 0 : tap->wt_hwqueue = ac;
4205 0 : if ((ic->ic_flags & IEEE80211_F_WEPON) &&
4206 0 : (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
4207 0 : tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
4208 :
4209 0 : mb.m_data = (caddr_t)tap;
4210 0 : mb.m_len = sc->sc_txtap_len;
4211 0 : mb.m_next = m;
4212 0 : mb.m_nextpkt = NULL;
4213 0 : mb.m_type = 0;
4214 0 : mb.m_flags = 0;
4215 0 : bpf_mtap(sc->sc_drvbpf, &mb, BPF_DIRECTION_OUT);
4216 0 : }
4217 : #endif
4218 :
4219 0 : if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
4220 0 : k = ieee80211_get_txkey(ic, wh, ni);
4221 0 : if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
4222 0 : return ENOBUFS;
4223 : /* 802.11 header may have moved. */
4224 0 : wh = mtod(m, struct ieee80211_frame *);
4225 0 : }
4226 0 : totlen = m->m_pkthdr.len;
4227 :
4228 : flags = 0;
4229 0 : if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
4230 : flags |= IWM_TX_CMD_FLG_ACK;
4231 0 : }
4232 :
4233 0 : if (type == IEEE80211_FC0_TYPE_DATA &&
4234 0 : !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4235 0 : (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
4236 0 : (ic->ic_flags & IEEE80211_F_USEPROT)))
4237 0 : flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
4238 :
4239 0 : if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4240 0 : type != IEEE80211_FC0_TYPE_DATA)
4241 0 : tx->sta_id = IWM_AUX_STA_ID;
4242 : else
4243 0 : tx->sta_id = IWM_STATION_ID;
4244 :
4245 0 : if (type == IEEE80211_FC0_TYPE_MGT) {
4246 0 : uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4247 :
4248 0 : if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
4249 0 : subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
4250 0 : tx->pm_frame_timeout = htole16(3);
4251 : else
4252 0 : tx->pm_frame_timeout = htole16(2);
4253 0 : } else {
4254 0 : tx->pm_frame_timeout = htole16(0);
4255 : }
4256 :
4257 0 : if (hdrlen & 3) {
4258 : /* First segment length must be a multiple of 4. */
4259 0 : flags |= IWM_TX_CMD_FLG_MH_PAD;
4260 0 : pad = 4 - (hdrlen & 3);
4261 0 : } else
4262 : pad = 0;
4263 :
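     :	/* E.g. a 26-byte QoS data header gives pad = 4 - (26 & 3) = 2. */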
4264 0 : tx->driver_txop = 0;
4265 0 : tx->next_frame_len = 0;
4266 :
4267 0 : tx->len = htole16(totlen);
4268 0 : tx->tid_tspec = tid;
4269 0 : tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
4270 :
4271 : /* Set physical address of "scratch area". */
4272 0 : tx->dram_lsb_ptr = htole32(data->scratch_paddr);
4273 0 : tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
4274 :
4275 : /* Copy 802.11 header in TX command. */
4276 0 : memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
4277 :
4278 0 : flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
4279 :
4280 0 : tx->sec_ctl = 0;
4281 0 : tx->tx_flags |= htole32(flags);
4282 :
4283 : /* Trim 802.11 header. */
4284 0 : m_adj(m, hdrlen);
4285 :
4286 0 : err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4287 : BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4288 0 : if (err && err != EFBIG) {
4289 0 : printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
4290 0 : m_freem(m);
4291 0 : return err;
4292 : }
4293 0 : if (err) {
4294 : /* Too many DMA segments, linearize mbuf. */
4295 0 : if (m_defrag(m, M_DONTWAIT)) {
4296 0 : m_freem(m);
4297 0 : return ENOBUFS;
4298 : }
4299 0 : err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4300 : BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4301 0 : if (err) {
4302 0 : printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
4303 : err);
4304 0 : m_freem(m);
4305 0 : return err;
4306 : }
4307 : }
4308 0 : data->m = m;
4309 0 : data->in = in;
4310 0 : data->done = 0;
4311 :
4312 : /* Fill TX descriptor. */
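     :	/*
     :	 * The first two buffers cover the prepared command: TB0 holds the
     :	 * first TB0_SIZE (16) bytes and TB1 the rest of the TX command
     :	 * plus the copied 802.11 header; the remaining buffers map the
     :	 * mbuf payload.
     :	 */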
4313 0 : desc->num_tbs = 2 + data->map->dm_nsegs;
4314 :
4315 0 : desc->tbs[0].lo = htole32(data->cmd_paddr);
4316 0 : desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4317 : (TB0_SIZE << 4);
4318 0 : desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
4319 0 : desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4320 : ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
4321 0 : + hdrlen + pad - TB0_SIZE) << 4);
4322 :
4323 : /* Other DMA segments are for data payload. */
4324 0 : seg = data->map->dm_segs;
4325 0 : for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
4326 0 : desc->tbs[i+2].lo = htole32(seg->ds_addr);
4327 0 :		desc->tbs[i+2].hi_n_len =
4328 0 : htole16(iwm_get_dma_hi_addr(seg->ds_addr))
4329 0 : | ((seg->ds_len) << 4);
4330 : }
4331 :
4332 0 : bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
4333 : BUS_DMASYNC_PREWRITE);
4334 0 : bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
4335 : (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
4336 : sizeof (*cmd), BUS_DMASYNC_PREWRITE);
4337 0 : bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4338 : (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
4339 : sizeof (*desc), BUS_DMASYNC_PREWRITE);
4340 :
4341 : #if 0
4342 : iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
4343 : #endif
4344 :
4345 : /* Kick TX ring. */
4346 0 : ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4347 0 : IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4348 :
4349 : /* Mark TX ring as full if we reach a certain threshold. */
4350 0 : if (++ring->queued > IWM_TX_RING_HIMARK) {
4351 0 : sc->qfullmsk |= 1 << ring->qid;
4352 0 : }
4353 :
4354 0 : return 0;
4355 0 : }
4356 :
4357 : int
4358 0 : iwm_flush_tx_path(struct iwm_softc *sc, int tfd_msk)
4359 : {
4360 0 : struct iwm_tx_path_flush_cmd flush_cmd = {
4361 : .queues_ctl = htole32(tfd_msk),
4362 : .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
4363 : };
4364 : int err;
4365 :
4366 0 : err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, 0,
4367 : sizeof(flush_cmd), &flush_cmd);
4368 0 : if (err)
4369 0 : printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), err);
4370 0 : return err;
4371 0 : }
4372 :
4373 : void
4374 0 : iwm_led_enable(struct iwm_softc *sc)
4375 : {
4376 0 : IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
4377 0 : }
4378 :
4379 : void
4380 0 : iwm_led_disable(struct iwm_softc *sc)
4381 : {
4382 0 : IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
4383 0 : }
4384 :
4385 : int
4386 0 : iwm_led_is_enabled(struct iwm_softc *sc)
4387 : {
4388 0 : return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
4389 : }
4390 :
4391 : void
4392 0 : iwm_led_blink_timeout(void *arg)
4393 : {
4394 0 : struct iwm_softc *sc = arg;
4395 :
4396 0 : if (iwm_led_is_enabled(sc))
4397 0 : iwm_led_disable(sc);
4398 : else
4399 0 : iwm_led_enable(sc);
4400 :
4401 0 : timeout_add_msec(&sc->sc_led_blink_to, 200);
4402 0 : }
4403 :
4404 : void
4405 0 : iwm_led_blink_start(struct iwm_softc *sc)
4406 : {
4407 0 : timeout_add(&sc->sc_led_blink_to, 0);
4408 0 : }
4409 :
4410 : void
4411 0 : iwm_led_blink_stop(struct iwm_softc *sc)
4412 : {
4413 0 : timeout_del(&sc->sc_led_blink_to);
4414 0 : iwm_led_disable(sc);
4415 0 : }
4416 :
4417 : #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC 25
4418 :
4419 : int
4420 0 : iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
4421 : struct iwm_beacon_filter_cmd *cmd)
4422 : {
4423 0 : return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
4424 0 : 0, sizeof(struct iwm_beacon_filter_cmd), cmd);
4425 : }
4426 :
4427 : void
4428 0 : iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
4429 : struct iwm_beacon_filter_cmd *cmd)
4430 : {
4431 0 : cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
4432 0 : }
4433 :
4434 : int
4435 0 : iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
4436 : {
4437 0 : struct iwm_beacon_filter_cmd cmd = {
4438 : IWM_BF_CMD_CONFIG_DEFAULTS,
4439 : .bf_enable_beacon_filter = htole32(1),
4440 : .ba_enable_beacon_abort = htole32(enable),
4441 : };
4442 :
4443 0 : if (!sc->sc_bf.bf_enabled)
4444 0 : return 0;
4445 :
4446 0 : sc->sc_bf.ba_enabled = enable;
4447 0 : iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
4448 0 : return iwm_beacon_filter_send_cmd(sc, &cmd);
4449 0 : }
4450 :
4451 : void
4452 0 : iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
4453 : struct iwm_mac_power_cmd *cmd)
4454 : {
4455 0 : struct ieee80211com *ic = &sc->sc_ic;
4456 0 : struct ieee80211_node *ni = &in->in_ni;
4457 : int dtim_period, dtim_msec, keep_alive;
4458 :
4459 0 : cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
4460 : in->in_color));
4461 0 : if (ni->ni_dtimperiod)
4462 0 : dtim_period = ni->ni_dtimperiod;
4463 : else
4464 : dtim_period = 1;
4465 :
4466 : /*
4467 : * Regardless of power management state the driver must set
4468 : * keep alive period. FW will use it for sending keep alive NDPs
4469 : * immediately after association. Check that keep alive period
4470 : * is at least 3 * DTIM.
4471 : */
4472 0 : dtim_msec = dtim_period * ni->ni_intval;
4473 0 : keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
4474 0 : keep_alive = roundup(keep_alive, 1000) / 1000;
4475 0 : cmd->keep_alive_seconds = htole16(keep_alive);
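     :	/*
     :	 * E.g. a DTIM period of 1 and a beacon interval of 100 yield
     :	 * MAX(300, 25000) msec, rounded up to 25 seconds.
     :	 */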
4476 :
4477 0 : if (ic->ic_opmode != IEEE80211_M_MONITOR)
4478 0 : cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
4479 0 : }
4480 :
4481 : int
4482 0 : iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
4483 : {
4484 : int err;
4485 : int ba_enable;
4486 0 : struct iwm_mac_power_cmd cmd;
4487 :
4488 0 : memset(&cmd, 0, sizeof(cmd));
4489 :
4490 0 : iwm_power_build_cmd(sc, in, &cmd);
4491 :
4492 0 : err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
4493 : sizeof(cmd), &cmd);
4494 0 : if (err != 0)
4495 0 : return err;
4496 :
4497 0 : ba_enable = !!(cmd.flags &
4498 : htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
4499 0 : return iwm_update_beacon_abort(sc, in, ba_enable);
4500 0 : }
4501 :
4502 : int
4503 0 : iwm_power_update_device(struct iwm_softc *sc)
4504 : {
4505 0 : struct iwm_device_power_cmd cmd = { };
4506 0 : struct ieee80211com *ic = &sc->sc_ic;
4507 :
4508 0 : if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
4509 0 : return 0;
4510 :
4511 0 : if (ic->ic_opmode != IEEE80211_M_MONITOR)
4512 0 : cmd.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
4513 :
4514 0 : return iwm_send_cmd_pdu(sc,
4515 : IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
4516 0 : }
4517 :
4518 : int
4519 0 : iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
4520 : {
4521 0 : struct iwm_beacon_filter_cmd cmd = {
4522 : IWM_BF_CMD_CONFIG_DEFAULTS,
4523 : .bf_enable_beacon_filter = htole32(1),
4524 : };
4525 : int err;
4526 :
4527 0 : iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
4528 0 : err = iwm_beacon_filter_send_cmd(sc, &cmd);
4529 :
4530 0 : if (err == 0)
4531 0 : sc->sc_bf.bf_enabled = 1;
4532 :
4533 0 : return err;
4534 0 : }
4535 :
4536 : int
4537 0 : iwm_disable_beacon_filter(struct iwm_softc *sc)
4538 : {
4539 0 : struct iwm_beacon_filter_cmd cmd;
4540 : int err;
4541 :
4542 0 : memset(&cmd, 0, sizeof(cmd));
4543 0 : if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
4544 0 : return 0;
4545 :
4546 0 : err = iwm_beacon_filter_send_cmd(sc, &cmd);
4547 0 : if (err == 0)
4548 0 : sc->sc_bf.bf_enabled = 0;
4549 :
4550 0 : return err;
4551 0 : }
4552 :
4553 : int
4554 0 : iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
4555 : {
4556 0 : struct iwm_add_sta_cmd_v7 add_sta_cmd;
4557 : int err;
4558 0 : uint32_t status;
4559 0 : struct ieee80211com *ic = &sc->sc_ic;
4560 :
4561 0 : if (!update && (sc->sc_flags & IWM_FLAG_STA_ACTIVE))
4562 0 : panic("STA already added");
4563 :
4564 0 : memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
4565 :
4566 0 : add_sta_cmd.sta_id = IWM_STATION_ID;
4567 0 : add_sta_cmd.mac_id_n_color
4568 0 : = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
4569 0 : if (!update) {
4570 : int ac;
4571 0 : for (ac = 0; ac < EDCA_NUM_AC; ac++) {
4572 0 : add_sta_cmd.tfd_queue_msk |=
4573 0 : htole32(1 << iwm_ac_to_tx_fifo[ac]);
4574 : }
4575 0 : if (ic->ic_opmode == IEEE80211_M_MONITOR)
4576 0 : IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
4577 : etherbroadcastaddr);
4578 : else
4579 0 : IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
4580 : in->in_ni.ni_bssid);
4581 0 : }
4582 0 : add_sta_cmd.add_modify = update ? 1 : 0;
4583 0 : add_sta_cmd.station_flags_msk
4584 0 : |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
4585 0 : add_sta_cmd.tid_disable_tx = htole16(0xffff);
4586 0 : if (update)
4587 0 : add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
4588 :
4589 0 : if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
4590 : add_sta_cmd.station_flags_msk
4591 0 : |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
4592 : IWM_STA_FLG_AGG_MPDU_DENS_MSK);
4593 :
4594 0 : add_sta_cmd.station_flags
4595 0 : |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
4596 0 : switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
4597 : case IEEE80211_AMPDU_PARAM_SS_2:
4598 : add_sta_cmd.station_flags
4599 0 : |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
4600 0 : break;
4601 : case IEEE80211_AMPDU_PARAM_SS_4:
4602 : add_sta_cmd.station_flags
4603 0 : |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
4604 0 : break;
4605 : case IEEE80211_AMPDU_PARAM_SS_8:
4606 : add_sta_cmd.station_flags
4607 0 : |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
4608 0 : break;
4609 : case IEEE80211_AMPDU_PARAM_SS_16:
4610 : add_sta_cmd.station_flags
4611 0 : |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
4612 0 : break;
4613 : default:
4614 : break;
4615 : }
4616 : }
4617 :
4618 0 : status = IWM_ADD_STA_SUCCESS;
4619 0 : err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(add_sta_cmd),
4620 : &add_sta_cmd, &status);
4621 0 : if (err == 0 && status != IWM_ADD_STA_SUCCESS)
4622 : err = EIO;
4623 :
4624 0 : return err;
4625 0 : }
4626 :
4627 : int
4628 0 : iwm_add_aux_sta(struct iwm_softc *sc)
4629 : {
4630 0 : struct iwm_add_sta_cmd_v7 cmd;
4631 : int err;
4632 0 : uint32_t status;
4633 :
4634 0 : err = iwm_enable_txq(sc, 0, IWM_AUX_QUEUE, IWM_TX_FIFO_MCAST);
4635 0 : if (err)
4636 0 : return err;
4637 :
4638 0 : memset(&cmd, 0, sizeof(cmd));
4639 0 : cmd.sta_id = IWM_AUX_STA_ID;
4640 0 : cmd.mac_id_n_color =
4641 : htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
4642 0 : cmd.tfd_queue_msk = htole32(1 << IWM_AUX_QUEUE);
4643 0 : cmd.tid_disable_tx = htole16(0xffff);
4644 :
4645 0 : status = IWM_ADD_STA_SUCCESS;
4646 0 : err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
4647 : &status);
4648 0 : if (err == 0 && status != IWM_ADD_STA_SUCCESS)
4649 : err = EIO;
4650 :
4651 0 : return err;
4652 0 : }
4653 :
4654 : int
4655 0 : iwm_rm_sta_cmd(struct iwm_softc *sc, struct iwm_node *in)
4656 : {
4657 0 : struct iwm_rm_sta_cmd rm_sta_cmd;
4658 : int err;
4659 :
4660 0 : if ((sc->sc_flags & IWM_FLAG_STA_ACTIVE) == 0)
4661 0 : panic("sta already removed");
4662 :
4663 0 : memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
4664 0 : rm_sta_cmd.sta_id = IWM_STATION_ID;
4665 :
4666 0 : err = iwm_send_cmd_pdu(sc, IWM_REMOVE_STA, 0, sizeof(rm_sta_cmd),
4667 : &rm_sta_cmd);
4668 :
4669 0 : return err;
4670 0 : }
4671 :
4672 : uint16_t
4673 0 : iwm_scan_rx_chain(struct iwm_softc *sc)
4674 : {
4675 : uint16_t rx_chain;
4676 : uint8_t rx_ant;
4677 :
4678 0 : rx_ant = iwm_fw_valid_rx_ant(sc);
4679 0 : rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
4680 0 : rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
4681 0 : rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
4682 0 : rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
4683 0 : return htole16(rx_chain);
4684 : }
4685 :
4686 : uint32_t
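     : /*
     :  * Pick the TX rate and antenna used for probe requests during a scan.
     :  * Successive calls rotate sc_scan_last_antenna through the valid TX
     :  * antennas so scan traffic is spread across the available chains.
     :  */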
4687 0 : iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
4688 : {
4689 : uint32_t tx_ant;
4690 : int i, ind;
4691 :
4692 0 : for (i = 0, ind = sc->sc_scan_last_antenna;
4693 0 : i < IWM_RATE_MCS_ANT_NUM; i++) {
4694 0 : ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
4695 0 : if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
4696 0 : sc->sc_scan_last_antenna = ind;
4697 0 : break;
4698 : }
4699 : }
4700 0 : tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
4701 :
4702 0 : if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
4703 0 : return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
4704 : tx_ant);
4705 : else
4706 0 : return htole32(IWM_RATE_6M_PLCP | tx_ant);
4707 0 : }
4708 :
4709 : uint8_t
4710 0 : iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
4711 : struct iwm_scan_channel_cfg_lmac *chan, int n_ssids, int bgscan)
4712 : {
4713 0 : struct ieee80211com *ic = &sc->sc_ic;
4714 : struct ieee80211_channel *c;
4715 : uint8_t nchan;
4716 :
4717 0 : for (nchan = 0, c = &ic->ic_channels[1];
4718 0 : c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
4719 0 : nchan < sc->sc_capa_n_scan_channels;
4720 0 : c++) {
4721 0 : if (c->ic_flags == 0)
4722 : continue;
4723 :
4724 0 : chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
4725 0 : chan->iter_count = htole16(1);
4726 0 : chan->iter_interval = 0;
4727 0 : chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
4728 0 : if (n_ssids != 0 && !bgscan)
4729 0 : chan->flags |= htole32(1 << 1); /* select SSID 0 */
4730 0 : chan++;
4731 0 : nchan++;
4732 0 : }
4733 :
4734 0 : return nchan;
4735 : }
4736 :
4737 : uint8_t
4738 0 : iwm_umac_scan_fill_channels(struct iwm_softc *sc,
4739 : struct iwm_scan_channel_cfg_umac *chan, int n_ssids, int bgscan)
4740 : {
4741 0 : struct ieee80211com *ic = &sc->sc_ic;
4742 : struct ieee80211_channel *c;
4743 : uint8_t nchan;
4744 :
4745 0 : for (nchan = 0, c = &ic->ic_channels[1];
4746 0 : c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
4747 0 : nchan < sc->sc_capa_n_scan_channels;
4748 0 : c++) {
4749 0 : if (c->ic_flags == 0)
4750 : continue;
4751 :
4752 0 : chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
4753 0 : chan->iter_count = 1;
4754 0 : chan->iter_interval = htole16(0);
4755 0 : if (n_ssids != 0 && !bgscan)
4756 0 : chan->flags = htole32(1 << 0); /* select SSID 0 */
4757 0 : chan++;
4758 0 : nchan++;
4759 0 : }
4760 :
4761 0 : return nchan;
4762 : }
4763 :
4764 : int
4765 0 : iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
4766 : {
4767 0 : struct ieee80211com *ic = &sc->sc_ic;
4768 0 : struct ifnet *ifp = IC2IFP(ic);
4769 0 : struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
4770 : struct ieee80211_rateset *rs;
4771 : size_t remain = sizeof(preq->buf);
4772 : uint8_t *frm, *pos;
4773 :
4774 0 : memset(preq, 0, sizeof(*preq));
4775 :
4776 0 : if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
4777 0 : return ENOBUFS;
4778 :
4779 : /*
4780 : * Build a probe request frame. Most of the following code is a
4781 : * copy & paste of what is done in net80211.
4782 : */
4783 0 : wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
4784 : IEEE80211_FC0_SUBTYPE_PROBE_REQ;
4785 0 : wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
4786 0 : IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl));
4787 0 : IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
4788 0 : IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
4789 0 : IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
4790 0 : *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
4791 0 : *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
4792 :
4793 0 : frm = (uint8_t *)(wh + 1);
4794 0 : frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);
4795 :
4796 : /* Tell the firmware where the MAC header is. */
4797 0 : preq->mac_header.offset = 0;
4798 0 : preq->mac_header.len = htole16(frm - (uint8_t *)wh);
4799 0 : remain -= frm - (uint8_t *)wh;
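     :	/*
     :	 * The firmware builds per-band probe requests from this template,
     :	 * appending band_data[0] (2 GHz IEs) or band_data[1] (5 GHz IEs)
     :	 * plus common_data to the MAC header recorded above.
     :	 */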
4800 :
4801 : /* Fill in 2GHz IEs and tell firmware where they are. */
4802 0 : rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
4803 0 : if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
4804 0 : if (remain < 4 + rs->rs_nrates)
4805 0 : return ENOBUFS;
4806 0 : } else if (remain < 2 + rs->rs_nrates)
4807 0 : return ENOBUFS;
4808 0 : preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
4809 : pos = frm;
4810 0 : frm = ieee80211_add_rates(frm, rs);
4811 0 : if (rs->rs_nrates > IEEE80211_RATE_SIZE)
4812 0 : frm = ieee80211_add_xrates(frm, rs);
4813 0 : preq->band_data[0].len = htole16(frm - pos);
4814 0 : remain -= frm - pos;
4815 :
4816 0 : if (isset(sc->sc_enabled_capa,
4817 : IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
4818 0 : if (remain < 3)
4819 0 : return ENOBUFS;
4820 0 : *frm++ = IEEE80211_ELEMID_DSPARMS;
4821 0 : *frm++ = 1;
4822 0 : *frm++ = 0;
4823 0 : remain -= 3;
4824 0 : }
4825 :
4826 0 : if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
4827 : /* Fill in 5GHz IEs. */
4828 0 : rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
4829 0 : if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
4830 0 : if (remain < 4 + rs->rs_nrates)
4831 0 : return ENOBUFS;
4832 0 : } else if (remain < 2 + rs->rs_nrates)
4833 0 : return ENOBUFS;
4834 0 : preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
4835 : pos = frm;
4836 0 : frm = ieee80211_add_rates(frm, rs);
4837 0 : if (rs->rs_nrates > IEEE80211_RATE_SIZE)
4838 0 : frm = ieee80211_add_xrates(frm, rs);
4839 0 : preq->band_data[1].len = htole16(frm - pos);
4840 0 : remain -= frm - pos;
4841 0 : }
4842 :
4843 : /* Send 11n IEs on both 2GHz and 5GHz bands. */
4844 0 : preq->common_data.offset = htole16(frm - (uint8_t *)wh);
4845 : pos = frm;
4846 0 : if (ic->ic_flags & IEEE80211_F_HTON) {
4847 0 : if (remain < 28)
4848 0 : return ENOBUFS;
4849 0 : frm = ieee80211_add_htcaps(frm, ic);
4850 : /* XXX add WME info? */
4851 0 : }
4852 0 : preq->common_data.len = htole16(frm - pos);
4853 :
4854 0 : return 0;
4855 0 : }
4856 :
4857 : int
4858 0 : iwm_lmac_scan(struct iwm_softc *sc, int bgscan)
4859 : {
4860 0 : struct ieee80211com *ic = &sc->sc_ic;
4861 0 : struct iwm_host_cmd hcmd = {
4862 : .id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
4863 : .len = { 0, },
4864 : .data = { NULL, },
4865 : .flags = 0,
4866 : };
4867 : struct iwm_scan_req_lmac *req;
4868 : size_t req_len;
4869 : int err, async = bgscan;
4870 :
4871 0 : req_len = sizeof(struct iwm_scan_req_lmac) +
4872 0 : (sizeof(struct iwm_scan_channel_cfg_lmac) *
4873 0 : sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req);
4874 0 : if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
4875 0 : return ENOMEM;
4876 0 : req = malloc(req_len, M_DEVBUF,
4877 0 : (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
4878 0 : if (req == NULL)
4879 0 : return ENOMEM;
4880 :
4881 0 : hcmd.len[0] = (uint16_t)req_len;
4882 0 : hcmd.data[0] = (void *)req;
4883 0 : hcmd.flags |= async ? IWM_CMD_ASYNC : 0;
4884 :
4885 : /* These timings correspond to iwlwifi's UNASSOC scan. */
4886 0 : req->active_dwell = 10;
4887 0 : req->passive_dwell = 110;
4888 0 : req->fragmented_dwell = 44;
4889 0 : req->extended_dwell = 90;
4890 0 : if (bgscan) {
4891 0 : req->max_out_time = htole32(120);
4892 0 : req->suspend_time = htole32(120);
4893 0 : } else {
4894 0 : req->max_out_time = htole32(0);
4895 0 : req->suspend_time = htole32(0);
4896 : }
4897 0 : req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
4898 0 : req->rx_chain_select = iwm_scan_rx_chain(sc);
4899 0 : req->iter_num = htole32(1);
4900 0 : req->delay = 0;
4901 :
4902 0 : req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
4903 : IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
4904 : IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
4905 0 : if (ic->ic_des_esslen == 0)
4906 0 : req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
4907 : else
4908 0 : req->scan_flags |=
4909 : htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
4910 0 : if (isset(sc->sc_enabled_capa,
4911 : IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
4912 0 : req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);
4913 :
4914 0 : req->flags = htole32(IWM_PHY_BAND_24);
4915 0 : if (sc->sc_nvm.sku_cap_band_52GHz_enable)
4916 0 : req->flags |= htole32(IWM_PHY_BAND_5);
4917 0 : req->filter_flags =
4918 : htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);
4919 :
4920 : /* Tx flags 2 GHz. */
4921 0 : req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
4922 : IWM_TX_CMD_FLG_BT_DIS);
4923 0 : req->tx_cmd[0].rate_n_flags =
4924 0 : iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
4925 0 : req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;
4926 :
4927 : /* Tx flags 5 GHz. */
4928 0 : req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
4929 : IWM_TX_CMD_FLG_BT_DIS);
4930 0 : req->tx_cmd[1].rate_n_flags =
4931 0 : iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
4932 0 : req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;
4933 :
4934 : /* Check if we're doing an active directed scan. */
4935 0 : if (ic->ic_des_esslen != 0) {
4936 0 : req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
4937 0 : req->direct_scan[0].len = ic->ic_des_esslen;
4938 0 : memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
4939 : ic->ic_des_esslen);
4940 0 : }
4941 :
4942 0 : req->n_channels = iwm_lmac_scan_fill_channels(sc,
4943 0 : (struct iwm_scan_channel_cfg_lmac *)req->data,
4944 0 : ic->ic_des_esslen != 0, bgscan);
4945 :
4946 0 : err = iwm_fill_probe_req(sc,
4947 0 : (struct iwm_scan_probe_req *)(req->data +
4948 0 : (sizeof(struct iwm_scan_channel_cfg_lmac) *
4949 0 : sc->sc_capa_n_scan_channels)));
4950 0 : if (err) {
4951 0 : free(req, M_DEVBUF, req_len);
4952 0 : return err;
4953 : }
4954 :
4955 : /* Specify the scan plan: We'll do one iteration. */
4956 0 : req->schedule[0].iterations = 1;
4957 0 : req->schedule[0].full_scan_mul = 1;
4958 :
4959 : /* Disable EBS. */
4960 0 : req->channel_opt[0].non_ebs_ratio = 1;
4961 0 : req->channel_opt[1].non_ebs_ratio = 1;
4962 :
4963 0 : err = iwm_send_cmd(sc, &hcmd);
4964 0 : free(req, M_DEVBUF, req_len);
4965 0 : return err;
4966 0 : }
4967 :
4968 : int
4969 0 : iwm_config_umac_scan(struct iwm_softc *sc)
4970 : {
4971 0 : struct ieee80211com *ic = &sc->sc_ic;
4972 : struct iwm_scan_config *scan_config;
4973 : int err, nchan;
4974 : size_t cmd_size;
4975 : struct ieee80211_channel *c;
4976 0 : struct iwm_host_cmd hcmd = {
4977 0 : .id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_ALWAYS_LONG_GROUP, 0),
4978 : .flags = 0,
4979 : };
4980 : static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
4981 : IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
4982 : IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
4983 : IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
4984 : IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
4985 : IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
4986 : IWM_SCAN_CONFIG_RATE_54M);
4987 :
4988 0 : cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
4989 :
4990 0 : scan_config = malloc(cmd_size, M_DEVBUF, M_WAIT | M_CANFAIL | M_ZERO);
4991 0 : if (scan_config == NULL)
4992 0 : return ENOMEM;
4993 :
4994 0 : scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
4995 0 : scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
4996 0 : scan_config->legacy_rates = htole32(rates |
4997 : IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));
4998 :
4999 : /* These timings correspond to iwlwifi's UNASSOC scan. */
5000 0 : scan_config->dwell_active = 10;
5001 0 : scan_config->dwell_passive = 110;
5002 0 : scan_config->dwell_fragmented = 44;
5003 0 : scan_config->dwell_extended = 90;
5004 0 : scan_config->out_of_channel_time = htole32(0);
5005 0 : scan_config->suspend_time = htole32(0);
5006 :
5007 0 : IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);
5008 :
5009 0 : scan_config->bcast_sta_id = IWM_AUX_STA_ID;
5010 0 : scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
5011 : IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
5012 : IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
5013 :
5014 0 : for (c = &ic->ic_channels[1], nchan = 0;
5015 0 : c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5016 0 : nchan < sc->sc_capa_n_scan_channels; c++) {
5017 0 : if (c->ic_flags == 0)
5018 : continue;
5019 0 : scan_config->channel_array[nchan++] =
5020 0 : ieee80211_mhz2ieee(c->ic_freq, 0);
5021 0 : }
5022 :
5023 0 : scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
5024 : IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
5025 : IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
5026 : IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
5027 : IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
5028 : IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
5029 : IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
5030 : IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
5031 :	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
5032 : IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
5033 : IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
5034 :
5035 0 : hcmd.data[0] = scan_config;
5036 0 : hcmd.len[0] = cmd_size;
5037 :
5038 0 : err = iwm_send_cmd(sc, &hcmd);
5039 0 : free(scan_config, M_DEVBUF, cmd_size);
5040 0 : return err;
5041 0 : }
5042 :
5043 : int
5044 0 : iwm_umac_scan(struct iwm_softc *sc, int bgscan)
5045 : {
5046 0 : struct ieee80211com *ic = &sc->sc_ic;
5047 0 : struct iwm_host_cmd hcmd = {
5048 0 : .id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_ALWAYS_LONG_GROUP, 0),
5049 0 : .len = { 0, },
5050 : .data = { NULL, },
5051 :		.flags = 0,
5052 : };
5053 : struct iwm_scan_req_umac *req;
5054 : struct iwm_scan_req_umac_tail *tail;
5055 : size_t req_len;
5056 : int err, async = bgscan;
5057 :
5058 0 : req_len = sizeof(struct iwm_scan_req_umac) +
5059 0 : (sizeof(struct iwm_scan_channel_cfg_umac) *
5060 0 : sc->sc_capa_n_scan_channels) +
5061 : sizeof(struct iwm_scan_req_umac_tail);
5062 0 : if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
5063 0 : return ENOMEM;
5064 0 : req = malloc(req_len, M_DEVBUF,
5065 0 : (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
5066 0 : if (req == NULL)
5067 0 : return ENOMEM;
5068 :
5069 0 : hcmd.len[0] = (uint16_t)req_len;
5070 0 : hcmd.data[0] = (void *)req;
5071 0 : hcmd.flags |= async ? IWM_CMD_ASYNC : 0;
5072 :
5073 : /* These timings correspond to iwlwifi's UNASSOC scan. */
5074 0 : req->active_dwell = 10;
5075 0 : req->passive_dwell = 110;
5076 0 : req->fragmented_dwell = 44;
5077 0 : req->extended_dwell = 90;
5078 0 : if (bgscan) {
5079 0 : req->max_out_time = htole32(120);
5080 0 : req->suspend_time = htole32(120);
5081 0 : } else {
5082 0 : req->max_out_time = htole32(0);
5083 0 : req->suspend_time = htole32(0);
5084 : }
5085 :
5086 0 : req->scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
5087 0 : req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
5088 :
5089 0 : req->n_channels = iwm_umac_scan_fill_channels(sc,
5090 0 : (struct iwm_scan_channel_cfg_umac *)req->data,
5091 0 : ic->ic_des_esslen != 0, bgscan);
5092 :
5093 0 : req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
5094 : IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE |
5095 : IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
5096 :
5097 0 : tail = (void *)&req->data +
5098 0 : sizeof(struct iwm_scan_channel_cfg_umac) *
5099 0 : sc->sc_capa_n_scan_channels;
5100 :
5101 : /* Check if we're doing an active directed scan. */
5102 0 : if (ic->ic_des_esslen != 0) {
5103 0 : tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5104 0 : tail->direct_scan[0].len = ic->ic_des_esslen;
5105 0 : memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
5106 : ic->ic_des_esslen);
5107 0 : req->general_flags |=
5108 : htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
5109 0 : } else
5110 0 : req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);
5111 :
5112 0 : if (isset(sc->sc_enabled_capa,
5113 : IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
5114 0 : req->general_flags |=
5115 : htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
5116 :
5117 0 : err = iwm_fill_probe_req(sc, &tail->preq);
5118 0 : if (err) {
5119 0 : free(req, M_DEVBUF, req_len);
5120 0 : return err;
5121 : }
5122 :
5123 : /* Specify the scan plan: We'll do one iteration. */
5124 0 : tail->schedule[0].interval = 0;
5125 0 : tail->schedule[0].iter_count = 1;
5126 :
5127 0 : err = iwm_send_cmd(sc, &hcmd);
5128 0 : free(req, M_DEVBUF, req_len);
5129 0 : return err;
5130 0 : }
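/*
 * Illustrative sketch (not part of the original source): layout of the
 * UMAC scan command buffer built by iwm_umac_scan() above, following
 * the req_len and tail pointer arithmetic in that function:
 *
 *   +------------------------------------+ <- req (hcmd.data[0])
 *   | struct iwm_scan_req_umac           |
 *   +------------------------------------+ <- req->data
 *   | struct iwm_scan_channel_cfg_umac   |  one slot per scan channel;
 *   | ...                                |  sc->sc_capa_n_scan_channels
 *   | struct iwm_scan_channel_cfg_umac   |  slots are reserved
 *   +------------------------------------+ <- tail
 *   | struct iwm_scan_req_umac_tail      |  direct-scan SSIDs, probe
 *   |                                    |  request template, schedule
 *   +------------------------------------+
 */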
5131 :
5132 : uint8_t
5133 0 : iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
5134 : {
5135 : int i;
5136 : uint8_t rval;
5137 :
5138 0 : for (i = 0; i < rs->rs_nrates; i++) {
5139 0 : rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5140 0 : if (rval == iwm_rates[ridx].rate)
5141 0 : return rs->rs_rates[i];
5142 : }
5143 :
5144 0 : return 0;
5145 0 : }
5146 :
5147 : int
5148 0 : iwm_rval2ridx(int rval)
5149 : {
5150 : int ridx;
5151 :
5152 0 : for (ridx = 0; ridx < nitems(iwm_rates); ridx++) {
5153 0 : if (rval == iwm_rates[ridx].rate)
5154 : break;
5155 : }
5156 :
5157 0 : return ridx;
5158 : }
5159 :
5160 : void
5161 0 : iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
5162 : int *ofdm_rates)
5163 : {
5164 0 : struct ieee80211_node *ni = &in->in_ni;
5165 0 : struct ieee80211_rateset *rs = &ni->ni_rates;
5166 : int lowest_present_ofdm = -1;
5167 : int lowest_present_cck = -1;
5168 : uint8_t cck = 0;
5169 : uint8_t ofdm = 0;
5170 : int i;
5171 :
5172 0 : if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
5173 0 : IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
5174 0 : for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
5175 0 : if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
5176 : continue;
5177 0 : cck |= (1 << i);
5178 0 : if (lowest_present_cck == -1 || lowest_present_cck > i)
5179 0 : lowest_present_cck = i;
5180 : }
5181 : }
5182 0 : for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
5183 0 : if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
5184 : continue;
5185 0 : ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
5186 0 : if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
5187 0 : lowest_present_ofdm = i;
5188 : }
5189 :
5190 : /*
5191 : * Now we've got the basic rates as bitmaps in the ofdm and cck
5192 : * variables. This isn't sufficient though, as there might not
5193 : * be all the right rates in the bitmap. E.g. if the only basic
5194 : * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
5195 : * and 6 Mbps because the 802.11-2007 standard says in 9.6:
5196 : *
5197 : * [...] a STA responding to a received frame shall transmit
5198 : * its Control Response frame [...] at the highest rate in the
5199 : * BSSBasicRateSet parameter that is less than or equal to the
5200 : * rate of the immediately previous frame in the frame exchange
5201 : * sequence ([...]) and that is of the same modulation class
5202 : * ([...]) as the received frame. If no rate contained in the
5203 : * BSSBasicRateSet parameter meets these conditions, then the
5204 : * control frame sent in response to a received frame shall be
5205 : * transmitted at the highest mandatory rate of the PHY that is
5206 : * less than or equal to the rate of the received frame, and
5207 : * that is of the same modulation class as the received frame.
5208 : *
5209 : * As a consequence, we need to add all mandatory rates that are
5210 : * lower than all of the basic rates to these bitmaps.
5211 : */
5212 :
5213 0 : if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
5214 0 : ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
5215 0 : if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
5216 0 : ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
5217 : /* 6M already there or needed so always add */
5218 0 : ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
5219 :
5220 : /*
5221 : * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
5222 : * Note, however:
5223 : * - if no CCK rates are basic, it must be ERP since there must
5224 : * be some basic rates at all, so they're OFDM => ERP PHY
5225 : * (or we're in 5 GHz, and the cck bitmap will never be used)
5226 : * - if 11M is a basic rate, it must be ERP as well, so add 5.5M
5227 : * - if 5.5M is basic, 1M and 2M are mandatory
5228 : * - if 2M is basic, 1M is mandatory
5229 : * - if 1M is basic, that's the only valid ACK rate.
5230 : * As a consequence, it's not as complicated as it sounds, just add
5231 : * any lower rates to the ACK rate bitmap.
5232 : */
5233 0 : if (IWM_RATE_11M_INDEX < lowest_present_cck)
5234 0 : cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
5235 0 : if (IWM_RATE_5M_INDEX < lowest_present_cck)
5236 0 : cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
5237 0 : if (IWM_RATE_2M_INDEX < lowest_present_cck)
5238 0 : cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
5239 : /* 1M already there or needed so always add */
5240 0 : cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
5241 :
5242 0 : *cck_rates = cck;
5243 0 : *ofdm_rates = ofdm;
5244 0 : }
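/*
 * Worked example (a sketch, assuming a 2 GHz BSS whose only basic rates
 * are 5.5 and 11 Mbps): the loops above leave 5.5 and 11 in the CCK
 * bitmap, and the mandatory-rate fixup then adds 2 and 1 Mbps; no OFDM
 * rate is basic, so only the always-added 6 Mbps bit ends up in the
 * OFDM bitmap.  This matches the 802.11 requirement quoted in the
 * comment above.
 */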
5245 :
5246 : void
5247 0 : iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
5248 : struct iwm_mac_ctx_cmd *cmd, uint32_t action)
5249 : {
5250 : #define IWM_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
5251 0 : struct ieee80211com *ic = &sc->sc_ic;
5252 0 : struct ieee80211_node *ni = ic->ic_bss;
5253 0 : int cck_ack_rates, ofdm_ack_rates;
5254 : int i;
5255 :
5256 0 : cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
5257 : in->in_color));
5258 0 : cmd->action = htole32(action);
5259 :
5260 0 : if (ic->ic_opmode == IEEE80211_M_MONITOR)
5261 0 : cmd->mac_type = htole32(IWM_FW_MAC_TYPE_LISTENER);
5262 0 : else if (ic->ic_opmode == IEEE80211_M_STA)
5263 0 : cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
5264 : else
5265 0 : 		panic("unsupported operating mode %d", ic->ic_opmode);
5266 0 : cmd->tsf_id = htole32(IWM_TSF_ID_A);
5267 :
5268 0 : IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
5269 0 : if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5270 0 : IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
5271 0 : return;
5272 : }
5273 :
5274 0 : IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
5275 0 : iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
5276 0 : cmd->cck_rates = htole32(cck_ack_rates);
5277 0 : cmd->ofdm_rates = htole32(ofdm_ack_rates);
5278 :
5279 0 : cmd->cck_short_preamble
5280 0 : = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5281 : ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
5282 0 : cmd->short_slot
5283 0 : = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
5284 : ? IWM_MAC_FLG_SHORT_SLOT : 0);
5285 :
5286 0 : for (i = 0; i < EDCA_NUM_AC; i++) {
5287 0 : struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
5288 0 : int txf = iwm_ac_to_tx_fifo[i];
5289 :
5290 0 : cmd->ac[txf].cw_min = htole16(IWM_EXP2(ac->ac_ecwmin));
5291 0 : cmd->ac[txf].cw_max = htole16(IWM_EXP2(ac->ac_ecwmax));
5292 0 : cmd->ac[txf].aifsn = ac->ac_aifsn;
5293 0 : cmd->ac[txf].fifos_mask = (1 << txf);
5294 0 : cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
5295 : }
5296 0 : if (ni->ni_flags & IEEE80211_NODE_QOS)
5297 0 : cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);
5298 :
5299 0 : if (ni->ni_flags & IEEE80211_NODE_HT) {
5300 : enum ieee80211_htprot htprot =
5301 0 : (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
5302 0 : switch (htprot) {
5303 : case IEEE80211_HTPROT_NONE:
5304 : break;
5305 : case IEEE80211_HTPROT_NONMEMBER:
5306 : case IEEE80211_HTPROT_NONHT_MIXED:
5307 0 : cmd->protection_flags |=
5308 : htole32(IWM_MAC_PROT_FLG_HT_PROT);
5309 0 : break;
5310 : case IEEE80211_HTPROT_20MHZ:
5311 0 : if (ic->ic_htcaps & IEEE80211_HTCAP_CBW20_40) {
5312 : /* XXX ... and if our channel is 40 MHz ... */
5313 0 : cmd->protection_flags |=
5314 : htole32(IWM_MAC_PROT_FLG_HT_PROT |
5315 : IWM_MAC_PROT_FLG_FAT_PROT);
5316 0 : }
5317 : break;
5318 : default:
5319 : break;
5320 : }
5321 :
5322 0 : cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
5323 0 : }
5324 0 : if (ic->ic_flags & IEEE80211_F_USEPROT)
5325 0 : cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
5326 :
5327 0 : cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
5328 : #undef IWM_EXP2
5329 0 : }
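/*
 * Worked example (a sketch, assuming the common 802.11 EDCA defaults
 * for the best-effort AC: ECWmin 4, ECWmax 10, AIFSN 3, TXOP limit 0):
 * the EDCA loop above would program cw_min = 2^4 - 1 = 15,
 * cw_max = 2^10 - 1 = 1023, aifsn = 3 and edca_txop = 0 into the
 * corresponding Tx FIFO entry.
 */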
5330 :
5331 : void
5332 0 : iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
5333 : struct iwm_mac_data_sta *sta, int assoc)
5334 : {
5335 0 : struct ieee80211_node *ni = &in->in_ni;
5336 : uint32_t dtim_off;
5337 : uint64_t tsf;
5338 :
5339 0 : dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
5340 0 : memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
5341 : tsf = letoh64(tsf);
5342 :
5343 0 : sta->is_assoc = htole32(assoc);
5344 0 : sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
5345 0 : sta->dtim_tsf = htole64(tsf + dtim_off);
5346 0 : sta->bi = htole32(ni->ni_intval);
5347 0 : sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
5348 0 : sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
5349 0 : sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
5350 0 : sta->listen_interval = htole32(10);
5351 0 : sta->assoc_id = htole32(ni->ni_associd);
5352 0 : sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
5353 0 : }
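/*
 * Sketch of the arithmetic above, assuming a beacon interval of 100 TU
 * and a DTIM period of 3: dtim_interval becomes 300 TU, and dtim_off
 * converts the outstanding DTIM count into microseconds
 * (IEEE80211_DUR_TU is one TU expressed in microseconds) before it is
 * added to both the beacon receive timestamp and the TSF.
 */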
5354 :
5355 : int
5356 0 : iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
5357 : int assoc)
5358 : {
5359 0 : struct ieee80211com *ic = &sc->sc_ic;
5360 0 : struct ieee80211_node *ni = &in->in_ni;
5361 0 : struct iwm_mac_ctx_cmd cmd;
5362 0 : int active = (sc->sc_flags & IWM_FLAG_MAC_ACTIVE);
5363 :
5364 0 : if (action == IWM_FW_CTXT_ACTION_ADD && active)
5365 0 : panic("MAC already added");
5366 0 : if (action == IWM_FW_CTXT_ACTION_REMOVE && !active)
5367 0 : panic("MAC already removed");
5368 :
5369 0 : memset(&cmd, 0, sizeof(cmd));
5370 :
5371 0 : iwm_mac_ctxt_cmd_common(sc, in, &cmd, action);
5372 :
5373 0 : if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5374 0 : cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_PROMISC |
5375 : IWM_MAC_FILTER_IN_CONTROL_AND_MGMT |
5376 : IWM_MAC_FILTER_IN_BEACON |
5377 : IWM_MAC_FILTER_IN_PROBE_REQUEST |
5378 : IWM_MAC_FILTER_IN_CRC32);
5379 0 : } else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod)
5380 : /*
5381 : * Allow beacons to pass through as long as we are not
5382 : * associated or we do not have dtim period information.
5383 : */
5384 0 : cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
5385 : else
5386 0 : iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
5387 :
5388 0 : return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5389 0 : }
5390 :
5391 : int
5392 0 : iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in, int running)
5393 : {
5394 0 : struct iwm_time_quota_cmd cmd;
5395 : int i, idx, num_active_macs, quota, quota_rem;
5396 0 : int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
5397 0 : int n_ifs[IWM_MAX_BINDINGS] = {0, };
5398 : uint16_t id;
5399 :
5400 0 : memset(&cmd, 0, sizeof(cmd));
5401 :
5402 : /* currently, PHY ID == binding ID */
5403 0 : if (in && in->in_phyctxt) {
5404 0 : id = in->in_phyctxt->id;
5405 0 : KASSERT(id < IWM_MAX_BINDINGS);
5406 0 : colors[id] = in->in_phyctxt->color;
5407 0 : if (running)
5408 0 : n_ifs[id] = 1;
5409 : }
5410 :
5411 : /*
5412 : * The FW's scheduling session consists of
5413 : * IWM_MAX_QUOTA fragments. Divide these fragments
5414 : * equally between all the bindings that require quota
5415 : 	 * equally between all the bindings that require quota.
5416 : num_active_macs = 0;
5417 0 : for (i = 0; i < IWM_MAX_BINDINGS; i++) {
5418 0 : cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
5419 0 : num_active_macs += n_ifs[i];
5420 : }
5421 :
5422 : quota = 0;
5423 : quota_rem = 0;
5424 0 : if (num_active_macs) {
5425 0 : quota = IWM_MAX_QUOTA / num_active_macs;
5426 0 : quota_rem = IWM_MAX_QUOTA % num_active_macs;
5427 0 : }
5428 :
5429 0 : for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
5430 0 : if (colors[i] < 0)
5431 : continue;
5432 :
5433 0 : cmd.quotas[idx].id_and_color =
5434 0 : htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
5435 :
5436 0 : if (n_ifs[i] <= 0) {
5437 0 : cmd.quotas[idx].quota = htole32(0);
5438 0 : cmd.quotas[idx].max_duration = htole32(0);
5439 0 : } else {
5440 0 : cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
5441 0 : cmd.quotas[idx].max_duration = htole32(0);
5442 : }
5443 0 : idx++;
5444 0 : }
5445 :
5446 : /* Give the remainder of the session to the first binding */
5447 0 : cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
5448 :
5449 0 : return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0,
5450 : sizeof(cmd), &cmd);
5451 0 : }
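#if 0
/*
 * Illustrative sketch (hypothetical helper, not compiled and not part
 * of the driver): the quota split performed above.  The fragments are
 * divided evenly between active bindings and the remainder is credited
 * to the first binding, so a single active binding receives the whole
 * scheduling session.
 */
static void
iwm_quota_split_example(uint32_t max_quota, int num_active_macs)
{
	uint32_t quota = 0, quota_rem = 0;

	if (num_active_macs) {
		quota = max_quota / num_active_macs;
		quota_rem = max_quota % num_active_macs;
	}
	printf("per binding: %u fragments, first binding gets %u extra\n",
	    quota, quota_rem);
}
#endif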
5452 :
5453 : void
5454 0 : iwm_add_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
5455 : {
5456 0 : int s = splnet();
5457 :
5458 0 : if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
5459 0 : splx(s);
5460 0 : return;
5461 : }
5462 :
5463 0 : refcnt_take(&sc->task_refs);
5464 0 : if (!task_add(taskq, task))
5465 0 : refcnt_rele_wake(&sc->task_refs);
5466 0 : splx(s);
5467 0 : }
5468 :
5469 : void
5470 0 : iwm_del_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
5471 : {
5472 0 : if (task_del(taskq, task))
5473 0 : refcnt_rele(&sc->task_refs);
5474 0 : }
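/*
 * Sketch of the task accounting used above: iwm_add_task() takes a
 * reference on sc->task_refs before scheduling, iwm_del_task() drops it
 * if the task was cancelled before it could run, and each task handler
 * drops its own reference when it finishes.  iwm_stop() then waits in
 * refcnt_finalize() until all outstanding references are gone.
 */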
5475 :
5476 : int
5477 0 : iwm_scan(struct iwm_softc *sc)
5478 : {
5479 0 : struct ieee80211com *ic = &sc->sc_ic;
5480 0 : struct ifnet *ifp = IC2IFP(ic);
5481 : int err;
5482 :
5483 0 : if (sc->sc_flags & IWM_FLAG_BGSCAN) {
5484 0 : err = iwm_scan_abort(sc);
5485 0 : if (err) {
5486 0 : printf("%s: could not abort background scan\n",
5487 0 : DEVNAME(sc));
5488 0 : return err;
5489 : }
5490 : }
5491 :
5492 0 : if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
5493 0 : err = iwm_umac_scan(sc, 0);
5494 : else
5495 0 : err = iwm_lmac_scan(sc, 0);
5496 0 : if (err) {
5497 0 : printf("%s: could not initiate scan\n", DEVNAME(sc));
5498 0 : return err;
5499 : }
5500 :
5501 0 : sc->sc_flags |= IWM_FLAG_SCANNING;
5502 0 : if (ifp->if_flags & IFF_DEBUG)
5503 0 : printf("%s: %s -> %s\n", ifp->if_xname,
5504 0 : ieee80211_state_name[ic->ic_state],
5505 0 : ieee80211_state_name[IEEE80211_S_SCAN]);
5506 0 : if ((sc->sc_flags & IWM_FLAG_BGSCAN) == 0) {
5507 0 : ieee80211_set_link_state(ic, LINK_STATE_DOWN);
5508 0 : ieee80211_free_allnodes(ic, 1);
5509 0 : }
5510 0 : ic->ic_state = IEEE80211_S_SCAN;
5511 0 : iwm_led_blink_start(sc);
5512 0 : wakeup(&ic->ic_state); /* wake iwm_init() */
5513 :
5514 0 : return 0;
5515 0 : }
5516 :
5517 : int
5518 0 : iwm_bgscan(struct ieee80211com *ic)
5519 : {
5520 0 : struct iwm_softc *sc = IC2IFP(ic)->if_softc;
5521 : int err;
5522 :
5523 0 : if (sc->sc_flags & IWM_FLAG_SCANNING)
5524 0 : return 0;
5525 :
5526 0 : if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
5527 0 : err = iwm_umac_scan(sc, 1);
5528 : else
5529 0 : err = iwm_lmac_scan(sc, 1);
5530 0 : if (err) {
5531 0 : printf("%s: could not initiate scan\n", DEVNAME(sc));
5532 0 : return err;
5533 : }
5534 :
5535 0 : sc->sc_flags |= IWM_FLAG_BGSCAN;
5536 0 : return 0;
5537 0 : }
5538 :
5539 : int
5540 0 : iwm_umac_scan_abort(struct iwm_softc *sc)
5541 : {
5542 0 : struct iwm_umac_scan_abort cmd = { 0 };
5543 :
5544 0 : return iwm_send_cmd_pdu(sc,
5545 : IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC),
5546 : 0, sizeof(cmd), &cmd);
5547 0 : }
5548 :
5549 : int
5550 0 : iwm_lmac_scan_abort(struct iwm_softc *sc)
5551 : {
5552 0 : struct iwm_host_cmd cmd = {
5553 : .id = IWM_SCAN_OFFLOAD_ABORT_CMD,
5554 : };
5555 0 : int err, status;
5556 :
5557 0 : err = iwm_send_cmd_status(sc, &cmd, &status);
5558 0 : if (err)
5559 0 : return err;
5560 :
5561 0 : if (status != IWM_CAN_ABORT_STATUS) {
5562 : /*
5563 : * The scan abort will return 1 for success or
5564 : * 2 for "failure". A failure condition can be
5565 : * due to simply not being in an active scan which
5566 : * can occur if we send the scan abort before the
5567 : * microcode has notified us that a scan is completed.
5568 : */
5569 0 : return EBUSY;
5570 : }
5571 :
5572 0 : return 0;
5573 0 : }
5574 :
5575 : int
5576 0 : iwm_scan_abort(struct iwm_softc *sc)
5577 : {
5578 : int err;
5579 :
5580 0 : if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
5581 0 : err = iwm_umac_scan_abort(sc);
5582 : else
5583 0 : err = iwm_lmac_scan_abort(sc);
5584 :
5585 0 : if (err == 0)
5586 0 : sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
5587 0 : return err;
5588 : }
5589 :
5590 : int
5591 0 : iwm_auth(struct iwm_softc *sc)
5592 : {
5593 0 : struct ieee80211com *ic = &sc->sc_ic;
5594 0 : struct iwm_node *in = (void *)ic->ic_bss;
5595 : uint32_t duration;
5596 0 : int generation = sc->sc_generation, err;
5597 :
5598 0 : splassert(IPL_NET);
5599 :
5600 0 : if (ic->ic_opmode == IEEE80211_M_MONITOR)
5601 0 : sc->sc_phyctxt[0].channel = ic->ic_ibss_chan;
5602 : else
5603 0 : sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
5604 0 : err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
5605 : IWM_FW_CTXT_ACTION_MODIFY, 0);
5606 0 : if (err) {
5607 0 : printf("%s: could not update PHY context (error %d)\n",
5608 0 : DEVNAME(sc), err);
5609 0 : return err;
5610 : }
5611 0 : in->in_phyctxt = &sc->sc_phyctxt[0];
5612 :
5613 0 : err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
5614 0 : if (err) {
5615 0 : printf("%s: could not add MAC context (error %d)\n",
5616 0 : DEVNAME(sc), err);
5617 0 : return err;
5618 : }
5619 0 : sc->sc_flags |= IWM_FLAG_MAC_ACTIVE;
5620 :
5621 0 : err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
5622 0 : if (err) {
5623 0 : printf("%s: could not add binding (error %d)\n",
5624 0 : DEVNAME(sc), err);
5625 0 : goto rm_mac_ctxt;
5626 : }
5627 0 : sc->sc_flags |= IWM_FLAG_BINDING_ACTIVE;
5628 :
5629 0 : err = iwm_add_sta_cmd(sc, in, 0);
5630 0 : if (err) {
5631 0 : printf("%s: could not add sta (error %d)\n",
5632 0 : DEVNAME(sc), err);
5633 : goto rm_binding;
5634 : }
5635 0 : sc->sc_flags |= IWM_FLAG_STA_ACTIVE;
5636 :
5637 0 : if (ic->ic_opmode == IEEE80211_M_MONITOR)
5638 0 : return 0;
5639 :
5640 : /*
5641 : * Prevent the FW from wandering off channel during association
5642 : * by "protecting" the session with a time event.
5643 : */
5644 0 : if (in->in_ni.ni_intval)
5645 0 : duration = in->in_ni.ni_intval * 2;
5646 : else
5647 : duration = IEEE80211_DUR_TU;
5648 0 : iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
5649 :
5650 0 : return 0;
5651 :
5652 : rm_binding:
5653 0 : if (generation == sc->sc_generation) {
5654 0 : iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
5655 0 : sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
5656 0 : }
5657 : rm_mac_ctxt:
5658 0 : if (generation == sc->sc_generation) {
5659 0 : iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
5660 0 : sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
5661 0 : }
5662 0 : return err;
5663 0 : }
5664 :
5665 : int
5666 0 : iwm_deauth(struct iwm_softc *sc)
5667 : {
5668 0 : struct ieee80211com *ic = &sc->sc_ic;
5669 0 : struct iwm_node *in = (void *)ic->ic_bss;
5670 : int ac, tfd_msk, err;
5671 :
5672 0 : splassert(IPL_NET);
5673 :
5674 0 : iwm_unprotect_session(sc, in);
5675 :
5676 0 : if (sc->sc_flags & IWM_FLAG_STA_ACTIVE) {
5677 0 : err = iwm_rm_sta_cmd(sc, in);
5678 0 : if (err) {
5679 0 : printf("%s: could not remove STA (error %d)\n",
5680 0 : DEVNAME(sc), err);
5681 0 : return err;
5682 : }
5683 0 : sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
5684 0 : }
5685 :
5686 : tfd_msk = 0;
5687 0 : for (ac = 0; ac < EDCA_NUM_AC; ac++)
5688 0 : tfd_msk |= htole32(1 << iwm_ac_to_tx_fifo[ac]);
5689 0 : err = iwm_flush_tx_path(sc, tfd_msk);
5690 0 : if (err) {
5691 0 : printf("%s: could not flush Tx path (error %d)\n",
5692 0 : DEVNAME(sc), err);
5693 0 : return err;
5694 : }
5695 :
5696 0 : if (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE) {
5697 0 : err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
5698 0 : if (err) {
5699 0 : printf("%s: could not remove binding (error %d)\n",
5700 0 : DEVNAME(sc), err);
5701 0 : return err;
5702 : }
5703 0 : sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
5704 0 : }
5705 :
5706 0 : if (sc->sc_flags & IWM_FLAG_MAC_ACTIVE) {
5707 0 : err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
5708 0 : if (err) {
5709 0 : printf("%s: could not remove MAC context (error %d)\n",
5710 0 : DEVNAME(sc), err);
5711 0 : return err;
5712 : }
5713 0 : sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
5714 0 : }
5715 :
5716 0 : return 0;
5717 0 : }
5718 :
5719 : int
5720 0 : iwm_assoc(struct iwm_softc *sc)
5721 : {
5722 0 : struct ieee80211com *ic = &sc->sc_ic;
5723 0 : struct iwm_node *in = (void *)ic->ic_bss;
5724 0 : int update_sta = (sc->sc_flags & IWM_FLAG_STA_ACTIVE);
5725 : int err;
5726 :
5727 0 : splassert(IPL_NET);
5728 :
5729 0 : err = iwm_add_sta_cmd(sc, in, update_sta);
5730 0 : if (err) {
5731 0 : printf("%s: could not %s STA (error %d)\n",
5732 0 : DEVNAME(sc), update_sta ? "update" : "add", err);
5733 0 : return err;
5734 : }
5735 :
5736 0 : return 0;
5737 0 : }
5738 :
5739 : int
5740 0 : iwm_disassoc(struct iwm_softc *sc)
5741 : {
5742 0 : struct ieee80211com *ic = &sc->sc_ic;
5743 0 : struct iwm_node *in = (void *)ic->ic_bss;
5744 : int err;
5745 :
5746 0 : splassert(IPL_NET);
5747 :
5748 0 : if (sc->sc_flags & IWM_FLAG_STA_ACTIVE) {
5749 0 : err = iwm_rm_sta_cmd(sc, in);
5750 0 : if (err) {
5751 0 : printf("%s: could not remove STA (error %d)\n",
5752 0 : DEVNAME(sc), err);
5753 0 : return err;
5754 : }
5755 0 : sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
5756 0 : }
5757 :
5758 0 : return 0;
5759 0 : }
5760 :
5761 : int
5762 0 : iwm_run(struct iwm_softc *sc)
5763 : {
5764 0 : struct ieee80211com *ic = &sc->sc_ic;
5765 0 : struct iwm_node *in = (void *)ic->ic_bss;
5766 : int err;
5767 :
5768 0 : splassert(IPL_NET);
5769 :
5770 0 : if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5771 : /* Add a MAC context and a sniffing STA. */
5772 0 : err = iwm_auth(sc);
5773 0 : if (err)
5774 0 : return err;
5775 : }
5776 :
5777 : /* Configure Rx chains for MIMO. */
5778 0 : if ((ic->ic_opmode == IEEE80211_M_MONITOR ||
5779 0 : (in->in_ni.ni_flags & IEEE80211_NODE_HT)) &&
5780 0 : !sc->sc_nvm.sku_cap_mimo_disable) {
5781 0 : err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0],
5782 : 2, 2, IWM_FW_CTXT_ACTION_MODIFY, 0);
5783 0 : if (err) {
5784 0 : printf("%s: failed to update PHY\n",
5785 0 : DEVNAME(sc));
5786 0 : return err;
5787 : }
5788 : }
5789 :
5790 : /* We have now been assigned an associd by the AP. */
5791 0 : err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
5792 0 : if (err) {
5793 0 : printf("%s: failed to update MAC\n", DEVNAME(sc));
5794 0 : return err;
5795 : }
5796 :
5797 0 : err = iwm_sf_config(sc, IWM_SF_FULL_ON);
5798 0 : if (err) {
5799 0 : printf("%s: could not set sf full on (error %d)\n",
5800 0 : DEVNAME(sc), err);
5801 0 : return err;
5802 : }
5803 :
5804 0 : err = iwm_allow_mcast(sc);
5805 0 : if (err) {
5806 0 : printf("%s: could not allow mcast (error %d)\n",
5807 0 : DEVNAME(sc), err);
5808 0 : return err;
5809 : }
5810 :
5811 0 : err = iwm_power_update_device(sc);
5812 0 : if (err) {
5813 0 : printf("%s: could not send power command (error %d)\n",
5814 0 : DEVNAME(sc), err);
5815 0 : return err;
5816 : }
5817 : #ifdef notyet
5818 : /*
5819 : * Disabled for now. Default beacon filter settings
5820 : * prevent net80211 from getting ERP and HT protection
5821 : * updates from beacons.
5822 : */
5823 : err = iwm_enable_beacon_filter(sc, in);
5824 : if (err) {
5825 : printf("%s: could not enable beacon filter\n",
5826 : DEVNAME(sc));
5827 : return err;
5828 : }
5829 : #endif
5830 0 : err = iwm_power_mac_update_mode(sc, in);
5831 0 : if (err) {
5832 0 : printf("%s: could not update MAC power (error %d)\n",
5833 0 : DEVNAME(sc), err);
5834 0 : return err;
5835 : }
5836 :
5837 0 : err = iwm_update_quotas(sc, in, 1);
5838 0 : if (err) {
5839 0 : printf("%s: could not update quotas (error %d)\n",
5840 0 : DEVNAME(sc), err);
5841 0 : return err;
5842 : }
5843 :
5844 0 : ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
5845 0 : ieee80211_mira_node_init(&in->in_mn);
5846 :
5847 0 : if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5848 0 : iwm_led_blink_start(sc);
5849 0 : return 0;
5850 : }
5851 :
5852 : /* Start at lowest available bit-rate, AMRR will raise. */
5853 0 : in->in_ni.ni_txrate = 0;
5854 0 : in->in_ni.ni_txmcs = 0;
5855 0 : iwm_setrates(in);
5856 :
5857 0 : timeout_add_msec(&sc->sc_calib_to, 500);
5858 0 : iwm_led_enable(sc);
5859 :
5860 0 : return 0;
5861 0 : }
5862 :
5863 : int
5864 0 : iwm_run_stop(struct iwm_softc *sc)
5865 : {
5866 0 : struct ieee80211com *ic = &sc->sc_ic;
5867 0 : struct iwm_node *in = (void *)ic->ic_bss;
5868 : int err;
5869 :
5870 0 : splassert(IPL_NET);
5871 :
5872 0 : if (ic->ic_opmode == IEEE80211_M_MONITOR)
5873 0 : iwm_led_blink_stop(sc);
5874 :
5875 0 : err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
5876 0 : if (err)
5877 0 : return err;
5878 :
5879 0 : iwm_disable_beacon_filter(sc);
5880 :
5881 0 : err = iwm_update_quotas(sc, in, 0);
5882 0 : if (err) {
5883 0 : printf("%s: could not update quotas (error %d)\n",
5884 0 : DEVNAME(sc), err);
5885 0 : return err;
5886 : }
5887 :
5888 0 : err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
5889 0 : if (err) {
5890 0 : printf("%s: failed to update MAC\n", DEVNAME(sc));
5891 0 : return err;
5892 : }
5893 :
5894 : /* Reset Tx chains in case MIMO was enabled. */
5895 0 : if ((in->in_ni.ni_flags & IEEE80211_NODE_HT) &&
5896 0 : !sc->sc_nvm.sku_cap_mimo_disable) {
5897 0 : err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
5898 : IWM_FW_CTXT_ACTION_MODIFY, 0);
5899 0 : if (err) {
5900 0 : printf("%s: failed to update PHY\n", DEVNAME(sc));
5901 0 : return err;
5902 : }
5903 : }
5904 :
5905 0 : return 0;
5906 0 : }
5907 :
5908 : struct ieee80211_node *
5909 0 : iwm_node_alloc(struct ieee80211com *ic)
5910 : {
5911 0 : return malloc(sizeof (struct iwm_node), M_DEVBUF, M_NOWAIT | M_ZERO);
5912 : }
5913 :
5914 : void
5915 0 : iwm_calib_timeout(void *arg)
5916 : {
5917 0 : struct iwm_softc *sc = arg;
5918 0 : struct ieee80211com *ic = &sc->sc_ic;
5919 0 : struct iwm_node *in = (void *)ic->ic_bss;
5920 0 : struct ieee80211_node *ni = &in->in_ni;
5921 : int s, otxrate;
5922 :
5923 0 : s = splnet();
5924 0 : if ((ic->ic_fixed_rate == -1 || ic->ic_fixed_mcs == -1) &&
5925 0 : ((ni->ni_flags & IEEE80211_NODE_HT) == 0) &&
5926 0 : ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
5927 0 : otxrate = ni->ni_txrate;
5928 0 : ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
5929 : /*
5930 : * If AMRR has chosen a new TX rate we must update
5931 : 		 * the firmware's LQ rate table from process context.
5932 : */
5933 0 : if (otxrate != ni->ni_txrate)
5934 0 : iwm_add_task(sc, systq, &sc->setrates_task);
5935 : }
5936 0 : splx(s);
5937 :
5938 0 : timeout_add_msec(&sc->sc_calib_to, 500);
5939 0 : }
5940 :
5941 : void
5942 0 : iwm_setrates_task(void *arg)
5943 : {
5944 0 : struct iwm_softc *sc = arg;
5945 0 : struct ieee80211com *ic = &sc->sc_ic;
5946 0 : struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5947 0 : int s = splnet();
5948 :
5949 0 : if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
5950 0 : refcnt_rele_wake(&sc->task_refs);
5951 0 : splx(s);
5952 0 : return;
5953 : }
5954 :
5955 : /* Update rates table based on new TX rate determined by AMRR. */
5956 0 : iwm_setrates(in);
5957 0 : refcnt_rele_wake(&sc->task_refs);
5958 0 : splx(s);
5959 0 : }
5960 :
5961 : void
5962 0 : iwm_setrates(struct iwm_node *in)
5963 : {
5964 0 : struct ieee80211_node *ni = &in->in_ni;
5965 0 : struct ieee80211com *ic = ni->ni_ic;
5966 0 : struct iwm_softc *sc = IC2IFP(ic)->if_softc;
5967 0 : struct iwm_lq_cmd *lq = &in->in_lq;
5968 0 : struct ieee80211_rateset *rs = &ni->ni_rates;
5969 : int i, ridx, ridx_min, ridx_max, j, sgi_ok, mimo, tab = 0;
5970 0 : struct iwm_host_cmd cmd = {
5971 : .id = IWM_LQ_CMD,
5972 : .len = { sizeof(in->in_lq), },
5973 : };
5974 :
5975 0 : memset(lq, 0, sizeof(*lq));
5976 0 : lq->sta_id = IWM_STATION_ID;
5977 :
5978 0 : if (ic->ic_flags & IEEE80211_F_USEPROT)
5979 0 : lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
5980 :
5981 0 : sgi_ok = ((ni->ni_flags & IEEE80211_NODE_HT) &&
5982 0 : (ni->ni_htcaps & IEEE80211_HTCAP_SGI20));
5983 :
5984 : /*
5985 : * Fill the LQ rate selection table with legacy and/or HT rates
5986 : * in descending order, i.e. with the node's current TX rate first.
5987 : * In cases where throughput of an HT rate corresponds to a legacy
5988 : * rate it makes no sense to add both. We rely on the fact that
5989 : * iwm_rates is laid out such that equivalent HT/legacy rates share
5990 : * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
5991 : * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
5992 : */
5993 : j = 0;
5994 0 : ridx_min = iwm_rval2ridx(ieee80211_min_basic_rate(ic));
5995 0 : mimo = iwm_is_mimo_mcs(ni->ni_txmcs);
5996 0 : ridx_max = (mimo ? IWM_RIDX_MAX : IWM_LAST_HT_SISO_RATE);
5997 0 : for (ridx = ridx_max; ridx >= ridx_min; ridx--) {
5998 0 : uint8_t plcp = iwm_rates[ridx].plcp;
5999 0 : uint8_t ht_plcp = iwm_rates[ridx].ht_plcp;
6000 :
6001 0 : if (j >= nitems(lq->rs_table))
6002 0 : break;
6003 : tab = 0;
6004 0 : if (ni->ni_flags & IEEE80211_NODE_HT) {
6005 0 : if (ht_plcp == IWM_RATE_HT_SISO_MCS_INV_PLCP)
6006 0 : continue;
6007 : /* Do not mix SISO and MIMO HT rates. */
6008 0 : if ((mimo && !iwm_is_mimo_ht_plcp(ht_plcp)) ||
6009 0 : (!mimo && iwm_is_mimo_ht_plcp(ht_plcp)))
6010 0 : continue;
6011 0 : for (i = ni->ni_txmcs; i >= 0; i--) {
6012 0 : if (isclr(ni->ni_rxmcs, i))
6013 : continue;
6014 0 : if (ridx == iwm_mcs2ridx[i]) {
6015 : tab = ht_plcp;
6016 0 : tab |= IWM_RATE_MCS_HT_MSK;
6017 0 : if (sgi_ok)
6018 0 : tab |= IWM_RATE_MCS_SGI_MSK;
6019 : break;
6020 : }
6021 : }
6022 0 : } else if (plcp != IWM_RATE_INVM_PLCP) {
6023 0 : for (i = ni->ni_txrate; i >= 0; i--) {
6024 0 : if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
6025 : IEEE80211_RATE_VAL)) {
6026 : tab = plcp;
6027 0 : break;
6028 : }
6029 : }
6030 : }
6031 :
6032 0 : if (tab == 0)
6033 0 : continue;
6034 :
6035 0 : if (iwm_is_mimo_ht_plcp(ht_plcp))
6036 0 : tab |= IWM_RATE_MCS_ANT_AB_MSK;
6037 : else
6038 0 : tab |= IWM_RATE_MCS_ANT_A_MSK;
6039 :
6040 0 : if (IWM_RIDX_IS_CCK(ridx))
6041 0 : tab |= IWM_RATE_MCS_CCK_MSK;
6042 0 : lq->rs_table[j++] = htole32(tab);
6043 0 : }
6044 :
6045 0 : lq->mimo_delim = (mimo ? j : 0);
6046 :
6047 : /* Fill the rest with the lowest possible rate */
6048 0 : while (j < nitems(lq->rs_table)) {
6049 0 : tab = iwm_rates[ridx_min].plcp;
6050 0 : if (IWM_RIDX_IS_CCK(ridx_min))
6051 0 : tab |= IWM_RATE_MCS_CCK_MSK;
6052 0 : tab |= IWM_RATE_MCS_ANT_A_MSK;
6053 0 : lq->rs_table[j++] = htole32(tab);
6054 : }
6055 :
6056 0 : lq->single_stream_ant_msk = IWM_ANT_A;
6057 0 : lq->dual_stream_ant_msk = IWM_ANT_AB;
6058 :
6059 0 : lq->agg_time_limit = htole16(4000); /* 4ms */
6060 0 : lq->agg_disable_start_th = 3;
6061 : #ifdef notyet
6062 : lq->agg_frame_cnt_limit = 0x3f;
6063 : #else
6064 0 : lq->agg_frame_cnt_limit = 1; /* tx agg disabled */
6065 : #endif
6066 :
6067 0 : cmd.data[0] = &in->in_lq;
6068 0 : iwm_send_cmd(sc, &cmd);
6069 0 : }
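/*
 * Worked example (a sketch, assuming a non-HT node currently at 24 Mbps
 * whose lowest basic rate is 6 Mbps): the descending loop above fills
 * the table with the OFDM rates 24, 18, 12, 9 and 6 Mbps present in the
 * node's rate set, and the remaining slots are padded with the 6 Mbps
 * entry.
 */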
6070 :
6071 : int
6072 0 : iwm_media_change(struct ifnet *ifp)
6073 : {
6074 0 : struct iwm_softc *sc = ifp->if_softc;
6075 0 : struct ieee80211com *ic = &sc->sc_ic;
6076 : uint8_t rate, ridx;
6077 : int err;
6078 :
6079 0 : err = ieee80211_media_change(ifp);
6080 0 : if (err != ENETRESET)
6081 0 : return err;
6082 :
6083 0 : if (ic->ic_fixed_mcs != -1)
6084 0 : sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
6085 0 : else if (ic->ic_fixed_rate != -1) {
6086 0 : rate = ic->ic_sup_rates[ic->ic_curmode].
6087 0 : rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
6088 : /* Map 802.11 rate to HW rate index. */
6089 0 : for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
6090 0 : if (iwm_rates[ridx].rate == rate)
6091 : break;
6092 0 : sc->sc_fixed_ridx = ridx;
6093 0 : }
6094 :
6095 0 : if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6096 : (IFF_UP | IFF_RUNNING)) {
6097 0 : iwm_stop(ifp);
6098 0 : err = iwm_init(ifp);
6099 0 : }
6100 0 : return err;
6101 0 : }
6102 :
6103 : void
6104 0 : iwm_newstate_task(void *psc)
6105 : {
6106 0 : struct iwm_softc *sc = (struct iwm_softc *)psc;
6107 0 : struct ieee80211com *ic = &sc->sc_ic;
6108 0 : enum ieee80211_state nstate = sc->ns_nstate;
6109 0 : enum ieee80211_state ostate = ic->ic_state;
6110 0 : int arg = sc->ns_arg;
6111 0 : int err = 0, s = splnet();
6112 :
6113 0 : if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
6114 : /* iwm_stop() is waiting for us. */
6115 0 : refcnt_rele_wake(&sc->task_refs);
6116 0 : splx(s);
6117 0 : return;
6118 : }
6119 :
6120 0 : if (ostate == IEEE80211_S_SCAN) {
6121 0 : if (nstate == ostate) {
6122 0 : if (sc->sc_flags & IWM_FLAG_SCANNING) {
6123 0 : refcnt_rele_wake(&sc->task_refs);
6124 0 : splx(s);
6125 0 : return;
6126 : }
6127 : /* Firmware is no longer scanning. Do another scan. */
6128 : goto next_scan;
6129 : } else
6130 0 : iwm_led_blink_stop(sc);
6131 0 : }
6132 :
6133 0 : if (nstate <= ostate) {
6134 0 : switch (ostate) {
6135 : case IEEE80211_S_RUN:
6136 0 : err = iwm_run_stop(sc);
6137 0 : if (err)
6138 : goto out;
6139 : /* FALLTHROUGH */
6140 : case IEEE80211_S_ASSOC:
6141 0 : if (nstate <= IEEE80211_S_ASSOC) {
6142 0 : err = iwm_disassoc(sc);
6143 0 : if (err)
6144 : goto out;
6145 : }
6146 : /* FALLTHROUGH */
6147 : case IEEE80211_S_AUTH:
6148 0 : if (nstate <= IEEE80211_S_AUTH) {
6149 0 : err = iwm_deauth(sc);
6150 0 : if (err)
6151 : goto out;
6152 : }
6153 : /* FALLTHROUGH */
6154 : case IEEE80211_S_SCAN:
6155 : case IEEE80211_S_INIT:
6156 : break;
6157 : }
6158 :
6159 : /* Die now if iwm_stop() was called while we were sleeping. */
6160 0 : if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
6161 0 : refcnt_rele_wake(&sc->task_refs);
6162 0 : splx(s);
6163 0 : return;
6164 : }
6165 : }
6166 :
6167 0 : switch (nstate) {
6168 : case IEEE80211_S_INIT:
6169 : break;
6170 :
6171 : case IEEE80211_S_SCAN:
6172 : next_scan:
6173 0 : err = iwm_scan(sc);
6174 0 : if (err)
6175 : break;
6176 0 : refcnt_rele_wake(&sc->task_refs);
6177 0 : splx(s);
6178 0 : return;
6179 :
6180 : case IEEE80211_S_AUTH:
6181 0 : err = iwm_auth(sc);
6182 0 : break;
6183 :
6184 : case IEEE80211_S_ASSOC:
6185 0 : err = iwm_assoc(sc);
6186 0 : break;
6187 :
6188 : case IEEE80211_S_RUN:
6189 0 : err = iwm_run(sc);
6190 0 : break;
6191 : }
6192 :
6193 : out:
6194 0 : if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
6195 0 : if (err)
6196 0 : task_add(systq, &sc->init_task);
6197 : else
6198 0 : sc->sc_newstate(ic, nstate, arg);
6199 : }
6200 0 : refcnt_rele_wake(&sc->task_refs);
6201 0 : splx(s);
6202 0 : }
6203 :
6204 : int
6205 0 : iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
6206 : {
6207 0 : struct ifnet *ifp = IC2IFP(ic);
6208 0 : struct iwm_softc *sc = ifp->if_softc;
6209 0 : struct iwm_node *in = (void *)ic->ic_bss;
6210 :
6211 0 : if (ic->ic_state == IEEE80211_S_RUN) {
6212 0 : timeout_del(&sc->sc_calib_to);
6213 0 : ieee80211_mira_cancel_timeouts(&in->in_mn);
6214 0 : iwm_del_task(sc, systq, &sc->setrates_task);
6215 0 : iwm_del_task(sc, systq, &sc->ba_task);
6216 0 : iwm_del_task(sc, systq, &sc->htprot_task);
6217 0 : }
6218 :
6219 0 : sc->ns_nstate = nstate;
6220 0 : sc->ns_arg = arg;
6221 :
6222 0 : iwm_add_task(sc, sc->sc_nswq, &sc->newstate_task);
6223 :
6224 0 : return 0;
6225 : }
6226 :
6227 : void
6228 0 : iwm_endscan(struct iwm_softc *sc)
6229 : {
6230 0 : struct ieee80211com *ic = &sc->sc_ic;
6231 :
6232 0 : if ((sc->sc_flags & (IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN)) == 0)
6233 0 : return;
6234 :
6235 0 : sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
6236 0 : ieee80211_end_scan(&ic->ic_if);
6237 0 : }
6238 :
6239 : /*
6240 : * Aging and idle timeouts for the different possible scenarios
6241 : * in default configuration
6242 :  * in default configuration.
6243 : static const uint32_t
6244 : iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
6245 : {
6246 : htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
6247 : htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
6248 : },
6249 : {
6250 : htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
6251 : htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
6252 : },
6253 : {
6254 : htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
6255 : htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
6256 : },
6257 : {
6258 : htole32(IWM_SF_BA_AGING_TIMER_DEF),
6259 : htole32(IWM_SF_BA_IDLE_TIMER_DEF)
6260 : },
6261 : {
6262 : htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
6263 : htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
6264 : },
6265 : };
6266 :
6267 : /*
6268 : * Aging and idle timeouts for the different possible scenarios
6269 : * in single BSS MAC configuration.
6270 : */
6271 : static const uint32_t
6272 : iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
6273 : {
6274 : htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
6275 : htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
6276 : },
6277 : {
6278 : htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
6279 : htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
6280 : },
6281 : {
6282 : htole32(IWM_SF_MCAST_AGING_TIMER),
6283 : htole32(IWM_SF_MCAST_IDLE_TIMER)
6284 : },
6285 : {
6286 : htole32(IWM_SF_BA_AGING_TIMER),
6287 : htole32(IWM_SF_BA_IDLE_TIMER)
6288 : },
6289 : {
6290 : htole32(IWM_SF_TX_RE_AGING_TIMER),
6291 : htole32(IWM_SF_TX_RE_IDLE_TIMER)
6292 : },
6293 : };
6294 :
6295 : void
6296 0 : iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
6297 : struct ieee80211_node *ni)
6298 : {
6299 : int i, j, watermark;
6300 :
6301 0 : sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
6302 :
6303 : /*
6304 : 	 * If we are in the association flow, check the antenna configuration
6305 : 	 * capabilities of the AP station and choose the watermark accordingly.
6306 : */
6307 0 : if (ni) {
6308 0 : if (ni->ni_flags & IEEE80211_NODE_HT) {
6309 0 : if (ni->ni_rxmcs[1] != 0)
6310 0 : watermark = IWM_SF_W_MARK_MIMO2;
6311 : else
6312 : watermark = IWM_SF_W_MARK_SISO;
6313 : } else {
6314 : watermark = IWM_SF_W_MARK_LEGACY;
6315 : }
6316 : /* default watermark value for unassociated mode. */
6317 : } else {
6318 : watermark = IWM_SF_W_MARK_MIMO2;
6319 : }
6320 0 : sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
6321 :
6322 0 : for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
6323 0 : for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
6324 0 : sf_cmd->long_delay_timeouts[i][j] =
6325 : htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
6326 : }
6327 : }
6328 :
6329 0 : if (ni) {
6330 0 : memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
6331 : sizeof(iwm_sf_full_timeout));
6332 0 : } else {
6333 0 : memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
6334 : sizeof(iwm_sf_full_timeout_def));
6335 : }
6336 :
6337 0 : }
6338 :
6339 : int
6340 0 : iwm_sf_config(struct iwm_softc *sc, int new_state)
6341 : {
6342 0 : struct ieee80211com *ic = &sc->sc_ic;
6343 0 : struct iwm_sf_cfg_cmd sf_cmd = {
6344 : .state = htole32(IWM_SF_FULL_ON),
6345 : };
6346 : int err = 0;
6347 :
6348 0 : if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6349 0 : sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
6350 :
6351 0 : switch (new_state) {
6352 : case IWM_SF_UNINIT:
6353 : case IWM_SF_INIT_OFF:
6354 0 : iwm_fill_sf_command(sc, &sf_cmd, NULL);
6355 0 : break;
6356 : case IWM_SF_FULL_ON:
6357 0 : iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
6358 0 : break;
6359 : default:
6360 0 : return EINVAL;
6361 : }
6362 :
6363 0 : err = iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
6364 : sizeof(sf_cmd), &sf_cmd);
6365 0 : return err;
6366 0 : }
6367 :
6368 : int
6369 0 : iwm_send_bt_init_conf(struct iwm_softc *sc)
6370 : {
6371 0 : struct iwm_bt_coex_cmd bt_cmd;
6372 :
6373 0 : bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
6374 0 : bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
6375 :
6376 0 : return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
6377 : &bt_cmd);
6378 0 : }
6379 :
6380 : int
6381 0 : iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
6382 : {
6383 0 : struct iwm_mcc_update_cmd mcc_cmd;
6384 0 : struct iwm_host_cmd hcmd = {
6385 : .id = IWM_MCC_UPDATE_CMD,
6386 : .flags = IWM_CMD_WANT_RESP,
6387 0 : .data = { &mcc_cmd },
6388 : };
6389 : int err;
6390 0 : int resp_v2 = isset(sc->sc_enabled_capa,
6391 : IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
6392 :
6393 0 : memset(&mcc_cmd, 0, sizeof(mcc_cmd));
6394 0 : mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
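	/* e.g. the world-wide domain "ZZ" used during init encodes as 0x5a5a */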
6395 0 : if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
6396 0 : isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
6397 0 : mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
6398 : else
6399 0 : mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
6400 :
6401 0 : if (resp_v2) {
6402 0 : hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
6403 0 : hcmd.resp_pkt_len = sizeof(struct iwm_rx_packet) +
6404 : sizeof(struct iwm_mcc_update_resp);
6405 0 : } else {
6406 0 : hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
6407 0 : hcmd.resp_pkt_len = sizeof(struct iwm_rx_packet) +
6408 : sizeof(struct iwm_mcc_update_resp_v1);
6409 : }
6410 :
6411 0 : err = iwm_send_cmd(sc, &hcmd);
6412 0 : if (err)
6413 0 : return err;
6414 :
6415 0 : iwm_free_resp(sc, &hcmd);
6416 :
6417 0 : return 0;
6418 0 : }
6419 :
6420 : void
6421 0 : iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
6422 : {
6423 0 : struct iwm_host_cmd cmd = {
6424 : .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
6425 0 : .len = { sizeof(uint32_t), },
6426 0 : .data = { &backoff, },
6427 : };
6428 :
6429 0 : iwm_send_cmd(sc, &cmd);
6430 0 : }
6431 :
6432 : int
6433 0 : iwm_init_hw(struct iwm_softc *sc)
6434 : {
6435 0 : struct ieee80211com *ic = &sc->sc_ic;
6436 : int err, i, ac;
6437 :
6438 0 : err = iwm_preinit(sc);
6439 0 : if (err)
6440 0 : return err;
6441 :
6442 0 : err = iwm_start_hw(sc);
6443 0 : if (err) {
6444 0 : printf("%s: could not initialize hardware\n", DEVNAME(sc));
6445 0 : return err;
6446 : }
6447 :
6448 0 : err = iwm_run_init_mvm_ucode(sc, 0);
6449 0 : if (err)
6450 0 : return err;
6451 :
6452 : /* Should stop and start HW since INIT image just loaded. */
6453 0 : iwm_stop_device(sc);
6454 0 : err = iwm_start_hw(sc);
6455 0 : if (err) {
6456 0 : printf("%s: could not initialize hardware\n", DEVNAME(sc));
6457 0 : return err;
6458 : }
6459 :
6460 : /* Restart, this time with the regular firmware */
6461 0 : err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
6462 0 : if (err) {
6463 0 : printf("%s: could not load firmware\n", DEVNAME(sc));
6464 0 : goto err;
6465 : }
6466 :
6467 0 : if (!iwm_nic_lock(sc))
6468 0 : return EBUSY;
6469 :
6470 0 : err = iwm_send_bt_init_conf(sc);
6471 0 : if (err) {
6472 0 : printf("%s: could not init bt coex (error %d)\n",
6473 0 : DEVNAME(sc), err);
6474 0 : goto err;
6475 : }
6476 :
6477 0 : err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
6478 0 : if (err) {
6479 0 : printf("%s: could not init tx ant config (error %d)\n",
6480 0 : DEVNAME(sc), err);
6481 0 : goto err;
6482 : }
6483 :
6484 0 : err = iwm_send_phy_db_data(sc);
6485 0 : if (err) {
6486 0 : printf("%s: could not init phy db (error %d)\n",
6487 0 : DEVNAME(sc), err);
6488 0 : goto err;
6489 : }
6490 :
6491 0 : err = iwm_send_phy_cfg_cmd(sc);
6492 0 : if (err) {
6493 0 : printf("%s: could not send phy config (error %d)\n",
6494 0 : DEVNAME(sc), err);
6495 0 : goto err;
6496 : }
6497 :
6498 : /* Add auxiliary station for scanning */
6499 0 : err = iwm_add_aux_sta(sc);
6500 0 : if (err) {
6501 0 : printf("%s: could not add aux station (error %d)\n",
6502 0 : DEVNAME(sc), err);
6503 0 : goto err;
6504 : }
6505 :
6506 0 : for (i = 0; i < 1; i++) {
6507 : /*
6508 : * The channel used here isn't relevant as it's
6509 : * going to be overwritten in the other flows.
6510 : * For now use the first channel we have.
6511 : */
6512 0 : sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
6513 0 : err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
6514 : IWM_FW_CTXT_ACTION_ADD, 0);
6515 0 : if (err) {
6516 0 : printf("%s: could not add phy context %d (error %d)\n",
6517 0 : DEVNAME(sc), i, err);
6518 0 : goto err;
6519 : }
6520 : }
6521 :
6522 : /* Initialize tx backoffs to the minimum. */
6523 0 : if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6524 0 : iwm_tt_tx_backoff(sc, 0);
6525 :
6526 0 : err = iwm_power_update_device(sc);
6527 0 : if (err) {
6528 0 : printf("%s: could not send power command (error %d)\n",
6529 0 : DEVNAME(sc), err);
6530 0 : goto err;
6531 : }
6532 :
6533 0 : if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
6534 0 : err = iwm_send_update_mcc_cmd(sc, "ZZ");
6535 0 : if (err) {
6536 0 : printf("%s: could not init LAR (error %d)\n",
6537 0 : DEVNAME(sc), err);
6538 0 : goto err;
6539 : }
6540 : }
6541 :
6542 0 : if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
6543 0 : err = iwm_config_umac_scan(sc);
6544 0 : if (err) {
6545 0 : printf("%s: could not configure scan (error %d)\n",
6546 0 : DEVNAME(sc), err);
6547 0 : goto err;
6548 : }
6549 : }
6550 :
6551 0 : for (ac = 0; ac < EDCA_NUM_AC; ac++) {
6552 0 : err = iwm_enable_txq(sc, IWM_STATION_ID, ac,
6553 0 : iwm_ac_to_tx_fifo[ac]);
6554 0 : if (err) {
6555 0 : printf("%s: could not enable Tx queue %d (error %d)\n",
6556 0 : DEVNAME(sc), ac, err);
6557 0 : goto err;
6558 : }
6559 : }
6560 :
6561 0 : err = iwm_disable_beacon_filter(sc);
6562 0 : if (err) {
6563 0 : printf("%s: could not disable beacon filter (error %d)\n",
6564 0 : DEVNAME(sc), err);
6565 0 : goto err;
6566 : }
6567 :
6568 : err:
6569 0 : iwm_nic_unlock(sc);
6570 0 : return err;
6571 0 : }
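/*
 * Summary of the bring-up sequence performed by iwm_init_hw() above:
 * start the hardware, run the INIT ucode, restart with the regular
 * image, then configure BT coex, Tx antennas, the PHY DB and PHY
 * config, add the auxiliary (scan) station and a PHY context, set the
 * thermal Tx backoff and device power, optionally send the MCC/LAR
 * update and UMAC scan config, enable one Tx queue per EDCA AC, and
 * finally disable the beacon filter.
 */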
6572 :
6573 : /* Allow multicast from our BSSID. */
6574 : int
6575 0 : iwm_allow_mcast(struct iwm_softc *sc)
6576 : {
6577 0 : struct ieee80211com *ic = &sc->sc_ic;
6578 0 : struct ieee80211_node *ni = ic->ic_bss;
6579 : struct iwm_mcast_filter_cmd *cmd;
6580 : size_t size;
6581 : int err;
6582 :
6583 : size = roundup(sizeof(*cmd), 4);
6584 0 : cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
6585 0 : if (cmd == NULL)
6586 0 : return ENOMEM;
6587 0 : cmd->filter_own = 1;
6588 0 : cmd->port_id = 0;
6589 0 : cmd->count = 0;
6590 0 : cmd->pass_all = 1;
6591 0 : IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
6592 :
6593 0 : err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
6594 : 0, size, cmd);
6595 0 : free(cmd, M_DEVBUF, size);
6596 0 : return err;
6597 0 : }
6598 :
6599 : int
6600 0 : iwm_init(struct ifnet *ifp)
6601 : {
6602 0 : struct iwm_softc *sc = ifp->if_softc;
6603 0 : struct ieee80211com *ic = &sc->sc_ic;
6604 : int err, generation;
6605 :
6606 0 : rw_assert_wrlock(&sc->ioctl_rwl);
6607 :
6608 0 : generation = ++sc->sc_generation;
6609 :
6610 0 : KASSERT(sc->task_refs.refs == 0);
6611 0 : refcnt_init(&sc->task_refs);
6612 :
6613 0 : err = iwm_init_hw(sc);
6614 0 : if (err) {
6615 0 : if (generation == sc->sc_generation)
6616 0 : iwm_stop(ifp);
6617 0 : return err;
6618 : }
6619 :
6620 0 : ifq_clr_oactive(&ifp->if_snd);
6621 0 : ifp->if_flags |= IFF_RUNNING;
6622 :
6623 0 : if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6624 0 : ic->ic_bss->ni_chan = ic->ic_ibss_chan;
6625 0 : ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
6626 0 : return 0;
6627 : }
6628 :
6629 0 : ieee80211_begin_scan(ifp);
6630 :
6631 : /*
6632 : * ieee80211_begin_scan() ends up scheduling iwm_newstate_task().
6633 : * Wait until the transition to SCAN state has completed.
6634 : */
6635 0 : do {
6636 0 : err = tsleep(&ic->ic_state, PCATCH, "iwminit", hz);
6637 0 : if (generation != sc->sc_generation)
6638 0 : return ENXIO;
6639 0 : if (err)
6640 0 : return err;
6641 0 : } while (ic->ic_state != IEEE80211_S_SCAN);
6642 :
6643 0 : return 0;
6644 0 : }
6645 :
6646 : void
6647 0 : iwm_start(struct ifnet *ifp)
6648 : {
6649 0 : struct iwm_softc *sc = ifp->if_softc;
6650 0 : struct ieee80211com *ic = &sc->sc_ic;
6651 0 : struct ieee80211_node *ni;
6652 : struct ether_header *eh;
6653 : struct mbuf *m;
6654 : int ac = EDCA_AC_BE; /* XXX */
6655 :
6656 0 : if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
6657 0 : return;
6658 :
6659 0 : for (;;) {
6660 : /* why isn't this done per-queue? */
6661 0 : if (sc->qfullmsk != 0) {
6662 0 : ifq_set_oactive(&ifp->if_snd);
6663 0 : break;
6664 : }
6665 :
6666 : /* need to send management frames even if we're not RUNning */
6667 0 : m = mq_dequeue(&ic->ic_mgtq);
6668 0 : if (m) {
6669 0 : ni = m->m_pkthdr.ph_cookie;
6670 0 : goto sendit;
6671 : }
6672 :
6673 0 : if (ic->ic_state != IEEE80211_S_RUN ||
6674 0 : (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
6675 : break;
6676 :
6677 0 : IFQ_DEQUEUE(&ifp->if_snd, m);
6678 0 : if (!m)
6679 : break;
6680 0 : if (m->m_len < sizeof (*eh) &&
6681 0 : (m = m_pullup(m, sizeof (*eh))) == NULL) {
6682 0 : ifp->if_oerrors++;
6683 0 : continue;
6684 : }
6685 : #if NBPFILTER > 0
6686 0 : if (ifp->if_bpf != NULL)
6687 0 : bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
6688 : #endif
6689 0 : if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
6690 0 : ifp->if_oerrors++;
6691 0 : continue;
6692 : }
6693 :
6694 : sendit:
6695 : #if NBPFILTER > 0
6696 0 : if (ic->ic_rawbpf != NULL)
6697 0 : bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
6698 : #endif
6699 0 : if (iwm_tx(sc, m, ni, ac) != 0) {
6700 0 : ieee80211_release_node(ic, ni);
6701 0 : ifp->if_oerrors++;
6702 0 : continue;
6703 : }
6704 :
6705 0 : if (ifp->if_flags & IFF_UP) {
6706 0 : sc->sc_tx_timer = 15;
6707 0 : ifp->if_timer = 1;
6708 0 : }
6709 : }
6710 :
6711 0 : return;
6712 0 : }
6713 :
6714 : void
6715 0 : iwm_stop(struct ifnet *ifp)
6716 : {
6717 0 : struct iwm_softc *sc = ifp->if_softc;
6718 0 : struct ieee80211com *ic = &sc->sc_ic;
6719 0 : struct iwm_node *in = (void *)ic->ic_bss;
6720 0 : int i, s = splnet();
6721 :
6722 0 : rw_assert_wrlock(&sc->ioctl_rwl);
6723 :
6724 0 : sc->sc_flags |= IWM_FLAG_SHUTDOWN; /* Disallow new tasks. */
6725 :
6726 : /* Cancel scheduled tasks and let any stale tasks finish up. */
6727 0 : task_del(systq, &sc->init_task);
6728 0 : iwm_del_task(sc, sc->sc_nswq, &sc->newstate_task);
6729 0 : iwm_del_task(sc, systq, &sc->setrates_task);
6730 0 : iwm_del_task(sc, systq, &sc->ba_task);
6731 0 : iwm_del_task(sc, systq, &sc->htprot_task);
6732 0 : KASSERT(sc->task_refs.refs >= 1);
6733 0 : refcnt_finalize(&sc->task_refs, "iwmstop");
6734 :
6735 0 : iwm_stop_device(sc);
6736 :
6737 : /* Reset soft state. */
6738 :
6739 0 : sc->sc_generation++;
6740 0 : for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
6741 0 : free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
6742 0 : sc->sc_cmd_resp_pkt[i] = NULL;
6743 0 : sc->sc_cmd_resp_len[i] = 0;
6744 : }
6745 0 : ifp->if_flags &= ~IFF_RUNNING;
6746 0 : ifq_clr_oactive(&ifp->if_snd);
6747 :
6748 0 : in->in_phyctxt = NULL;
6749 0 : if (ic->ic_state == IEEE80211_S_RUN)
6750 0 : ieee80211_mira_cancel_timeouts(&in->in_mn); /* XXX refcount? */
6751 :
6752 0 : sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
6753 0 : sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
6754 0 : sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
6755 0 : sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
6756 0 : sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
6757 0 : sc->sc_flags &= ~IWM_FLAG_HW_ERR;
6758 0 : sc->sc_flags &= ~IWM_FLAG_SHUTDOWN;
6759 :
6760 0 : sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
6761 :
6762 0 : timeout_del(&sc->sc_calib_to); /* XXX refcount? */
6763 0 : iwm_led_blink_stop(sc);
6764 0 : ifp->if_timer = sc->sc_tx_timer = 0;
6765 :
6766 0 : splx(s);
6767 0 : }
6768 :
6769 : void
6770 0 : iwm_watchdog(struct ifnet *ifp)
6771 : {
6772 0 : struct iwm_softc *sc = ifp->if_softc;
6773 :
6774 0 : ifp->if_timer = 0;
6775 0 : if (sc->sc_tx_timer > 0) {
6776 0 : if (--sc->sc_tx_timer == 0) {
6777 0 : printf("%s: device timeout\n", DEVNAME(sc));
6778 : #ifdef IWM_DEBUG
6779 : iwm_nic_error(sc);
6780 : #endif
6781 0 : if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
6782 0 : task_add(systq, &sc->init_task);
6783 0 : ifp->if_oerrors++;
6784 0 : return;
6785 : }
6786 0 : ifp->if_timer = 1;
6787 0 : }
6788 :
6789 0 : ieee80211_watchdog(ifp);
6790 0 : }
6791 :
6792 : int
6793 0 : iwm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
6794 : {
6795 0 : struct iwm_softc *sc = ifp->if_softc;
6796 0 : int s, err = 0, generation = sc->sc_generation;
6797 :
6798 : /*
6799 : * Prevent processes from entering this function while another
6800 : * process is tsleep'ing in it.
6801 : */
6802 0 : err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
6803 0 : if (err == 0 && generation != sc->sc_generation) {
6804 0 : rw_exit(&sc->ioctl_rwl);
6805 0 : return ENXIO;
6806 : }
6807 0 : if (err)
6808 0 : return err;
6809 0 : s = splnet();
6810 :
6811 0 : switch (cmd) {
6812 : case SIOCSIFADDR:
6813 0 : ifp->if_flags |= IFF_UP;
6814 : /* FALLTHROUGH */
6815 : case SIOCSIFFLAGS:
6816 0 : if (ifp->if_flags & IFF_UP) {
6817 0 : if (!(ifp->if_flags & IFF_RUNNING)) {
6818 0 : err = iwm_init(ifp);
6819 0 : }
6820 : } else {
6821 0 : if (ifp->if_flags & IFF_RUNNING)
6822 0 : iwm_stop(ifp);
6823 : }
6824 : break;
6825 :
6826 : default:
6827 0 : err = ieee80211_ioctl(ifp, cmd, data);
6828 0 : }
6829 :
6830 0 : if (err == ENETRESET) {
6831 : err = 0;
6832 0 : if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6833 : (IFF_UP | IFF_RUNNING)) {
6834 0 : iwm_stop(ifp);
6835 0 : err = iwm_init(ifp);
6836 0 : }
6837 : }
6838 :
6839 0 : splx(s);
6840 0 : rw_exit(&sc->ioctl_rwl);
6841 :
6842 0 : return err;
6843 0 : }
6844 :
6845 : #ifdef IWM_DEBUG
6846 : /*
6847 : * Note: This structure is read from the device with IO accesses,
6848 : * and the reading already does the endian conversion. As it is
6849 : * read with uint32_t-sized accesses, any members with a different size
6850 : * need to be ordered correctly though!
6851 : */
6852 : struct iwm_error_event_table {
6853 : uint32_t valid; /* (nonzero) valid, (0) log is empty */
6854 : uint32_t error_id; /* type of error */
6855 : uint32_t trm_hw_status0; /* TRM HW status */
6856 : uint32_t trm_hw_status1; /* TRM HW status */
6857 : uint32_t blink2; /* branch link */
6858 : uint32_t ilink1; /* interrupt link */
6859 : uint32_t ilink2; /* interrupt link */
6860 : uint32_t data1; /* error-specific data */
6861 : uint32_t data2; /* error-specific data */
6862 : uint32_t data3; /* error-specific data */
6863 : uint32_t bcon_time; /* beacon timer */
6864 : uint32_t tsf_low; /* network timestamp function timer */
6865 : uint32_t tsf_hi; /* network timestamp function timer */
6866 : uint32_t gp1; /* GP1 timer register */
6867 : uint32_t gp2; /* GP2 timer register */
6868 : uint32_t fw_rev_type; /* firmware revision type */
6869 : uint32_t major; /* uCode version major */
6870 : uint32_t minor; /* uCode version minor */
6871 : uint32_t hw_ver; /* HW Silicon version */
6872 : uint32_t brd_ver; /* HW board version */
6873 : uint32_t log_pc; /* log program counter */
6874 : uint32_t frame_ptr; /* frame pointer */
6875 : uint32_t stack_ptr; /* stack pointer */
6876 : uint32_t hcmd; /* last host command header */
6877 : uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
6878 : * rxtx_flag */
6879 : uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
6880 : * host_flag */
6881 : uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
6882 : * enc_flag */
6883 : uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
6884 : * time_flag */
6885 : uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
6886 : * wico interrupt */
6887 : uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
6888 : uint32_t wait_event; /* wait event() caller address */
6889 : uint32_t l2p_control; /* L2pControlField */
6890 : uint32_t l2p_duration; /* L2pDurationField */
6891 : uint32_t l2p_mhvalid; /* L2pMhValidBits */
6892 : uint32_t l2p_addr_match; /* L2pAddrMatchStat */
6893 : uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
6894 : * (LMPM_PMG_SEL) */
6895 : 	uint32_t u_timestamp;	/* date and time of the
6896 : 					 * compilation */
6897 : uint32_t flow_handler; /* FH read/write pointers, RX credit */
6898 : } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
6899 :
6900 : /*
6901 : * UMAC error struct - relevant starting from family 8000 chip.
6902 : * Note: This structure is read from the device with IO accesses,
6903 : * and the reading already does the endian conversion. As it is
6904 : * read with u32-sized accesses, any members with a different size
6905 : * need to be ordered correctly though!
6906 : */
6907 : struct iwm_umac_error_event_table {
6908 : uint32_t valid; /* (nonzero) valid, (0) log is empty */
6909 : uint32_t error_id; /* type of error */
6910 : uint32_t blink1; /* branch link */
6911 : uint32_t blink2; /* branch link */
6912 : uint32_t ilink1; /* interrupt link */
6913 : uint32_t ilink2; /* interrupt link */
6914 : uint32_t data1; /* error-specific data */
6915 : uint32_t data2; /* error-specific data */
6916 : uint32_t data3; /* error-specific data */
6917 : uint32_t umac_major;
6918 : uint32_t umac_minor;
6919 : 	uint32_t frame_pointer;	/* core register 27 */
6920 : uint32_t stack_pointer; /* core register 28 */
6921 : uint32_t cmd_header; /* latest host cmd sent to UMAC */
6922 : uint32_t nic_isr_pref; /* ISR status register */
6923 : } __packed;
6924 :
6925 : #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
6926 : #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
6927 :
6928 : void
6929 : iwm_nic_umac_error(struct iwm_softc *sc)
6930 : {
6931 : struct iwm_umac_error_event_table table;
6932 : uint32_t base;
6933 :
6934 : base = sc->sc_uc.uc_umac_error_event_table;
6935 :
6936 : if (base < 0x800000) {
6937 : printf("%s: Invalid error log pointer 0x%08x\n",
6938 : DEVNAME(sc), base);
6939 : return;
6940 : }
6941 :
6942 : if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
6943 : printf("%s: reading errlog failed\n", DEVNAME(sc));
6944 : return;
6945 : }
6946 :
6947 : if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
6948 : printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
6949 : printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
6950 : sc->sc_flags, table.valid);
6951 : }
6952 :
6953 : printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
6954 : iwm_desc_lookup(table.error_id));
6955 : printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
6956 : printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
6957 : printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
6958 : printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
6959 : printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
6960 : printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
6961 : printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
6962 : printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
6963 : printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
6964 : printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
6965 : table.frame_pointer);
6966 : printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
6967 : table.stack_pointer);
6968 : printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
6969 : printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
6970 : table.nic_isr_pref);
6971 : }
6972 :
6973 : struct {
6974 : const char *name;
6975 : uint8_t num;
6976 : } advanced_lookup[] = {
6977 : { "NMI_INTERRUPT_WDG", 0x34 },
6978 : { "SYSASSERT", 0x35 },
6979 : { "UCODE_VERSION_MISMATCH", 0x37 },
6980 : { "BAD_COMMAND", 0x38 },
6981 : { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
6982 : { "FATAL_ERROR", 0x3D },
6983 : { "NMI_TRM_HW_ERR", 0x46 },
6984 : { "NMI_INTERRUPT_TRM", 0x4C },
6985 : { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
6986 : { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
6987 : { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
6988 : { "NMI_INTERRUPT_HOST", 0x66 },
6989 : { "NMI_INTERRUPT_ACTION_PT", 0x7C },
6990 : { "NMI_INTERRUPT_UNKNOWN", 0x84 },
6991 : { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
6992 : { "ADVANCED_SYSASSERT", 0 },
6993 : };
6994 :
6995 : const char *
6996 : iwm_desc_lookup(uint32_t num)
6997 : {
6998 : int i;
6999 :
7000 : for (i = 0; i < nitems(advanced_lookup) - 1; i++)
7001 : if (advanced_lookup[i].num == num)
7002 : return advanced_lookup[i].name;
7003 :
7004 : /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
7005 : return advanced_lookup[i].name;
7006 : }
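/*
 * Usage sketch (illustrative only): error ids reported by the firmware map
 * to the names in advanced_lookup[]; ids without an entry resolve to the
 * final "ADVANCED_SYSASSERT" string.
 */
#if 0
	const char *known = iwm_desc_lookup(0x38);		/* "BAD_COMMAND" */
	const char *unknown = iwm_desc_lookup(0xdeadbeef);	/* "ADVANCED_SYSASSERT" */
#endif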
7007 :
7008 : /*
7009 : * Support for dumping the error log seemed like a good idea ...
7010 : * but it's mostly hex junk and the only sensible thing is the
7011 : * hw/ucode revision (which we know anyway). Since it's here,
7012 : * I'll just leave it in, just in case e.g. the Intel guys want to
7013 : * help us decipher some "ADVANCED_SYSASSERT" later.
7014 : */
7015 : void
7016 : iwm_nic_error(struct iwm_softc *sc)
7017 : {
7018 : struct iwm_error_event_table table;
7019 : uint32_t base;
7020 :
7021 : printf("%s: dumping device error log\n", DEVNAME(sc));
7022 : base = sc->sc_uc.uc_error_event_table;
7023 : if (base < 0x800000) {
7024 : printf("%s: Invalid error log pointer 0x%08x\n",
7025 : DEVNAME(sc), base);
7026 : return;
7027 : }
7028 :
7029 : if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
7030 : printf("%s: reading errlog failed\n", DEVNAME(sc));
7031 : return;
7032 : }
7033 :
7034 : if (!table.valid) {
7035 : printf("%s: errlog not found, skipping\n", DEVNAME(sc));
7036 : return;
7037 : }
7038 :
7039 : if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
7040 : printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
7041 : printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
7042 : sc->sc_flags, table.valid);
7043 : }
7044 :
7045 : printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
7046 : iwm_desc_lookup(table.error_id));
7047 : printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
7048 : table.trm_hw_status0);
7049 : printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
7050 : table.trm_hw_status1);
7051 : printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
7052 : printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
7053 : printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
7054 : printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
7055 : printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
7056 : printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
7057 : printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
7058 : printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
7059 : printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
7060 : printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
7061 : printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
7062 : printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
7063 : table.fw_rev_type);
7064 : printf("%s: %08X | uCode version major\n", DEVNAME(sc),
7065 : table.major);
7066 : printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
7067 : table.minor);
7068 : printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
7069 : printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
7070 : printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
7071 : printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
7072 : printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
7073 : printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
7074 : printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
7075 : printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
7076 : printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
7077 : printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
7078 : printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
7079 : printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
7080 : printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
7081 : printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
7082 : printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
7083 : printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
7084 : printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
7085 :
7086 : if (sc->sc_uc.uc_umac_error_event_table)
7087 : iwm_nic_umac_error(sc);
7088 : }
7089 : #endif
7090 :
7091 : #define SYNC_RESP_STRUCT(_var_, _pkt_) \
7092 : do { \
7093 : bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)), \
7094 : sizeof(*(_var_)), BUS_DMASYNC_POSTREAD); \
7095 : _var_ = (void *)((_pkt_)+1); \
7096 : } while (/*CONSTCOND*/0)
7097 :
7098 : #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_) \
7099 : do { \
7100 : bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)), \
7101 : 	    (_len_), BUS_DMASYNC_POSTREAD);				\
7102 : _ptr_ = (void *)((_pkt_)+1); \
7103 : } while (/*CONSTCOND*/0)
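/*
 * Usage sketch (illustrative): a response payload follows the iwm_rx_packet
 * header in the DMA buffer, so after syncing that region the macros simply
 * point the typed pointer at (_pkt_)+1.  Both macros rely on the locals
 * 'sc' and 'data' being in scope at the expansion site, e.g.:
 */
#if 0
	struct iwm_alive_resp_v3 *resp3;
	SYNC_RESP_STRUCT(resp3, pkt);	/* resp3 -> payload after the header */
#endif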
7104 :
7105 : #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
7106 :
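/*
 * Layout of a single MPDU inside the RX buffer, as parsed by iwm_rx_mpdu()
 * below (byte_count is little-endian):
 *
 *   +----------------+------------------------+----------------+----------------+
 *   | iwm_rx_packet  | iwm_rx_mpdu_res_start  | 802.11 frame   | rx_pkt_status  |
 *   | header         | (carries byte_count)   | (byte_count    | (uint32_t)     |
 *   |                |                        |  bytes)        |                |
 *   +----------------+------------------------+----------------+----------------+
 */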
7107 : void
7108 0 : iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, size_t maxlen)
7109 : {
7110 0 : struct ieee80211com *ic = &sc->sc_ic;
7111 0 : struct ifnet *ifp = IC2IFP(ic);
7112 : struct iwm_rx_packet *pkt;
7113 : struct iwm_rx_mpdu_res_start *rx_res;
7114 : uint32_t len;
7115 : uint32_t rx_pkt_status;
7116 : int rxfail;
7117 :
7118 0 : pkt = mtod(m, struct iwm_rx_packet *);
7119 0 : rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
7120 0 : len = le16toh(rx_res->byte_count);
7121 0 : if (len < IEEE80211_MIN_LEN) {
7122 0 : ic->ic_stats.is_rx_tooshort++;
7123 0 : IC2IFP(ic)->if_ierrors++;
7124 0 : m_freem(m);
7125 0 : return;
7126 : }
7127 0 : if (len > maxlen) {
7128 0 : IC2IFP(ic)->if_ierrors++;
7129 0 : m_freem(m);
7130 0 : return;
7131 : }
7132 :
7133 0 : rx_pkt_status = le32toh(*(uint32_t *)(pkt->data +
7134 : sizeof(*rx_res) + len));
7135 0 : rxfail = ((rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) == 0 ||
7136 0 : (rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK) == 0);
7137 0 : if (rxfail) {
7138 0 : ifp->if_ierrors++;
7139 0 : m_freem(m);
7140 0 : return;
7141 : }
7142 :
7143 : /* Extract the 802.11 frame. */
7144 0 : m->m_data = (caddr_t)pkt->data + sizeof(*rx_res);
7145 0 : m->m_pkthdr.len = m->m_len = len;
7146 0 : if (iwm_rx_frame(sc, m) != 0) {
7147 0 : ifp->if_ierrors++;
7148 0 : m_freem(m);
7149 0 : }
7150 0 : }
7151 :
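/*
 * An RX buffer (IWM_RBUF_SIZE bytes) can carry several firmware responses
 * back to back.  iwm_rx_pkt() below walks them in place: each entry starts
 * with an iwm_rx_packet header, its length comes from len_n_flags, and the
 * walk advances by that length rounded up to IWM_FH_RSCSR_FRAME_ALIGN until
 * a zero header or an IWM_FH_RSCSR_FRAME_INVALID marker is found.
 */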
7152 : void
7153 0 : iwm_rx_pkt(struct iwm_softc *sc, struct iwm_rx_data *data)
7154 : {
7155 0 : struct ifnet *ifp = IC2IFP(&sc->sc_ic);
7156 : struct iwm_rx_packet *pkt;
7157 : uint32_t offset = 0, nmpdu = 0, len;
7158 : struct mbuf *m0;
7159 : const uint32_t minsz = sizeof(uint32_t) + sizeof(struct iwm_cmd_header);
7160 : int qid, idx, code;
7161 :
7162 0 : bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
7163 : BUS_DMASYNC_POSTREAD);
7164 :
7165 0 : m0 = data->m;
7166 0 : while (offset + minsz < IWM_RBUF_SIZE) {
7167 0 : pkt = (struct iwm_rx_packet *)(m0->m_data + offset);
7168 0 : qid = pkt->hdr.qid & ~0x80;
7169 0 : idx = pkt->hdr.idx;
7170 :
7171 0 : code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
7172 :
7173 0 : if ((code == 0 && qid == 0 && idx == 0) ||
7174 0 : pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
7175 : break;
7176 : }
7177 :
7178 0 : len = le32toh(pkt->len_n_flags) & IWM_FH_RSCSR_FRAME_SIZE_MSK;
7179 0 : if (len < sizeof(struct iwm_cmd_header) ||
7180 0 : len > (IWM_RBUF_SIZE - offset))
7181 : break;
7182 0 : len += sizeof(uint32_t); /* account for status word */
7183 :
7184 0 : if (code == IWM_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
7185 : /* Take mbuf m0 off the RX ring. */
7186 0 : if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur)) {
7187 0 : ifp->if_ierrors++;
7188 0 : break;
7189 : }
7190 0 : KASSERT(data->m != m0);
7191 : }
7192 :
7193 0 : switch (code) {
7194 : case IWM_REPLY_RX_PHY_CMD:
7195 0 : iwm_rx_rx_phy_cmd(sc, pkt, data);
7196 0 : break;
7197 :
7198 : case IWM_REPLY_RX_MPDU_CMD: {
7199 : /*
7200 : * Create an mbuf which points to the current packet.
7201 : * Always copy from offset zero to preserve m_pkthdr.
7202 : */
7203 0 : struct mbuf *m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
7204 0 : if (m == NULL) {
7205 0 : ifp->if_ierrors++;
7206 0 : break;
7207 : }
7208 0 : m_adj(m, offset);
7209 :
7210 0 : iwm_rx_mpdu(sc, m, IWM_RBUF_SIZE - offset);
7211 0 : break;
7212 : }
7213 :
7214 : case IWM_TX_CMD:
7215 0 : iwm_rx_tx_cmd(sc, pkt, data);
7216 0 : break;
7217 :
7218 : case IWM_MISSED_BEACONS_NOTIFICATION:
7219 0 : iwm_rx_bmiss(sc, pkt, data);
7220 0 : break;
7221 :
7222 : case IWM_MFUART_LOAD_NOTIFICATION:
7223 : break;
7224 :
7225 : case IWM_ALIVE: {
7226 : struct iwm_alive_resp_v1 *resp1;
7227 : struct iwm_alive_resp_v2 *resp2;
7228 : struct iwm_alive_resp_v3 *resp3;
7229 :
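			/*
			 * Which of the three ALIVE response layouts the
			 * firmware used is deduced purely from the payload
			 * length checked below.
			 */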
7230 0 : if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
7231 0 : SYNC_RESP_STRUCT(resp1, pkt);
7232 0 : sc->sc_uc.uc_error_event_table
7233 0 : = le32toh(resp1->error_event_table_ptr);
7234 0 : sc->sc_uc.uc_log_event_table
7235 0 : = le32toh(resp1->log_event_table_ptr);
7236 0 : sc->sched_base = le32toh(resp1->scd_base_ptr);
7237 0 : if (resp1->status == IWM_ALIVE_STATUS_OK)
7238 0 : sc->sc_uc.uc_ok = 1;
7239 : else
7240 0 : sc->sc_uc.uc_ok = 0;
7241 : }
7242 :
7243 0 : if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
7244 0 : SYNC_RESP_STRUCT(resp2, pkt);
7245 0 : sc->sc_uc.uc_error_event_table
7246 0 : = le32toh(resp2->error_event_table_ptr);
7247 0 : sc->sc_uc.uc_log_event_table
7248 0 : = le32toh(resp2->log_event_table_ptr);
7249 0 : sc->sched_base = le32toh(resp2->scd_base_ptr);
7250 0 : sc->sc_uc.uc_umac_error_event_table
7251 0 : = le32toh(resp2->error_info_addr);
7252 0 : if (resp2->status == IWM_ALIVE_STATUS_OK)
7253 0 : sc->sc_uc.uc_ok = 1;
7254 : else
7255 0 : sc->sc_uc.uc_ok = 0;
7256 : }
7257 :
7258 0 : if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
7259 0 : SYNC_RESP_STRUCT(resp3, pkt);
7260 0 : sc->sc_uc.uc_error_event_table
7261 0 : = le32toh(resp3->error_event_table_ptr);
7262 0 : sc->sc_uc.uc_log_event_table
7263 0 : = le32toh(resp3->log_event_table_ptr);
7264 0 : sc->sched_base = le32toh(resp3->scd_base_ptr);
7265 0 : sc->sc_uc.uc_umac_error_event_table
7266 0 : = le32toh(resp3->error_info_addr);
7267 0 : if (resp3->status == IWM_ALIVE_STATUS_OK)
7268 0 : sc->sc_uc.uc_ok = 1;
7269 : else
7270 0 : sc->sc_uc.uc_ok = 0;
7271 : }
7272 :
7273 0 : sc->sc_uc.uc_intr = 1;
7274 0 : wakeup(&sc->sc_uc);
7275 : break;
7276 : }
7277 :
7278 : case IWM_CALIB_RES_NOTIF_PHY_DB: {
7279 : struct iwm_calib_res_notif_phy_db *phy_db_notif;
7280 0 : SYNC_RESP_STRUCT(phy_db_notif, pkt);
7281 0 : iwm_phy_db_set_section(sc, phy_db_notif);
7282 0 : sc->sc_init_complete |= IWM_CALIB_COMPLETE;
7283 0 : wakeup(&sc->sc_init_complete);
7284 : break;
7285 : }
7286 :
7287 : case IWM_STATISTICS_NOTIFICATION: {
7288 : struct iwm_notif_statistics *stats;
7289 0 : SYNC_RESP_STRUCT(stats, pkt);
7290 0 : memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
7291 0 : sc->sc_noise = iwm_get_noise(&stats->rx.general);
7292 : break;
7293 : }
7294 :
7295 : case IWM_MCC_CHUB_UPDATE_CMD: {
7296 : struct iwm_mcc_chub_notif *notif;
7297 0 : SYNC_RESP_STRUCT(notif, pkt);
7298 :
7299 0 : sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
7300 0 : sc->sc_fw_mcc[1] = notif->mcc & 0xff;
7301 0 : sc->sc_fw_mcc[2] = '\0';
7302 0 : }
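		/*
		 * No break here: execution falls through to the DTS
		 * measurement notification case below, which is a no-op.
		 */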
7303 :
7304 : case IWM_DTS_MEASUREMENT_NOTIFICATION:
7305 : break;
7306 :
7307 : case IWM_PHY_CONFIGURATION_CMD:
7308 : case IWM_TX_ANT_CONFIGURATION_CMD:
7309 : case IWM_ADD_STA:
7310 : case IWM_MAC_CONTEXT_CMD:
7311 : case IWM_REPLY_SF_CFG_CMD:
7312 : case IWM_POWER_TABLE_CMD:
7313 : case IWM_PHY_CONTEXT_CMD:
7314 : case IWM_BINDING_CONTEXT_CMD:
7315 : case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
7316 : case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
7317 : case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
7318 : case IWM_SCAN_OFFLOAD_REQUEST_CMD:
7319 : case IWM_SCAN_OFFLOAD_ABORT_CMD:
7320 : case IWM_REPLY_BEACON_FILTERING_CMD:
7321 : case IWM_MAC_PM_POWER_TABLE:
7322 : case IWM_TIME_QUOTA_CMD:
7323 : case IWM_REMOVE_STA:
7324 : case IWM_TXPATH_FLUSH:
7325 : case IWM_LQ_CMD:
7326 : case IWM_BT_CONFIG:
7327 : case IWM_REPLY_THERMAL_MNG_BACKOFF:
7328 : case IWM_NVM_ACCESS_CMD:
7329 : case IWM_MCC_UPDATE_CMD:
7330 : case IWM_TIME_EVENT_CMD: {
7331 : size_t pkt_len;
7332 :
7333 0 : if (sc->sc_cmd_resp_pkt[idx] == NULL)
7334 0 : break;
7335 :
7336 0 : bus_dmamap_sync(sc->sc_dmat, data->map, 0,
7337 : sizeof(*pkt), BUS_DMASYNC_POSTREAD);
7338 :
7339 0 : pkt_len = sizeof(pkt->len_n_flags) +
7340 0 : iwm_rx_packet_len(pkt);
7341 :
7342 0 : if ((pkt->hdr.flags & IWM_CMD_FAILED_MSK) ||
7343 0 : pkt_len < sizeof(*pkt) ||
7344 0 : pkt_len > sc->sc_cmd_resp_len[idx]) {
7345 0 : free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
7346 0 : sc->sc_cmd_resp_len[idx]);
7347 0 : sc->sc_cmd_resp_pkt[idx] = NULL;
7348 0 : break;
7349 : }
7350 :
7351 0 : bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
7352 : pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
7353 0 : memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
7354 0 : break;
7355 : }
7356 :
7357 : /* ignore */
7358 : case 0x6c: /* IWM_PHY_DB_CMD */
7359 : break;
7360 :
7361 : case IWM_INIT_COMPLETE_NOTIF:
7362 0 : sc->sc_init_complete |= IWM_INIT_COMPLETE;
7363 0 : wakeup(&sc->sc_init_complete);
7364 0 : break;
7365 :
7366 : case IWM_SCAN_OFFLOAD_COMPLETE: {
7367 : struct iwm_periodic_scan_complete *notif;
7368 0 : SYNC_RESP_STRUCT(notif, pkt);
7369 : break;
7370 : }
7371 :
7372 : case IWM_SCAN_ITERATION_COMPLETE: {
7373 : struct iwm_lmac_scan_complete_notif *notif;
7374 0 : SYNC_RESP_STRUCT(notif, pkt);
7375 0 : iwm_endscan(sc);
7376 : break;
7377 : }
7378 :
7379 : case IWM_SCAN_COMPLETE_UMAC: {
7380 : struct iwm_umac_scan_complete *notif;
7381 0 : SYNC_RESP_STRUCT(notif, pkt);
7382 0 : iwm_endscan(sc);
7383 : break;
7384 : }
7385 :
7386 : case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
7387 : struct iwm_umac_scan_iter_complete_notif *notif;
7388 0 : SYNC_RESP_STRUCT(notif, pkt);
7389 0 : iwm_endscan(sc);
7390 : break;
7391 : }
7392 :
7393 : case IWM_REPLY_ERROR: {
7394 : struct iwm_error_resp *resp;
7395 0 : SYNC_RESP_STRUCT(resp, pkt);
7396 0 : printf("%s: firmware error 0x%x, cmd 0x%x\n",
7397 0 : DEVNAME(sc), le32toh(resp->error_type),
7398 0 : resp->cmd_id);
7399 : break;
7400 : }
7401 :
7402 : case IWM_TIME_EVENT_NOTIFICATION: {
7403 : struct iwm_time_event_notif *notif;
7404 : uint32_t action;
7405 0 : SYNC_RESP_STRUCT(notif, pkt);
7406 :
7407 0 : if (sc->sc_time_event_uid != le32toh(notif->unique_id))
7408 0 : break;
7409 0 : action = le32toh(notif->action);
7410 0 : if (action & IWM_TE_V2_NOTIF_HOST_EVENT_END)
7411 0 : sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
7412 0 : break;
7413 : }
7414 :
7415 : case IWM_WIDE_ID(IWM_SYSTEM_GROUP,
7416 : IWM_FSEQ_VER_MISMATCH_NOTIFICATION):
7417 : break;
7418 :
7419 : /*
7420 : * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
7421 : * messages. Just ignore them for now.
7422 : */
7423 : case IWM_DEBUG_LOG_MSG:
7424 : break;
7425 :
7426 : case IWM_MCAST_FILTER_CMD:
7427 : break;
7428 :
7429 : case IWM_SCD_QUEUE_CFG: {
7430 : struct iwm_scd_txq_cfg_rsp *rsp;
7431 0 : SYNC_RESP_STRUCT(rsp, pkt);
7432 :
7433 : break;
7434 : }
7435 :
7436 : default:
7437 0 : printf("%s: unhandled firmware response 0x%x/0x%x "
7438 : "rx ring %d[%d]\n",
7439 0 : DEVNAME(sc), pkt->hdr.code, pkt->len_n_flags, qid,
7440 : idx);
7441 0 : break;
7442 : }
7443 :
7444 : /*
7445 : * uCode sets bit 0x80 when it originates the notification,
7446 : * i.e. when the notification is not a direct response to a
7447 : * command sent by the driver.
7448 : * For example, uCode issues IWM_REPLY_RX when it sends a
7449 : * received frame to the driver.
7450 : */
7451 0 : if (!(pkt->hdr.qid & (1 << 7))) {
7452 0 : iwm_cmd_done(sc, pkt);
7453 0 : }
7454 :
7455 0 : offset += roundup(len, IWM_FH_RSCSR_FRAME_ALIGN);
7456 : }
7457 :
7458 0 : if (m0 != data->m)
7459 0 : m_freem(m0);
7460 0 : }
7461 :
7462 : void
7463 0 : iwm_notif_intr(struct iwm_softc *sc)
7464 : {
7465 : uint16_t hw;
7466 :
7467 0 : bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
7468 : 0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
7469 :
7470 0 : hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
7471 0 : while (sc->rxq.cur != hw) {
7472 0 : struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
7473 0 : iwm_rx_pkt(sc, data);
7474 0 : ADVANCE_RXQ(sc);
7475 : }
7476 :
7477 : /*
7478 : * Tell the firmware what we have processed.
7479 : * Seems like the hardware gets upset unless we align the write by 8??
7480 : */
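	/*
	 * Worked example (assuming IWM_RX_RING_COUNT is 256, as defined
	 * elsewhere in the driver): a closed_rb_num of 0 results in a write
	 * of 255 & ~7 == 248, and a value of 13 results in 12 & ~7 == 8.
	 */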
7481 0 : hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
7482 0 : IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
7483 0 : }
7484 :
7485 : int
7486 0 : iwm_intr(void *arg)
7487 : {
7488 0 : struct iwm_softc *sc = arg;
7489 : int handled = 0;
7490 : int r1, r2, rv = 0;
7491 : int isperiodic = 0;
7492 :
7493 0 : IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
7494 :
7495 0 : if (sc->sc_flags & IWM_FLAG_USE_ICT) {
7496 0 : uint32_t *ict = sc->ict_dma.vaddr;
7497 : int tmp;
7498 :
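		/*
		 * The ICT is a DMA table of 32-bit interrupt-cause words
		 * written by the device (allocated in iwm_attach() below).
		 * Entries are consumed until a zero word is found; the bit
		 * shuffling further down appears to translate the collected
		 * ICT-format causes into the CSR_INT bit layout used by the
		 * rest of this handler.
		 */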
7499 0 : tmp = htole32(ict[sc->ict_cur]);
7500 0 : if (!tmp)
7501 0 : goto out_ena;
7502 :
7503 : /*
7504 : * ok, there was something. keep plowing until we have all.
7505 : */
7506 : r1 = r2 = 0;
7507 0 : while (tmp) {
7508 0 : r1 |= tmp;
7509 0 : ict[sc->ict_cur] = 0;
7510 0 : sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
7511 0 : tmp = htole32(ict[sc->ict_cur]);
7512 : }
7513 :
7514 : /* this is where the fun begins. don't ask */
7515 0 : if (r1 == 0xffffffff)
7516 0 : r1 = 0;
7517 :
7518 : /* i am not expected to understand this */
7519 0 : if (r1 & 0xc0000)
7520 0 : r1 |= 0x8000;
7521 0 : r1 = (0xff & r1) | ((0xff00 & r1) << 16);
7522 0 : } else {
7523 0 : r1 = IWM_READ(sc, IWM_CSR_INT);
7524 0 : if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
7525 : goto out;
7526 0 : r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
7527 : }
7528 0 : if (r1 == 0 && r2 == 0) {
7529 : goto out_ena;
7530 : }
7531 :
7532 0 : IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
7533 :
7534 : /* ignored */
7535 0 : handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
7536 :
7537 0 : if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
7538 0 : handled |= IWM_CSR_INT_BIT_RF_KILL;
7539 0 : if (iwm_check_rfkill(sc)) {
7540 0 : task_add(systq, &sc->init_task);
7541 : rv = 1;
7542 0 : goto out;
7543 : }
7544 : }
7545 :
7546 0 : if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
7547 : #ifdef IWM_DEBUG
7548 : int i;
7549 :
7550 : iwm_nic_error(sc);
7551 :
7552 : /* Dump driver status (TX and RX rings) while we're here. */
7553 : DPRINTF(("driver status:\n"));
7554 : for (i = 0; i < IWM_MAX_QUEUES; i++) {
7555 : struct iwm_tx_ring *ring = &sc->txq[i];
7556 : DPRINTF((" tx ring %2d: qid=%-2d cur=%-3d "
7557 : "queued=%-3d\n",
7558 : i, ring->qid, ring->cur, ring->queued));
7559 : }
7560 : DPRINTF((" rx ring: cur=%d\n", sc->rxq.cur));
7561 : DPRINTF((" 802.11 state %s\n",
7562 : ieee80211_state_name[sc->sc_ic.ic_state]));
7563 : #endif
7564 :
7565 0 : printf("%s: fatal firmware error\n", DEVNAME(sc));
7566 0 : if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
7567 0 : task_add(systq, &sc->init_task);
7568 : rv = 1;
7569 0 : goto out;
7570 :
7571 : }
7572 :
7573 0 : if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
7574 : handled |= IWM_CSR_INT_BIT_HW_ERR;
7575	0 : 		printf("%s: hardware error, stopping device\n", DEVNAME(sc));
7576 0 : if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
7577 0 : sc->sc_flags |= IWM_FLAG_HW_ERR;
7578 0 : task_add(systq, &sc->init_task);
7579 0 : }
7580 : rv = 1;
7581 0 : goto out;
7582 : }
7583 :
7584 : /* firmware chunk loaded */
7585 0 : if (r1 & IWM_CSR_INT_BIT_FH_TX) {
7586 0 : IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
7587 0 : handled |= IWM_CSR_INT_BIT_FH_TX;
7588 :
7589 0 : sc->sc_fw_chunk_done = 1;
7590 0 : wakeup(&sc->sc_fw);
7591 0 : }
7592 :
7593 0 : if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
7594 0 : handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
7595 0 : IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
7596 0 : if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
7597 0 : IWM_WRITE_1(sc,
7598 : IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
7599 : isperiodic = 1;
7600 0 : }
7601 :
7602 0 : if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) ||
7603 0 : isperiodic) {
7604 : handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
7605 0 : IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
7606 :
7607 0 : iwm_notif_intr(sc);
7608 :
7609 : /* enable periodic interrupt, see above */
7610 0 : if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) &&
7611 : !isperiodic)
7612 0 : IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
7613 : IWM_CSR_INT_PERIODIC_ENA);
7614 : }
7615 :
7616 0 : rv = 1;
7617 :
7618 : out_ena:
7619 0 : iwm_restore_interrupts(sc);
7620 : out:
7621 0 : return rv;
7622 0 : }
7623 :
7624 : typedef void *iwm_match_t;
7625 :
7626 : static const struct pci_matchid iwm_devices[] = {
7627 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_1 },
7628 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3160_2 },
7629 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_1 },
7630 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3165_2 },
7631 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_3168_1 },
7632 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_1 },
7633 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7260_2 },
7634 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_1 },
7635 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_7265_2 },
7636 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_1 },
7637 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8260_2 },
7638 : { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_8265_1 },
7639 : };
7640 :
7641 : int
7642 0 : iwm_match(struct device *parent, iwm_match_t match __unused, void *aux)
7643 : {
7644 0 : return pci_matchbyid((struct pci_attach_args *)aux, iwm_devices,
7645 : nitems(iwm_devices));
7646 : }
7647 :
7648 : int
7649 0 : iwm_preinit(struct iwm_softc *sc)
7650 : {
7651 0 : struct ieee80211com *ic = &sc->sc_ic;
7652 0 : struct ifnet *ifp = IC2IFP(ic);
7653 : int err;
7654 : static int attached;
7655 :
7656 0 : err = iwm_prepare_card_hw(sc);
7657 0 : if (err) {
7658 0 : printf("%s: could not initialize hardware\n", DEVNAME(sc));
7659 0 : return err;
7660 : }
7661 :
7662 0 : if (attached) {
7663 : /* Update MAC in case the upper layers changed it. */
7664 0 : IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
7665 : ((struct arpcom *)ifp)->ac_enaddr);
7666 0 : return 0;
7667 : }
7668 :
7669 0 : err = iwm_start_hw(sc);
7670 0 : if (err) {
7671 0 : printf("%s: could not initialize hardware\n", DEVNAME(sc));
7672 0 : return err;
7673 : }
7674 :
7675 0 : err = iwm_run_init_mvm_ucode(sc, 1);
7676 0 : iwm_stop_device(sc);
7677 0 : if (err)
7678 0 : return err;
7679 :
7680 : /* Print version info and MAC address on first successful fw load. */
7681 0 : attached = 1;
7682 0 : printf("%s: hw rev 0x%x, fw ver %s, address %s\n",
7683 0 : DEVNAME(sc), sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
7684 0 : sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
7685 :
7686 0 : if (sc->sc_nvm.sku_cap_11n_enable)
7687 0 : iwm_setup_ht_rates(sc);
7688 :
7689 : /* not all hardware can do 5GHz band */
7690 0 : if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
7691 0 : memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
7692 : sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
7693 :
7694 : /* Configure channel information obtained from firmware. */
7695 0 : ieee80211_channel_init(ifp);
7696 :
7697 : /* Configure MAC address. */
7698 0 : err = if_setlladdr(ifp, ic->ic_myaddr);
7699 0 : if (err)
7700 0 : printf("%s: could not set MAC address (error %d)\n",
7701 : DEVNAME(sc), err);
7702 :
7703 0 : ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
7704 :
7705 0 : return 0;
7706 0 : }
7707 :
7708 : void
7709 0 : iwm_attach_hook(struct device *self)
7710 : {
7711 0 : struct iwm_softc *sc = (void *)self;
7712 :
7713 0 : KASSERT(!cold);
7714 :
7715 0 : iwm_preinit(sc);
7716 0 : }
7717 :
7718 : void
7719 0 : iwm_attach(struct device *parent, struct device *self, void *aux)
7720 : {
7721 0 : struct iwm_softc *sc = (void *)self;
7722 0 : struct pci_attach_args *pa = aux;
7723 0 : pci_intr_handle_t ih;
7724 : pcireg_t reg, memtype;
7725 0 : struct ieee80211com *ic = &sc->sc_ic;
7726 0 : struct ifnet *ifp = &ic->ic_if;
7727 : const char *intrstr;
7728 : int err;
7729 : int txq_i, i;
7730 :
7731 0 : sc->sc_pct = pa->pa_pc;
7732 0 : sc->sc_pcitag = pa->pa_tag;
7733 0 : sc->sc_dmat = pa->pa_dmat;
7734 :
7735 0 : rw_init(&sc->ioctl_rwl, "iwmioctl");
7736 :
7737 0 : err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
7738 0 : PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
7739 0 : if (err == 0) {
7740 0 : printf("%s: PCIe capability structure not found!\n",
7741 0 : DEVNAME(sc));
7742 0 : return;
7743 : }
7744 :
7745 : /* Clear device-specific "PCI retry timeout" register (41h). */
7746 0 : reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
7747 0 : pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
7748 :
7749 : /* Enable bus-mastering and hardware bug workaround. */
7750 0 : reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
7751 0 : reg |= PCI_COMMAND_MASTER_ENABLE;
7752 : /* if !MSI */
7753 0 : if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
7754 0 : reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
7755 0 : }
7756 0 : pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
7757 :
7758 0 : memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
7759 0 : err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
7760 0 : &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
7761 0 : if (err) {
7762 0 : printf("%s: can't map mem space\n", DEVNAME(sc));
7763 0 : return;
7764 : }
7765 :
7766 0 : if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
7767 0 : printf("%s: can't map interrupt\n", DEVNAME(sc));
7768 0 : return;
7769 : }
7770 :
7771 0 : intrstr = pci_intr_string(sc->sc_pct, ih);
7772 0 : sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwm_intr, sc,
7773 0 : DEVNAME(sc));
7774 :
7775 0 : if (sc->sc_ih == NULL) {
7776 0 : printf("\n");
7777 0 : printf("%s: can't establish interrupt", DEVNAME(sc));
7778 0 : if (intrstr != NULL)
7779 0 : printf(" at %s", intrstr);
7780 0 : printf("\n");
7781 0 : return;
7782 : }
7783 0 : printf(", %s\n", intrstr);
7784 :
7785 0 : sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
7786 0 : switch (PCI_PRODUCT(pa->pa_id)) {
7787 : case PCI_PRODUCT_INTEL_WL_3160_1:
7788 : case PCI_PRODUCT_INTEL_WL_3160_2:
7789 0 : sc->sc_fwname = "iwm-3160-16";
7790 0 : sc->host_interrupt_operation_mode = 1;
7791 0 : sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7792 0 : sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7793 0 : break;
7794 : case PCI_PRODUCT_INTEL_WL_3165_1:
7795 : case PCI_PRODUCT_INTEL_WL_3165_2:
7796 0 : sc->sc_fwname = "iwm-7265-16";
7797 0 : sc->host_interrupt_operation_mode = 0;
7798 0 : sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7799 0 : sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7800 0 : break;
7801 : case PCI_PRODUCT_INTEL_WL_3168_1:
7802 0 : sc->sc_fwname = "iwm-3168-22";
7803 0 : sc->host_interrupt_operation_mode = 0;
7804 0 : sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7805 0 : sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7806 0 : break;
7807 : case PCI_PRODUCT_INTEL_WL_7260_1:
7808 : case PCI_PRODUCT_INTEL_WL_7260_2:
7809 0 : sc->sc_fwname = "iwm-7260-16";
7810 0 : sc->host_interrupt_operation_mode = 1;
7811 0 : sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7812 0 : sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7813 0 : break;
7814 : case PCI_PRODUCT_INTEL_WL_7265_1:
7815 : case PCI_PRODUCT_INTEL_WL_7265_2:
7816 0 : sc->sc_fwname = "iwm-7265-16";
7817 0 : sc->host_interrupt_operation_mode = 0;
7818 0 : sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7819 0 : sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7820 0 : break;
7821 : case PCI_PRODUCT_INTEL_WL_8260_1:
7822 : case PCI_PRODUCT_INTEL_WL_8260_2:
7823 0 : sc->sc_fwname = "iwm-8000C-16";
7824 0 : sc->host_interrupt_operation_mode = 0;
7825 0 : sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
7826 0 : sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
7827 0 : break;
7828 : case PCI_PRODUCT_INTEL_WL_8265_1:
7829 0 : sc->sc_fwname = "iwm-8265-22";
7830 0 : sc->host_interrupt_operation_mode = 0;
7831 0 : sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
7832 0 : sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
7833 0 : break;
7834 : default:
7835 0 : printf("%s: unknown adapter type\n", DEVNAME(sc));
7836 0 : return;
7837 : }
7838 :
7839 : /*
7840 : 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
7841 : 	 * changed, and the revision step now also includes bits 0-1 (no more
7842 : 	 * "dash" value). To keep hw_rev backwards compatible, we store it
7843 : 	 * in the old format.
7844 : */
7845 0 : if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
7846 0 : sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
7847 0 : (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
7848 :
7849 0 : if (iwm_prepare_card_hw(sc) != 0) {
7850 0 : printf("%s: could not initialize hardware\n", DEVNAME(sc));
7851 0 : return;
7852 : }
7853 :
7854 0 : if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
7855 : uint32_t hw_step;
7856 :
7857 : /*
7858 : * In order to recognize C step the driver should read the
7859 : * chip version id located at the AUX bus MISC address.
7860 : */
7861 0 : IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
7862 : IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
7863 0 : DELAY(2);
7864 :
7865 0 : err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
7866 : IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7867 : IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7868 : 25000);
7869 0 : if (!err) {
7870 0 : printf("%s: Failed to wake up the nic\n", DEVNAME(sc));
7871 0 : return;
7872 : }
7873 :
7874 0 : if (iwm_nic_lock(sc)) {
7875 0 : hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
7876 0 : hw_step |= IWM_ENABLE_WFPM;
7877 0 : iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
7878 0 : hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
7879 0 : hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
7880 0 : if (hw_step == 0x3)
7881 0 : sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
7882 : (IWM_SILICON_C_STEP << 2);
7883 0 : iwm_nic_unlock(sc);
7884 : } else {
7885 0 : printf("%s: Failed to lock the nic\n", DEVNAME(sc));
7886 0 : return;
7887 : }
7888 0 : }
7889 :
7890 : /*
7891 : * Allocate DMA memory for firmware transfers.
7892 : * Must be aligned on a 16-byte boundary.
7893 : */
7894 0 : err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
7895 0 : sc->sc_fwdmasegsz, 16);
7896 0 : if (err) {
7897 0 : printf("%s: could not allocate memory for firmware\n",
7898 : DEVNAME(sc));
7899 0 : return;
7900 : }
7901 :
7902 : /* Allocate "Keep Warm" page, used internally by the card. */
7903 0 : err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
7904 0 : if (err) {
7905 0 : printf("%s: could not allocate keep warm page\n", DEVNAME(sc));
7906 0 : goto fail1;
7907 : }
7908 :
7909 : 	/* Allocate interrupt cause table (ICT). */
7910 0 : err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
7911 : IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
7912 0 : if (err) {
7913 0 : printf("%s: could not allocate ICT table\n", DEVNAME(sc));
7914 0 : goto fail2;
7915 : }
7916 :
7917 : /* TX scheduler rings must be aligned on a 1KB boundary. */
7918 0 : err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
7919 : nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
7920 0 : if (err) {
7921 0 : printf("%s: could not allocate TX scheduler rings\n",
7922 : DEVNAME(sc));
7923 0 : goto fail3;
7924 : }
7925 :
7926 0 : for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
7927 0 : err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
7928 0 : if (err) {
7929 0 : printf("%s: could not allocate TX ring %d\n",
7930 : DEVNAME(sc), txq_i);
7931 0 : goto fail4;
7932 : }
7933 : }
7934 :
7935 0 : err = iwm_alloc_rx_ring(sc, &sc->rxq);
7936 0 : if (err) {
7937 0 : printf("%s: could not allocate RX ring\n", DEVNAME(sc));
7938 0 : goto fail4;
7939 : }
7940 :
7941 0 : sc->sc_nswq = taskq_create("iwmns", 1, IPL_NET, 0);
7942 0 : if (sc->sc_nswq == NULL)
7943 : goto fail4;
7944 :
7945 : /* Clear pending interrupts. */
7946 0 : IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
7947 :
7948 0 : ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
7949 0 : ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
7950 0 : ic->ic_state = IEEE80211_S_INIT;
7951 :
7952 : /* Set device capabilities. */
7953 0 : ic->ic_caps =
7954 : IEEE80211_C_WEP | /* WEP */
7955 : IEEE80211_C_RSN | /* WPA/RSN */
7956 : IEEE80211_C_SCANALL | /* device scans all channels at once */
7957 : IEEE80211_C_SCANALLBAND | /* device scans all bands at once */
7958 : IEEE80211_C_MONITOR | /* monitor mode supported */
7959 : IEEE80211_C_SHSLOT | /* short slot time supported */
7960 : IEEE80211_C_SHPREAMBLE; /* short preamble supported */
7961 :
7962 0 : ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
7963 0 : ic->ic_htcaps |=
7964 : (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
7965 0 : ic->ic_htxcaps = 0;
7966 0 : ic->ic_txbfcaps = 0;
7967 0 : ic->ic_aselcaps = 0;
7968 0 : ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
7969 :
7970 0 : ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
7971 0 : ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
7972 0 : ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
7973 :
7974 0 : for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
7975 0 : sc->sc_phyctxt[i].id = i;
7976 : }
7977 :
7978 0 : sc->sc_amrr.amrr_min_success_threshold = 1;
7979 0 : sc->sc_amrr.amrr_max_success_threshold = 15;
7980 :
7981 : /* IBSS channel undefined for now. */
7982 0 : ic->ic_ibss_chan = &ic->ic_channels[1];
7983 :
7984 0 : ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
7985 :
7986 0 : ifp->if_softc = sc;
7987 0 : ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
7988 0 : ifp->if_ioctl = iwm_ioctl;
7989 0 : ifp->if_start = iwm_start;
7990 0 : ifp->if_watchdog = iwm_watchdog;
7991 0 : memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
7992 :
7993 0 : if_attach(ifp);
7994 0 : ieee80211_ifattach(ifp);
7995 0 : ieee80211_media_init(ifp, iwm_media_change, ieee80211_media_status);
7996 :
7997 : #if NBPFILTER > 0
7998 0 : iwm_radiotap_attach(sc);
7999 : #endif
8000 0 : timeout_set(&sc->sc_calib_to, iwm_calib_timeout, sc);
8001 0 : timeout_set(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
8002 0 : task_set(&sc->init_task, iwm_init_task, sc);
8003 0 : task_set(&sc->newstate_task, iwm_newstate_task, sc);
8004 0 : task_set(&sc->setrates_task, iwm_setrates_task, sc);
8005 0 : task_set(&sc->ba_task, iwm_ba_task, sc);
8006 0 : task_set(&sc->htprot_task, iwm_htprot_task, sc);
8007 :
8008 0 : ic->ic_node_alloc = iwm_node_alloc;
8009 0 : ic->ic_bgscan_start = iwm_bgscan;
8010 :
8011 : /* Override 802.11 state transition machine. */
8012 0 : sc->sc_newstate = ic->ic_newstate;
8013 0 : ic->ic_newstate = iwm_newstate;
8014 0 : ic->ic_update_htprot = iwm_update_htprot;
8015 0 : ic->ic_ampdu_rx_start = iwm_ampdu_rx_start;
8016 0 : ic->ic_ampdu_rx_stop = iwm_ampdu_rx_stop;
8017 : #ifdef notyet
8018 : ic->ic_ampdu_tx_start = iwm_ampdu_tx_start;
8019 : ic->ic_ampdu_tx_stop = iwm_ampdu_tx_stop;
8020 : #endif
8021 : /*
8022 : * We cannot read the MAC address without loading the
8023 : * firmware from disk. Postpone until mountroot is done.
8024 : */
8025 0 : config_mountroot(self, iwm_attach_hook);
8026 :
8027 0 : return;
8028 :
8029 0 : fail4: while (--txq_i >= 0)
8030 0 : iwm_free_tx_ring(sc, &sc->txq[txq_i]);
8031 0 : iwm_free_rx_ring(sc, &sc->rxq);
8032 0 : iwm_dma_contig_free(&sc->sched_dma);
8033 0 : fail3: if (sc->ict_dma.vaddr != NULL)
8034 0 : iwm_dma_contig_free(&sc->ict_dma);
8035 :
8036 0 : fail2: iwm_dma_contig_free(&sc->kw_dma);
8037 0 : fail1: iwm_dma_contig_free(&sc->fw_dma);
8038 0 : return;
8039 0 : }
8040 :
8041 : #if NBPFILTER > 0
8042 : void
8043 0 : iwm_radiotap_attach(struct iwm_softc *sc)
8044 : {
8045 0 : bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
8046 : sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
8047 :
8048 0 : sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
8049 0 : sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
8050 0 : sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
8051 :
8052 0 : sc->sc_txtap_len = sizeof sc->sc_txtapu;
8053 0 : sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
8054 0 : sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
8055 0 : }
8056 : #endif
8057 :
8058 : void
8059 0 : iwm_init_task(void *arg1)
8060 : {
8061 0 : struct iwm_softc *sc = arg1;
8062 0 : struct ifnet *ifp = &sc->sc_ic.ic_if;
8063 0 : int s = splnet();
8064 0 : int generation = sc->sc_generation;
8065 0 : int fatal = (sc->sc_flags & (IWM_FLAG_HW_ERR | IWM_FLAG_RFKILL));
8066 :
8067 0 : rw_enter_write(&sc->ioctl_rwl);
8068 0 : if (generation != sc->sc_generation) {
8069 0 : rw_exit(&sc->ioctl_rwl);
8070 0 : splx(s);
8071 0 : return;
8072 : }
8073 :
8074 0 : if (ifp->if_flags & IFF_RUNNING)
8075 0 : iwm_stop(ifp);
8076 : else
8077 0 : sc->sc_flags &= ~IWM_FLAG_HW_ERR;
8078 :
8079 0 : if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
8080 0 : iwm_init(ifp);
8081 :
8082 0 : rw_exit(&sc->ioctl_rwl);
8083 0 : splx(s);
8084 0 : }
8085 :
8086 : int
8087 0 : iwm_resume(struct iwm_softc *sc)
8088 : {
8089 : pcireg_t reg;
8090 :
8091 : /* Clear device-specific "PCI retry timeout" register (41h). */
8092 0 : reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
8093 0 : pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
8094 :
8095 0 : return iwm_prepare_card_hw(sc);
8096 : }
8097 :
8098 : int
8099 0 : iwm_activate(struct device *self, int act)
8100 : {
8101 0 : struct iwm_softc *sc = (struct iwm_softc *)self;
8102 0 : struct ifnet *ifp = &sc->sc_ic.ic_if;
8103 : int err = 0;
8104 :
8105 0 : switch (act) {
8106 : case DVACT_QUIESCE:
8107 0 : if (ifp->if_flags & IFF_RUNNING) {
8108 0 : rw_enter_write(&sc->ioctl_rwl);
8109 0 : iwm_stop(ifp);
8110 0 : rw_exit(&sc->ioctl_rwl);
8111 0 : }
8112 : break;
8113 : case DVACT_RESUME:
8114 0 : err = iwm_resume(sc);
8115 0 : if (err)
8116 0 : printf("%s: could not initialize hardware\n",
8117 0 : DEVNAME(sc));
8118 : break;
8119 : case DVACT_WAKEUP:
8120 : /* Hardware should be up at this point. */
8121 0 : if (iwm_set_hw_ready(sc))
8122 0 : task_add(systq, &sc->init_task);
8123 : break;
8124 : }
8125 :
8126 0 : return 0;
8127 : }
8128 :
8129 : struct cfdriver iwm_cd = {
8130 : NULL, "iwm", DV_IFNET
8131 : };
8132 :
8133 : struct cfattach iwm_ca = {
8134 : sizeof(struct iwm_softc), iwm_match, iwm_attach,
8135 : NULL, iwm_activate
8136 : };
|