/* if_ural.c revision 189275 */
1/* $FreeBSD: head/sys/dev/usb/wlan/if_ural.c 189275 2009-03-02 05:37:05Z thompsa $ */ 2 3/*- 4 * Copyright (c) 2005, 2006 5 * Damien Bergamini <damien.bergamini@free.fr> 6 * 7 * Copyright (c) 2006, 2008 8 * Hans Petter Selasky <hselasky@FreeBSD.org> 9 * 10 * Permission to use, copy, modify, and distribute this software for any 11 * purpose with or without fee is hereby granted, provided that the above 12 * copyright notice and this permission notice appear in all copies. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 15 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 16 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 17 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 18 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 19 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 20 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
21 */ 22 23#include <sys/cdefs.h> 24__FBSDID("$FreeBSD: head/sys/dev/usb/wlan/if_ural.c 189275 2009-03-02 05:37:05Z thompsa $"); 25 26/*- 27 * Ralink Technology RT2500USB chipset driver 28 * http://www.ralinktech.com/ 29 */ 30 31#include "usbdevs.h" 32#include <dev/usb/usb.h> 33#include <dev/usb/usb_mfunc.h> 34#include <dev/usb/usb_error.h> 35 36#define USB_DEBUG_VAR ural_debug 37 38#include <dev/usb/usb_core.h> 39#include <dev/usb/usb_lookup.h> 40#include <dev/usb/usb_process.h> 41#include <dev/usb/usb_debug.h> 42#include <dev/usb/usb_request.h> 43#include <dev/usb/usb_busdma.h> 44#include <dev/usb/usb_util.h> 45 46#include <dev/usb/wlan/usb_wlan.h> 47#include <dev/usb/wlan/if_uralreg.h> 48#include <dev/usb/wlan/if_uralvar.h> 49 50#if USB_DEBUG 51static int ural_debug = 0; 52 53SYSCTL_NODE(_hw_usb2, OID_AUTO, ural, CTLFLAG_RW, 0, "USB ural"); 54SYSCTL_INT(_hw_usb2_ural, OID_AUTO, debug, CTLFLAG_RW, &ural_debug, 0, 55 "Debug level"); 56#endif 57 58#define URAL_RSSI(rssi) \ 59 ((rssi) > (RAL_NOISE_FLOOR + RAL_RSSI_CORR) ? 
\ 60 ((rssi) - (RAL_NOISE_FLOOR + RAL_RSSI_CORR)) : 0) 61 62/* various supported device vendors/products */ 63static const struct usb2_device_id ural_devs[] = { 64 { USB_VP(USB_VENDOR_ASUS, USB_PRODUCT_ASUS_WL167G) }, 65 { USB_VP(USB_VENDOR_ASUS, USB_PRODUCT_RALINK_RT2570) }, 66 { USB_VP(USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5D7050) }, 67 { USB_VP(USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5D7051) }, 68 { USB_VP(USB_VENDOR_CISCOLINKSYS, USB_PRODUCT_CISCOLINKSYS_HU200TS) }, 69 { USB_VP(USB_VENDOR_CISCOLINKSYS, USB_PRODUCT_CISCOLINKSYS_WUSB54G) }, 70 { USB_VP(USB_VENDOR_CISCOLINKSYS, USB_PRODUCT_CISCOLINKSYS_WUSB54GP) }, 71 { USB_VP(USB_VENDOR_CONCEPTRONIC2, USB_PRODUCT_CONCEPTRONIC2_C54RU) }, 72 { USB_VP(USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DWLG122) }, 73 { USB_VP(USB_VENDOR_GIGABYTE, USB_PRODUCT_GIGABYTE_GN54G) }, 74 { USB_VP(USB_VENDOR_GIGABYTE, USB_PRODUCT_GIGABYTE_GNWBKG) }, 75 { USB_VP(USB_VENDOR_GUILLEMOT, USB_PRODUCT_GUILLEMOT_HWGUSB254) }, 76 { USB_VP(USB_VENDOR_MELCO, USB_PRODUCT_MELCO_KG54) }, 77 { USB_VP(USB_VENDOR_MELCO, USB_PRODUCT_MELCO_KG54AI) }, 78 { USB_VP(USB_VENDOR_MELCO, USB_PRODUCT_MELCO_KG54YB) }, 79 { USB_VP(USB_VENDOR_MELCO, USB_PRODUCT_MELCO_NINWIFI) }, 80 { USB_VP(USB_VENDOR_MSI, USB_PRODUCT_MSI_RT2570) }, 81 { USB_VP(USB_VENDOR_MSI, USB_PRODUCT_MSI_RT2570_2) }, 82 { USB_VP(USB_VENDOR_MSI, USB_PRODUCT_MSI_RT2570_3) }, 83 { USB_VP(USB_VENDOR_NOVATECH, USB_PRODUCT_NOVATECH_NV902) }, 84 { USB_VP(USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2570) }, 85 { USB_VP(USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2570_2) }, 86 { USB_VP(USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2570_3) }, 87 { USB_VP(USB_VENDOR_SIEMENS2, USB_PRODUCT_SIEMENS2_WL54G) }, 88 { USB_VP(USB_VENDOR_SMC, USB_PRODUCT_SMC_2862WG) }, 89 { USB_VP(USB_VENDOR_SPHAIRON, USB_PRODUCT_SPHAIRON_UB801R) }, 90 { USB_VP(USB_VENDOR_SURECOM, USB_PRODUCT_SURECOM_RT2570) }, 91 { USB_VP(USB_VENDOR_VTECH, USB_PRODUCT_VTECH_RT2570) }, 92 { USB_VP(USB_VENDOR_ZINWELL, USB_PRODUCT_ZINWELL_RT2570) }, 93}; 94 95static 
usb2_callback_t ural_bulk_read_callback; 96static usb2_callback_t ural_bulk_write_callback; 97 98static usb2_proc_callback_t ural_command_wrapper; 99static usb2_proc_callback_t ural_attach_post; 100static usb2_proc_callback_t ural_task; 101static usb2_proc_callback_t ural_scantask; 102static usb2_proc_callback_t ural_promisctask; 103static usb2_proc_callback_t ural_amrr_task; 104static usb2_proc_callback_t ural_init_task; 105static usb2_proc_callback_t ural_stop_task; 106static usb2_proc_callback_t ural_flush_task; 107 108static usb2_error_t ural_do_request(struct ural_softc *sc, 109 struct usb2_device_request *req, void *data); 110static struct ieee80211vap *ural_vap_create(struct ieee80211com *, 111 const char name[IFNAMSIZ], int unit, int opmode, 112 int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], 113 const uint8_t mac[IEEE80211_ADDR_LEN]); 114static void ural_vap_delete(struct ieee80211vap *); 115static void ural_tx_free(struct ural_tx_data *, int); 116static void ural_setup_tx_list(struct ural_softc *); 117static void ural_unsetup_tx_list(struct ural_softc *); 118static int ural_newstate(struct ieee80211vap *, 119 enum ieee80211_state, int); 120static void ural_setup_tx_desc(struct ural_softc *, 121 struct ural_tx_desc *, uint32_t, int, int); 122static int ural_tx_bcn(struct ural_softc *, struct mbuf *, 123 struct ieee80211_node *); 124static int ural_tx_mgt(struct ural_softc *, struct mbuf *, 125 struct ieee80211_node *); 126static int ural_tx_data(struct ural_softc *, struct mbuf *, 127 struct ieee80211_node *); 128static void ural_start(struct ifnet *); 129static int ural_ioctl(struct ifnet *, u_long, caddr_t); 130static void ural_set_testmode(struct ural_softc *); 131static void ural_eeprom_read(struct ural_softc *, uint16_t, void *, 132 int); 133static uint16_t ural_read(struct ural_softc *, uint16_t); 134static void ural_read_multi(struct ural_softc *, uint16_t, void *, 135 int); 136static void ural_write(struct ural_softc *, uint16_t, uint16_t); 
137static void ural_write_multi(struct ural_softc *, uint16_t, void *, 138 int) __unused; 139static void ural_bbp_write(struct ural_softc *, uint8_t, uint8_t); 140static uint8_t ural_bbp_read(struct ural_softc *, uint8_t); 141static void ural_rf_write(struct ural_softc *, uint8_t, uint32_t); 142static struct ieee80211_node *ural_node_alloc(struct ieee80211vap *, 143 const uint8_t mac[IEEE80211_ADDR_LEN]); 144static void ural_newassoc(struct ieee80211_node *, int); 145static void ural_scan_start(struct ieee80211com *); 146static void ural_scan_end(struct ieee80211com *); 147static void ural_set_channel(struct ieee80211com *); 148static void ural_set_chan(struct ural_softc *, 149 struct ieee80211_channel *); 150static void ural_disable_rf_tune(struct ural_softc *); 151static void ural_enable_tsf_sync(struct ural_softc *); 152static void ural_update_slot(struct ifnet *); 153static void ural_set_txpreamble(struct ural_softc *); 154static void ural_set_basicrates(struct ural_softc *, 155 const struct ieee80211_channel *); 156static void ural_set_bssid(struct ural_softc *, const uint8_t *); 157static void ural_set_macaddr(struct ural_softc *, uint8_t *); 158static void ural_update_promisc(struct ifnet *); 159static const char *ural_get_rf(int); 160static void ural_read_eeprom(struct ural_softc *); 161static int ural_bbp_init(struct ural_softc *); 162static void ural_set_txantenna(struct ural_softc *, int); 163static void ural_set_rxantenna(struct ural_softc *, int); 164static void ural_init(void *); 165static int ural_raw_xmit(struct ieee80211_node *, struct mbuf *, 166 const struct ieee80211_bpf_params *); 167static void ural_amrr_start(struct ural_softc *, 168 struct ieee80211_node *); 169static void ural_amrr_timeout(void *); 170static int ural_pause(struct ural_softc *sc, int timeout); 171static void ural_queue_command(struct ural_softc *, 172 usb2_proc_callback_t *, struct usb2_proc_msg *, 173 struct usb2_proc_msg *); 174 175/* 176 * Default values for MAC 
registers; values taken from the reference driver. 177 */ 178static const struct { 179 uint16_t reg; 180 uint16_t val; 181} ural_def_mac[] = { 182 { RAL_TXRX_CSR5, 0x8c8d }, 183 { RAL_TXRX_CSR6, 0x8b8a }, 184 { RAL_TXRX_CSR7, 0x8687 }, 185 { RAL_TXRX_CSR8, 0x0085 }, 186 { RAL_MAC_CSR13, 0x1111 }, 187 { RAL_MAC_CSR14, 0x1e11 }, 188 { RAL_TXRX_CSR21, 0xe78f }, 189 { RAL_MAC_CSR9, 0xff1d }, 190 { RAL_MAC_CSR11, 0x0002 }, 191 { RAL_MAC_CSR22, 0x0053 }, 192 { RAL_MAC_CSR15, 0x0000 }, 193 { RAL_MAC_CSR8, RAL_FRAME_SIZE }, 194 { RAL_TXRX_CSR19, 0x0000 }, 195 { RAL_TXRX_CSR18, 0x005a }, 196 { RAL_PHY_CSR2, 0x0000 }, 197 { RAL_TXRX_CSR0, 0x1ec0 }, 198 { RAL_PHY_CSR4, 0x000f } 199}; 200 201/* 202 * Default values for BBP registers; values taken from the reference driver. 203 */ 204static const struct { 205 uint8_t reg; 206 uint8_t val; 207} ural_def_bbp[] = { 208 { 3, 0x02 }, 209 { 4, 0x19 }, 210 { 14, 0x1c }, 211 { 15, 0x30 }, 212 { 16, 0xac }, 213 { 17, 0x48 }, 214 { 18, 0x18 }, 215 { 19, 0xff }, 216 { 20, 0x1e }, 217 { 21, 0x08 }, 218 { 22, 0x08 }, 219 { 23, 0x08 }, 220 { 24, 0x80 }, 221 { 25, 0x50 }, 222 { 26, 0x08 }, 223 { 27, 0x23 }, 224 { 30, 0x10 }, 225 { 31, 0x2b }, 226 { 32, 0xb9 }, 227 { 34, 0x12 }, 228 { 35, 0x50 }, 229 { 39, 0xc4 }, 230 { 40, 0x02 }, 231 { 41, 0x60 }, 232 { 53, 0x10 }, 233 { 54, 0x18 }, 234 { 56, 0x08 }, 235 { 57, 0x10 }, 236 { 58, 0x08 }, 237 { 61, 0x60 }, 238 { 62, 0x10 }, 239 { 75, 0xff } 240}; 241 242/* 243 * Default values for RF register R2 indexed by channel numbers. 
244 */ 245static const uint32_t ural_rf2522_r2[] = { 246 0x307f6, 0x307fb, 0x30800, 0x30805, 0x3080a, 0x3080f, 0x30814, 247 0x30819, 0x3081e, 0x30823, 0x30828, 0x3082d, 0x30832, 0x3083e 248}; 249 250static const uint32_t ural_rf2523_r2[] = { 251 0x00327, 0x00328, 0x00329, 0x0032a, 0x0032b, 0x0032c, 0x0032d, 252 0x0032e, 0x0032f, 0x00340, 0x00341, 0x00342, 0x00343, 0x00346 253}; 254 255static const uint32_t ural_rf2524_r2[] = { 256 0x00327, 0x00328, 0x00329, 0x0032a, 0x0032b, 0x0032c, 0x0032d, 257 0x0032e, 0x0032f, 0x00340, 0x00341, 0x00342, 0x00343, 0x00346 258}; 259 260static const uint32_t ural_rf2525_r2[] = { 261 0x20327, 0x20328, 0x20329, 0x2032a, 0x2032b, 0x2032c, 0x2032d, 262 0x2032e, 0x2032f, 0x20340, 0x20341, 0x20342, 0x20343, 0x20346 263}; 264 265static const uint32_t ural_rf2525_hi_r2[] = { 266 0x2032f, 0x20340, 0x20341, 0x20342, 0x20343, 0x20344, 0x20345, 267 0x20346, 0x20347, 0x20348, 0x20349, 0x2034a, 0x2034b, 0x2034e 268}; 269 270static const uint32_t ural_rf2525e_r2[] = { 271 0x2044d, 0x2044e, 0x2044f, 0x20460, 0x20461, 0x20462, 0x20463, 272 0x20464, 0x20465, 0x20466, 0x20467, 0x20468, 0x20469, 0x2046b 273}; 274 275static const uint32_t ural_rf2526_hi_r2[] = { 276 0x0022a, 0x0022b, 0x0022b, 0x0022c, 0x0022c, 0x0022d, 0x0022d, 277 0x0022e, 0x0022e, 0x0022f, 0x0022d, 0x00240, 0x00240, 0x00241 278}; 279 280static const uint32_t ural_rf2526_r2[] = { 281 0x00226, 0x00227, 0x00227, 0x00228, 0x00228, 0x00229, 0x00229, 282 0x0022a, 0x0022a, 0x0022b, 0x0022b, 0x0022c, 0x0022c, 0x0022d 283}; 284 285/* 286 * For dual-band RF, RF registers R1 and R4 also depend on channel number; 287 * values taken from the reference driver. 
288 */ 289static const struct { 290 uint8_t chan; 291 uint32_t r1; 292 uint32_t r2; 293 uint32_t r4; 294} ural_rf5222[] = { 295 { 1, 0x08808, 0x0044d, 0x00282 }, 296 { 2, 0x08808, 0x0044e, 0x00282 }, 297 { 3, 0x08808, 0x0044f, 0x00282 }, 298 { 4, 0x08808, 0x00460, 0x00282 }, 299 { 5, 0x08808, 0x00461, 0x00282 }, 300 { 6, 0x08808, 0x00462, 0x00282 }, 301 { 7, 0x08808, 0x00463, 0x00282 }, 302 { 8, 0x08808, 0x00464, 0x00282 }, 303 { 9, 0x08808, 0x00465, 0x00282 }, 304 { 10, 0x08808, 0x00466, 0x00282 }, 305 { 11, 0x08808, 0x00467, 0x00282 }, 306 { 12, 0x08808, 0x00468, 0x00282 }, 307 { 13, 0x08808, 0x00469, 0x00282 }, 308 { 14, 0x08808, 0x0046b, 0x00286 }, 309 310 { 36, 0x08804, 0x06225, 0x00287 }, 311 { 40, 0x08804, 0x06226, 0x00287 }, 312 { 44, 0x08804, 0x06227, 0x00287 }, 313 { 48, 0x08804, 0x06228, 0x00287 }, 314 { 52, 0x08804, 0x06229, 0x00287 }, 315 { 56, 0x08804, 0x0622a, 0x00287 }, 316 { 60, 0x08804, 0x0622b, 0x00287 }, 317 { 64, 0x08804, 0x0622c, 0x00287 }, 318 319 { 100, 0x08804, 0x02200, 0x00283 }, 320 { 104, 0x08804, 0x02201, 0x00283 }, 321 { 108, 0x08804, 0x02202, 0x00283 }, 322 { 112, 0x08804, 0x02203, 0x00283 }, 323 { 116, 0x08804, 0x02204, 0x00283 }, 324 { 120, 0x08804, 0x02205, 0x00283 }, 325 { 124, 0x08804, 0x02206, 0x00283 }, 326 { 128, 0x08804, 0x02207, 0x00283 }, 327 { 132, 0x08804, 0x02208, 0x00283 }, 328 { 136, 0x08804, 0x02209, 0x00283 }, 329 { 140, 0x08804, 0x0220a, 0x00283 }, 330 331 { 149, 0x08808, 0x02429, 0x00281 }, 332 { 153, 0x08808, 0x0242b, 0x00281 }, 333 { 157, 0x08808, 0x0242d, 0x00281 }, 334 { 161, 0x08808, 0x0242f, 0x00281 } 335}; 336 337static const struct usb2_config ural_config[URAL_N_TRANSFER] = { 338 [URAL_BULK_WR] = { 339 .type = UE_BULK, 340 .endpoint = UE_ADDR_ANY, 341 .direction = UE_DIR_OUT, 342 .mh.bufsize = (RAL_FRAME_SIZE + RAL_TX_DESC_SIZE + 4), 343 .mh.flags = {.pipe_bof = 1,.force_short_xfer = 1,}, 344 .mh.callback = ural_bulk_write_callback, 345 .mh.timeout = 5000, /* ms */ 346 }, 347 [URAL_BULK_RD] = { 348 .type = 
UE_BULK, 349 .endpoint = UE_ADDR_ANY, 350 .direction = UE_DIR_IN, 351 .mh.bufsize = (RAL_FRAME_SIZE + RAL_RX_DESC_SIZE), 352 .mh.flags = {.pipe_bof = 1,.short_xfer_ok = 1,}, 353 .mh.callback = ural_bulk_read_callback, 354 }, 355}; 356 357static device_probe_t ural_match; 358static device_attach_t ural_attach; 359static device_detach_t ural_detach; 360 361static device_method_t ural_methods[] = { 362 /* Device interface */ 363 DEVMETHOD(device_probe, ural_match), 364 DEVMETHOD(device_attach, ural_attach), 365 DEVMETHOD(device_detach, ural_detach), 366 367 { 0, 0 } 368}; 369 370static driver_t ural_driver = { 371 .name = "ural", 372 .methods = ural_methods, 373 .size = sizeof(struct ural_softc), 374}; 375 376static devclass_t ural_devclass; 377 378DRIVER_MODULE(ural, uhub, ural_driver, ural_devclass, NULL, 0); 379MODULE_DEPEND(ural, usb, 1, 1, 1); 380MODULE_DEPEND(ural, wlan, 1, 1, 1); 381MODULE_DEPEND(ural, wlan_amrr, 1, 1, 1); 382 383static int 384ural_match(device_t self) 385{ 386 struct usb2_attach_arg *uaa = device_get_ivars(self); 387 388 if (uaa->usb2_mode != USB_MODE_HOST) 389 return (ENXIO); 390 if (uaa->info.bConfigIndex != 0) 391 return (ENXIO); 392 if (uaa->info.bIfaceIndex != RAL_IFACE_INDEX) 393 return (ENXIO); 394 395 return (usb2_lookup_id_by_uaa(ural_devs, sizeof(ural_devs), uaa)); 396} 397 398static int 399ural_attach(device_t self) 400{ 401 struct usb2_attach_arg *uaa = device_get_ivars(self); 402 struct ural_softc *sc = device_get_softc(self); 403 int error; 404 uint8_t iface_index; 405 406 device_set_usb2_desc(self); 407 sc->sc_udev = uaa->device; 408 sc->sc_dev = self; 409 410 mtx_init(&sc->sc_mtx, device_get_nameunit(self), 411 MTX_NETWORK_LOCK, MTX_DEF); 412 413 cv_init(&sc->sc_cmd_cv, "wtxdone"); 414 415 iface_index = RAL_IFACE_INDEX; 416 error = usb2_transfer_setup(uaa->device, 417 &iface_index, sc->sc_xfer, ural_config, 418 URAL_N_TRANSFER, sc, &sc->sc_mtx); 419 if (error) { 420 device_printf(self, "could not allocate USB transfers, " 421 
"err=%s\n", usb2_errstr(error)); 422 goto detach; 423 } 424 error = usb2_proc_create(&sc->sc_tq, &sc->sc_mtx, 425 device_get_nameunit(self), USB_PRI_MED); 426 if (error) { 427 device_printf(self, "could not setup config thread!\n"); 428 goto detach; 429 } 430 431 /* fork rest of the attach code */ 432 RAL_LOCK(sc); 433 ural_queue_command(sc, ural_attach_post, 434 &sc->sc_synctask[0].hdr, 435 &sc->sc_synctask[1].hdr); 436 RAL_UNLOCK(sc); 437 return (0); 438 439detach: 440 ural_detach(self); 441 return (ENXIO); /* failure */ 442} 443 444static void 445ural_attach_post(struct usb2_proc_msg *pm) 446{ 447 struct ural_task *task = (struct ural_task *)pm; 448 struct ural_softc *sc = task->sc; 449 struct ifnet *ifp; 450 struct ieee80211com *ic; 451 uint8_t bands; 452 453 /* retrieve RT2570 rev. no */ 454 sc->asic_rev = ural_read(sc, RAL_MAC_CSR0); 455 456 /* retrieve MAC address and various other things from EEPROM */ 457 ural_read_eeprom(sc); 458 459 /* XXX Async attach race */ 460 if (usb2_proc_is_gone(&sc->sc_tq)) 461 return; 462 463 RAL_UNLOCK(sc); 464 465 device_printf(sc->sc_dev, "MAC/BBP RT2570 (rev 0x%02x), RF %s\n", 466 sc->asic_rev, ural_get_rf(sc->rf_rev)); 467 468 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 469 if (ifp == NULL) { 470 device_printf(sc->sc_dev, "can not if_alloc()\n"); 471 RAL_LOCK(sc); 472 return; 473 } 474 ic = ifp->if_l2com; 475 476 ifp->if_softc = sc; 477 if_initname(ifp, "ural", device_get_unit(sc->sc_dev)); 478 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 479 ifp->if_init = ural_init; 480 ifp->if_ioctl = ural_ioctl; 481 ifp->if_start = ural_start; 482 IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); 483 ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; 484 IFQ_SET_READY(&ifp->if_snd); 485 486 ic->ic_ifp = ifp; 487 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 488 IEEE80211_ADDR_COPY(ic->ic_myaddr, sc->sc_bssid); 489 490 /* set device capabilities */ 491 ic->ic_caps = 492 IEEE80211_C_STA /* station mode supported */ 493 | 
IEEE80211_C_IBSS /* IBSS mode supported */ 494 | IEEE80211_C_MONITOR /* monitor mode supported */ 495 | IEEE80211_C_HOSTAP /* HostAp mode supported */ 496 | IEEE80211_C_TXPMGT /* tx power management */ 497 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 498 | IEEE80211_C_SHSLOT /* short slot time supported */ 499 | IEEE80211_C_BGSCAN /* bg scanning supported */ 500 | IEEE80211_C_WPA /* 802.11i */ 501 ; 502 503 bands = 0; 504 setbit(&bands, IEEE80211_MODE_11B); 505 setbit(&bands, IEEE80211_MODE_11G); 506 if (sc->rf_rev == RAL_RF_5222) 507 setbit(&bands, IEEE80211_MODE_11A); 508 ieee80211_init_channels(ic, NULL, &bands); 509 510 ieee80211_ifattach(ic); 511 ic->ic_update_promisc = ural_update_promisc; 512 ic->ic_newassoc = ural_newassoc; 513 ic->ic_raw_xmit = ural_raw_xmit; 514 ic->ic_node_alloc = ural_node_alloc; 515 ic->ic_scan_start = ural_scan_start; 516 ic->ic_scan_end = ural_scan_end; 517 ic->ic_set_channel = ural_set_channel; 518 519 ic->ic_vap_create = ural_vap_create; 520 ic->ic_vap_delete = ural_vap_delete; 521 522 sc->sc_rates = ieee80211_get_ratetable(ic->ic_curchan); 523 524 bpfattach(ifp, DLT_IEEE802_11_RADIO, 525 sizeof (struct ieee80211_frame) + sizeof(sc->sc_txtap)); 526 527 sc->sc_rxtap_len = sizeof sc->sc_rxtap; 528 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len); 529 sc->sc_rxtap.wr_ihdr.it_present = htole32(RAL_RX_RADIOTAP_PRESENT); 530 531 sc->sc_txtap_len = sizeof sc->sc_txtap; 532 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len); 533 sc->sc_txtap.wt_ihdr.it_present = htole32(RAL_TX_RADIOTAP_PRESENT); 534 535 if (bootverbose) 536 ieee80211_announce(ic); 537 538 RAL_LOCK(sc); 539} 540 541static int 542ural_detach(device_t self) 543{ 544 struct ural_softc *sc = device_get_softc(self); 545 struct ifnet *ifp = sc->sc_ifp; 546 struct ieee80211com *ic; 547 548 /* wait for any post attach or other command to complete */ 549 usb2_proc_drain(&sc->sc_tq); 550 551 /* stop all USB transfers */ 552 usb2_transfer_unsetup(sc->sc_xfer, 
URAL_N_TRANSFER); 553 usb2_proc_free(&sc->sc_tq); 554 555 /* free TX list, if any */ 556 RAL_LOCK(sc); 557 ural_unsetup_tx_list(sc); 558 RAL_UNLOCK(sc); 559 560 if (ifp) { 561 ic = ifp->if_l2com; 562 bpfdetach(ifp); 563 ieee80211_ifdetach(ic); 564 if_free(ifp); 565 } 566 cv_destroy(&sc->sc_cmd_cv); 567 mtx_destroy(&sc->sc_mtx); 568 569 return (0); 570} 571 572static usb2_error_t 573ural_do_request(struct ural_softc *sc, 574 struct usb2_device_request *req, void *data) 575{ 576 usb2_error_t err; 577 int ntries = 10; 578 579 while (ntries--) { 580 err = usb2_do_request_proc(sc->sc_udev, &sc->sc_tq, 581 req, data, 0, NULL, 250 /* ms */); 582 if (err == 0) 583 break; 584 585 DPRINTFN(1, "Control request failed, %s (retrying)\n", 586 usb2_errstr(err)); 587 if (ural_pause(sc, hz / 100)) 588 break; 589 } 590 return (err); 591} 592 593static struct ieee80211vap * 594ural_vap_create(struct ieee80211com *ic, 595 const char name[IFNAMSIZ], int unit, int opmode, int flags, 596 const uint8_t bssid[IEEE80211_ADDR_LEN], 597 const uint8_t mac[IEEE80211_ADDR_LEN]) 598{ 599 struct ural_softc *sc = ic->ic_ifp->if_softc; 600 struct ural_vap *uvp; 601 struct ieee80211vap *vap; 602 603 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 604 return NULL; 605 uvp = (struct ural_vap *) malloc(sizeof(struct ural_vap), 606 M_80211_VAP, M_NOWAIT | M_ZERO); 607 if (uvp == NULL) 608 return NULL; 609 vap = &uvp->vap; 610 /* enable s/w bmiss handling for sta mode */ 611 ieee80211_vap_setup(ic, vap, name, unit, opmode, 612 flags | IEEE80211_CLONE_NOBEACONS, bssid, mac); 613 614 /* override state transition machine */ 615 uvp->newstate = vap->iv_newstate; 616 vap->iv_newstate = ural_newstate; 617 618 uvp->sc = sc; 619 usb2_callout_init_mtx(&uvp->amrr_ch, &sc->sc_mtx, 0); 620 ieee80211_amrr_init(&uvp->amrr, vap, 621 IEEE80211_AMRR_MIN_SUCCESS_THRESHOLD, 622 IEEE80211_AMRR_MAX_SUCCESS_THRESHOLD, 623 1000 /* 1 sec */); 624 625 /* complete setup */ 626 ieee80211_vap_attach(vap, 
ieee80211_media_change, ieee80211_media_status); 627 ic->ic_opmode = opmode; 628 return vap; 629} 630 631static void 632ural_flush_task(struct usb2_proc_msg *pm) 633{ 634 /* nothing to do */ 635} 636 637static void 638ural_vap_delete(struct ieee80211vap *vap) 639{ 640 struct ural_vap *uvp = URAL_VAP(vap); 641 struct ural_softc *sc = uvp->sc; 642 643 RAL_LOCK(sc); 644 /* wait for any pending tasks to complete */ 645 ural_queue_command(sc, ural_flush_task, 646 &sc->sc_synctask[0].hdr, 647 &sc->sc_synctask[1].hdr); 648 RAL_UNLOCK(sc); 649 650 usb2_callout_drain(&uvp->amrr_ch); 651 ieee80211_amrr_cleanup(&uvp->amrr); 652 ieee80211_vap_detach(vap); 653 free(uvp, M_80211_VAP); 654} 655 656static void 657ural_tx_free(struct ural_tx_data *data, int txerr) 658{ 659 struct ural_softc *sc = data->sc; 660 661 if (data->m != NULL) { 662 if (data->m->m_flags & M_TXCB) 663 ieee80211_process_callback(data->ni, data->m, 664 txerr ? ETIMEDOUT : 0); 665 m_freem(data->m); 666 data->m = NULL; 667 668 ieee80211_free_node(data->ni); 669 data->ni = NULL; 670 } 671 STAILQ_INSERT_TAIL(&sc->tx_free, data, next); 672 sc->tx_nfree++; 673} 674 675static void 676ural_setup_tx_list(struct ural_softc *sc) 677{ 678 struct ural_tx_data *data; 679 int i; 680 681 sc->tx_nfree = 0; 682 STAILQ_INIT(&sc->tx_q); 683 STAILQ_INIT(&sc->tx_free); 684 685 for (i = 0; i < RAL_TX_LIST_COUNT; i++) { 686 data = &sc->tx_data[i]; 687 688 data->sc = sc; 689 STAILQ_INSERT_TAIL(&sc->tx_free, data, next); 690 sc->tx_nfree++; 691 } 692} 693 694static void 695ural_unsetup_tx_list(struct ural_softc *sc) 696{ 697 struct ural_tx_data *data; 698 int i; 699 700 /* make sure any subsequent use of the queues will fail */ 701 sc->tx_nfree = 0; 702 STAILQ_INIT(&sc->tx_q); 703 STAILQ_INIT(&sc->tx_free); 704 705 /* free up all node references and mbufs */ 706 for (i = 0; i < RAL_TX_LIST_COUNT; i++) { 707 data = &sc->tx_data[i]; 708 709 if (data->m != NULL) { 710 m_freem(data->m); 711 data->m = NULL; 712 } 713 if (data->ni != NULL) { 
714 ieee80211_free_node(data->ni); 715 data->ni = NULL; 716 } 717 } 718} 719 720static void 721ural_task(struct usb2_proc_msg *pm) 722{ 723 struct ural_task *task = (struct ural_task *)pm; 724 struct ural_softc *sc = task->sc; 725 struct ifnet *ifp = sc->sc_ifp; 726 struct ieee80211com *ic = ifp->if_l2com; 727 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 728 struct ural_vap *uvp = URAL_VAP(vap); 729 const struct ieee80211_txparam *tp; 730 enum ieee80211_state ostate; 731 struct ieee80211_node *ni; 732 struct mbuf *m; 733 734 ostate = vap->iv_state; 735 736 switch (sc->sc_state) { 737 case IEEE80211_S_INIT: 738 if (ostate == IEEE80211_S_RUN) { 739 /* abort TSF synchronization */ 740 ural_write(sc, RAL_TXRX_CSR19, 0); 741 742 /* force tx led to stop blinking */ 743 ural_write(sc, RAL_MAC_CSR20, 0); 744 } 745 break; 746 747 case IEEE80211_S_RUN: 748 ni = vap->iv_bss; 749 750 if (vap->iv_opmode != IEEE80211_M_MONITOR) { 751 ural_update_slot(ic->ic_ifp); 752 ural_set_txpreamble(sc); 753 ural_set_basicrates(sc, ic->ic_bsschan); 754 IEEE80211_ADDR_COPY(sc->sc_bssid, ni->ni_bssid); 755 ural_set_bssid(sc, sc->sc_bssid); 756 } 757 758 if (vap->iv_opmode == IEEE80211_M_HOSTAP || 759 vap->iv_opmode == IEEE80211_M_IBSS) { 760 m = ieee80211_beacon_alloc(ni, &uvp->bo); 761 if (m == NULL) { 762 device_printf(sc->sc_dev, 763 "could not allocate beacon\n"); 764 return; 765 } 766 767 if (ural_tx_bcn(sc, m, ni) != 0) { 768 device_printf(sc->sc_dev, 769 "could not send beacon\n"); 770 return; 771 } 772 } 773 774 /* make tx led blink on tx (controlled by ASIC) */ 775 ural_write(sc, RAL_MAC_CSR20, 1); 776 777 if (vap->iv_opmode != IEEE80211_M_MONITOR) 778 ural_enable_tsf_sync(sc); 779 780 /* enable automatic rate adaptation */ 781 tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_bsschan)]; 782 if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) 783 ural_amrr_start(sc, ni); 784 785 break; 786 787 default: 788 break; 789 } 790 791 RAL_UNLOCK(sc); 792 IEEE80211_LOCK(ic); 793 
uvp->newstate(vap, sc->sc_state, sc->sc_arg); 794 if (vap->iv_newstate_cb != NULL) 795 vap->iv_newstate_cb(vap, sc->sc_state, sc->sc_arg); 796 IEEE80211_UNLOCK(ic); 797 RAL_LOCK(sc); 798} 799 800static void 801ural_scantask(struct usb2_proc_msg *pm) 802{ 803 struct ural_task *task = (struct ural_task *)pm; 804 struct ural_softc *sc = task->sc; 805 struct ifnet *ifp = sc->sc_ifp; 806 struct ieee80211com *ic = ifp->if_l2com; 807 808 RAL_LOCK_ASSERT(sc, MA_OWNED); 809 810 switch (sc->sc_scan_action) { 811 case URAL_SCAN_START: 812 /* abort TSF synchronization */ 813 DPRINTF("starting scan\n"); 814 ural_write(sc, RAL_TXRX_CSR19, 0); 815 ural_set_bssid(sc, ifp->if_broadcastaddr); 816 break; 817 818 case URAL_SET_CHANNEL: 819 ural_set_chan(sc, ic->ic_curchan); 820 break; 821 822 default: /* URAL_SCAN_END */ 823 DPRINTF("stopping scan\n"); 824 ural_enable_tsf_sync(sc); 825 ural_set_bssid(sc, sc->sc_bssid); 826 break; 827 } 828} 829 830static int 831ural_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 832{ 833 struct ural_vap *uvp = URAL_VAP(vap); 834 struct ieee80211com *ic = vap->iv_ic; 835 struct ural_softc *sc = ic->ic_ifp->if_softc; 836 837 DPRINTF("%s -> %s\n", 838 ieee80211_state_name[vap->iv_state], 839 ieee80211_state_name[nstate]); 840 841 RAL_LOCK(sc); 842 usb2_callout_stop(&uvp->amrr_ch); 843 844 /* do it in a process context */ 845 sc->sc_state = nstate; 846 sc->sc_arg = arg; 847 RAL_UNLOCK(sc); 848 849 if (nstate == IEEE80211_S_INIT) { 850 uvp->newstate(vap, nstate, arg); 851 return 0; 852 } else { 853 RAL_LOCK(sc); 854 ural_queue_command(sc, ural_task, &sc->sc_task[0].hdr, 855 &sc->sc_task[1].hdr); 856 RAL_UNLOCK(sc); 857 return EINPROGRESS; 858 } 859} 860 861 862static void 863ural_bulk_write_callback(struct usb2_xfer *xfer) 864{ 865 struct ural_softc *sc = xfer->priv_sc; 866 struct ifnet *ifp = sc->sc_ifp; 867 struct ieee80211com *ic = ifp->if_l2com; 868 struct ieee80211_channel *c = ic->ic_curchan; 869 struct ural_tx_data *data; 
870 struct mbuf *m; 871 unsigned int len; 872 873 /* wakeup waiting command, if any */ 874 if (sc->sc_last_task != NULL) 875 cv_signal(&sc->sc_cmd_cv); 876 877 switch (USB_GET_STATE(xfer)) { 878 case USB_ST_TRANSFERRED: 879 DPRINTFN(11, "transfer complete, %d bytes\n", xfer->actlen); 880 881 /* free resources */ 882 data = xfer->priv_fifo; 883 ural_tx_free(data, 0); 884 xfer->priv_fifo = NULL; 885 886 ifp->if_opackets++; 887 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 888 889 /* FALLTHROUGH */ 890 case USB_ST_SETUP: 891tr_setup: 892 /* wait for command to complete, if any */ 893 if (sc->sc_last_task != NULL) 894 break; 895 896 data = STAILQ_FIRST(&sc->tx_q); 897 if (data) { 898 STAILQ_REMOVE_HEAD(&sc->tx_q, next); 899 m = data->m; 900 901 if (m->m_pkthdr.len > (RAL_FRAME_SIZE + RAL_TX_DESC_SIZE)) { 902 DPRINTFN(0, "data overflow, %u bytes\n", 903 m->m_pkthdr.len); 904 m->m_pkthdr.len = (RAL_FRAME_SIZE + RAL_TX_DESC_SIZE); 905 } 906 usb2_copy_in(xfer->frbuffers, 0, &data->desc, 907 RAL_TX_DESC_SIZE); 908 usb2_m_copy_in(xfer->frbuffers, RAL_TX_DESC_SIZE, m, 0, 909 m->m_pkthdr.len); 910 911 if (bpf_peers_present(ifp->if_bpf)) { 912 struct ural_tx_radiotap_header *tap = &sc->sc_txtap; 913 914 tap->wt_flags = 0; 915 tap->wt_rate = data->rate; 916 tap->wt_chan_freq = htole16(c->ic_freq); 917 tap->wt_chan_flags = htole16(c->ic_flags); 918 tap->wt_antenna = sc->tx_ant; 919 920 bpf_mtap2(ifp->if_bpf, tap, sc->sc_txtap_len, m); 921 } 922 923 /* xfer length needs to be a multiple of two! 
*/ 924 len = (RAL_TX_DESC_SIZE + m->m_pkthdr.len + 1) & ~1; 925 if ((len % 64) == 0) 926 len += 2; 927 928 DPRINTFN(11, "sending frame len=%u xferlen=%u\n", 929 m->m_pkthdr.len, len); 930 931 xfer->frlengths[0] = len; 932 xfer->priv_fifo = data; 933 934 usb2_start_hardware(xfer); 935 } 936 break; 937 938 default: /* Error */ 939 DPRINTFN(11, "transfer error, %s\n", 940 usb2_errstr(xfer->error)); 941 942 ifp->if_oerrors++; 943 data = xfer->priv_fifo; 944 if (data != NULL) { 945 ural_tx_free(data, xfer->error); 946 xfer->priv_fifo = NULL; 947 } 948 949 if (xfer->error == USB_ERR_STALLED) { 950 /* try to clear stall first */ 951 xfer->flags.stall_pipe = 1; 952 goto tr_setup; 953 } 954 if (xfer->error == USB_ERR_TIMEOUT) 955 device_printf(sc->sc_dev, "device timeout\n"); 956 break; 957 } 958} 959 960static void 961ural_bulk_read_callback(struct usb2_xfer *xfer) 962{ 963 struct ural_softc *sc = xfer->priv_sc; 964 struct ifnet *ifp = sc->sc_ifp; 965 struct ieee80211com *ic = ifp->if_l2com; 966 struct ieee80211_node *ni; 967 struct mbuf *m = NULL; 968 uint32_t flags; 969 uint8_t rssi = 0; 970 unsigned int len; 971 972 switch (USB_GET_STATE(xfer)) { 973 case USB_ST_TRANSFERRED: 974 975 DPRINTFN(15, "rx done, actlen=%d\n", xfer->actlen); 976 977 len = xfer->actlen; 978 if (len < RAL_RX_DESC_SIZE + IEEE80211_MIN_LEN) { 979 DPRINTF("%s: xfer too short %d\n", 980 device_get_nameunit(sc->sc_dev), len); 981 ifp->if_ierrors++; 982 goto tr_setup; 983 } 984 985 len -= RAL_RX_DESC_SIZE; 986 /* rx descriptor is located at the end */ 987 usb2_copy_out(xfer->frbuffers, len, &sc->sc_rx_desc, 988 RAL_RX_DESC_SIZE); 989 990 rssi = URAL_RSSI(sc->sc_rx_desc.rssi); 991 flags = le32toh(sc->sc_rx_desc.flags); 992 if (flags & (RAL_RX_PHY_ERROR | RAL_RX_CRC_ERROR)) { 993 /* 994 * This should not happen since we did not 995 * request to receive those frames when we 996 * filled RAL_TXRX_CSR2: 997 */ 998 DPRINTFN(5, "PHY or CRC error\n"); 999 ifp->if_ierrors++; 1000 goto tr_setup; 1001 } 1002 1003 
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1004 if (m == NULL) { 1005 DPRINTF("could not allocate mbuf\n"); 1006 ifp->if_ierrors++; 1007 goto tr_setup; 1008 } 1009 usb2_copy_out(xfer->frbuffers, 0, mtod(m, uint8_t *), len); 1010 1011 /* finalize mbuf */ 1012 m->m_pkthdr.rcvif = ifp; 1013 m->m_pkthdr.len = m->m_len = (flags >> 16) & 0xfff; 1014 1015 if (bpf_peers_present(ifp->if_bpf)) { 1016 struct ural_rx_radiotap_header *tap = &sc->sc_rxtap; 1017 1018 tap->wr_flags = IEEE80211_RADIOTAP_F_FCS; 1019 tap->wr_rate = ieee80211_plcp2rate(sc->sc_rx_desc.rate, 1020 (flags & RAL_RX_OFDM) ? 1021 IEEE80211_T_OFDM : IEEE80211_T_CCK); 1022 tap->wr_chan_freq = htole16(ic->ic_curchan->ic_freq); 1023 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags); 1024 tap->wr_antenna = sc->rx_ant; 1025 tap->wr_antsignal = rssi; 1026 1027 bpf_mtap2(ifp->if_bpf, tap, sc->sc_rxtap_len, m); 1028 } 1029 /* Strip trailing 802.11 MAC FCS. */ 1030 m_adj(m, -IEEE80211_CRC_LEN); 1031 1032 /* FALLTHROUGH */ 1033 case USB_ST_SETUP: 1034tr_setup: 1035 xfer->frlengths[0] = xfer->max_data_length; 1036 usb2_start_hardware(xfer); 1037 1038 /* 1039 * At the end of a USB callback it is always safe to unlock 1040 * the private mutex of a device! That is why we do the 1041 * "ieee80211_input" here, and not some lines up! 1042 */ 1043 if (m) { 1044 RAL_UNLOCK(sc); 1045 ni = ieee80211_find_rxnode(ic, 1046 mtod(m, struct ieee80211_frame_min *)); 1047 if (ni != NULL) { 1048 (void) ieee80211_input(ni, m, rssi, 1049 RAL_NOISE_FLOOR, 0); 1050 ieee80211_free_node(ni); 1051 } else 1052 (void) ieee80211_input_all(ic, m, rssi, 1053 RAL_NOISE_FLOOR, 0); 1054 RAL_LOCK(sc); 1055 } 1056 return; 1057 1058 default: /* Error */ 1059 if (xfer->error != USB_ERR_CANCELLED) { 1060 /* try to clear stall first */ 1061 xfer->flags.stall_pipe = 1; 1062 goto tr_setup; 1063 } 1064 return; 1065 } 1066} 1067 1068static uint8_t 1069ural_plcp_signal(int rate) 1070{ 1071 switch (rate) { 1072 /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 
14 Table 80) */ 1073 case 12: return 0xb; 1074 case 18: return 0xf; 1075 case 24: return 0xa; 1076 case 36: return 0xe; 1077 case 48: return 0x9; 1078 case 72: return 0xd; 1079 case 96: return 0x8; 1080 case 108: return 0xc; 1081 1082 /* CCK rates (NB: not IEEE std, device-specific) */ 1083 case 2: return 0x0; 1084 case 4: return 0x1; 1085 case 11: return 0x2; 1086 case 22: return 0x3; 1087 } 1088 return 0xff; /* XXX unsupported/unknown rate */ 1089} 1090 1091static void 1092ural_setup_tx_desc(struct ural_softc *sc, struct ural_tx_desc *desc, 1093 uint32_t flags, int len, int rate) 1094{ 1095 struct ifnet *ifp = sc->sc_ifp; 1096 struct ieee80211com *ic = ifp->if_l2com; 1097 uint16_t plcp_length; 1098 int remainder; 1099 1100 desc->flags = htole32(flags); 1101 desc->flags |= htole32(RAL_TX_NEWSEQ); 1102 desc->flags |= htole32(len << 16); 1103 1104 desc->wme = htole16(RAL_AIFSN(2) | RAL_LOGCWMIN(3) | RAL_LOGCWMAX(5)); 1105 desc->wme |= htole16(RAL_IVOFFSET(sizeof (struct ieee80211_frame))); 1106 1107 /* setup PLCP fields */ 1108 desc->plcp_signal = ural_plcp_signal(rate); 1109 desc->plcp_service = 4; 1110 1111 len += IEEE80211_CRC_LEN; 1112 if (ieee80211_rate2phytype(sc->sc_rates, rate) == IEEE80211_T_OFDM) { 1113 desc->flags |= htole32(RAL_TX_OFDM); 1114 1115 plcp_length = len & 0xfff; 1116 desc->plcp_length_hi = plcp_length >> 6; 1117 desc->plcp_length_lo = plcp_length & 0x3f; 1118 } else { 1119 plcp_length = (16 * len + rate - 1) / rate; 1120 if (rate == 22) { 1121 remainder = (16 * len) % 22; 1122 if (remainder != 0 && remainder < 7) 1123 desc->plcp_service |= RAL_PLCP_LENGEXT; 1124 } 1125 desc->plcp_length_hi = plcp_length >> 8; 1126 desc->plcp_length_lo = plcp_length & 0xff; 1127 1128 if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) 1129 desc->plcp_signal |= 0x08; 1130 } 1131 1132 desc->iv = 0; 1133 desc->eiv = 0; 1134} 1135 1136#define RAL_TX_TIMEOUT 5000 1137 1138static int 1139ural_tx_bcn(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node 
*ni) 1140{ 1141 struct ieee80211vap *vap = ni->ni_vap; 1142 struct ieee80211com *ic = ni->ni_ic; 1143 struct ifnet *ifp = sc->sc_ifp; 1144 const struct ieee80211_txparam *tp; 1145 struct ural_tx_data *data; 1146 1147 if (sc->tx_nfree == 0) { 1148 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1149 m_freem(m0); 1150 ieee80211_free_node(ni); 1151 return EIO; 1152 } 1153 data = STAILQ_FIRST(&sc->tx_free); 1154 STAILQ_REMOVE_HEAD(&sc->tx_free, next); 1155 sc->tx_nfree--; 1156 tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_bsschan)]; 1157 1158 data->m = m0; 1159 data->ni = ni; 1160 data->rate = tp->mgmtrate; 1161 1162 ural_setup_tx_desc(sc, &data->desc, 1163 RAL_TX_IFS_NEWBACKOFF | RAL_TX_TIMESTAMP, m0->m_pkthdr.len, 1164 tp->mgmtrate); 1165 1166 DPRINTFN(10, "sending beacon frame len=%u rate=%u\n", 1167 m0->m_pkthdr.len, tp->mgmtrate); 1168 1169 STAILQ_INSERT_TAIL(&sc->tx_q, data, next); 1170 usb2_transfer_start(sc->sc_xfer[URAL_BULK_WR]); 1171 1172 return (0); 1173} 1174 1175static int 1176ural_tx_mgt(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) 1177{ 1178 struct ieee80211vap *vap = ni->ni_vap; 1179 struct ieee80211com *ic = ni->ni_ic; 1180 const struct ieee80211_txparam *tp; 1181 struct ural_tx_data *data; 1182 struct ieee80211_frame *wh; 1183 struct ieee80211_key *k; 1184 uint32_t flags; 1185 uint16_t dur; 1186 1187 RAL_LOCK_ASSERT(sc, MA_OWNED); 1188 1189 data = STAILQ_FIRST(&sc->tx_free); 1190 STAILQ_REMOVE_HEAD(&sc->tx_free, next); 1191 sc->tx_nfree--; 1192 1193 tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; 1194 1195 wh = mtod(m0, struct ieee80211_frame *); 1196 if (wh->i_fc[1] & IEEE80211_FC1_WEP) { 1197 k = ieee80211_crypto_encap(ni, m0); 1198 if (k == NULL) { 1199 m_freem(m0); 1200 return ENOBUFS; 1201 } 1202 wh = mtod(m0, struct ieee80211_frame *); 1203 } 1204 1205 data->m = m0; 1206 data->ni = ni; 1207 data->rate = tp->mgmtrate; 1208 1209 flags = 0; 1210 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 1211 flags |= RAL_TX_ACK; 1212 
1213 dur = ieee80211_ack_duration(sc->sc_rates, tp->mgmtrate, 1214 ic->ic_flags & IEEE80211_F_SHPREAMBLE); 1215 *(uint16_t *)wh->i_dur = htole16(dur); 1216 1217 /* tell hardware to add timestamp for probe responses */ 1218 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == 1219 IEEE80211_FC0_TYPE_MGT && 1220 (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == 1221 IEEE80211_FC0_SUBTYPE_PROBE_RESP) 1222 flags |= RAL_TX_TIMESTAMP; 1223 } 1224 1225 ural_setup_tx_desc(sc, &data->desc, flags, m0->m_pkthdr.len, tp->mgmtrate); 1226 1227 DPRINTFN(10, "sending mgt frame len=%u rate=%u\n", 1228 m0->m_pkthdr.len, tp->mgmtrate); 1229 1230 STAILQ_INSERT_TAIL(&sc->tx_q, data, next); 1231 usb2_transfer_start(sc->sc_xfer[URAL_BULK_WR]); 1232 1233 return 0; 1234} 1235 1236static int 1237ural_sendprot(struct ural_softc *sc, 1238 const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate) 1239{ 1240 struct ieee80211com *ic = ni->ni_ic; 1241 const struct ieee80211_frame *wh; 1242 struct ural_tx_data *data; 1243 struct mbuf *mprot; 1244 int protrate, ackrate, pktlen, flags, isshort; 1245 uint16_t dur; 1246 1247 KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY, 1248 ("protection %d", prot)); 1249 1250 wh = mtod(m, const struct ieee80211_frame *); 1251 pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; 1252 1253 protrate = ieee80211_ctl_rate(sc->sc_rates, rate); 1254 ackrate = ieee80211_ack_rate(sc->sc_rates, rate); 1255 1256 isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; 1257 dur = ieee80211_compute_duration(sc->sc_rates, pktlen, rate, isshort); 1258 + ieee80211_ack_duration(sc->sc_rates, rate, isshort); 1259 flags = RAL_TX_RETRY(7); 1260 if (prot == IEEE80211_PROT_RTSCTS) { 1261 /* NB: CTS is the same size as an ACK */ 1262 dur += ieee80211_ack_duration(sc->sc_rates, rate, isshort); 1263 flags |= RAL_TX_ACK; 1264 mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur); 1265 } else { 1266 mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur); 1267 
} 1268 if (mprot == NULL) { 1269 /* XXX stat + msg */ 1270 return ENOBUFS; 1271 } 1272 data = STAILQ_FIRST(&sc->tx_free); 1273 STAILQ_REMOVE_HEAD(&sc->tx_free, next); 1274 sc->tx_nfree--; 1275 1276 data->m = mprot; 1277 data->ni = ieee80211_ref_node(ni); 1278 data->rate = protrate; 1279 ural_setup_tx_desc(sc, &data->desc, flags, mprot->m_pkthdr.len, protrate); 1280 1281 STAILQ_INSERT_TAIL(&sc->tx_q, data, next); 1282 usb2_transfer_start(sc->sc_xfer[URAL_BULK_WR]); 1283 1284 return 0; 1285} 1286 1287static int 1288ural_tx_raw(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, 1289 const struct ieee80211_bpf_params *params) 1290{ 1291 struct ural_tx_data *data; 1292 uint32_t flags; 1293 int error; 1294 int rate; 1295 1296 RAL_LOCK_ASSERT(sc, MA_OWNED); 1297 KASSERT(params != NULL, ("no raw xmit params")); 1298 1299 rate = params->ibp_rate0 & IEEE80211_RATE_VAL; 1300 /* XXX validate */ 1301 if (rate == 0) { 1302 m_freem(m0); 1303 return EINVAL; 1304 } 1305 flags = 0; 1306 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 1307 flags |= RAL_TX_ACK; 1308 if (params->ibp_flags & (IEEE80211_BPF_RTS|IEEE80211_BPF_CTS)) { 1309 error = ural_sendprot(sc, m0, ni, 1310 params->ibp_flags & IEEE80211_BPF_RTS ? 
1311 IEEE80211_PROT_RTSCTS : IEEE80211_PROT_CTSONLY, 1312 rate); 1313 if (error || sc->tx_nfree == 0) { 1314 m_freem(m0); 1315 return ENOBUFS; 1316 } 1317 flags |= RAL_TX_IFS_SIFS; 1318 } 1319 1320 data = STAILQ_FIRST(&sc->tx_free); 1321 STAILQ_REMOVE_HEAD(&sc->tx_free, next); 1322 sc->tx_nfree--; 1323 1324 data->m = m0; 1325 data->ni = ni; 1326 data->rate = rate; 1327 1328 /* XXX need to setup descriptor ourself */ 1329 ural_setup_tx_desc(sc, &data->desc, flags, m0->m_pkthdr.len, rate); 1330 1331 DPRINTFN(10, "sending raw frame len=%u rate=%u\n", 1332 m0->m_pkthdr.len, rate); 1333 1334 STAILQ_INSERT_TAIL(&sc->tx_q, data, next); 1335 usb2_transfer_start(sc->sc_xfer[URAL_BULK_WR]); 1336 1337 return 0; 1338} 1339 1340static int 1341ural_tx_data(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) 1342{ 1343 struct ieee80211vap *vap = ni->ni_vap; 1344 struct ieee80211com *ic = ni->ni_ic; 1345 struct ural_tx_data *data; 1346 struct ieee80211_frame *wh; 1347 const struct ieee80211_txparam *tp; 1348 struct ieee80211_key *k; 1349 uint32_t flags = 0; 1350 uint16_t dur; 1351 int error, rate; 1352 1353 RAL_LOCK_ASSERT(sc, MA_OWNED); 1354 1355 wh = mtod(m0, struct ieee80211_frame *); 1356 1357 tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; 1358 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) 1359 rate = tp->mcastrate; 1360 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 1361 rate = tp->ucastrate; 1362 else 1363 rate = ni->ni_txrate; 1364 1365 if (wh->i_fc[1] & IEEE80211_FC1_WEP) { 1366 k = ieee80211_crypto_encap(ni, m0); 1367 if (k == NULL) { 1368 m_freem(m0); 1369 return ENOBUFS; 1370 } 1371 /* packet header may have moved, reset our local pointer */ 1372 wh = mtod(m0, struct ieee80211_frame *); 1373 } 1374 1375 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 1376 int prot = IEEE80211_PROT_NONE; 1377 if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) 1378 prot = IEEE80211_PROT_RTSCTS; 1379 else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 1380 
ieee80211_rate2phytype(sc->sc_rates, rate) == IEEE80211_T_OFDM) 1381 prot = ic->ic_protmode; 1382 if (prot != IEEE80211_PROT_NONE) { 1383 error = ural_sendprot(sc, m0, ni, prot, rate); 1384 if (error || sc->tx_nfree == 0) { 1385 m_freem(m0); 1386 return ENOBUFS; 1387 } 1388 flags |= RAL_TX_IFS_SIFS; 1389 } 1390 } 1391 1392 data = STAILQ_FIRST(&sc->tx_free); 1393 STAILQ_REMOVE_HEAD(&sc->tx_free, next); 1394 sc->tx_nfree--; 1395 1396 data->m = m0; 1397 data->ni = ni; 1398 data->rate = rate; 1399 1400 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 1401 flags |= RAL_TX_ACK; 1402 flags |= RAL_TX_RETRY(7); 1403 1404 dur = ieee80211_ack_duration(sc->sc_rates, rate, 1405 ic->ic_flags & IEEE80211_F_SHPREAMBLE); 1406 *(uint16_t *)wh->i_dur = htole16(dur); 1407 } 1408 1409 ural_setup_tx_desc(sc, &data->desc, flags, m0->m_pkthdr.len, rate); 1410 1411 DPRINTFN(10, "sending data frame len=%u rate=%u\n", 1412 m0->m_pkthdr.len, rate); 1413 1414 STAILQ_INSERT_TAIL(&sc->tx_q, data, next); 1415 usb2_transfer_start(sc->sc_xfer[URAL_BULK_WR]); 1416 1417 return 0; 1418} 1419 1420static void 1421ural_start(struct ifnet *ifp) 1422{ 1423 struct ural_softc *sc = ifp->if_softc; 1424 struct ieee80211_node *ni; 1425 struct mbuf *m; 1426 1427 RAL_LOCK(sc); 1428 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1429 RAL_UNLOCK(sc); 1430 return; 1431 } 1432 for (;;) { 1433 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 1434 if (m == NULL) 1435 break; 1436 if (sc->tx_nfree < RAL_TX_MINFREE) { 1437 IFQ_DRV_PREPEND(&ifp->if_snd, m); 1438 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1439 break; 1440 } 1441 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; 1442 m = ieee80211_encap(ni, m); 1443 if (m == NULL) { 1444 ieee80211_free_node(ni); 1445 ifp->if_oerrors++; 1446 continue; 1447 } 1448 if (ural_tx_data(sc, m, ni) != 0) { 1449 ieee80211_free_node(ni); 1450 ifp->if_oerrors++; 1451 break; 1452 } 1453 } 1454 RAL_UNLOCK(sc); 1455} 1456 1457static int 1458ural_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1459{ 1460 
struct ural_softc *sc = ifp->if_softc; 1461 struct ieee80211com *ic = ifp->if_l2com; 1462 struct ifreq *ifr = (struct ifreq *) data; 1463 int error = 0, startall = 0; 1464 1465 switch (cmd) { 1466 case SIOCSIFFLAGS: 1467 RAL_LOCK(sc); 1468 if (ifp->if_flags & IFF_UP) { 1469 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1470 ural_queue_command(sc, ural_init_task, 1471 &sc->sc_synctask[0].hdr, 1472 &sc->sc_synctask[1].hdr); 1473 startall = 1; 1474 } else 1475 ural_queue_command(sc, ural_promisctask, 1476 &sc->sc_promisctask[0].hdr, 1477 &sc->sc_promisctask[1].hdr); 1478 } else { 1479 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1480 ural_queue_command(sc, ural_stop_task, 1481 &sc->sc_synctask[0].hdr, 1482 &sc->sc_synctask[1].hdr); 1483 } 1484 } 1485 RAL_UNLOCK(sc); 1486 if (startall) 1487 ieee80211_start_all(ic); 1488 break; 1489 case SIOCGIFMEDIA: 1490 case SIOCSIFMEDIA: 1491 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 1492 break; 1493 default: 1494 error = ether_ioctl(ifp, cmd, data); 1495 break; 1496 } 1497 return error; 1498} 1499 1500static void 1501ural_set_testmode(struct ural_softc *sc) 1502{ 1503 struct usb2_device_request req; 1504 usb2_error_t error; 1505 1506 req.bmRequestType = UT_WRITE_VENDOR_DEVICE; 1507 req.bRequest = RAL_VENDOR_REQUEST; 1508 USETW(req.wValue, 4); 1509 USETW(req.wIndex, 1); 1510 USETW(req.wLength, 0); 1511 1512 error = ural_do_request(sc, &req, NULL); 1513 if (error != 0) { 1514 device_printf(sc->sc_dev, "could not set test mode: %s\n", 1515 usb2_errstr(error)); 1516 } 1517} 1518 1519static void 1520ural_eeprom_read(struct ural_softc *sc, uint16_t addr, void *buf, int len) 1521{ 1522 struct usb2_device_request req; 1523 usb2_error_t error; 1524 1525 req.bmRequestType = UT_READ_VENDOR_DEVICE; 1526 req.bRequest = RAL_READ_EEPROM; 1527 USETW(req.wValue, 0); 1528 USETW(req.wIndex, addr); 1529 USETW(req.wLength, len); 1530 1531 error = ural_do_request(sc, &req, buf); 1532 if (error != 0) { 1533 device_printf(sc->sc_dev, "could 
not read EEPROM: %s\n", 1534 usb2_errstr(error)); 1535 } 1536} 1537 1538static uint16_t 1539ural_read(struct ural_softc *sc, uint16_t reg) 1540{ 1541 struct usb2_device_request req; 1542 usb2_error_t error; 1543 uint16_t val; 1544 1545 req.bmRequestType = UT_READ_VENDOR_DEVICE; 1546 req.bRequest = RAL_READ_MAC; 1547 USETW(req.wValue, 0); 1548 USETW(req.wIndex, reg); 1549 USETW(req.wLength, sizeof (uint16_t)); 1550 1551 error = ural_do_request(sc, &req, &val); 1552 if (error != 0) { 1553 device_printf(sc->sc_dev, "could not read MAC register: %s\n", 1554 usb2_errstr(error)); 1555 return 0; 1556 } 1557 1558 return le16toh(val); 1559} 1560 1561static void 1562ural_read_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len) 1563{ 1564 struct usb2_device_request req; 1565 usb2_error_t error; 1566 1567 req.bmRequestType = UT_READ_VENDOR_DEVICE; 1568 req.bRequest = RAL_READ_MULTI_MAC; 1569 USETW(req.wValue, 0); 1570 USETW(req.wIndex, reg); 1571 USETW(req.wLength, len); 1572 1573 error = ural_do_request(sc, &req, buf); 1574 if (error != 0) { 1575 device_printf(sc->sc_dev, "could not read MAC register: %s\n", 1576 usb2_errstr(error)); 1577 } 1578} 1579 1580static void 1581ural_write(struct ural_softc *sc, uint16_t reg, uint16_t val) 1582{ 1583 struct usb2_device_request req; 1584 usb2_error_t error; 1585 1586 req.bmRequestType = UT_WRITE_VENDOR_DEVICE; 1587 req.bRequest = RAL_WRITE_MAC; 1588 USETW(req.wValue, val); 1589 USETW(req.wIndex, reg); 1590 USETW(req.wLength, 0); 1591 1592 error = ural_do_request(sc, &req, NULL); 1593 if (error != 0) { 1594 device_printf(sc->sc_dev, "could not write MAC register: %s\n", 1595 usb2_errstr(error)); 1596 } 1597} 1598 1599static void 1600ural_write_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len) 1601{ 1602 struct usb2_device_request req; 1603 usb2_error_t error; 1604 1605 req.bmRequestType = UT_WRITE_VENDOR_DEVICE; 1606 req.bRequest = RAL_WRITE_MULTI_MAC; 1607 USETW(req.wValue, 0); 1608 USETW(req.wIndex, reg); 1609 
USETW(req.wLength, len); 1610 1611 error = ural_do_request(sc, &req, buf); 1612 if (error != 0) { 1613 device_printf(sc->sc_dev, "could not write MAC register: %s\n", 1614 usb2_errstr(error)); 1615 } 1616} 1617 1618static void 1619ural_bbp_write(struct ural_softc *sc, uint8_t reg, uint8_t val) 1620{ 1621 uint16_t tmp; 1622 int ntries; 1623 1624 for (ntries = 0; ntries < 100; ntries++) { 1625 if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY)) 1626 break; 1627 if (ural_pause(sc, hz / 100)) 1628 break; 1629 } 1630 if (ntries == 100) { 1631 device_printf(sc->sc_dev, "could not write to BBP\n"); 1632 return; 1633 } 1634 1635 tmp = reg << 8 | val; 1636 ural_write(sc, RAL_PHY_CSR7, tmp); 1637} 1638 1639static uint8_t 1640ural_bbp_read(struct ural_softc *sc, uint8_t reg) 1641{ 1642 uint16_t val; 1643 int ntries; 1644 1645 val = RAL_BBP_WRITE | reg << 8; 1646 ural_write(sc, RAL_PHY_CSR7, val); 1647 1648 for (ntries = 0; ntries < 100; ntries++) { 1649 if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY)) 1650 break; 1651 if (ural_pause(sc, hz / 100)) 1652 break; 1653 } 1654 if (ntries == 100) { 1655 device_printf(sc->sc_dev, "could not read BBP\n"); 1656 return 0; 1657 } 1658 1659 return ural_read(sc, RAL_PHY_CSR7) & 0xff; 1660} 1661 1662static void 1663ural_rf_write(struct ural_softc *sc, uint8_t reg, uint32_t val) 1664{ 1665 uint32_t tmp; 1666 int ntries; 1667 1668 for (ntries = 0; ntries < 100; ntries++) { 1669 if (!(ural_read(sc, RAL_PHY_CSR10) & RAL_RF_LOBUSY)) 1670 break; 1671 if (ural_pause(sc, hz / 100)) 1672 break; 1673 } 1674 if (ntries == 100) { 1675 device_printf(sc->sc_dev, "could not write to RF\n"); 1676 return; 1677 } 1678 1679 tmp = RAL_RF_BUSY | RAL_RF_20BIT | (val & 0xfffff) << 2 | (reg & 0x3); 1680 ural_write(sc, RAL_PHY_CSR9, tmp & 0xffff); 1681 ural_write(sc, RAL_PHY_CSR10, tmp >> 16); 1682 1683 /* remember last written value in sc */ 1684 sc->rf_regs[reg] = val; 1685 1686 DPRINTFN(15, "RF R[%u] <- 0x%05x\n", reg & 0x3, val & 0xfffff); 1687} 1688 1689/* 
ARGUSED */ 1690static struct ieee80211_node * 1691ural_node_alloc(struct ieee80211vap *vap __unused, 1692 const uint8_t mac[IEEE80211_ADDR_LEN] __unused) 1693{ 1694 struct ural_node *un; 1695 1696 un = malloc(sizeof(struct ural_node), M_80211_NODE, M_NOWAIT | M_ZERO); 1697 return un != NULL ? &un->ni : NULL; 1698} 1699 1700static void 1701ural_newassoc(struct ieee80211_node *ni, int isnew) 1702{ 1703 struct ieee80211vap *vap = ni->ni_vap; 1704 1705 ieee80211_amrr_node_init(&URAL_VAP(vap)->amrr, &URAL_NODE(ni)->amn, ni); 1706} 1707 1708static void 1709ural_scan_start(struct ieee80211com *ic) 1710{ 1711 struct ural_softc *sc = ic->ic_ifp->if_softc; 1712 1713 RAL_LOCK(sc); 1714 /* do it in a process context */ 1715 sc->sc_scan_action = URAL_SCAN_START; 1716 ural_queue_command(sc, ural_scantask, 1717 &sc->sc_scantask[0].hdr, &sc->sc_scantask[1].hdr); 1718 RAL_UNLOCK(sc); 1719 1720} 1721 1722static void 1723ural_scan_end(struct ieee80211com *ic) 1724{ 1725 struct ural_softc *sc = ic->ic_ifp->if_softc; 1726 1727 RAL_LOCK(sc); 1728 /* do it in a process context */ 1729 sc->sc_scan_action = URAL_SCAN_END; 1730 ural_queue_command(sc, ural_scantask, 1731 &sc->sc_scantask[0].hdr, &sc->sc_scantask[1].hdr); 1732 RAL_UNLOCK(sc); 1733 1734} 1735 1736static void 1737ural_set_channel(struct ieee80211com *ic) 1738{ 1739 struct ural_softc *sc = ic->ic_ifp->if_softc; 1740 1741 RAL_LOCK(sc); 1742 /* do it in a process context */ 1743 sc->sc_scan_action = URAL_SET_CHANNEL; 1744 ural_queue_command(sc, ural_scantask, 1745 &sc->sc_scantask[0].hdr, &sc->sc_scantask[1].hdr); 1746 1747 sc->sc_rates = ieee80211_get_ratetable(ic->ic_curchan); 1748 RAL_UNLOCK(sc); 1749} 1750 1751static void 1752ural_set_chan(struct ural_softc *sc, struct ieee80211_channel *c) 1753{ 1754 struct ifnet *ifp = sc->sc_ifp; 1755 struct ieee80211com *ic = ifp->if_l2com; 1756 uint8_t power, tmp; 1757 int i, chan; 1758 1759 chan = ieee80211_chan2ieee(ic, c); 1760 if (chan == 0 || chan == IEEE80211_CHAN_ANY) 1761 return; 
1762 1763 if (IEEE80211_IS_CHAN_2GHZ(c)) 1764 power = min(sc->txpow[chan - 1], 31); 1765 else 1766 power = 31; 1767 1768 /* adjust txpower using ifconfig settings */ 1769 power -= (100 - ic->ic_txpowlimit) / 8; 1770 1771 DPRINTFN(2, "setting channel to %u, txpower to %u\n", chan, power); 1772 1773 switch (sc->rf_rev) { 1774 case RAL_RF_2522: 1775 ural_rf_write(sc, RAL_RF1, 0x00814); 1776 ural_rf_write(sc, RAL_RF2, ural_rf2522_r2[chan - 1]); 1777 ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040); 1778 break; 1779 1780 case RAL_RF_2523: 1781 ural_rf_write(sc, RAL_RF1, 0x08804); 1782 ural_rf_write(sc, RAL_RF2, ural_rf2523_r2[chan - 1]); 1783 ural_rf_write(sc, RAL_RF3, power << 7 | 0x38044); 1784 ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); 1785 break; 1786 1787 case RAL_RF_2524: 1788 ural_rf_write(sc, RAL_RF1, 0x0c808); 1789 ural_rf_write(sc, RAL_RF2, ural_rf2524_r2[chan - 1]); 1790 ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040); 1791 ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); 1792 break; 1793 1794 case RAL_RF_2525: 1795 ural_rf_write(sc, RAL_RF1, 0x08808); 1796 ural_rf_write(sc, RAL_RF2, ural_rf2525_hi_r2[chan - 1]); 1797 ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); 1798 ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); 1799 1800 ural_rf_write(sc, RAL_RF1, 0x08808); 1801 ural_rf_write(sc, RAL_RF2, ural_rf2525_r2[chan - 1]); 1802 ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); 1803 ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); 1804 break; 1805 1806 case RAL_RF_2525E: 1807 ural_rf_write(sc, RAL_RF1, 0x08808); 1808 ural_rf_write(sc, RAL_RF2, ural_rf2525e_r2[chan - 1]); 1809 ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); 1810 ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00286 : 0x00282); 1811 break; 1812 1813 case RAL_RF_2526: 1814 ural_rf_write(sc, RAL_RF2, ural_rf2526_hi_r2[chan - 1]); 1815 ural_rf_write(sc, RAL_RF4, (chan & 1) ? 
0x00386 : 0x00381); 1816 ural_rf_write(sc, RAL_RF1, 0x08804); 1817 1818 ural_rf_write(sc, RAL_RF2, ural_rf2526_r2[chan - 1]); 1819 ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); 1820 ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381); 1821 break; 1822 1823 /* dual-band RF */ 1824 case RAL_RF_5222: 1825 for (i = 0; ural_rf5222[i].chan != chan; i++); 1826 1827 ural_rf_write(sc, RAL_RF1, ural_rf5222[i].r1); 1828 ural_rf_write(sc, RAL_RF2, ural_rf5222[i].r2); 1829 ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040); 1830 ural_rf_write(sc, RAL_RF4, ural_rf5222[i].r4); 1831 break; 1832 } 1833 1834 if (ic->ic_opmode != IEEE80211_M_MONITOR && 1835 (ic->ic_flags & IEEE80211_F_SCAN) == 0) { 1836 /* set Japan filter bit for channel 14 */ 1837 tmp = ural_bbp_read(sc, 70); 1838 1839 tmp &= ~RAL_JAPAN_FILTER; 1840 if (chan == 14) 1841 tmp |= RAL_JAPAN_FILTER; 1842 1843 ural_bbp_write(sc, 70, tmp); 1844 1845 /* clear CRC errors */ 1846 ural_read(sc, RAL_STA_CSR0); 1847 1848 ural_pause(sc, hz / 100); 1849 ural_disable_rf_tune(sc); 1850 } 1851 1852 /* XXX doesn't belong here */ 1853 /* update basic rate set */ 1854 ural_set_basicrates(sc, c); 1855 1856 /* give the hardware some time to do the switchover */ 1857 ural_pause(sc, hz / 100); 1858} 1859 1860/* 1861 * Disable RF auto-tuning. 1862 */ 1863static void 1864ural_disable_rf_tune(struct ural_softc *sc) 1865{ 1866 uint32_t tmp; 1867 1868 if (sc->rf_rev != RAL_RF_2523) { 1869 tmp = sc->rf_regs[RAL_RF1] & ~RAL_RF1_AUTOTUNE; 1870 ural_rf_write(sc, RAL_RF1, tmp); 1871 } 1872 1873 tmp = sc->rf_regs[RAL_RF3] & ~RAL_RF3_AUTOTUNE; 1874 ural_rf_write(sc, RAL_RF3, tmp); 1875 1876 DPRINTFN(2, "disabling RF autotune\n"); 1877} 1878 1879/* 1880 * Refer to IEEE Std 802.11-1999 pp. 123 for more information on TSF 1881 * synchronization. 
1882 */ 1883static void 1884ural_enable_tsf_sync(struct ural_softc *sc) 1885{ 1886 struct ifnet *ifp = sc->sc_ifp; 1887 struct ieee80211com *ic = ifp->if_l2com; 1888 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 1889 uint16_t logcwmin, preload, tmp; 1890 1891 /* first, disable TSF synchronization */ 1892 ural_write(sc, RAL_TXRX_CSR19, 0); 1893 1894 tmp = (16 * vap->iv_bss->ni_intval) << 4; 1895 ural_write(sc, RAL_TXRX_CSR18, tmp); 1896 1897 logcwmin = (ic->ic_opmode == IEEE80211_M_IBSS) ? 2 : 0; 1898 preload = (ic->ic_opmode == IEEE80211_M_IBSS) ? 320 : 6; 1899 tmp = logcwmin << 12 | preload; 1900 ural_write(sc, RAL_TXRX_CSR20, tmp); 1901 1902 /* finally, enable TSF synchronization */ 1903 tmp = RAL_ENABLE_TSF | RAL_ENABLE_TBCN; 1904 if (ic->ic_opmode == IEEE80211_M_STA) 1905 tmp |= RAL_ENABLE_TSF_SYNC(1); 1906 else 1907 tmp |= RAL_ENABLE_TSF_SYNC(2) | RAL_ENABLE_BEACON_GENERATOR; 1908 ural_write(sc, RAL_TXRX_CSR19, tmp); 1909 1910 DPRINTF("enabling TSF synchronization\n"); 1911} 1912 1913#define RAL_RXTX_TURNAROUND 5 /* us */ 1914static void 1915ural_update_slot(struct ifnet *ifp) 1916{ 1917 struct ural_softc *sc = ifp->if_softc; 1918 struct ieee80211com *ic = ifp->if_l2com; 1919 uint16_t slottime, sifs, eifs; 1920 1921 slottime = (ic->ic_flags & IEEE80211_F_SHSLOT) ? 9 : 20; 1922 1923 /* 1924 * These settings may sound a bit inconsistent but this is what the 1925 * reference driver does. 
1926 */ 1927 if (ic->ic_curmode == IEEE80211_MODE_11B) { 1928 sifs = 16 - RAL_RXTX_TURNAROUND; 1929 eifs = 364; 1930 } else { 1931 sifs = 10 - RAL_RXTX_TURNAROUND; 1932 eifs = 64; 1933 } 1934 1935 ural_write(sc, RAL_MAC_CSR10, slottime); 1936 ural_write(sc, RAL_MAC_CSR11, sifs); 1937 ural_write(sc, RAL_MAC_CSR12, eifs); 1938} 1939 1940static void 1941ural_set_txpreamble(struct ural_softc *sc) 1942{ 1943 struct ifnet *ifp = sc->sc_ifp; 1944 struct ieee80211com *ic = ifp->if_l2com; 1945 uint16_t tmp; 1946 1947 tmp = ural_read(sc, RAL_TXRX_CSR10); 1948 1949 tmp &= ~RAL_SHORT_PREAMBLE; 1950 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 1951 tmp |= RAL_SHORT_PREAMBLE; 1952 1953 ural_write(sc, RAL_TXRX_CSR10, tmp); 1954} 1955 1956static void 1957ural_set_basicrates(struct ural_softc *sc, const struct ieee80211_channel *c) 1958{ 1959 /* XXX wrong, take from rate set */ 1960 /* update basic rate set */ 1961 if (IEEE80211_IS_CHAN_5GHZ(c)) { 1962 /* 11a basic rates: 6, 12, 24Mbps */ 1963 ural_write(sc, RAL_TXRX_CSR11, 0x150); 1964 } else if (IEEE80211_IS_CHAN_ANYG(c)) { 1965 /* 11g basic rates: 1, 2, 5.5, 11, 6, 12, 24Mbps */ 1966 ural_write(sc, RAL_TXRX_CSR11, 0x15f); 1967 } else { 1968 /* 11b basic rates: 1, 2Mbps */ 1969 ural_write(sc, RAL_TXRX_CSR11, 0x3); 1970 } 1971} 1972 1973static void 1974ural_set_bssid(struct ural_softc *sc, const uint8_t *bssid) 1975{ 1976 uint16_t tmp; 1977 1978 tmp = bssid[0] | bssid[1] << 8; 1979 ural_write(sc, RAL_MAC_CSR5, tmp); 1980 1981 tmp = bssid[2] | bssid[3] << 8; 1982 ural_write(sc, RAL_MAC_CSR6, tmp); 1983 1984 tmp = bssid[4] | bssid[5] << 8; 1985 ural_write(sc, RAL_MAC_CSR7, tmp); 1986 1987 DPRINTF("setting BSSID to %6D\n", bssid, ":"); 1988} 1989 1990static void 1991ural_set_macaddr(struct ural_softc *sc, uint8_t *addr) 1992{ 1993 uint16_t tmp; 1994 1995 tmp = addr[0] | addr[1] << 8; 1996 ural_write(sc, RAL_MAC_CSR2, tmp); 1997 1998 tmp = addr[2] | addr[3] << 8; 1999 ural_write(sc, RAL_MAC_CSR3, tmp); 2000 2001 tmp = addr[4] | addr[5] 
<< 8; 2002 ural_write(sc, RAL_MAC_CSR4, tmp); 2003 2004 DPRINTF("setting MAC address to %6D\n", addr, ":"); 2005} 2006 2007static void 2008ural_promisctask(struct usb2_proc_msg *pm) 2009{ 2010 struct ural_task *task = (struct ural_task *)pm; 2011 struct ural_softc *sc = task->sc; 2012 struct ifnet *ifp = sc->sc_ifp; 2013 uint32_t tmp; 2014 2015 tmp = ural_read(sc, RAL_TXRX_CSR2); 2016 2017 tmp &= ~RAL_DROP_NOT_TO_ME; 2018 if (!(ifp->if_flags & IFF_PROMISC)) 2019 tmp |= RAL_DROP_NOT_TO_ME; 2020 2021 ural_write(sc, RAL_TXRX_CSR2, tmp); 2022 2023 DPRINTF("%s promiscuous mode\n", (ifp->if_flags & IFF_PROMISC) ? 2024 "entering" : "leaving"); 2025} 2026 2027static void 2028ural_update_promisc(struct ifnet *ifp) 2029{ 2030 struct ural_softc *sc = ifp->if_softc; 2031 2032 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2033 return; 2034 2035 RAL_LOCK(sc); 2036 ural_queue_command(sc, ural_promisctask, 2037 &sc->sc_promisctask[0].hdr, 2038 &sc->sc_promisctask[1].hdr); 2039 RAL_UNLOCK(sc); 2040} 2041 2042static const char * 2043ural_get_rf(int rev) 2044{ 2045 switch (rev) { 2046 case RAL_RF_2522: return "RT2522"; 2047 case RAL_RF_2523: return "RT2523"; 2048 case RAL_RF_2524: return "RT2524"; 2049 case RAL_RF_2525: return "RT2525"; 2050 case RAL_RF_2525E: return "RT2525e"; 2051 case RAL_RF_2526: return "RT2526"; 2052 case RAL_RF_5222: return "RT5222"; 2053 default: return "unknown"; 2054 } 2055} 2056 2057static void 2058ural_read_eeprom(struct ural_softc *sc) 2059{ 2060 uint16_t val; 2061 2062 ural_eeprom_read(sc, RAL_EEPROM_CONFIG0, &val, 2); 2063 val = le16toh(val); 2064 sc->rf_rev = (val >> 11) & 0x7; 2065 sc->hw_radio = (val >> 10) & 0x1; 2066 sc->led_mode = (val >> 6) & 0x7; 2067 sc->rx_ant = (val >> 4) & 0x3; 2068 sc->tx_ant = (val >> 2) & 0x3; 2069 sc->nb_ant = val & 0x3; 2070 2071 /* read MAC address */ 2072 ural_eeprom_read(sc, RAL_EEPROM_ADDRESS, sc->sc_bssid, 6); 2073 2074 /* read default values for BBP registers */ 2075 ural_eeprom_read(sc, RAL_EEPROM_BBP_BASE, 
sc->bbp_prom, 2 * 16); 2076 2077 /* read Tx power for all b/g channels */ 2078 ural_eeprom_read(sc, RAL_EEPROM_TXPOWER, sc->txpow, 14); 2079} 2080 2081static int 2082ural_bbp_init(struct ural_softc *sc) 2083{ 2084#define N(a) (sizeof (a) / sizeof ((a)[0])) 2085 int i, ntries; 2086 2087 /* wait for BBP to be ready */ 2088 for (ntries = 0; ntries < 100; ntries++) { 2089 if (ural_bbp_read(sc, RAL_BBP_VERSION) != 0) 2090 break; 2091 if (ural_pause(sc, hz / 100)) 2092 break; 2093 } 2094 if (ntries == 100) { 2095 device_printf(sc->sc_dev, "timeout waiting for BBP\n"); 2096 return EIO; 2097 } 2098 2099 /* initialize BBP registers to default values */ 2100 for (i = 0; i < N(ural_def_bbp); i++) 2101 ural_bbp_write(sc, ural_def_bbp[i].reg, ural_def_bbp[i].val); 2102 2103#if 0 2104 /* initialize BBP registers to values stored in EEPROM */ 2105 for (i = 0; i < 16; i++) { 2106 if (sc->bbp_prom[i].reg == 0xff) 2107 continue; 2108 ural_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val); 2109 } 2110#endif 2111 2112 return 0; 2113#undef N 2114} 2115 2116static void 2117ural_set_txantenna(struct ural_softc *sc, int antenna) 2118{ 2119 uint16_t tmp; 2120 uint8_t tx; 2121 2122 tx = ural_bbp_read(sc, RAL_BBP_TX) & ~RAL_BBP_ANTMASK; 2123 if (antenna == 1) 2124 tx |= RAL_BBP_ANTA; 2125 else if (antenna == 2) 2126 tx |= RAL_BBP_ANTB; 2127 else 2128 tx |= RAL_BBP_DIVERSITY; 2129 2130 /* need to force I/Q flip for RF 2525e, 2526 and 5222 */ 2131 if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526 || 2132 sc->rf_rev == RAL_RF_5222) 2133 tx |= RAL_BBP_FLIPIQ; 2134 2135 ural_bbp_write(sc, RAL_BBP_TX, tx); 2136 2137 /* update values in PHY_CSR5 and PHY_CSR6 */ 2138 tmp = ural_read(sc, RAL_PHY_CSR5) & ~0x7; 2139 ural_write(sc, RAL_PHY_CSR5, tmp | (tx & 0x7)); 2140 2141 tmp = ural_read(sc, RAL_PHY_CSR6) & ~0x7; 2142 ural_write(sc, RAL_PHY_CSR6, tmp | (tx & 0x7)); 2143} 2144 2145static void 2146ural_set_rxantenna(struct ural_softc *sc, int antenna) 2147{ 2148 uint8_t rx; 2149 2150 rx 
= ural_bbp_read(sc, RAL_BBP_RX) & ~RAL_BBP_ANTMASK; 2151 if (antenna == 1) 2152 rx |= RAL_BBP_ANTA; 2153 else if (antenna == 2) 2154 rx |= RAL_BBP_ANTB; 2155 else 2156 rx |= RAL_BBP_DIVERSITY; 2157 2158 /* need to force no I/Q flip for RF 2525e and 2526 */ 2159 if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526) 2160 rx &= ~RAL_BBP_FLIPIQ; 2161 2162 ural_bbp_write(sc, RAL_BBP_RX, rx); 2163} 2164 2165static void 2166ural_init_task(struct usb2_proc_msg *pm) 2167{ 2168#define N(a) (sizeof (a) / sizeof ((a)[0])) 2169 struct ural_task *task = (struct ural_task *)pm; 2170 struct ural_softc *sc = task->sc; 2171 struct ifnet *ifp = sc->sc_ifp; 2172 struct ieee80211com *ic = ifp->if_l2com; 2173 uint16_t tmp; 2174 int i, ntries; 2175 2176 RAL_LOCK_ASSERT(sc, MA_OWNED); 2177 2178 ural_set_testmode(sc); 2179 ural_write(sc, 0x308, 0x00f0); /* XXX magic */ 2180 2181 ural_stop_task(pm); 2182 2183 /* initialize MAC registers to default values */ 2184 for (i = 0; i < N(ural_def_mac); i++) 2185 ural_write(sc, ural_def_mac[i].reg, ural_def_mac[i].val); 2186 2187 /* wait for BBP and RF to wake up (this can take a long time!) */ 2188 for (ntries = 0; ntries < 100; ntries++) { 2189 tmp = ural_read(sc, RAL_MAC_CSR17); 2190 if ((tmp & (RAL_BBP_AWAKE | RAL_RF_AWAKE)) == 2191 (RAL_BBP_AWAKE | RAL_RF_AWAKE)) 2192 break; 2193 if (ural_pause(sc, hz / 100)) 2194 break; 2195 } 2196 if (ntries == 100) { 2197 device_printf(sc->sc_dev, 2198 "timeout waiting for BBP/RF to wakeup\n"); 2199 goto fail; 2200 } 2201 2202 /* we're ready! 
*/ 2203 ural_write(sc, RAL_MAC_CSR1, RAL_HOST_READY); 2204 2205 /* set basic rate set (will be updated later) */ 2206 ural_write(sc, RAL_TXRX_CSR11, 0x15f); 2207 2208 if (ural_bbp_init(sc) != 0) 2209 goto fail; 2210 2211 ural_set_chan(sc, ic->ic_curchan); 2212 2213 /* clear statistic registers (STA_CSR0 to STA_CSR10) */ 2214 ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta); 2215 2216 ural_set_txantenna(sc, sc->tx_ant); 2217 ural_set_rxantenna(sc, sc->rx_ant); 2218 2219 IEEE80211_ADDR_COPY(ic->ic_myaddr, IF_LLADDR(ifp)); 2220 ural_set_macaddr(sc, ic->ic_myaddr); 2221 2222 /* 2223 * Allocate Tx and Rx xfer queues. 2224 */ 2225 ural_setup_tx_list(sc); 2226 2227 /* kick Rx */ 2228 tmp = RAL_DROP_PHY | RAL_DROP_CRC; 2229 if (ic->ic_opmode != IEEE80211_M_MONITOR) { 2230 tmp |= RAL_DROP_CTL | RAL_DROP_BAD_VERSION; 2231 if (ic->ic_opmode != IEEE80211_M_HOSTAP) 2232 tmp |= RAL_DROP_TODS; 2233 if (!(ifp->if_flags & IFF_PROMISC)) 2234 tmp |= RAL_DROP_NOT_TO_ME; 2235 } 2236 ural_write(sc, RAL_TXRX_CSR2, tmp); 2237 2238 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2239 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2240 usb2_transfer_set_stall(sc->sc_xfer[URAL_BULK_WR]); 2241 usb2_transfer_start(sc->sc_xfer[URAL_BULK_RD]); 2242 return; 2243 2244fail: ural_stop_task(pm); 2245#undef N 2246} 2247 2248static void 2249ural_init(void *priv) 2250{ 2251 struct ural_softc *sc = priv; 2252 struct ifnet *ifp = sc->sc_ifp; 2253 struct ieee80211com *ic = ifp->if_l2com; 2254 2255 RAL_LOCK(sc); 2256 ural_queue_command(sc, ural_init_task, 2257 &sc->sc_synctask[0].hdr, 2258 &sc->sc_synctask[1].hdr); 2259 RAL_UNLOCK(sc); 2260 2261 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2262 ieee80211_start_all(ic); /* start all vap's */ 2263} 2264 2265static void 2266ural_stop_task(struct usb2_proc_msg *pm) 2267{ 2268 struct ural_task *task = (struct ural_task *)pm; 2269 struct ural_softc *sc = task->sc; 2270 struct ifnet *ifp = sc->sc_ifp; 2271 2272 RAL_LOCK_ASSERT(sc, MA_OWNED); 2273 2274 ifp->if_drv_flags &= 
~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2275 2276 /* 2277 * Drain all the transfers, if not already drained: 2278 */ 2279 RAL_UNLOCK(sc); 2280 usb2_transfer_drain(sc->sc_xfer[URAL_BULK_WR]); 2281 usb2_transfer_drain(sc->sc_xfer[URAL_BULK_RD]); 2282 RAL_LOCK(sc); 2283 2284 ural_unsetup_tx_list(sc); 2285 2286 /* disable Rx */ 2287 ural_write(sc, RAL_TXRX_CSR2, RAL_DISABLE_RX); 2288 /* reset ASIC and BBP (but won't reset MAC registers!) */ 2289 ural_write(sc, RAL_MAC_CSR1, RAL_RESET_ASIC | RAL_RESET_BBP); 2290 /* wait a little */ 2291 ural_pause(sc, hz / 10); 2292 ural_write(sc, RAL_MAC_CSR1, 0); 2293 /* wait a little */ 2294 ural_pause(sc, hz / 10); 2295} 2296 2297static int 2298ural_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 2299 const struct ieee80211_bpf_params *params) 2300{ 2301 struct ieee80211com *ic = ni->ni_ic; 2302 struct ifnet *ifp = ic->ic_ifp; 2303 struct ural_softc *sc = ifp->if_softc; 2304 2305 RAL_LOCK(sc); 2306 /* prevent management frames from being sent if we're not ready */ 2307 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2308 RAL_UNLOCK(sc); 2309 m_freem(m); 2310 ieee80211_free_node(ni); 2311 return ENETDOWN; 2312 } 2313 if (sc->tx_nfree < RAL_TX_MINFREE) { 2314 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2315 RAL_UNLOCK(sc); 2316 m_freem(m); 2317 ieee80211_free_node(ni); 2318 return EIO; 2319 } 2320 2321 ifp->if_opackets++; 2322 2323 if (params == NULL) { 2324 /* 2325 * Legacy path; interpret frame contents to decide 2326 * precisely how to send the frame. 2327 */ 2328 if (ural_tx_mgt(sc, m, ni) != 0) 2329 goto bad; 2330 } else { 2331 /* 2332 * Caller supplied explicit parameters to use in 2333 * sending the frame. 
2334 */ 2335 if (ural_tx_raw(sc, m, ni, params) != 0) 2336 goto bad; 2337 } 2338 RAL_UNLOCK(sc); 2339 return 0; 2340bad: 2341 ifp->if_oerrors++; 2342 RAL_UNLOCK(sc); 2343 ieee80211_free_node(ni); 2344 return EIO; /* XXX */ 2345} 2346 2347static void 2348ural_amrr_start(struct ural_softc *sc, struct ieee80211_node *ni) 2349{ 2350 struct ieee80211vap *vap = ni->ni_vap; 2351 struct ural_vap *uvp = URAL_VAP(vap); 2352 2353 /* clear statistic registers (STA_CSR0 to STA_CSR10) */ 2354 ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta); 2355 2356 ieee80211_amrr_node_init(&uvp->amrr, &URAL_NODE(ni)->amn, ni); 2357 2358 usb2_callout_reset(&uvp->amrr_ch, hz, ural_amrr_timeout, uvp); 2359} 2360 2361static void 2362ural_amrr_timeout(void *arg) 2363{ 2364 struct ural_vap *uvp = arg; 2365 struct ural_softc *sc = uvp->sc; 2366 2367 ural_queue_command(sc, ural_amrr_task, 2368 &uvp->amrr_task[0].hdr, &uvp->amrr_task[1].hdr); 2369} 2370 2371static void 2372ural_amrr_task(struct usb2_proc_msg *pm) 2373{ 2374 struct ural_task *task = (struct ural_task *)pm; 2375 struct ural_softc *sc = task->sc; 2376 struct ifnet *ifp = sc->sc_ifp; 2377 struct ieee80211com *ic = ifp->if_l2com; 2378 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2379 struct ural_vap *uvp = URAL_VAP(vap); 2380 struct ieee80211_node *ni = vap->iv_bss; 2381 int ok, fail; 2382 2383 /* read and clear statistic registers (STA_CSR0 to STA_CSR10) */ 2384 ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof(sc->sta)); 2385 2386 ok = sc->sta[7] + /* TX ok w/o retry */ 2387 sc->sta[8]; /* TX ok w/ retry */ 2388 fail = sc->sta[9]; /* TX retry-fail count */ 2389 2390 ieee80211_amrr_tx_update(&URAL_NODE(ni)->amn, 2391 ok+fail, ok, sc->sta[8] + fail); 2392 (void) ieee80211_amrr_choose(ni, &URAL_NODE(ni)->amn); 2393 2394 ifp->if_oerrors += fail; /* count TX retry-fail as Tx errors */ 2395 2396 usb2_callout_reset(&uvp->amrr_ch, hz, ural_amrr_timeout, uvp); 2397} 2398 2399static int 2400ural_pause(struct ural_softc *sc, int 
timeout) 2401{ 2402 if (usb2_proc_is_gone(&sc->sc_tq)) 2403 return (1); 2404 2405 usb2_pause_mtx(&sc->sc_mtx, timeout); 2406 return (0); 2407} 2408 2409static void 2410ural_command_wrapper(struct usb2_proc_msg *pm) 2411{ 2412 struct ural_task *task = (struct ural_task *)pm; 2413 struct ural_softc *sc = task->sc; 2414 struct ifnet *ifp; 2415 2416 /* wait for pending transfer, if any */ 2417 while (usb2_transfer_pending(sc->sc_xfer[URAL_BULK_WR])) 2418 cv_wait(&sc->sc_cmd_cv, &sc->sc_mtx); 2419 2420 /* make sure any hardware FIFOs are emptied */ 2421 ural_pause(sc, hz / 1000); 2422 2423 /* execute task */ 2424 task->func(pm); 2425 2426 /* check if this is the last task executed */ 2427 if (sc->sc_last_task == task) { 2428 sc->sc_last_task = NULL; 2429 ifp = sc->sc_ifp; 2430 /* re-start TX, if any */ 2431 if ((ifp != NULL) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) 2432 usb2_transfer_start(sc->sc_xfer[URAL_BULK_WR]); 2433 } 2434} 2435 2436static void 2437ural_queue_command(struct ural_softc *sc, usb2_proc_callback_t *fn, 2438 struct usb2_proc_msg *t0, struct usb2_proc_msg *t1) 2439{ 2440 struct ural_task *task; 2441 2442 RAL_LOCK_ASSERT(sc, MA_OWNED); 2443 2444 /* 2445 * NOTE: The task cannot get executed before we drop the 2446 * "sc_mtx" mutex. It is safe to update fields in the message 2447 * structure after that the message got queued. 2448 */ 2449 task = (struct ural_task *) 2450 usb2_proc_msignal(&sc->sc_tq, t0, t1); 2451 2452 /* Setup callback and softc pointers */ 2453 task->hdr.pm_callback = ural_command_wrapper; 2454 task->func = fn; 2455 task->sc = sc; 2456 2457 /* Make sure that any TX operation will stop */ 2458 sc->sc_last_task = task; 2459 2460 /* 2461 * Init, stop and flush must be synchronous! 2462 */ 2463 if ((fn == ural_init_task) || (fn == ural_stop_task) || 2464 (fn == ural_stop_task)) 2465 usb2_proc_mwait(&sc->sc_tq, t0, t1); 2466} 2467