/netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/uwb/

Lines Matching defs:whcrc

25  * For each device probed, creates an 'struct whcrc' which contains
63 struct whcrc {
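The matcher only shows the opening brace of the definition at line 63, but the fields can be read off the uses in the hits below. A minimal reconstruction sketch follows; only the field names that actually appear in the matched lines are certain, while the exact types, ordering and comments are assumptions:

    struct whcrc {
            struct umc_dev *umc_dev;        /* UMC capability device: ->dev, ->irq, ->resource */
            struct uwb_rc *uwb_rc;          /* generic UWB radio controller fed via uwb_rc_neh_grok() */

            unsigned long area;             /* start of the URC register bank (resource.start) */
            void __iomem *rc_base;          /* ioremap of that register bank */
            size_t rc_len;                  /* length of the register bank */
            spinlock_t irq_lock;            /* serializes read-modify-write of URCCMD */

            void *evt_buf, *cmd_buf;        /* PAGE_SIZE coherent DMA buffers for events/commands */
            dma_addr_t evt_dma_buf, cmd_dma_buf;
            wait_queue_head_t cmd_wq;       /* woken from the IRQ path when a command completes */
            struct work_struct event_work;  /* runs whcrc_event_work(), scheduled from the IRQ path */
    };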
81 * @rc: Instance of a Radio Controller that is a whcrc
85 * We copy the command into whcrc->cmd_buf (as it is pretty and
96 struct whcrc *whcrc = uwb_rc->priv;
97 struct device *dev = &whcrc->umc_dev->dev;
109 if (le_readl(whcrc->rc_base + URCSTS) & URCSTS_HALTED) {
115 result = wait_event_timeout(whcrc->cmd_wq,
116 !(le_readl(whcrc->rc_base + URCCMD) & URCCMD_ACTIVE), HZ/2);
122 memmove(whcrc->cmd_buf, cmd, cmd_size);
123 le_writeq(whcrc->cmd_dma_buf, whcrc->rc_base + URCCMDADDR);
125 spin_lock(&whcrc->irq_lock);
126 urccmd = le_readl(whcrc->rc_base + URCCMD);
129 whcrc->rc_base + URCCMD);
130 spin_unlock(&whcrc->irq_lock);
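Pieced together from the hits at lines 81 to 130, the command path looks roughly like the sketch below: refuse to talk to a halted controller, wait for the previous command to drop URCCMD_ACTIVE, copy the command into the coherent buffer, point URCCMDADDR at it, then set the size and ACTIVE bit under irq_lock. The struct uwb_rccb parameter type, the error codes and the URCCMD_SIZE_MASK/URCCMD_IWR/URCCMD_EARV bit names come from the WHCI register definitions and are assumptions here, not something the matched lines show:

    static int whcrc_cmd(struct uwb_rc *uwb_rc,
                         const struct uwb_rccb *cmd, size_t cmd_size)
    {
            int result;
            struct whcrc *whcrc = uwb_rc->priv;
            struct device *dev = &whcrc->umc_dev->dev;
            u32 urccmd;

            /* A halted URC cannot accept commands. */
            if (le_readl(whcrc->rc_base + URCSTS) & URCSTS_HALTED) {
                    dev_err(dev, "contacting halted radio controller\n");
                    return -EIO;
            }

            /* Wait for the previous command to release URCCMD_ACTIVE. */
            result = wait_event_timeout(whcrc->cmd_wq,
                    !(le_readl(whcrc->rc_base + URCCMD) & URCCMD_ACTIVE), HZ/2);
            if (result == 0)
                    return -ETIMEDOUT;

            /* Copy the command into the DMA buffer and hand it to the hardware. */
            memmove(whcrc->cmd_buf, cmd, cmd_size);
            le_writeq(whcrc->cmd_dma_buf, whcrc->rc_base + URCCMDADDR);

            spin_lock(&whcrc->irq_lock);
            urccmd = le_readl(whcrc->rc_base + URCCMD);
            urccmd &= ~(URCCMD_EARV | URCCMD_SIZE_MASK);    /* assumed bit layout */
            le_writel(urccmd | URCCMD_ACTIVE | URCCMD_IWR | cmd_size,
                      whcrc->rc_base + URCCMD);
            spin_unlock(&whcrc->irq_lock);
            return 0;
    }

whcrc_reset() in the next pair of hits (lines 137 to 139) simply delegates to umc_controller_reset() on the UMC device.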
137 struct whcrc *whcrc = rc->priv;
139 return umc_controller_reset(whcrc->umc_dev);
157 void whcrc_enable_events(struct whcrc *whcrc)
161 le_writeq(whcrc->evt_dma_buf, whcrc->rc_base + URCEVTADDR);
163 spin_lock(&whcrc->irq_lock);
164 urccmd = le_readl(whcrc->rc_base + URCCMD) & ~URCCMD_ACTIVE;
165 le_writel(urccmd | URCCMD_EARV, whcrc->rc_base + URCCMD);
166 spin_unlock(&whcrc->irq_lock);
171 struct whcrc *whcrc = container_of(work, struct whcrc, event_work);
175 urcevtaddr = le_readq(whcrc->rc_base + URCEVTADDR);
178 uwb_rc_neh_grok(whcrc->uwb_rc, whcrc->evt_buf, size);
179 whcrc_enable_events(whcrc);
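The event side, reconstructed from lines 157 to 179: whcrc_enable_events() posts the event DMA buffer and arms the event-address-valid bit, and the work item reads URCEVTADDR back to learn how much the hardware wrote, hands the buffer to the UWB stack, then re-arms. The URCEVTADDR_OFFSET_MASK size extraction is an assumption taken from the WHCI register layout; everything else is visible in the matched lines:

    /* Post the event buffer and let the RC overwrite it (EARV). */
    void whcrc_enable_events(struct whcrc *whcrc)
    {
            u32 urccmd;

            le_writeq(whcrc->evt_dma_buf, whcrc->rc_base + URCEVTADDR);

            spin_lock(&whcrc->irq_lock);
            urccmd = le_readl(whcrc->rc_base + URCCMD) & ~URCCMD_ACTIVE;
            le_writel(urccmd | URCCMD_EARV, whcrc->rc_base + URCCMD);
            spin_unlock(&whcrc->irq_lock);
    }

    static void whcrc_event_work(struct work_struct *work)
    {
            struct whcrc *whcrc = container_of(work, struct whcrc, event_work);
            size_t size;
            u64 urcevtaddr;

            urcevtaddr = le_readq(whcrc->rc_base + URCEVTADDR);
            size = urcevtaddr & URCEVTADDR_OFFSET_MASK;     /* bytes written by the RC */

            uwb_rc_neh_grok(whcrc->uwb_rc, whcrc->evt_buf, size);
            whcrc_enable_events(whcrc);
    }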
191 struct whcrc *whcrc = _whcrc;
192 struct device *dev = &whcrc->umc_dev->dev;
195 urcsts = le_readl(whcrc->rc_base + URCSTS);
198 le_writel(urcsts & URCSTS_INT_MASK, whcrc->rc_base + URCSTS);
205 schedule_work(&whcrc->event_work);
207 wake_up_all(&whcrc->cmd_wq);
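A sketch of the interrupt handler around lines 191 to 207: read URCSTS, acknowledge by writing the interrupt bits back, then either defer event processing to event_work or wake whoever is sleeping in whcrc_cmd(). Only URCSTS and URCSTS_INT_MASK appear in the matched lines; the URCSTS_HSE/URCSTS_ER/URCSTS_RCI bit names are assumptions from the WHCI status register definitions:

    static irqreturn_t whcrc_irq_cb(int irq, void *_whcrc)
    {
            struct whcrc *whcrc = _whcrc;
            struct device *dev = &whcrc->umc_dev->dev;
            u32 urcsts;

            urcsts = le_readl(whcrc->rc_base + URCSTS);
            if (!(urcsts & URCSTS_INT_MASK))
                    return IRQ_NONE;                        /* not our interrupt */
            le_writel(urcsts & URCSTS_INT_MASK, whcrc->rc_base + URCSTS);  /* ack */

            if (urcsts & URCSTS_HSE) {
                    dev_err(dev, "host system error, hardware halted\n");
                    return IRQ_HANDLED;
            }
            if (urcsts & URCSTS_ER)                         /* event buffer ready */
                    schedule_work(&whcrc->event_work);
            if (urcsts & URCSTS_RCI)                        /* command completed */
                    wake_up_all(&whcrc->cmd_wq);
            return IRQ_HANDLED;
    }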
217 int whcrc_setup_rc_umc(struct whcrc *whcrc)
220 struct device *dev = &whcrc->umc_dev->dev;
221 struct umc_dev *umc_dev = whcrc->umc_dev;
223 whcrc->area = umc_dev->resource.start;
224 whcrc->rc_len = umc_dev->resource.end - umc_dev->resource.start + 1;
226 if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME) == NULL) {
228 whcrc->rc_len, whcrc->area, result);
232 whcrc->rc_base = ioremap_nocache(whcrc->area, whcrc->rc_len);
233 if (whcrc->rc_base == NULL) {
235 whcrc->rc_len, whcrc->area, result);
240 KBUILD_MODNAME, whcrc);
248 whcrc->cmd_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE,
249 &whcrc->cmd_dma_buf, GFP_KERNEL);
250 if (whcrc->cmd_buf == NULL) {
255 whcrc->evt_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE,
256 &whcrc->evt_dma_buf, GFP_KERNEL);
257 if (whcrc->evt_buf == NULL) {
264 dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf,
265 whcrc->cmd_dma_buf);
267 free_irq(umc_dev->irq, whcrc);
269 iounmap(whcrc->rc_base);
271 release_mem_region(whcrc->area, whcrc->rc_len);
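Lines 217 to 271 are the resource setup with its unwind path: claim the UMC register region, map it, hook the interrupt, and allocate the two page-sized coherent buffers, undoing each step on failure. A rough sketch; the label names, error values, IRQF_SHARED flag and the trimmed error messages are assumptions:

    static int whcrc_setup_rc_umc(struct whcrc *whcrc)
    {
            int result;
            struct device *dev = &whcrc->umc_dev->dev;
            struct umc_dev *umc_dev = whcrc->umc_dev;

            whcrc->area = umc_dev->resource.start;
            whcrc->rc_len = umc_dev->resource.end - umc_dev->resource.start + 1;
            result = -EBUSY;
            if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME) == NULL) {
                    dev_err(dev, "can't request URC region (%zu bytes @ 0x%lx): %d\n",
                            whcrc->rc_len, whcrc->area, result);
                    goto error_request_region;
            }

            result = -ENOMEM;
            whcrc->rc_base = ioremap_nocache(whcrc->area, whcrc->rc_len);
            if (whcrc->rc_base == NULL)
                    goto error_ioremap;

            result = request_irq(umc_dev->irq, whcrc_irq_cb, IRQF_SHARED,
                                 KBUILD_MODNAME, whcrc);
            if (result < 0)
                    goto error_request_irq;

            result = -ENOMEM;
            whcrc->cmd_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE,
                                                &whcrc->cmd_dma_buf, GFP_KERNEL);
            if (whcrc->cmd_buf == NULL)
                    goto error_cmd_buffer;

            whcrc->evt_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE,
                                                &whcrc->evt_dma_buf, GFP_KERNEL);
            if (whcrc->evt_buf == NULL)
                    goto error_evt_buffer;

            return 0;

    error_evt_buffer:
            dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf,
                              whcrc->cmd_dma_buf);
    error_cmd_buffer:
            free_irq(umc_dev->irq, whcrc);
    error_request_irq:
            iounmap(whcrc->rc_base);
    error_ioremap:
            release_mem_region(whcrc->area, whcrc->rc_len);
    error_request_region:
            return result;
    }

whcrc_release_rc_umc(), the next group of hits at lines 281 to 291, undoes the same steps in reverse, which is why the unwind labels above mirror it line for line.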
281 void whcrc_release_rc_umc(struct whcrc *whcrc)
283 struct umc_dev *umc_dev = whcrc->umc_dev;
285 dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->evt_buf,
286 whcrc->evt_dma_buf);
287 dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf,
288 whcrc->cmd_dma_buf);
289 free_irq(umc_dev->irq, whcrc);
290 iounmap(whcrc->rc_base);
291 release_mem_region(whcrc->area, whcrc->rc_len);
297 * @whcrc: the radio controller to start
304 struct whcrc *whcrc = rc->priv;
305 struct device *dev = &whcrc->umc_dev->dev;
308 le_writel(URCCMD_RESET, whcrc->rc_base + URCCMD);
309 if (whci_wait_for(dev, whcrc->rc_base + URCCMD, URCCMD_RESET, 0,
314 le_writel(0, whcrc->rc_base + URCINTR);
315 le_writel(URCCMD_RS, whcrc->rc_base + URCCMD);
316 if (whci_wait_for(dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, 0,
319 whcrc_enable_events(whcrc);
320 le_writel(URCINTR_EN_ALL, whcrc->rc_base + URCINTR);
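Starting the controller (lines 297 to 320) resets the URC and waits for the reset bit to clear, masks pending interrupts, sets Run/Stop and waits for URCSTS_HALTED to drop, then posts the event buffer and unmasks interrupts. Sketch only; the whci_wait_for() timeout values, tag strings and error codes are assumptions:

    static int whcrc_start_rc(struct uwb_rc *rc)
    {
            struct whcrc *whcrc = rc->priv;
            struct device *dev = &whcrc->umc_dev->dev;
            int result = 0;

            /* Reset the radio controller and wait for the bit to self-clear. */
            le_writel(URCCMD_RESET, whcrc->rc_base + URCCMD);
            if (whci_wait_for(dev, whcrc->rc_base + URCCMD, URCCMD_RESET, 0,
                              5000, "hardware reset") < 0) {
                    result = -EBUSY;
                    goto error;
            }

            /* Run/Stop it and wait for it to leave the halted state. */
            le_writel(0, whcrc->rc_base + URCINTR);
            le_writel(URCCMD_RS, whcrc->rc_base + URCCMD);
            if (whci_wait_for(dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, 0,
                              5000, "radio controller start") < 0) {
                    result = -ETIMEDOUT;
                    goto error;
            }

            /* Post the event buffer, then enable all URC interrupts. */
            whcrc_enable_events(whcrc);
            le_writel(URCINTR_EN_ALL, whcrc->rc_base + URCINTR);
    error:
            return result;
    }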
327 * @whcrc: the radio controller to stop
335 struct whcrc *whcrc = rc->priv;
336 struct umc_dev *umc_dev = whcrc->umc_dev;
338 le_writel(0, whcrc->rc_base + URCINTR);
339 cancel_work_sync(&whcrc->event_work);
341 le_writel(0, whcrc->rc_base + URCCMD);
342 whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS,
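Stopping (lines 327 to 342) is the mirror image: mask the interrupts, flush the deferred event work, clear Run/Stop and wait for the controller to report halted. The wait timeout and tag string below are assumptions:

    static void whcrc_stop_rc(struct uwb_rc *rc)
    {
            struct whcrc *whcrc = rc->priv;
            struct umc_dev *umc_dev = whcrc->umc_dev;

            le_writel(0, whcrc->rc_base + URCINTR);         /* mask URC interrupts */
            cancel_work_sync(&whcrc->event_work);           /* no more deferred events */

            le_writel(0, whcrc->rc_base + URCCMD);          /* clear Run/Stop */
            whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS,
                          URCSTS_HALTED, URCSTS_HALTED, 100, "radio controller stop");
    }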
346 static void whcrc_init(struct whcrc *whcrc)
348 spin_lock_init(&whcrc->irq_lock);
349 init_waitqueue_head(&whcrc->cmd_wq);
350 INIT_WORK(&whcrc->event_work, whcrc_event_work);
356 * NOTE: we setup whcrc->uwb_rc before calling uwb_rc_add(); in the
366 struct whcrc *whcrc;
375 whcrc = kzalloc(sizeof(*whcrc), GFP_KERNEL);
376 if (whcrc == NULL) {
380 whcrc_init(whcrc);
381 whcrc->umc_dev = umc_dev;
383 result = whcrc_setup_rc_umc(whcrc);
388 whcrc->uwb_rc = uwb_rc;
396 result = uwb_rc_add(uwb_rc, dev, whcrc);
399 umc_set_drvdata(umc_dev, whcrc);
403 whcrc_release_rc_umc(whcrc);
405 kfree(whcrc);
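The probe path at lines 356 to 405 ties the pieces together: allocate a generic uwb_rc and the per-device whcrc, initialize the lock, wait queue and work item via whcrc_init(), map the UMC resources, wire the start/stop/cmd/reset hooks to the functions above, and register with the UWB stack. Only kzalloc, whcrc_init, whcrc_setup_rc_umc, uwb_rc_add, umc_set_drvdata and the unwind calls are visible in the hits; the uwb_rc_alloc()/uwb_rc_put() pairing, the hook assignments and the label names are assumptions consistent with those error-path lines:

    static int whcrc_probe(struct umc_dev *umc_dev)
    {
            int result;
            struct uwb_rc *uwb_rc;
            struct whcrc *whcrc;
            struct device *dev = &umc_dev->dev;

            result = -ENOMEM;
            uwb_rc = uwb_rc_alloc();
            if (uwb_rc == NULL)
                    goto error_rc_alloc;
            whcrc = kzalloc(sizeof(*whcrc), GFP_KERNEL);
            if (whcrc == NULL)
                    goto error_alloc;

            whcrc_init(whcrc);
            whcrc->umc_dev = umc_dev;

            result = whcrc_setup_rc_umc(whcrc);
            if (result < 0)
                    goto error_setup_rc_umc;
            whcrc->uwb_rc = uwb_rc;

            uwb_rc->owner = THIS_MODULE;
            uwb_rc->start = whcrc_start_rc;
            uwb_rc->stop  = whcrc_stop_rc;
            uwb_rc->cmd   = whcrc_cmd;
            uwb_rc->reset = whcrc_reset;

            result = uwb_rc_add(uwb_rc, dev, whcrc);
            if (result < 0)
                    goto error_rc_add;
            umc_set_drvdata(umc_dev, whcrc);
            return 0;

    error_rc_add:
            whcrc_release_rc_umc(whcrc);
    error_setup_rc_umc:
            kfree(whcrc);
    error_alloc:
            uwb_rc_put(uwb_rc);
    error_rc_alloc:
            return result;
    }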
423 struct whcrc *whcrc = umc_get_drvdata(umc_dev);
424 struct uwb_rc *uwb_rc = whcrc->uwb_rc;
428 whcrc_release_rc_umc(whcrc);
429 kfree(whcrc);
435 struct whcrc *whcrc = umc_get_drvdata(umc);
436 struct uwb_rc *uwb_rc = whcrc->uwb_rc;
444 struct whcrc *whcrc = umc_get_drvdata(umc);
445 struct uwb_rc *uwb_rc = whcrc->uwb_rc;
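The remaining hits (lines 423 to 445) are the teardown and the UMC bus reset hooks, which bracket a reset of the generic radio controller. A closing sketch; the hook names and the uwb_rc_rm()/uwb_rc_put()/uwb_rc_pre_reset()/uwb_rc_post_reset() calls are assumptions matching how this driver uses the UWB core in mainline 2.6.36:

    static void whcrc_remove(struct umc_dev *umc_dev)
    {
            struct whcrc *whcrc = umc_get_drvdata(umc_dev);
            struct uwb_rc *uwb_rc = whcrc->uwb_rc;

            umc_set_drvdata(umc_dev, NULL);
            uwb_rc_rm(uwb_rc);              /* unregister from the UWB stack */
            whcrc_release_rc_umc(whcrc);    /* free buffers, irq, mmio, region */
            kfree(whcrc);
            uwb_rc_put(uwb_rc);
    }

    static int whcrc_pre_reset(struct umc_dev *umc)
    {
            struct whcrc *whcrc = umc_get_drvdata(umc);
            struct uwb_rc *uwb_rc = whcrc->uwb_rc;

            uwb_rc_pre_reset(uwb_rc);
            return 0;
    }

    static int whcrc_post_reset(struct umc_dev *umc)
    {
            struct whcrc *whcrc = umc_get_drvdata(umc);
            struct uwb_rc *uwb_rc = whcrc->uwb_rc;

            return uwb_rc_post_reset(uwb_rc);
    }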