Lines Matching defs:xmm

294 		tsleep(xmm, 0, "wwancsl", msec * hz / 1000);	\
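
A note on the sleep macro at line 294: msec * hz / 1000 truncates toward zero, and a tsleep() timeout of 0 means "no timeout" on the BSDs, so a sub-tick delay would sleep indefinitely rather than briefly. A minimal sketch of the truncation (the hz value is just an example):

    #include <stdio.h>

    int main(void)
    {
        int hz = 100;  /* example kernel tick rate */
        for (int msec = 1; msec <= 16; msec *= 2)
            printf("msec=%2d -> ticks=%d\n", msec, msec * hz / 1000);
        /* msec < 10 yields 0 ticks, i.e. no timeout at all for tsleep() */
        return 0;
    }
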
479 struct xmm_dev *xmm;
566 struct xmm_dev *xmm;
586 static void xmm7360_poll(struct xmm_dev *xmm)
588 if (xmm->cp->status.code == 0xbadc0ded) {
589 dev_err(xmm->dev, "crashed but dma up\n");
590 xmm->error = -ENODEV;
592 if (xmm->bar2[BAR2_STATUS] != XMM_MODEM_READY) {
593 dev_err(xmm->dev, "bad status %x\n", xmm->bar2[BAR2_STATUS]);
594 xmm->error = -ENODEV;
598 static void xmm7360_ding(struct xmm_dev *xmm, int bell)
600 if (xmm->cp->status.asleep)
601 xmm->bar0[BAR0_WAKEUP] = 1;
602 xmm->bar0[BAR0_DOORBELL] = bell;
603 xmm7360_poll(xmm);
606 static int xmm7360_cmd_ring_wait(struct xmm_dev *xmm)
610 int ret = wait_event_interruptible_timeout(xmm->wq, (xmm->cp->c_rptr == xmm->cp->c_wptr) || xmm->error, msecs_to_jiffies(1000));
615 return xmm->error;
618 static int xmm7360_cmd_ring_execute(struct xmm_dev *xmm, u8 cmd, u8 parm, u16 len, dma_addr_t ptr, u32 extra)
620 u8 wptr = xmm->cp->c_wptr;
622 if (xmm->error)
623 return xmm->error;
624 if (new_wptr == xmm->cp->c_rptr) // ring full
627 xmm->cp->c_ring[wptr].ptr = ptr;
628 xmm->cp->c_ring[wptr].cmd = cmd;
629 xmm->cp->c_ring[wptr].parm = parm;
630 xmm->cp->c_ring[wptr].len = len;
631 xmm->cp->c_ring[wptr].extra = extra;
632 xmm->cp->c_ring[wptr].unk = 0;
633 xmm->cp->c_ring[wptr].flags = CMD_FLAG_READY;
635 xmm->cp->c_wptr = new_wptr;
637 xmm7360_ding(xmm, DOORBELL_CMD);
638 return xmm7360_cmd_ring_wait(xmm);
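
A note on the full test at line 624: the driver uses the classic one-slot-open ring convention. The write pointer is advanced modulo the ring size (the new_wptr computation falls on a non-matching line, presumably (wptr + 1) % CMD_RING_SIZE), and the ring counts as full when the incremented pointer would land on the read pointer. A user-space sketch of the arithmetic, assuming CMD_RING_SIZE is 0x80 (check the driver header):

    #include <stdint.h>
    #include <stdio.h>

    #define CMD_RING_SIZE 0x80  /* assumed value; check the driver header */

    /* One-slot-open convention: the ring is full when advancing the
     * write pointer would make it collide with the read pointer. */
    static int cmd_ring_full(uint8_t wptr, uint8_t rptr)
    {
        uint8_t new_wptr = (wptr + 1) % CMD_RING_SIZE;
        return new_wptr == rptr;
    }

    int main(void)
    {
        printf("%d\n", cmd_ring_full(0x7f, 0x00));  /* 1: wraps onto rptr */
        printf("%d\n", cmd_ring_full(0x10, 0x20));  /* 0: room left */
        return 0;
    }
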
641 static int xmm7360_cmd_ring_init(struct xmm_dev *xmm) {
645 xmm->cp = dma_alloc_coherent(xmm->dev, sizeof(struct control_page), &xmm->cp_phys, GFP_KERNEL);
646 BUG_ON(xmm->cp == NULL);
648 xmm->cp->ctl.status = xmm->cp_phys + offsetof(struct control_page, status);
649 xmm->cp->ctl.s_wptr = xmm->cp_phys + offsetof(struct control_page, s_wptr);
650 xmm->cp->ctl.s_rptr = xmm->cp_phys + offsetof(struct control_page, s_rptr);
651 xmm->cp->ctl.c_wptr = xmm->cp_phys + offsetof(struct control_page, c_wptr);
652 xmm->cp->ctl.c_rptr = xmm->cp_phys + offsetof(struct control_page, c_rptr);
653 xmm->cp->ctl.c_ring = xmm->cp_phys + offsetof(struct control_page, c_ring);
654 xmm->cp->ctl.c_ring_size = CMD_RING_SIZE;
656 xmm->bar2[BAR2_CONTROL] = xmm->cp_phys;
657 xmm->bar2[BAR2_CONTROLH] = xmm->cp_phys >> 32;
659 xmm->bar0[BAR0_MODE] = 1;
662 while (xmm->bar2[BAR2_MODE] == 0 && --timeout)
668 xmm->bar2[BAR2_BLANK0] = 0;
669 xmm->bar2[BAR2_BLANK1] = 0;
670 xmm->bar2[BAR2_BLANK2] = 0;
671 xmm->bar2[BAR2_BLANK3] = 0;
673 xmm->bar0[BAR0_MODE] = 2; // enable intrs?
676 while (xmm->bar2[BAR2_MODE] != 2 && --timeout)
683 ret = xmm7360_cmd_ring_execute(xmm, CMD_WAKEUP, 0, 1, 0, 0);
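
Two details of the bring-up sequence above are easy to miss in the excerpt. Lines 656-657 publish the control page's 64-bit DMA address as two 32-bit BAR writes, low half then high half; lines 659-676 then step BAR0_MODE through 1 and 2, spinning with a bounded timeout until BAR2_MODE echoes each value back. A sketch of the address split (the address is an arbitrary example):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t cp_phys = 0x0000000123456000ULL;  /* example DMA address */
        uint32_t lo = (uint32_t)cp_phys;           /* -> BAR2_CONTROL  */
        uint32_t hi = (uint32_t)(cp_phys >> 32);   /* -> BAR2_CONTROLH */
        printf("lo=0x%08x hi=0x%08x\n", lo, hi);   /* lo=0x23456000 hi=0x00000001 */
        return 0;
    }
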
690 static void xmm7360_cmd_ring_free(struct xmm_dev *xmm) {
691 if (xmm->bar0)
692 xmm->bar0[BAR0_MODE] = 0;
693 if (xmm->cp)
694 dma_free_coherent(xmm->dev, sizeof(struct control_page), (volatile void *)xmm->cp, xmm->cp_phys);
695 xmm->cp = NULL;
699 static void xmm7360_td_ring_activate(struct xmm_dev *xmm, u8 ring_id)
701 struct td_ring *ring = &xmm->td_ring[ring_id];
704 xmm->cp->s_rptr[ring_id] = xmm->cp->s_wptr[ring_id] = 0;
706 ret = xmm7360_cmd_ring_execute(xmm, CMD_RING_OPEN, ring_id, ring->depth, ring->tds_phys, 0x60);
710 static void xmm7360_td_ring_create(struct xmm_dev *xmm, u8 ring_id, u8 depth, u16 page_size)
712 struct td_ring *ring = &xmm->td_ring[ring_id];
722 ring->tds = dma_alloc_coherent(xmm->dev, sizeof(struct td_ring_entry)*depth, &ring->tds_phys, GFP_KERNEL);
728 ring->pages[i] = dma_alloc_coherent(xmm->dev, ring->page_size, &ring->pages_phys[i], GFP_KERNEL);
732 xmm7360_td_ring_activate(xmm, ring_id);
735 static void xmm7360_td_ring_deactivate(struct xmm_dev *xmm, u8 ring_id)
737 xmm7360_cmd_ring_execute(xmm, CMD_RING_CLOSE, ring_id, 0, 0, 0);
740 static void xmm7360_td_ring_destroy(struct xmm_dev *xmm, u8 ring_id)
742 struct td_ring *ring = &xmm->td_ring[ring_id];
747 dev_err(xmm->dev, "Tried destroying empty ring!\n");
751 xmm7360_td_ring_deactivate(xmm, ring_id);
754 dma_free_coherent(xmm->dev, ring->page_size, ring->pages[i], ring->pages_phys[i]);
760 dma_free_coherent(xmm->dev, sizeof(struct td_ring_entry)*depth, ring->tds, ring->tds_phys);
765 static void xmm7360_td_ring_write(struct xmm_dev *xmm, u8 ring_id, const void *buf, int len)
767 struct td_ring *ring = &xmm->td_ring[ring_id];
768 u8 wptr = xmm->cp->s_wptr[ring_id];
780 BUG_ON(wptr == xmm->cp->s_rptr[ring_id]);
782 xmm->cp->s_wptr[ring_id] = wptr;
785 static int xmm7360_td_ring_full(struct xmm_dev *xmm, u8 ring_id)
787 struct td_ring *ring = &xmm->td_ring[ring_id];
788 u8 wptr = xmm->cp->s_wptr[ring_id];
790 return wptr == xmm->cp->s_rptr[ring_id];
793 static void xmm7360_td_ring_read(struct xmm_dev *xmm, u8 ring_id)
795 struct td_ring *ring = &xmm->td_ring[ring_id];
796 u8 wptr = xmm->cp->s_wptr[ring_id];
799 dev_err(xmm->dev, "read on disabled ring\n");
804 dev_err(xmm->dev, "read on write ring\n");
814 BUG_ON(wptr == xmm->cp->s_rptr[ring_id]);
816 xmm->cp->s_wptr[ring_id] = wptr;
819 static struct queue_pair * xmm7360_init_qp(struct xmm_dev *xmm, int num, u8 depth, u16 page_size)
821 struct queue_pair *qp = &xmm->qp[num];
823 qp->xmm = xmm;
834 static void xmm7360_qp_arm(struct xmm_dev *xmm, struct queue_pair *qp)
836 while (!xmm7360_td_ring_full(xmm, qp->num*2+1))
837 xmm7360_td_ring_read(xmm, qp->num*2+1);
838 xmm7360_ding(xmm, DOORBELL_TD);
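
One point worth spelling out about the ring halves: even on a device-to-host ring, xmm7360_td_ring_read advances s_wptr, because a "read" posts an empty descriptor for the modem to fill; completions come back through s_rptr. Arming a queue pair is therefore just "post RX descriptors until the ring is full, then ring the doorbell once". A toy model of that fill loop, with hypothetical stand-ins for the ring operations:

    #include <stdint.h>
    #include <stdio.h>

    #define DEPTH 8  /* example ring depth */

    struct ring { uint8_t wptr, rptr; };  /* mirrors s_wptr/s_rptr */

    static int ring_full(const struct ring *r)
    {
        return (uint8_t)((r->wptr + 1) % DEPTH) == r->rptr;
    }

    static void post_rx(struct ring *r)  /* one td_ring_read */
    {
        r->wptr = (r->wptr + 1) % DEPTH;
    }

    int main(void)
    {
        struct ring rx = { 0, 0 };
        int posted = 0;

        while (!ring_full(&rx)) {  /* the qp_arm loop */
            post_rx(&rx);
            posted++;
        }
        printf("posted %d descriptors, doorbell once\n", posted);  /* 7 */
        return 0;
    }
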
843 struct xmm_dev *xmm = qp->xmm;
856 xmm7360_td_ring_create(xmm, qp->num*2, qp->depth, qp->page_size);
857 xmm7360_td_ring_create(xmm, qp->num*2+1, qp->depth, qp->page_size);
858 xmm7360_qp_arm(xmm, qp);
866 struct xmm_dev *xmm = qp->xmm;
869 xmm7360_td_ring_activate(xmm, qp->num*2);
870 xmm7360_td_ring_activate(xmm, qp->num*2+1);
871 xmm7360_qp_arm(xmm, qp);
876 struct xmm_dev *xmm = qp->xmm;
889 xmm7360_td_ring_destroy(xmm, qp->num*2);
890 xmm7360_td_ring_destroy(xmm, qp->num*2+1);
902 struct xmm_dev *xmm = qp->xmm;
905 xmm7360_td_ring_deactivate(xmm, qp->num*2);
910 struct xmm_dev *xmm = qp->xmm;
911 return !xmm7360_td_ring_full(xmm, qp->num*2);
916 struct xmm_dev *xmm = qp->xmm;
917 int page_size = qp->xmm->td_ring[qp->num*2].page_size;
918 if (xmm->error)
919 return xmm->error;
924 xmm7360_td_ring_write(xmm, qp->num*2, buf, size);
925 xmm7360_ding(xmm, DOORBELL_TD);
931 int page_size = qp->xmm->td_ring[qp->num*2].page_size;
946 struct xmm_dev *xmm = qp->xmm;
947 struct td_ring *ring = &xmm->td_ring[qp->num*2+1];
949 return (xmm->cp->s_rptr[qp->num*2+1] != ring->last_handled);
954 struct xmm_dev *xmm = qp->xmm;
955 struct td_ring *ring = &xmm->td_ring[qp->num*2+1];
958 ret = wait_event_interruptible(qp->wq, xmm7360_qp_has_data(qp) || xmm->error);
961 if (xmm->error)
962 return xmm->error;
974 xmm7360_td_ring_read(xmm, qp->num*2+1);
975 xmm7360_ding(xmm, DOORBELL_TD);
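
The blocking read at lines 958-962 sleeps on the queue pair's waitqueue until either data arrives or a device error is latched; putting xmm->error inside the wait condition is what lets a crashed modem unblock every sleeper. A user-space analogue of that shape, using pthreads in place of the kernel waitqueue API:

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Analogue of wait_event_interruptible(qp->wq,
     * xmm7360_qp_has_data(qp) || xmm->error): one predicate covers both
     * "data ready" and "device dead". */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
    static int has_data, error;

    static int wait_for_data(void)
    {
        int err;
        pthread_mutex_lock(&lock);
        while (!has_data && !error)
            pthread_cond_wait(&wq, &lock);
        err = error;
        pthread_mutex_unlock(&lock);
        return err;  /* nonzero: unblocked by the error path */
    }

    /* Analogue of the irq/poll path latching xmm->error, then wake_up(). */
    static void *fail_device(void *arg)
    {
        (void)arg;
        sleep(1);
        pthread_mutex_lock(&lock);
        error = -19;  /* -ENODEV */
        pthread_cond_broadcast(&wq);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, fail_device, NULL);
        printf("wait returned %d\n", wait_for_data());  /* -19 */
        pthread_join(t, NULL);
        return 0;
    }
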
983 struct xmm_dev *xmm = qp->xmm;
984 struct td_ring *ring = &xmm->td_ring[qp->num*2+1];
991 xmm7360_td_ring_read(xmm, qp->num*2+1);
992 xmm7360_ding(xmm, DOORBELL_TD);
1051 if (qp->xmm->error)
1071 val = qp->xmm->td_ring[qp->num*2].page_size;
1094 frame->max_size = xn->xmm->td_ring[0].page_size;
1177 static int xmm7360_mux_frame_push(struct xmm_dev *xmm, struct mux_frame *frame)
1183 ret = xmm7360_qp_write(xmm->net->qp, frame->data, frame->n_bytes);
1201 ret = xmm7360_mux_frame_push(xn->xmm, frame);
1229 ret = xmm7360_mux_frame_push(xn->xmm, frame);
1236 dev_err(xn->xmm->dev, "Failed to ship coalesced frame\n");
1239 static int xmm7360_base_init(struct xmm_dev *xmm)
1244 xmm->error = 0;
1245 xmm->num_ttys = 0;
1247 status = xmm->bar2[BAR2_STATUS];
1249 dev_info(xmm->dev, "modem still booting, waiting...\n");
1251 status = xmm->bar2[BAR2_STATUS];
1259 dev_err(xmm->dev, "unknown modem status: 0x%08x\n", status);
1263 dev_info(xmm->dev, "modem is ready\n");
1265 ret = xmm7360_cmd_ring_init(xmm);
1267 dev_err(xmm->dev, "Could not bring up command ring %d\n",
1287 dev_info(xn->xmm->dev, "Unexpected tag %x\n", first->tag);
1293 dev_err(xn->xmm->dev, "Unexpected tag %x, expected ADTH\n", adth->tag);
1305 xmm7360_os_handle_net_frame(xn->xmm,
1310 static void xmm7360_net_poll(struct xmm_dev *xmm)
1315 struct xmm_net *xn = xmm->net;
1321 ring = &xmm->td_ring[qp->num*2+1];
1333 xmm7360_td_ring_read(xmm, qp->num*2+1);
1334 xmm7360_ding(xmm, DOORBELL_TD);
1422 static void xmm7360_os_handle_net_frame(struct xmm_dev *xmm, const u8 *buf, size_t sz)
1435 skb->dev = xmm->netdev;
1470 if (netif_queue_stopped(xn->xmm->netdev))
1471 netif_wake_queue(xn->xmm->netdev);
1503 static int xmm7360_create_net(struct xmm_dev *xmm, int num)
1514 SET_NETDEV_DEV(netdev, xmm->dev);
1516 xmm->netdev = netdev;
1519 xn->xmm = xmm;
1520 xmm->net = xn;
1526 xn->qp = xmm7360_init_qp(xmm, num, 128, TD_MAX_PAGE_SIZE);
1533 xmm->netdev = NULL;
1540 static void xmm7360_destroy_net(struct xmm_dev *xmm)
1542 if (xmm->netdev) {
1543 xmm7360_qp_stop(xmm->net->qp);
1545 unregister_netdevice(xmm->netdev);
1547 free_netdev(xmm->netdev);
1548 xmm->net = NULL;
1549 xmm->netdev = NULL;
1554 struct xmm_dev *xmm = dev_id;
1558 xmm7360_poll(xmm);
1559 wake_up(&xmm->wq);
1560 if (xmm->td_ring) {
1561 if (xmm->net)
1562 xmm7360_net_poll(xmm);
1565 qp = &xmm->qp[id];
1594 static void xmm7360_dev_deinit(struct xmm_dev *xmm)
1597 xmm->error = -ENODEV;
1599 cancel_work_sync(&xmm->init_work);
1601 xmm7360_destroy_net(xmm);
1604 if (xmm->qp[i].xmm) {
1605 if (xmm->qp[i].cdev.owner) {
1606 cdev_del(&xmm->qp[i].cdev);
1607 device_unregister(&xmm->qp[i].dev);
1609 if (xmm->qp[i].port.ops) {
1610 tty_unregister_device(xmm7360_tty_driver, xmm->qp[i].tty_index);
1611 tty_port_destroy(&xmm->qp[i].port);
1614 memset(&xmm->qp[i], 0, sizeof(struct queue_pair));
1616 xmm7360_cmd_ring_free(xmm);
1622 struct xmm_dev *xmm = pci_get_drvdata(dev);
1624 xmm7360_dev_deinit(xmm);
1626 if (xmm->irq)
1627 free_irq(xmm->irq, xmm);
1632 kfree(xmm);
1669 return qp->xmm->td_ring[qp->num*2].page_size;
1714 static int xmm7360_create_tty(struct xmm_dev *xmm, int num)
1717 struct queue_pair *qp = xmm7360_init_qp(xmm, num, 8, 4096);
1722 qp->tty_index = xmm->num_ttys++;
1723 tty_dev = tty_port_register_device(&qp->port, xmm7360_tty_driver, qp->tty_index, xmm->dev);
1728 dev_err(xmm->dev, "Could not allocate tty?\n");
1736 static int xmm7360_create_cdev(struct xmm_dev *xmm, int num, const char *name, int cardnum)
1738 struct queue_pair *qp = xmm7360_init_qp(xmm, num, 16, TD_MAX_PAGE_SIZE);
1745 qp->dev.parent = &xmm->pci_dev->dev;
1751 dev_err(xmm->dev, "cdev_device_add: %d\n", ret);
1757 static int xmm7360_dev_init(struct xmm_dev *xmm)
1761 ret = xmm7360_base_init(xmm);
1765 ret = xmm7360_create_cdev(xmm, 1, "xmm%d/rpc", xmm->card_num);
1768 ret = xmm7360_create_cdev(xmm, 3, "xmm%d/trace", xmm->card_num);
1771 ret = xmm7360_create_tty(xmm, 2);
1774 ret = xmm7360_create_tty(xmm, 4);
1777 ret = xmm7360_create_tty(xmm, 7);
1780 ret = xmm7360_create_net(xmm, 0);
1789 struct xmm_dev *xmm = container_of(work, struct xmm_dev, init_work);
1790 xmm7360_dev_init(xmm);
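
The probe path defers the full bring-up to a workqueue: INIT_WORK at line 1842 binds init_work to xmm7360_dev_init_work, which recovers the containing xmm_dev via container_of at line 1789. A self-contained illustration of that idiom (a user-space re-statement of the kernel macro, with demo struct names, not the driver's own types):

    #include <stddef.h>
    #include <stdio.h>

    /* User-space re-statement of the kernel's container_of(). */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct { int pending; };

    struct xmm_dev_demo {           /* hypothetical stand-in */
        int card_num;
        struct work_struct init_work;
    };

    static void dev_init_work(struct work_struct *work)
    {
        struct xmm_dev_demo *xmm =
            container_of(work, struct xmm_dev_demo, init_work);
        printf("init for card %d\n", xmm->card_num);
    }

    int main(void)
    {
        struct xmm_dev_demo d = { .card_num = 3 };
        dev_init_work(&d.init_work);  /* prints: init for card 3 */
        return 0;
    }
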
1795 struct xmm_dev *xmm = kzalloc(sizeof(struct xmm_dev), GFP_KERNEL);
1798 xmm->pci_dev = dev;
1799 xmm->dev = &dev->dev;
1801 if (!xmm) {
1815 dev_err(xmm->dev, "Cannot set DMA mask\n");
1818 dma_set_coherent_mask(xmm->dev, 0xffffffffffffffff);
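
The probe excerpt above preserves a real ordering bug: the kzalloc result from line 1795 is dereferenced at lines 1798-1799 before the NULL check at line 1801 runs, so a failed allocation would oops instead of returning -ENOMEM. The corrected shape, as a fragment in the driver's own style:

    struct xmm_dev *xmm = kzalloc(sizeof(struct xmm_dev), GFP_KERNEL);
    if (!xmm)
        return -ENOMEM;  /* check before the first xmm-> dereference */
    xmm->pci_dev = dev;
    xmm->dev = &dev->dev;
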
1826 xmm->bar0 = pci_iomap(dev, 0, pci_resource_len(dev, 0));
1833 xmm->bar2 = pci_iomap(dev, 2, pci_resource_len(dev, 2));
1841 init_waitqueue_head(&xmm->wq);
1842 INIT_WORK(&xmm->init_work, xmm7360_dev_init_work);
1844 pci_set_drvdata(dev, xmm);
1846 ret = xmm7360_dev_init(xmm);
1850 xmm->irq = pci_irq_vector(dev, 0);
1851 ret = request_irq(xmm->irq, xmm7360_irq0, 0, "xmm7360", xmm);
1860 xmm7360_dev_deinit(xmm);
1875 ret = alloc_chrdev_region(&xmm_base, 0, 8, "xmm");
2005 static int xmm7360_dev_init(struct xmm_dev *xmm)
2010 ret = xmm7360_base_init(xmm);
2032 xmm7360_init_qp(xmm, num, depth, page_size);
2038 static void xmm7360_dev_deinit(struct xmm_dev *xmm)
2040 struct wwanc_softc *sc = device_private(xmm->dev);
2044 xmm->error = -ENODEV;
2048 KASSERT(xmm->net == NULL);
2065 xmm7360_cmd_ring_free(xmm);
2085 struct xmm_dev *xmm = &sc->sc_xmm;
2088 xmm7360_poll(xmm);
2089 wakeup(&xmm->wq);
2091 if (xmm->net && xmm->net->qp->open && xmm7360_qp_has_data(xmm->net->qp))
2092 xmm7360_net_poll(xmm);
2095 qp = &xmm->qp[func];
2197 /* Set xmm members needed for xmm7360_dev_init() */
2294 struct xmm_dev *xmm = &sc->sc_xmm;
2298 KASSERT(xmm->cp != NULL);
2301 qp = &xmm->qp[i];
2306 xmm7360_cmd_ring_free(xmm);
2307 KASSERT(xmm->cp == NULL);
2314 struct xmm_dev *xmm = &sc->sc_xmm;
2317 KASSERT(xmm->cp == NULL);
2319 xmm7360_base_init(xmm);
2322 qp = &xmm->qp[i];
2429 struct xmm_dev *xmm = qp->xmm;
2430 struct wwanc_softc *sc = device_private(xmm->dev);
2729 if (qp->xmm->error) {
2755 struct xmm_dev *xmm = qp->xmm;
2756 int func = qp - xmm->qp;
2757 struct wwanc_softc *sc = container_of(xmm, struct wwanc_softc, sc_xmm);
2786 struct xmm_dev *xmm = qp->xmm;
2787 int func = qp - xmm->qp;
2788 struct wwanc_softc *sc = container_of(xmm, struct wwanc_softc, sc_xmm);
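
The BSD port at lines 2755-2757 (and again at 2786-2788) recovers the channel number with func = qp - xmm->qp, then walks from the embedded xmm_dev back to its softc via container_of. Pointer subtraction against the array base yields an element index, not a byte offset; a minimal demonstration with a hypothetical demo type:

    #include <stdio.h>

    struct queue_pair_demo { int depth; };

    int main(void)
    {
        struct queue_pair_demo qps[8];
        struct queue_pair_demo *qp = &qps[5];
        int func = (int)(qp - qps);  /* element index, not byte offset */
        printf("func=%d\n", func);   /* 5 */
        return 0;
    }
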
2921 static void xmm7360_os_handle_net_frame(struct xmm_dev *xmm, const u8 *buf, size_t sz)
2923 struct wwanc_softc *sc = device_private(xmm->dev);
3226 struct xmm_dev *xmm;
3231 xmm = sc_if->sc_xmm_net.xmm = &sc_if->sc_parent->sc_xmm;
3236 xn->qp = &xmm->qp[0];
3238 xmm->net = xn;
3298 sc_if->sc_xmm_net.xmm->net = NULL;