Lines matching references to conn:

168 	struct iucv_connection *conn;
179 struct iucv_connection *conn;
246 static char *netiucv_printuser(struct iucv_connection *conn)
252 if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
255 memcpy(tmp_uid, netiucv_printname(conn->userid, 8), 8);
256 memcpy(tmp_udat, conn->userdata, 16);
262 return netiucv_printname(conn->userid, 8);
481 struct iucv_connection *conn = path->private;
484 ev.conn = conn;
486 fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
492 struct iucv_connection *conn = path->private;
495 ev.conn = conn;
497 fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
502 struct iucv_connection *conn = path->private;
504 fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
510 struct iucv_connection *conn = path->private;
521 list_for_each_entry(conn, &iucv_connection_list, list) {
522 if (strncmp(ipvmid, conn->userid, 8) ||
523 strncmp(ipuser, conn->userdata, 16))
526 conn->path = path;
527 ev.conn = conn;
529 fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
540 struct iucv_connection *conn = path->private;
542 fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
547 struct iucv_connection *conn = path->private;
549 fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
554 struct iucv_connection *conn = path->private;
556 fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
572 * @conn: The connection where this skb has been received.
578 static void netiucv_unpack_skb(struct iucv_connection *conn,
581 struct net_device *dev = conn->netdev;
632 struct iucv_connection *conn = ev->conn;
634 struct netiucv_priv *privptr = netdev_priv(conn->netdev);
639 if (!conn->netdev) {
640 iucv_message_reject(conn->path, msg);
645 if (msg->length > conn->max_buffsize) {
646 iucv_message_reject(conn->path, msg);
649 msg->length, conn->max_buffsize);
652 conn->rx_buff->data = conn->rx_buff->head;
653 skb_reset_tail_pointer(conn->rx_buff);
654 conn->rx_buff->len = 0;
655 rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
662 netiucv_unpack_skb(conn, conn->rx_buff);
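The receive action at 632-662 rejects the message when the device is gone or the message exceeds max_buffsize, otherwise rewinds the single preallocated rx_buff, receives the IUCV message into it and unpacks it. A condensed sketch; statistics, debug tracing and the exact error paths are omitted or assumed:

/* Condensed from conn_action_rx() (632-662 above); treat as a sketch only. */
static void conn_action_rx(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	int rc;

	if (!conn->netdev || msg->length > conn->max_buffsize) {
		iucv_message_reject(conn->path, msg);
		return;
	}
	/* reuse the preallocated receive buffer for every message */
	conn->rx_buff->data = conn->rx_buff->head;
	skb_reset_tail_pointer(conn->rx_buff);
	conn->rx_buff->len = 0;
	rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
				  msg->length, NULL);
	if (!rc)
		netiucv_unpack_skb(conn, conn->rx_buff);
}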
668 struct iucv_connection *conn = ev->conn;
683 if (!conn || !conn->netdev) {
688 privptr = netdev_priv(conn->netdev);
689 conn->prof.tx_pending--;
691 if ((skb = skb_dequeue(&conn->commit_queue))) {
702 conn->tx_buff->data = conn->tx_buff->head;
703 skb_reset_tail_pointer(conn->tx_buff);
704 conn->tx_buff->len = 0;
705 spin_lock_irqsave(&conn->collect_lock, saveflags);
706 while ((skb = skb_dequeue(&conn->collect_queue))) {
707 header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
708 skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
710 skb_put(conn->tx_buff, skb->len),
718 if (conn->collect_len > conn->prof.maxmulti)
719 conn->prof.maxmulti = conn->collect_len;
720 conn->collect_len = 0;
721 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
722 if (conn->tx_buff->len == 0) {
728 skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
729 conn->prof.send_stamp = jiffies;
732 rc = iucv_message_send(conn->path, &txmsg, 0, 0,
733 conn->tx_buff->data, conn->tx_buff->len);
734 conn->prof.doios_multi++;
735 conn->prof.txlen += conn->tx_buff->len;
736 conn->prof.tx_pending++;
737 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
738 conn->prof.tx_max_pending = conn->prof.tx_pending;
740 conn->prof.tx_pending--;
750 if (stat_maxcq > conn->prof.maxcqueue)
751 conn->prof.maxcqueue = stat_maxcq;
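The TX-done action at 668-751 drains the collect queue: every collected skb is packed into the preallocated tx_buff behind a link-level header whose next field points at the following frame, and the whole buffer goes out as one IUCV message. A hypothetical helper condensing that flush; the helper name, the ll_header handling and the skb reference drop are simplified assumptions, and the profiling counters are left to the caller as in the listing:

/* Hypothetical helper condensing the collect-queue flush performed in
 * conn_action_txdone() (668-751 above). */
static void netiucv_flush_collect_queue(struct iucv_connection *conn)
{
	struct iucv_message txmsg;
	struct ll_header header;
	struct sk_buff *skb;
	unsigned long saveflags;
	int rc;

	/* rewind the single preallocated transmit buffer */
	conn->tx_buff->data = conn->tx_buff->head;
	skb_reset_tail_pointer(conn->tx_buff);
	conn->tx_buff->len = 0;

	spin_lock_irqsave(&conn->collect_lock, saveflags);
	while ((skb = skb_dequeue(&conn->collect_queue))) {
		/* each frame is prefixed with a header pointing at the next one */
		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
		skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
		skb_copy_from_linear_data(skb,
					  skb_put(conn->tx_buff, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);		/* reference handling simplified */
	}
	conn->collect_len = 0;
	spin_unlock_irqrestore(&conn->collect_lock, saveflags);

	if (!conn->tx_buff->len)
		return;
	header.next = 0;			/* terminating header */
	skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
	txmsg.class = 0;
	txmsg.tag = 0;
	rc = iucv_message_send(conn->path, &txmsg, 0, 0,
			       conn->tx_buff->data, conn->tx_buff->len);
	/* rc and the doios_multi/txlen/tx_pending counters are handled by
	 * the caller in the real driver */
	(void)rc;
}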
768 struct iucv_connection *conn = ev->conn;
770 struct net_device *netdev = conn->netdev;
776 conn->path = path;
779 rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
785 netdev->tx_queue_len = conn->path->msglim;
800 struct iucv_connection *conn = arg;
801 struct net_device *netdev = conn->netdev;
805 fsm_deltimer(&conn->timer);
807 netdev->tx_queue_len = conn->path->msglim;
813 struct iucv_connection *conn = arg;
816 fsm_deltimer(&conn->timer);
817 iucv_path_sever(conn->path, conn->userdata);
823 struct iucv_connection *conn = arg;
824 struct net_device *netdev = conn->netdev;
829 fsm_deltimer(&conn->timer);
830 iucv_path_sever(conn->path, conn->userdata);
832 "connection\n", netiucv_printuser(conn));
841 struct iucv_connection *conn = arg;
842 struct net_device *netdev = conn->netdev;
857 conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
859 netdev->name, netiucv_printuser(conn));
861 rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
862 NULL, conn->userdata, conn);
865 netdev->tx_queue_len = conn->path->msglim;
866 fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
867 CONN_EVENT_TIMER, conn);
872 netiucv_printname(conn->userid, 8));
878 " guest %s\n", netiucv_printname(conn->userid, 8));
891 netiucv_printname(conn->userid, 8));
908 kfree(conn->path);
909 conn->path = NULL;
925 struct iucv_connection *conn = ev->conn;
926 struct net_device *netdev = conn->netdev;
931 fsm_deltimer(&conn->timer);
933 netiucv_purge_skb_queue(&conn->collect_queue);
934 if (conn->path) {
936 iucv_path_sever(conn->path, conn->userdata);
937 kfree(conn->path);
938 conn->path = NULL;
940 netiucv_purge_skb_queue(&conn->commit_queue);
946 struct iucv_connection *conn = arg;
947 struct net_device *netdev = conn->netdev;
950 netdev->name, conn->userid);
1008 fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1027 ev.conn = privptr->conn;
1030 fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1055 netiucv_printuser(privptr->conn));
1110 * @param conn Connection to be used for sending.
1117 static int netiucv_transmit_skb(struct iucv_connection *conn,
1125 if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1128 spin_lock_irqsave(&conn->collect_lock, saveflags);
1129 if (conn->collect_len + l >
1130 (conn->max_buffsize - NETIUCV_HDRLEN)) {
1136 skb_queue_tail(&conn->collect_queue, skb);
1137 conn->collect_len += l;
1140 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1171 fsm_newstate(conn->fsm, CONN_STATE_TX);
1172 conn->prof.send_stamp = jiffies;
1176 rc = iucv_message_send(conn->path, &msg, 0, 0,
1178 conn->prof.doios_single++;
1179 conn->prof.txlen += skb->len;
1180 conn->prof.tx_pending++;
1181 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1182 conn->prof.tx_max_pending = conn->prof.tx_pending;
1185 fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1186 conn->prof.tx_pending--;
1187 privptr = netdev_priv(conn->netdev);
1205 skb_queue_tail(&conn->commit_queue, nskb);
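netiucv_transmit_skb() (1117-1205 above) is the producer side of that collect queue: while the FSM is not idle, outgoing skbs are parked on collect_queue, bounded by max_buffsize, and flushed later from the TX-done handler; otherwise the skb is sent directly and kept on commit_queue until completion. A sketch of the busy branch only, pulled out as a hypothetical helper; the helper name, the refcount handling and the return codes are assumptions:

/* Hypothetical helper showing the "collect while busy" branch of
 * netiucv_transmit_skb() (1117-1205 above). */
static int netiucv_collect_skb(struct iucv_connection *conn,
			       struct sk_buff *skb)
{
	int l = skb->len + NETIUCV_HDRLEN;
	unsigned long saveflags;
	int rc = 0;

	spin_lock_irqsave(&conn->collect_lock, saveflags);
	if (conn->collect_len + l > (conn->max_buffsize - NETIUCV_HDRLEN)) {
		rc = -EBUSY;			/* collect buffer would overflow */
	} else {
		refcount_inc(&skb->users);	/* keep the skb until it is flushed */
		skb_queue_tail(&conn->collect_queue, skb);
		conn->collect_len += l;
	}
	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
	return rc;
}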
1291 rc = netiucv_transmit_skb(privptr->conn, skb);
1322 return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
1377 struct net_device *ndev = priv->conn->netdev;
1388 if (memcmp(username, priv->conn->userid, 9) &&
1405 memcpy(priv->conn->userid, username, 9);
1406 memcpy(priv->conn->userdata, userdata, 17);
1418 return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1425 struct net_device *ndev = priv->conn->netdev;
1460 priv->conn->max_buffsize = bs1;
1487 return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1498 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1508 priv->conn->prof.maxmulti = 0;
1520 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1529 priv->conn->prof.maxcqueue = 0;
1541 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1550 priv->conn->prof.doios_single = 0;
1562 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1571 priv->conn->prof.doios_multi = 0;
1583 return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1592 priv->conn->prof.txlen = 0;
1604 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1613 priv->conn->prof.tx_time = 0;
1625 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1634 priv->conn->prof.tx_pending = 0;
1646 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1655 priv->conn->prof.tx_max_pending = 0;
1745 struct iucv_connection *conn;
1747 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1748 if (!conn)
1750 skb_queue_head_init(&conn->collect_queue);
1751 skb_queue_head_init(&conn->commit_queue);
1752 spin_lock_init(&conn->collect_lock);
1753 conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1754 conn->netdev = dev;
1756 conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1757 if (!conn->rx_buff)
1759 conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1760 if (!conn->tx_buff)
1762 conn->fsm = init_fsm("netiucvconn", conn_state_names,
1766 if (!conn->fsm)
1769 fsm_settimer(conn->fsm, &conn->timer);
1770 fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1773 memcpy(conn->userdata, userdata, 17);
1775 memcpy(conn->userid, username, 9);
1776 fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1780 list_add_tail(&conn->list, &iucv_connection_list);
1782 return conn;
1785 kfree_skb(conn->tx_buff);
1787 kfree_skb(conn->rx_buff);
1789 kfree(conn);
1798 static void netiucv_remove_connection(struct iucv_connection *conn)
1803 list_del_init(&conn->list);
1805 fsm_deltimer(&conn->timer);
1806 netiucv_purge_skb_queue(&conn->collect_queue);
1807 if (conn->path) {
1808 iucv_path_sever(conn->path, conn->userdata);
1809 kfree(conn->path);
1810 conn->path = NULL;
1812 netiucv_purge_skb_queue(&conn->commit_queue);
1813 kfree_fsm(conn->fsm);
1814 kfree_skb(conn->rx_buff);
1815 kfree_skb(conn->tx_buff);
1831 if (privptr->conn)
1832 netiucv_remove_connection(privptr->conn);
1835 privptr->conn = NULL; privptr->fsm = NULL;
1888 privptr->conn = netiucv_new_connection(dev, username, userdata);
1889 if (!privptr->conn) {
1956 netiucv_printuser(priv->conn));
2003 priv->conn->userid);
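The tail of the listing shows the connection lifecycle: netiucv_new_connection() (1745-1793) allocates the structure, its receive and transmit buffers and the FSM, then links it onto iucv_connection_list; netiucv_remove_connection() (1798-1815) undoes that. A condensed sketch of the teardown, with list locking and debug tracing omitted; freeing of the connection structure itself is not visible in the matches above, so it is left out here:

/* Condensed from netiucv_remove_connection() (1798-1815 above). */
static void netiucv_remove_connection(struct iucv_connection *conn)
{
	list_del_init(&conn->list);		/* unlink from iucv_connection_list */
	fsm_deltimer(&conn->timer);		/* stop the connection timer */
	netiucv_purge_skb_queue(&conn->collect_queue);
	if (conn->path) {			/* sever and free the IUCV path */
		iucv_path_sever(conn->path, conn->userdata);
		kfree(conn->path);
		conn->path = NULL;
	}
	netiucv_purge_skb_queue(&conn->commit_queue);
	kfree_fsm(conn->fsm);			/* release the FSM and the buffers */
	kfree_skb(conn->rx_buff);
	kfree_skb(conn->tx_buff);
}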