Lines Matching refs:nl (uses of the driver-private struct net_local *nl in the PLIP parallel-port network driver, drivers/net/plip/plip.c)

291 struct net_local *nl = netdev_priv(dev);
302 nl->port_owner = 0;
305 nl->trigger = PLIP_TRIGGER_WAIT;
306 nl->nibble = PLIP_NIBBLE_WAIT;
309 INIT_WORK(&nl->immediate, plip_bh);
310 INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);
313 INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);
315 spin_lock_init(&nl->lock);
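
Taken together, lines 291-315 are the per-device initialization: reset the private state, install the default handshake wait budgets, and set up the three work items plus the lock that serializes the state machine. A commented reconstruction of the block (the comments and the enclosing function are my reading of the fragments, not quoted source):

        struct net_local *nl = netdev_priv(dev);

        nl->port_owner = 0;     /* port is claimed only while needed */

        /* Default handshake wait budgets. */
        nl->trigger = PLIP_TRIGGER_WAIT;
        nl->nibble  = PLIP_NIBBLE_WAIT;

        /* immediate: run the protocol state machine now;
           deferred:  re-kick it one tick later after a soft failure;
           timer:     poll the port when no interrupt line is usable. */
        INIT_WORK(&nl->immediate, plip_bh);
        INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);
        INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);

        spin_lock_init(&nl->lock);
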
324 struct net_local *nl =
327 if (nl->is_deferred)
328 schedule_work(&nl->immediate);
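
Line 324 is cut short by the match filter. Judging from the container_of() idiom visible at line 368 and the deferred/immediate pairing above, the kick handler most likely reads in full:

        static void plip_kick_bh(struct work_struct *work)
        {
                struct net_local *nl =
                        container_of(work, struct net_local, deferred.work);

                /* A deferred retry was requested: run the state machine. */
                if (nl->is_deferred)
                        schedule_work(&nl->immediate);
        }
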
342 static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
352 typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
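
The call at line 376 passes (nl->dev, nl, snd, rcv), so the typedef truncated at line 352 presumably continues with the two plip_local pointers:

        typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
                                 struct plip_local *snd, struct plip_local *rcv);
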
368 struct net_local *nl = container_of(work, struct net_local, immediate);
369 struct plip_local *snd = &nl->snd_data;
370 struct plip_local *rcv = &nl->rcv_data;
374 nl->is_deferred = 0;
375 f = connection_state_table[nl->connection];
376 if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK &&
377 (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
378 nl->is_deferred = 1;
379 schedule_delayed_work(&nl->deferred, 1);
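
Lines 368-379 are nearly the whole bottom half: look up the handler for the current connection state, run it, and if it fails even after plip_bh_timeout_error() has had a chance to recover, mark the work deferred and retry one tick later. Filled out with the declarations the filter dropped (a reconstruction):

        static void plip_bh(struct work_struct *work)
        {
                struct net_local *nl = container_of(work, struct net_local, immediate);
                struct plip_local *snd = &nl->snd_data;
                struct plip_local *rcv = &nl->rcv_data;
                plip_func f;
                int r;

                nl->is_deferred = 0;
                f = connection_state_table[nl->connection];
                if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK &&
                    (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
                        nl->is_deferred = 1;
                        schedule_delayed_work(&nl->deferred, 1);
                }
        }
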
386 struct net_local *nl =
389 if (!(atomic_read (&nl->kill_timer))) {
390 plip_interrupt (nl->dev);
392 schedule_delayed_work(&nl->timer, 1);
395 complete(&nl->killed_timer_cmp);
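
Lines 386-395 outline the polling fallback: while kill_timer stays clear, fake an interrupt and reschedule for the next tick; once plip_close() sets kill_timer (line 1140), signal killed_timer_cmp so close can finish. Completed (a reconstruction):

        static void plip_timer_bh(struct work_struct *work)
        {
                struct net_local *nl =
                        container_of(work, struct net_local, timer.work);

                if (!(atomic_read (&nl->kill_timer))) {
                        plip_interrupt (nl->dev);       /* poll in lieu of an IRQ */
                        schedule_delayed_work(&nl->timer, 1);
                } else {
                        complete(&nl->killed_timer_cmp);
                }
        }
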
400 plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
415 spin_lock_irq(&nl->lock);
416 if (nl->connection == PLIP_CN_SEND) {
419 nl->timeout_count++;
420 if ((error == HS_TIMEOUT && nl->timeout_count <= 10) ||
421 nl->timeout_count <= 3) {
422 spin_unlock_irq(&nl->lock);
433 } else if (nl->connection == PLIP_CN_RECEIVE) {
436 spin_unlock_irq(&nl->lock);
440 if (++nl->timeout_count <= 3) {
441 spin_unlock_irq(&nl->lock);
461 spin_unlock_irq(&nl->lock);
468 nl->connection = PLIP_CN_ERROR;
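
The timeout handler (lines 400-468) applies a retry budget under nl->lock: on the send side a handshake timeout (HS_TIMEOUT) may be retried up to 10 times and any other timeout up to 3; the receive side gets 3. While the budget lasts, the lock is dropped and the caller retries; once it is spent, the machine is parked in PLIP_CN_ERROR (line 468). A condensed sketch of that control flow, with the statistics, diagnostics, and skb cleanup elided (TIMEOUT is the driver's internal retry code, inferred from the OK/HS_TIMEOUT codes visible above):

        spin_lock_irq(&nl->lock);
        if (nl->connection == PLIP_CN_SEND) {
                nl->timeout_count++;
                if ((error == HS_TIMEOUT && nl->timeout_count <= 10) ||
                    nl->timeout_count <= 3) {
                        spin_unlock_irq(&nl->lock);
                        return TIMEOUT;         /* budget left: retry later */
                }
                /* budget spent: account the tx error (elided) */
        } else if (nl->connection == PLIP_CN_RECEIVE) {
                if (++nl->timeout_count <= 3) {
                        spin_unlock_irq(&nl->lock);
                        return TIMEOUT;
                }
                /* budget spent: account the dropped rx packet (elided) */
        }
        /* free any half-transferred skbs (elided) */
        spin_unlock_irq(&nl->lock);
        nl->connection = PLIP_CN_ERROR;
        return TIMEOUT;
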
475 plip_none(struct net_device *dev, struct net_local *nl,
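
plip_none() is the PLIP_CN_NONE entry of the state table; with no connection there is nothing to do, so it almost certainly just reports success:

        static int
        plip_none(struct net_device *dev, struct net_local *nl,
                  struct plip_local *snd, struct plip_local *rcv)
        {
                return OK;
        }
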
588 plip_receive_packet(struct net_device *dev, struct net_local *nl,
591 unsigned short nibble_timeout = nl->nibble;
608 if (plip_receive(nl->trigger, dev,
612 nl->is_deferred = 1;
613 nl->connection = PLIP_CN_SEND;
614 schedule_delayed_work(&nl->deferred, 1);
688 spin_lock_irq(&nl->lock);
690 nl->connection = PLIP_CN_SEND;
691 spin_unlock_irq(&nl->lock);
692 schedule_work(&nl->immediate);
697 nl->connection = PLIP_CN_NONE;
698 spin_unlock_irq(&nl->lock);
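
In the receive path (lines 588-698), nl->nibble and nl->trigger serve as the handshake wait budgets. If the trigger-timed read fails while a transmit is pending, the receive is abandoned and the machine flips to PLIP_CN_SEND (lines 612-614) so the queued packet gets its turn. When a packet has fully arrived, the hand-off at lines 688-698 plausibly looks like this (the snd->state test is my assumption; the listing shows only the branch bodies):

        spin_lock_irq(&nl->lock);
        if (snd->state != PLIP_PK_DONE) {
                /* A transmit is waiting: become the sender. */
                nl->connection = PLIP_CN_SEND;
                spin_unlock_irq(&nl->lock);
                schedule_work(&nl->immediate);
        } else {
                /* Nothing queued: go idle. */
                nl->connection = PLIP_CN_NONE;
                spin_unlock_irq(&nl->lock);
        }
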
756 plip_send_packet(struct net_device *dev, struct net_local *nl,
759 unsigned short nibble_timeout = nl->nibble;
778 cx = nl->trigger;
781 spin_lock_irq(&nl->lock);
782 if (nl->connection == PLIP_CN_RECEIVE) {
783 spin_unlock_irq(&nl->lock);
790 spin_unlock_irq(&nl->lock);
793 if (nl->connection == PLIP_CN_RECEIVE) {
809 nl->timeout_count = 0;
812 spin_unlock_irq(&nl->lock);
865 nl->connection = PLIP_CN_CLOSING;
866 nl->is_deferred = 1;
867 schedule_delayed_work(&nl->deferred, 1);
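
The send path (lines 756-867) resolves send/receive collisions in the receiver's favour: after raising the trigger it polls under nl->lock, and if an interrupt has meanwhile switched the state to PLIP_CN_RECEIVE the transmit backs off (lines 781-793). A successful handshake zeroes timeout_count (line 809); a finished packet moves the machine to PLIP_CN_CLOSING and defers the final step by one tick (lines 865-867). The collision loop, roughly (peer_acked stands in for the elided status-line test):

        cx = nl->trigger;                       /* handshake wait budget */
        while (1) {
                spin_lock_irq(&nl->lock);
                if (nl->connection == PLIP_CN_RECEIVE) {
                        spin_unlock_irq(&nl->lock);
                        return OK;              /* peer won the race: rx first */
                }
                if (peer_acked) {               /* status-line test, elided */
                        nl->timeout_count = 0;
                        spin_unlock_irq(&nl->lock);
                        break;                  /* proceed with the transfer */
                }
                spin_unlock_irq(&nl->lock);
                if (--cx == 0)
                        return HS_TIMEOUT;      /* peer never answered */
        }
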
876 plip_connection_close(struct net_device *dev, struct net_local *nl,
879 spin_lock_irq(&nl->lock);
880 if (nl->connection == PLIP_CN_CLOSING) {
881 nl->connection = PLIP_CN_NONE;
884 spin_unlock_irq(&nl->lock);
885 if (nl->should_relinquish) {
886 nl->should_relinquish = nl->port_owner = 0;
887 parport_release(nl->pardev);
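
plip_connection_close() (lines 876-887) returns the machine to idle and, if a preemption request arrived mid-connection (should_relinquish, set at line 1180), hands the parallel port back. Its likely full form (netif_wake_queue() is my assumption for the elided line 882):

        static int
        plip_connection_close(struct net_device *dev, struct net_local *nl,
                              struct plip_local *snd, struct plip_local *rcv)
        {
                spin_lock_irq(&nl->lock);
                if (nl->connection == PLIP_CN_CLOSING) {
                        nl->connection = PLIP_CN_NONE;
                        netif_wake_queue(dev);
                }
                spin_unlock_irq(&nl->lock);
                if (nl->should_relinquish) {
                        nl->should_relinquish = nl->port_owner = 0;
                        parport_release(nl->pardev);
                }
                return OK;
        }
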
894 plip_error(struct net_device *dev, struct net_local *nl,
903 nl->connection = PLIP_CN_NONE;
904 nl->should_relinquish = 0;
910 nl->is_deferred = 1;
911 schedule_delayed_work(&nl->deferred, 1);
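
plip_error() (lines 894-911) is the recovery state: once the status lines look quiet again it drops back to PLIP_CN_NONE and clears any pending relinquish; until then it keeps rescheduling itself one tick at a time. In outline (port_is_quiet stands in for the elided status test):

        if (port_is_quiet) {
                nl->connection = PLIP_CN_NONE;
                nl->should_relinquish = 0;
                /* restart the tx queue, re-enable interrupts (elided) */
        } else {
                nl->is_deferred = 1;
                schedule_delayed_work(&nl->deferred, 1);
        }
        return OK;
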
922 struct net_local *nl;
927 nl = netdev_priv(dev);
928 rcv = &nl->rcv_data;
930 spin_lock_irqsave (&nl->lock, flags);
936 spin_unlock_irqrestore (&nl->lock, flags);
943 switch (nl->connection) {
950 nl->connection = PLIP_CN_RECEIVE;
951 nl->timeout_count = 0;
952 schedule_work(&nl->immediate);
966 spin_unlock_irqrestore(&nl->lock, flags);
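
The interrupt handler (lines 922-966) takes nl->lock with irqsave, since it can run in hard-irq context as well as from the poll timer at line 390. An interrupt while idle means the peer wants to talk, so the machine becomes the receiver and the bottom half is kicked; roughly:

        switch (nl->connection) {
        case PLIP_CN_NONE:                      /* idle: peer is calling */
                nl->connection = PLIP_CN_RECEIVE;
                nl->timeout_count = 0;
                schedule_work(&nl->immediate);
                break;
        default:
                /* remaining states elided by the match filter */
                break;
        }
        spin_unlock_irqrestore(&nl->lock, flags);
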
972 struct net_local *nl = netdev_priv(dev);
973 struct plip_local *snd = &nl->snd_data;
979 if (!nl->port_owner) {
980 if (parport_claim(nl->pardev))
982 nl->port_owner = 1;
996 spin_lock_irq(&nl->lock);
1000 if (nl->connection == PLIP_CN_NONE) {
1001 nl->connection = PLIP_CN_SEND;
1002 nl->timeout_count = 0;
1004 schedule_work(&nl->immediate);
1005 spin_unlock_irq(&nl->lock);
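
The transmit entry point (lines 972-1005) claims the parallel port on demand (lines 979-982: a packet can arrive while the port has been preempted away), then, under the lock, becomes the sender only if the machine is idle, leaving the skb for the bottom half to clock out. In outline (the NETDEV_TX_BUSY return and the skb bookkeeping are assumed, as the listing elides them):

        if (!nl->port_owner) {
                if (parport_claim(nl->pardev))
                        return NETDEV_TX_BUSY;  /* port busy elsewhere */
                nl->port_owner = 1;
        }

        spin_lock_irq(&nl->lock);
        /* queue the skb into snd (elided) */
        if (nl->connection == PLIP_CN_NONE) {
                nl->connection = PLIP_CN_SEND;
                nl->timeout_count = 0;
        }
        schedule_work(&nl->immediate);
        spin_unlock_irq(&nl->lock);
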
1069 struct net_local *nl = netdev_priv(dev);
1073 if (!nl->port_owner) {
1074 if (parport_claim(nl->pardev)) return -EAGAIN;
1075 nl->port_owner = 1;
1078 nl->should_relinquish = 0;
1087 atomic_set (&nl->kill_timer, 0);
1088 schedule_delayed_work(&nl->timer, 1);
1092 nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
1093 nl->rcv_data.skb = nl->snd_data.skb = NULL;
1094 nl->connection = PLIP_CN_NONE;
1095 nl->is_deferred = 0;
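
plip_open() (lines 1069-1095) mirrors that: claim the port if we do not already own it, failing with -EAGAIN when someone else holds it; clear should_relinquish; start the poll timer (lines 1087-1088); and zero both directions' state so the machine starts idle. The timer is only needed when the port has no usable interrupt, so the start is presumably guarded:

        if (dev->irq == -1) {                   /* assumed: poll-mode only */
                atomic_set (&nl->kill_timer, 0);
                schedule_delayed_work(&nl->timer, 1);
        }
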
1129 struct net_local *nl = netdev_priv(dev);
1130 struct plip_local *snd = &nl->snd_data;
1131 struct plip_local *rcv = &nl->rcv_data;
1139 init_completion(&nl->killed_timer_cmp);
1140 atomic_set (&nl->kill_timer, 1);
1141 wait_for_completion(&nl->killed_timer_cmp);
1147 nl->is_deferred = 0;
1148 nl->connection = PLIP_CN_NONE;
1149 if (nl->port_owner) {
1150 parport_release(nl->pardev);
1151 nl->port_owner = 0;
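
plip_close() (lines 1129-1151) must stop the poll timer before tearing anything down, and does it with a completion rather than a cancel: set kill_timer, then sleep until plip_timer_bh() notices and fires killed_timer_cmp (line 395). This guarantees the timer body is not mid-flight when the state below it is reset:

        init_completion(&nl->killed_timer_cmp);
        atomic_set (&nl->kill_timer, 1);
        wait_for_completion(&nl->killed_timer_cmp);

        /* now safe to reset the machine and give up the port */
        nl->is_deferred = 0;
        nl->connection = PLIP_CN_NONE;
        if (nl->port_owner) {
                parport_release(nl->pardev);
                nl->port_owner = 0;
        }
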
1176 struct net_local *nl = netdev_priv(dev);
1179 if (nl->connection != PLIP_CN_NONE) {
1180 nl->should_relinquish = 1;
1184 nl->port_owner = 0; /* Remember that we released the bus */
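
plip_preempt() (lines 1176-1184) is the parport preemption callback: refuse to give up the port mid-connection, but remember to relinquish it once the connection closes (see line 885); otherwise record that the bus is gone. Filled out (the handle-to-dev cast follows the usual parport callback convention):

        static int
        plip_preempt(void *handle)
        {
                struct net_device *dev = handle;
                struct net_local *nl = netdev_priv(dev);

                /* Stand our ground if a connection is in flight. */
                if (nl->connection != PLIP_CN_NONE) {
                        nl->should_relinquish = 1;      /* release later */
                        return 1;                       /* refuse for now */
                }

                nl->port_owner = 0;     /* Remember that we released the bus */
                return 0;
        }
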
1192 struct net_local *nl = netdev_priv(dev);
1194 if (nl->port_owner) {
1197 if (!parport_claim(nl->pardev))
1208 if (!parport_claim(nl->pardev)) {
1209 nl->port_owner = 1;
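
plip_wakeup() (lines 1192-1209) is the matching callback for when the port becomes free again: lines 1194-1197 handle the odd case of being woken while still marked as owner, and lines 1208-1209 are the normal reclaim. The reclaim, roughly (quiescing the data lines afterwards is my assumption):

        if (!parport_claim(nl->pardev)) {
                nl->port_owner = 1;
                /* clear the data lines the previous owner left behind */
        }
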
1219 struct net_local *nl = netdev_priv(dev);
1230 pc->trigger = nl->trigger;
1231 pc->nibble = nl->nibble;
1236 nl->trigger = pc->trigger;
1237 nl->nibble = pc->nibble;
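
The ioctl (lines 1219-1237) exposes the two handshake budgets through struct plipconf: one command copies nl->trigger and nl->nibble out to the caller (lines 1230-1231), its counterpart stores new values (lines 1236-1237). Shaped roughly like the following, assuming the PLIP_GET_TIMEOUT/PLIP_SET_TIMEOUT commands from include/uapi/linux/if_plip.h and a privilege check on the set side:

        switch (pc->pcmd) {
        case PLIP_GET_TIMEOUT:
                pc->trigger = nl->trigger;
                pc->nibble  = nl->nibble;
                break;
        case PLIP_SET_TIMEOUT:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                nl->trigger = pc->trigger;
                nl->nibble  = pc->nibble;
                break;
        default:
                return -EOPNOTSUPP;
        }
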
1270 struct net_local *nl;
1295 nl = netdev_priv(dev);
1296 nl->dev = dev;
1304 nl->pardev = parport_register_dev_model(port, dev->name,
1307 if (!nl->pardev) {
1333 parport_unregister_device(nl->pardev);
1371 struct net_local *nl = netdev_priv(dev);
1373 if (nl->port_owner)
1374 parport_release(nl->pardev);
1375 parport_unregister_device(nl->pardev);
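
Finally, setup and teardown (lines 1270-1375): the attach side registers a parport device per port via parport_register_dev_model() and unregisters it again on a later setup failure (line 1333); the teardown fragment (lines 1371-1375) shows the required ordering, namely that a still-claimed port must be released before the device is unregistered:

        struct net_local *nl = netdev_priv(dev);

        if (nl->port_owner)
                parport_release(nl->pardev);    /* release first... */
        parport_unregister_device(nl->pardev);  /* ...then unregister */
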