
Lines Matching refs:nl

290 struct net_local *nl = netdev_priv(dev); in plip_init_netdev() local
301 nl->port_owner = 0; in plip_init_netdev()
304 nl->trigger = PLIP_TRIGGER_WAIT; in plip_init_netdev()
305 nl->nibble = PLIP_NIBBLE_WAIT; in plip_init_netdev()
308 INIT_WORK(&nl->immediate, plip_bh); in plip_init_netdev()
309 INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh); in plip_init_netdev()
312 INIT_DELAYED_WORK(&nl->timer, plip_timer_bh); in plip_init_netdev()
314 spin_lock_init(&nl->lock); in plip_init_netdev()
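
The plip_init_netdev() lines above (source lines 290-314 of the PLIP driver, plip.c) fill in the per-device private area returned by netdev_priv(): work items for the immediate and deferred bottom halves, a polling work for IRQ-less operation, and the spinlock that guards the connection state. Below is a minimal sketch of that setup pattern; the demo_* names and the demo_local layout are stand-ins modelled only on the fields this listing touches, not the real struct net_local from plip.c.

    #include <linux/netdevice.h>
    #include <linux/workqueue.h>
    #include <linux/spinlock.h>
    #include <linux/atomic.h>
    #include <linux/completion.h>
    #include <linux/parport.h>

    /* Stand-in private area (assumed layout); the device must have been
     * allocated with room for it, e.g. alloc_netdev(sizeof(*nl), ...). */
    struct demo_local {
            struct net_device *dev;
            struct pardevice *pardev;       /* parport handle              */
            struct work_struct immediate;   /* run the state machine now   */
            struct delayed_work deferred;   /* retry after a short delay   */
            struct delayed_work timer;      /* poll when no IRQ is wired   */
            spinlock_t lock;                /* guards the fields below     */
            int connection;                 /* current state               */
            int timeout_count;
            int is_deferred;
            int port_owner;
            int should_relinquish;
            struct sk_buff *snd_skb;        /* frame queued for sending    */
            atomic_t kill_timer;
            struct completion killed_timer_cmp;
    };

    static void demo_bh(struct work_struct *work);
    static void demo_kick_bh(struct work_struct *work);
    static void demo_timer_bh(struct work_struct *work);

    /* Mirrors the initialization plip_init_netdev() performs above. */
    static void demo_init_netdev(struct net_device *dev)
    {
            struct demo_local *nl = netdev_priv(dev);

            nl->dev = dev;
            nl->port_owner = 0;
            INIT_WORK(&nl->immediate, demo_bh);
            INIT_DELAYED_WORK(&nl->deferred, demo_kick_bh);
            INIT_DELAYED_WORK(&nl->timer, demo_timer_bh);
            spin_lock_init(&nl->lock);
    }
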
323 struct net_local *nl = in plip_kick_bh() local
326 if (nl->is_deferred) in plip_kick_bh()
327 schedule_work(&nl->immediate); in plip_kick_bh()
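
plip_kick_bh() (lines 323-327; the listing truncates the initializer on line 323) recovers its private structure from the work pointer and re-queues the immediate work if a retry had been deferred. A sketch of that container_of() pattern, continuing the demo_local stand-in above:

    /* Handler for the delayed work queued as &nl->deferred: the workqueue
     * passes &deferred.work, so container_of() gets back to demo_local. */
    static void demo_kick_bh(struct work_struct *work)
    {
            struct demo_local *nl =
                    container_of(work, struct demo_local, deferred.work);

            if (nl->is_deferred)
                    schedule_work(&nl->immediate);
    }
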
341 static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
351 typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
367 struct net_local *nl = container_of(work, struct net_local, immediate); in plip_bh() local
368 struct plip_local *snd = &nl->snd_data; in plip_bh()
369 struct plip_local *rcv = &nl->rcv_data; in plip_bh()
373 nl->is_deferred = 0; in plip_bh()
374 f = connection_state_table[nl->connection]; in plip_bh()
375 if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK && in plip_bh()
376 (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) { in plip_bh()
377 nl->is_deferred = 1; in plip_bh()
378 schedule_delayed_work(&nl->deferred, 1); in plip_bh()
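
plip_bh() (lines 367-378) is a table-driven state machine: it indexes connection_state_table by nl->connection, calls the handler through the plip_func pointer type declared at line 351, and when the handler and the timeout fallback both report an error it re-arms itself one jiffy later through the deferred work. A hedged sketch of that dispatch-and-retry shape; the states and the dummy handler below are stand-ins, not the plip.c handlers:

    /* Stand-in states, in the spirit of PLIP_CN_NONE..PLIP_CN_ERROR. */
    enum { DEMO_CN_NONE, DEMO_CN_RECEIVE, DEMO_CN_SEND,
           DEMO_CN_CLOSING, DEMO_CN_ERROR };
    #define DEMO_OK 0

    typedef int (*demo_func)(struct net_device *dev, struct demo_local *nl);

    static int demo_none(struct net_device *dev, struct demo_local *nl)
    {
            return DEMO_OK;                 /* idle state: nothing to do */
    }

    /* One handler per state; plip.c installs plip_receive_packet,
     * plip_send_packet, plip_connection_close and plip_error here. */
    static const demo_func demo_state_table[] = {
            [DEMO_CN_NONE]    = demo_none,
            [DEMO_CN_RECEIVE] = demo_none,
            [DEMO_CN_SEND]    = demo_none,
            [DEMO_CN_CLOSING] = demo_none,
            [DEMO_CN_ERROR]   = demo_none,
    };

    /* Immediate bottom half: dispatch on the current state; on failure,
     * mark a retry as deferred and run again one jiffy later. */
    static void demo_bh(struct work_struct *work)
    {
            struct demo_local *nl =
                    container_of(work, struct demo_local, immediate);

            nl->is_deferred = 0;
            if (demo_state_table[nl->connection](nl->dev, nl) != DEMO_OK) {
                    nl->is_deferred = 1;
                    schedule_delayed_work(&nl->deferred, 1);
            }
    }
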
385 struct net_local *nl = in plip_timer_bh() local
388 if (!(atomic_read (&nl->kill_timer))) { in plip_timer_bh()
389 plip_interrupt (nl->dev); in plip_timer_bh()
391 schedule_delayed_work(&nl->timer, 1); in plip_timer_bh()
394 complete(&nl->killed_timer_cmp); in plip_timer_bh()
399 plip_bh_timeout_error(struct net_device *dev, struct net_local *nl, in plip_bh_timeout_error() argument
414 spin_lock_irq(&nl->lock); in plip_bh_timeout_error()
415 if (nl->connection == PLIP_CN_SEND) { in plip_bh_timeout_error()
418 nl->timeout_count++; in plip_bh_timeout_error()
419 if ((error == HS_TIMEOUT && nl->timeout_count <= 10) || in plip_bh_timeout_error()
420 nl->timeout_count <= 3) { in plip_bh_timeout_error()
421 spin_unlock_irq(&nl->lock); in plip_bh_timeout_error()
432 } else if (nl->connection == PLIP_CN_RECEIVE) { in plip_bh_timeout_error()
435 spin_unlock_irq(&nl->lock); in plip_bh_timeout_error()
439 if (++nl->timeout_count <= 3) { in plip_bh_timeout_error()
440 spin_unlock_irq(&nl->lock); in plip_bh_timeout_error()
460 spin_unlock_irq(&nl->lock); in plip_bh_timeout_error()
467 nl->connection = PLIP_CN_ERROR; in plip_bh_timeout_error()
474 plip_none(struct net_device *dev, struct net_local *nl, in plip_none() argument
585 plip_receive_packet(struct net_device *dev, struct net_local *nl, in plip_receive_packet() argument
588 unsigned short nibble_timeout = nl->nibble; in plip_receive_packet()
604 if (plip_receive(nl->trigger, dev, in plip_receive_packet()
608 nl->is_deferred = 1; in plip_receive_packet()
609 nl->connection = PLIP_CN_SEND; in plip_receive_packet()
610 schedule_delayed_work(&nl->deferred, 1); in plip_receive_packet()
680 spin_lock_irq(&nl->lock); in plip_receive_packet()
682 nl->connection = PLIP_CN_SEND; in plip_receive_packet()
683 spin_unlock_irq(&nl->lock); in plip_receive_packet()
684 schedule_work(&nl->immediate); in plip_receive_packet()
689 nl->connection = PLIP_CN_NONE; in plip_receive_packet()
690 spin_unlock_irq(&nl->lock); in plip_receive_packet()
746 plip_send_packet(struct net_device *dev, struct net_local *nl, in plip_send_packet() argument
749 unsigned short nibble_timeout = nl->nibble; in plip_send_packet()
768 cx = nl->trigger; in plip_send_packet()
771 spin_lock_irq(&nl->lock); in plip_send_packet()
772 if (nl->connection == PLIP_CN_RECEIVE) { in plip_send_packet()
773 spin_unlock_irq(&nl->lock); in plip_send_packet()
780 spin_unlock_irq(&nl->lock); in plip_send_packet()
783 if (nl->connection == PLIP_CN_RECEIVE) { in plip_send_packet()
799 nl->timeout_count = 0; in plip_send_packet()
802 spin_unlock_irq(&nl->lock); in plip_send_packet()
850 nl->connection = PLIP_CN_CLOSING; in plip_send_packet()
851 nl->is_deferred = 1; in plip_send_packet()
852 schedule_delayed_work(&nl->deferred, 1); in plip_send_packet()
861 plip_connection_close(struct net_device *dev, struct net_local *nl, in plip_connection_close() argument
864 spin_lock_irq(&nl->lock); in plip_connection_close()
865 if (nl->connection == PLIP_CN_CLOSING) { in plip_connection_close()
866 nl->connection = PLIP_CN_NONE; in plip_connection_close()
869 spin_unlock_irq(&nl->lock); in plip_connection_close()
870 if (nl->should_relinquish) { in plip_connection_close()
871 nl->should_relinquish = nl->port_owner = 0; in plip_connection_close()
872 parport_release(nl->pardev); in plip_connection_close()
879 plip_error(struct net_device *dev, struct net_local *nl, in plip_error() argument
888 nl->connection = PLIP_CN_NONE; in plip_error()
889 nl->should_relinquish = 0; in plip_error()
895 nl->is_deferred = 1; in plip_error()
896 schedule_delayed_work(&nl->deferred, 1); in plip_error()
907 struct net_local *nl; in plip_interrupt() local
912 nl = netdev_priv(dev); in plip_interrupt()
913 rcv = &nl->rcv_data; in plip_interrupt()
915 spin_lock_irqsave (&nl->lock, flags); in plip_interrupt()
921 spin_unlock_irqrestore (&nl->lock, flags); in plip_interrupt()
928 switch (nl->connection) { in plip_interrupt()
934 nl->connection = PLIP_CN_RECEIVE; in plip_interrupt()
935 nl->timeout_count = 0; in plip_interrupt()
936 schedule_work(&nl->immediate); in plip_interrupt()
950 spin_unlock_irqrestore(&nl->lock, flags); in plip_interrupt()
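
plip_interrupt() (lines 907-950) runs in interrupt context, or is called by hand from the poll work, so it takes the same lock with spin_lock_irqsave() instead of the spin_lock_irq() used on the process-context paths; it switches an idle link into the receive state and kicks the bottom half. A sketch of that pattern with a simplified handler body:

    /* parport-style interrupt callback: dev_id is the net_device. */
    static void demo_interrupt(void *dev_id)
    {
            struct net_device *dev = dev_id;
            struct demo_local *nl = netdev_priv(dev);
            unsigned long flags;

            spin_lock_irqsave(&nl->lock, flags);
            if (nl->connection == DEMO_CN_NONE) {
                    /* Peer signalled: start receiving and let the work
                     * item drive the transfer in process context. */
                    nl->connection = DEMO_CN_RECEIVE;
                    nl->timeout_count = 0;
                    schedule_work(&nl->immediate);
            }
            spin_unlock_irqrestore(&nl->lock, flags);
    }
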
956 struct net_local *nl = netdev_priv(dev); in plip_tx_packet() local
957 struct plip_local *snd = &nl->snd_data; in plip_tx_packet()
963 if (!nl->port_owner) { in plip_tx_packet()
964 if (parport_claim(nl->pardev)) in plip_tx_packet()
966 nl->port_owner = 1; in plip_tx_packet()
980 spin_lock_irq(&nl->lock); in plip_tx_packet()
984 if (nl->connection == PLIP_CN_NONE) { in plip_tx_packet()
985 nl->connection = PLIP_CN_SEND; in plip_tx_packet()
986 nl->timeout_count = 0; in plip_tx_packet()
988 schedule_work(&nl->immediate); in plip_tx_packet()
989 spin_unlock_irq(&nl->lock); in plip_tx_packet()
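
plip_tx_packet() (lines 956-989) claims the parallel port lazily on the first transmit, parks the outgoing frame and switches an idle link into the SEND state under the lock before kicking the immediate work. A simplified sketch; the real driver's snd_data bookkeeping is reduced to the single snd_skb stand-in pointer:

    static netdev_tx_t demo_tx_packet(struct sk_buff *skb,
                                      struct net_device *dev)
    {
            struct demo_local *nl = netdev_priv(dev);

            /* Claim the port on first use; give up if another parport
             * client currently holds it. */
            if (!nl->port_owner) {
                    if (parport_claim(nl->pardev))
                            return NETDEV_TX_BUSY;
                    nl->port_owner = 1;
            }

            netif_stop_queue(dev);      /* one frame in flight at a time */

            spin_lock_irq(&nl->lock);
            nl->snd_skb = skb;
            if (nl->connection == DEMO_CN_NONE) {
                    nl->connection = DEMO_CN_SEND;
                    nl->timeout_count = 0;
            }
            schedule_work(&nl->immediate);
            spin_unlock_irq(&nl->lock);

            return NETDEV_TX_OK;
    }
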
1053 struct net_local *nl = netdev_priv(dev); in plip_open() local
1057 if (!nl->port_owner) { in plip_open()
1058 if (parport_claim(nl->pardev)) return -EAGAIN; in plip_open()
1059 nl->port_owner = 1; in plip_open()
1062 nl->should_relinquish = 0; in plip_open()
1071 atomic_set (&nl->kill_timer, 0); in plip_open()
1072 schedule_delayed_work(&nl->timer, 1); in plip_open()
1076 nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE; in plip_open()
1077 nl->rcv_data.skb = nl->snd_data.skb = NULL; in plip_open()
1078 nl->connection = PLIP_CN_NONE; in plip_open()
1079 nl->is_deferred = 0; in plip_open()
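
plip_open() (lines 1053-1079) claims the port if the driver does not already own it, resets the state machine, clears the kill flag and starts the one-jiffy polling work. A sketch of an ndo_open along those lines:

    static int demo_open(struct net_device *dev)
    {
            struct demo_local *nl = netdev_priv(dev);

            if (!nl->port_owner) {
                    if (parport_claim(nl->pardev))
                            return -EAGAIN;     /* port busy elsewhere */
                    nl->port_owner = 1;
            }
            nl->should_relinquish = 0;

            /* Fresh state machine, then start the poll work. */
            nl->connection = DEMO_CN_NONE;
            nl->is_deferred = 0;
            atomic_set(&nl->kill_timer, 0);
            schedule_delayed_work(&nl->timer, 1);

            netif_start_queue(dev);
            return 0;
    }
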
1113 struct net_local *nl = netdev_priv(dev); in plip_close() local
1114 struct plip_local *snd = &nl->snd_data; in plip_close()
1115 struct plip_local *rcv = &nl->rcv_data; in plip_close()
1123 init_completion(&nl->killed_timer_cmp); in plip_close()
1124 atomic_set (&nl->kill_timer, 1); in plip_close()
1125 wait_for_completion(&nl->killed_timer_cmp); in plip_close()
1131 nl->is_deferred = 0; in plip_close()
1132 nl->connection = PLIP_CN_NONE; in plip_close()
1133 if (nl->port_owner) { in plip_close()
1134 parport_release(nl->pardev); in plip_close()
1135 nl->port_owner = 0; in plip_close()
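
plip_timer_bh() (lines 385-394) and plip_close() (lines 1113-1135) cooperate through an atomic kill flag and a completion: the poll work re-arms itself every jiffy until close sets kill_timer, then it signals killed_timer_cmp so close knows the work has really stopped before the port is released. Both halves of that handshake, sketched with the stand-in names:

    /* Self-rearming poll work: drives the receiver by hand once per jiffy
     * until asked to stop, then wakes whoever is waiting in close(). */
    static void demo_timer_bh(struct work_struct *work)
    {
            struct demo_local *nl =
                    container_of(work, struct demo_local, timer.work);

            if (!atomic_read(&nl->kill_timer)) {
                    demo_interrupt(nl->dev);
                    schedule_delayed_work(&nl->timer, 1);
            } else {
                    complete(&nl->killed_timer_cmp);
            }
    }

    static int demo_close(struct net_device *dev)
    {
            struct demo_local *nl = netdev_priv(dev);

            netif_stop_queue(dev);

            /* Ask the poll work to stop and wait until it really has. */
            init_completion(&nl->killed_timer_cmp);
            atomic_set(&nl->kill_timer, 1);
            wait_for_completion(&nl->killed_timer_cmp);

            nl->is_deferred = 0;
            nl->connection = DEMO_CN_NONE;
            if (nl->port_owner) {
                    parport_release(nl->pardev);
                    nl->port_owner = 0;
            }
            return 0;
    }
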
1160 struct net_local *nl = netdev_priv(dev); in plip_preempt() local
1163 if (nl->connection != PLIP_CN_NONE) { in plip_preempt()
1164 nl->should_relinquish = 1; in plip_preempt()
1168 nl->port_owner = 0; /* Remember that we released the bus */ in plip_preempt()
1176 struct net_local *nl = netdev_priv(dev); in plip_wakeup() local
1178 if (nl->port_owner) { in plip_wakeup()
1181 if (!parport_claim(nl->pardev)) in plip_wakeup()
1192 if (!parport_claim(nl->pardev)) { in plip_wakeup()
1193 nl->port_owner = 1; in plip_wakeup()
1202 struct net_local *nl = netdev_priv(dev); in plip_ioctl() local
1210 pc->trigger = nl->trigger; in plip_ioctl()
1211 pc->nibble = nl->nibble; in plip_ioctl()
1216 nl->trigger = pc->trigger; in plip_ioctl()
1217 nl->nibble = pc->nibble; in plip_ioctl()
1250 struct net_local *nl; in plip_attach() local
1274 nl = netdev_priv(dev); in plip_attach()
1275 nl->dev = dev; in plip_attach()
1276 nl->pardev = parport_register_device(port, dev->name, plip_preempt, in plip_attach()
1280 if (!nl->pardev) { in plip_attach()
1306 parport_unregister_device(nl->pardev); in plip_attach()
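
plip_attach() (lines 1250-1306) registers with the parport layer through the parport_register_device() call seen at line 1276 (truncated in the listing; the seven-argument legacy form is assumed below, and newer parport drivers typically register through parport_register_dev_model() instead). The preempt and wakeup callbacks implement the port-sharing handshake that plip_preempt() (lines 1160-1168) and plip_wakeup() (lines 1176-1193) perform: refuse to give the port up mid-transfer, and re-claim it later. A hedged sketch with simplified callbacks and assumed names:

    /* Another parport client wants the port: refuse while a transfer is
     * in progress, otherwise note that we no longer own the bus. */
    static int demo_preempt(void *handle)
    {
            struct net_device *dev = handle;
            struct demo_local *nl = netdev_priv(dev);

            if (nl->connection != DEMO_CN_NONE) {
                    nl->should_relinquish = 1;  /* hand it back when idle */
                    return 1;                   /* do not preempt us now  */
            }
            nl->port_owner = 0;
            return 0;
    }

    /* The port has become free again: try to take it back. */
    static void demo_wakeup(void *handle)
    {
            struct net_device *dev = handle;
            struct demo_local *nl = netdev_priv(dev);

            if (!nl->port_owner && !parport_claim(nl->pardev))
                    nl->port_owner = 1;
    }

    static int demo_attach(struct parport *port, struct net_device *dev)
    {
            struct demo_local *nl = netdev_priv(dev);

            nl->dev = dev;
            nl->pardev = parport_register_device(port, dev->name,
                                                 demo_preempt, demo_wakeup,
                                                 demo_interrupt, 0, dev);
            if (!nl->pardev)
                    return -ENODEV;
            return 0;
    }
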
1333 struct net_local *nl = netdev_priv(dev); in plip_cleanup_module() local
1335 if (nl->port_owner) in plip_cleanup_module()
1336 parport_release(nl->pardev); in plip_cleanup_module()
1337 parport_unregister_device(nl->pardev); in plip_cleanup_module()