Lines matching "slave" and "dev" in drivers/net/eql.c
/*
 * Equalizer Load-balancer for serial network interfaces.
 *
 * (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
 *   ...
 *   Phone: 1-703-847-0040 ext 103
 *
 * Changelog excerpts:
 *	Added one-line eql_remove_slave patch.
 *	slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *		...
 *	Broken set-bit locking snapshot
 *	Log trimmed of non-pertinent 1.x branch messages
 *	shock.. the kept-new-versions could have zonked working
 *	Sorry, I had to rewrite most of this for 2.5.x -DaveM
 */
static int eql_open(struct net_device *dev);
static int eql_close(struct net_device *dev);
static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);

#define eql_is_slave(dev)	((dev->flags & IFF_SLAVE) == IFF_SLAVE)
#define eql_is_master(dev)	((dev->flags & IFF_MASTER) == IFF_MASTER)

static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
static void eql_timer(struct timer_list *t)
{
	equalizer_t *eql = from_timer(eql, t, timer);
	struct list_head *this, *tmp, *head;

	spin_lock(&eql->queue.lock);
	head = &eql->queue.all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *slave = list_entry(this, slave_t, list);

		if ((slave->dev->flags & IFF_UP) == IFF_UP) {
			slave->bytes_queued -= slave->priority_Bps;
			if (slave->bytes_queued < 0)
				slave->bytes_queued = 0;
		} else {
			eql_kill_one_slave(&eql->queue, slave);
		}
	}
	spin_unlock(&eql->queue.lock);

	eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;
	add_timer(&eql->timer);
}
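/*
 * Reading the timer above: bytes_queued acts as a leaky bucket that
 * eql_timer() drains by priority_Bps once per EQL_DEFAULT_RESCHED_IVAL.
 * Illustrative figures (not from the matched lines): a slave enslaved at
 * priority 28800 bit/s gets priority_Bps = 28800 / 8 = 3600, so roughly
 * 3600 queued bytes are forgiven per interval; a slave holding more than
 * that looks increasingly loaded to the scheduler and is handed fewer
 * packets until its backlog drains.
 */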
static void __init eql_setup(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	timer_setup(&eql->timer, eql_timer, 0);
	eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;

	spin_lock_init(&eql->queue.lock);
	INIT_LIST_HEAD(&eql->queue.all_slaves);
	eql->queue.master_dev = dev;

	dev->netdev_ops = &eql_netdev_ops;

	dev->mtu = EQL_DEFAULT_MTU;	/* set to 576 in if_eql.h */
	dev->flags = IFF_MASTER;

	dev->type = ARPHRD_SLIP;
	dev->tx_queue_len = 5;		/* Hands them off fast */
	netif_keep_dst(dev);
}
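/*
 * dev->netdev_ops above points at an ops table the matched lines do not
 * show.  A minimal sketch of what it plausibly contains, built only from
 * the handlers declared earlier and assuming the pre-5.15 ndo_do_ioctl
 * hook (which matches the eql_ioctl() signature above); newer kernels
 * route these private ioctls through ndo_siocdevprivate instead.
 */
static const struct net_device_ops eql_netdev_ops = {
	.ndo_open	= eql_open,
	.ndo_stop	= eql_close,
	.ndo_do_ioctl	= eql_ioctl,
	.ndo_start_xmit	= eql_slave_xmit,
};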
static int eql_open(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	netdev_info(dev,
		    "remember to turn off Van-Jacobson compression on your slave devices\n");

	BUG_ON(!list_empty(&eql->queue.all_slaves));

	eql->min_slaves = 1;
	eql->max_slaves = EQL_DEFAULT_MAX_SLAVES; /* 4 usually... */

	add_timer(&eql->timer);

	return 0;
}
static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)
{
	list_del(&slave->list);
	queue->num_slaves--;
	slave->dev->flags &= ~IFF_SLAVE;
	dev_put(slave->dev);
	kfree(slave);
}
/* in eql_kill_slave_queue(): */
	spin_lock_bh(&queue->lock);

	head = &queue->all_slaves;
	/* ... */
	spin_unlock_bh(&queue->lock);
static int eql_close(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	del_timer_sync(&eql->timer);
	eql_kill_slave_queue(&eql->queue);

	return 0;
}
static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq);
static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq);

static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *sc);
static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *sc);

static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mc);
static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mc);
static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (cmd != EQL_GETMASTRCFG && cmd != EQL_GETSLAVECFG &&
	    !capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case EQL_ENSLAVE:
		return eql_enslave(dev, ifr->ifr_data);
	case EQL_EMANCIPATE:
		return eql_emancipate(dev, ifr->ifr_data);
	case EQL_GETSLAVECFG:
		return eql_g_slave_cfg(dev, ifr->ifr_data);
	case EQL_SETSLAVECFG:
		return eql_s_slave_cfg(dev, ifr->ifr_data);
	case EQL_GETMASTRCFG:
		return eql_g_master_cfg(dev, ifr->ifr_data);
	case EQL_SETMASTRCFG:
		return eql_s_master_cfg(dev, ifr->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
}
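/*
 * How these ioctls are reached from user space -- a minimal sketch,
 * assuming the uapi definitions in <linux/if_eql.h> (EQL_ENSLAVE is a
 * SIOCDEVPRIVATE-range command that carries a slaving_request_t through
 * ifr_data).  The interface names and priority value are illustrative.
 */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/if_eql.h>

static int enslave_example(void)
{
	struct ifreq ifr;
	slaving_request_t srq;
	int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	memset(&srq, 0, sizeof(srq));
	strncpy(ifr.ifr_name, "eql", IFNAMSIZ - 1);	/* master device */
	strncpy(srq.slave_name, "ppp0", sizeof(srq.slave_name) - 1);
	srq.priority = 28800;				/* line speed, bit/s */
	ifr.ifr_data = (char *)&srq;

	ret = ioctl(fd, EQL_ENSLAVE, &ifr);		/* handled by eql_enslave() */
	close(fd);
	return ret;
}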
/* queue->lock must be held */
static slave_t *__eql_schedule_slaves(slave_queue_t *queue)
{
	/* Make a pass to set the best slave. */
	head = &queue->all_slaves;
	/* ... */
		slave_t *slave = list_entry(this, slave_t, list);

		/* Go through the slave list once, updating best_slave
		 * ...
		 */
		bytes_queued = slave->bytes_queued;
		priority_Bps = slave->priority_Bps;
		if ((slave->dev->flags & IFF_UP) == IFF_UP) {
			slave_load = (~0UL - (~0UL / 2)) -
				/* ... */
				best_slave = slave;
		} else {
			/* We found a dead slave, kill it. */
			eql_kill_one_slave(queue, slave);
		}
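/*
 * The matched lines above omit the loop header and the comparison that
 * actually selects the slave.  A hedged sketch of the core of that pass,
 * reconstructed from mainline eql.c (details may differ by version):
 *
 *	if ((slave->dev->flags & IFF_UP) == IFF_UP) {
 *		slave_load = (~0UL - (~0UL / 2)) -
 *			(priority_Bps) + bytes_queued * 8;
 *
 *		if (slave_load < best_load) {
 *			best_load = slave_load;
 *			best_slave = slave;
 *		}
 *	}
 *
 * Worked example (illustrative numbers): (~0UL - ~0UL / 2) is a constant
 * bias, so the slave minimising  bytes_queued * 8 - priority_Bps  wins.
 *
 *	slave A: 57600 bit/s -> priority_Bps = 7200, 500 bytes queued
 *		 load = bias - 7200 + 4000 = bias - 3200
 *	slave B: 28800 bit/s -> priority_Bps = 3600, 200 bytes queued
 *		 load = bias - 3600 + 1600 = bias - 2000
 *
 * A's load is lower, so __eql_schedule_slaves() returns slave A: faster
 * links with shorter backlogs are preferred.
 */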
static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);
	slave_t *slave;

	spin_lock(&eql->queue.lock);

	slave = __eql_schedule_slaves(&eql->queue);
	if (slave) {
		struct net_device *slave_dev = slave->dev;

		skb->dev = slave_dev;
		skb->priority = TC_PRIO_FILLER;
		slave->bytes_queued += skb->len;
		dev_queue_xmit(skb);
		dev->stats.tx_packets++;
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
	}
	spin_unlock(&eql->queue.lock);

	return NETDEV_TX_OK;
}
/* queue->lock must be held */
static slave_t *__eql_find_slave_dev(slave_queue_t *queue, struct net_device *dev)
{
	struct list_head *this, *head;

	head = &queue->all_slaves;
	list_for_each(this, head) {
		slave_t *slave = list_entry(this, slave_t, list);

		if (slave->dev == dev)
			return slave;
	}
	return NULL;
}

static inline int eql_is_full(slave_queue_t *queue)
{
	equalizer_t *eql = netdev_priv(queue->master_dev);

	if (queue->num_slaves >= eql->max_slaves)
		return 1;
	return 0;
}

/* queue->lock must be held */
static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
{
	if (!eql_is_full(queue)) {
		slave_t *duplicate_slave;

		duplicate_slave = __eql_find_slave_dev(queue, slave->dev);
		if (duplicate_slave)
			eql_kill_one_slave(queue, duplicate_slave);

		dev_hold(slave->dev);
		list_add(&slave->list, &queue->all_slaves);
		queue->num_slaves++;
		slave->dev->flags |= IFF_SLAVE;

		return 0;
	}
	return -ENOSPC;
}
/* in eql_enslave(): */
		return -EFAULT;
		return -ENODEV;

	if ((master_dev->flags & IFF_UP) == IFF_UP) {
		/* slave is not a master & not already a slave: */
				return -ENOMEM;

			s->dev = slave_dev;
			s->priority = srq.priority;
			s->priority_bps = srq.priority;
			s->priority_Bps = srq.priority / 8;

			spin_lock_bh(&eql->queue.lock);
			ret = __eql_insert_slave(&eql->queue, s);
			spin_unlock_bh(&eql->queue.lock);

	return -EINVAL;
/* in eql_emancipate(): */
		return -EFAULT;
		return -ENODEV;

	ret = -EINVAL;
	spin_lock_bh(&eql->queue.lock);
		slave_t *slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			eql_kill_one_slave(&eql->queue, slave);
	spin_unlock_bh(&eql->queue.lock);
static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	equalizer_t *eql = netdev_priv(dev);
	slave_t *slave;

		return -EFAULT;
		return -ENODEV;

	ret = -EINVAL;
	spin_lock_bh(&eql->queue.lock);
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			sc.priority = slave->priority;
	spin_unlock_bh(&eql->queue.lock);

		ret = -EFAULT;
static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	slave_t *slave;

		return -EFAULT;
		return -ENODEV;

	ret = -EINVAL;

	eql = netdev_priv(dev);
	spin_lock_bh(&eql->queue.lock);
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			slave->priority = sc.priority;
			slave->priority_bps = sc.priority;
			slave->priority_Bps = sc.priority / 8;
	spin_unlock_bh(&eql->queue.lock);
static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
	if (eql_is_master(dev)) {
		eql = netdev_priv(dev);
		mc.max_slaves = eql->max_slaves;
		mc.min_slaves = eql->min_slaves;
			return -EFAULT;
	return -EINVAL;
static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
		return -EFAULT;

	if (eql_is_master(dev)) {
		eql = netdev_priv(dev);
		eql->max_slaves = mc.max_slaves;
		eql->min_slaves = mc.min_slaves;

	return -EINVAL;
/* in eql_init_module(): */
		return -ENOMEM;
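/*
 * Context for the -ENOMEM above -- a hedged sketch of the module init as
 * it typically appears in mainline eql.c (reconstructed, not part of the
 * matched lines): a single master device named "eql" is allocated with
 * eql_setup() as the setup callback and then registered.
 */
static struct net_device *dev_eql;

static int __init eql_init_module(void)
{
	int err;

	dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", NET_NAME_UNKNOWN,
			       eql_setup);
	if (!dev_eql)
		return -ENOMEM;

	err = register_netdev(dev_eql);
	if (err)
		free_netdev(dev_eql);
	return err;
}
module_init(eql_init_module);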