Lines matching refs: st (struct mISDNstack references in drivers/isdn/mISDN/stack.c)
27 _queue_message(struct mISDNstack *st, struct sk_buff *skb) in _queue_message() argument
34 skb_queue_tail(&st->msgq, skb); in _queue_message()
35 if (likely(!test_bit(mISDN_STACK_STOPPED, &st->status))) { in _queue_message()
36 test_and_set_bit(mISDN_STACK_WORK, &st->status); in _queue_message()
37 wake_up_interruptible(&st->workq); in _queue_message()
44 _queue_message(ch->st, skb); in mISDN_queue_message()
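
The matches at lines 34-37 show the stack's basic queue-and-wake pattern: the skb is appended to st->msgq and, unless the stack is stopped, the WORK bit is set and the worker thread sleeping on st->workq is woken. A minimal kernel-context sketch of that pattern (the helper name queue_and_wake and the include list are mine; the later sketches assume the same headers and context):

#include <linux/skbuff.h>
#include <linux/mISDNif.h>

/* Queue one message on the stack and wake its worker thread, unless the
 * stack is currently stopped; sketch of the pattern at lines 34-37. */
static void queue_and_wake(struct mISDNstack *st, struct sk_buff *skb)
{
        skb_queue_tail(&st->msgq, skb);
        if (likely(!test_bit(mISDN_STACK_STOPPED, &st->status))) {
                /* mark pending work, then wake the thread in mISDNStackd() */
                test_and_set_bit(mISDN_STACK_WORK, &st->status);
                wake_up_interruptible(&st->workq);
        }
}

mISDN_queue_message() (line 44) is just the channel-level wrapper that forwards ch->st to this queueing step, and l1_receive() (lines 335-338) does the same for frames coming up from layer 1.
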
49 get_channel4id(struct mISDNstack *st, u_int id) in get_channel4id() argument
53 mutex_lock(&st->lmutex); in get_channel4id()
54 list_for_each_entry(ch, &st->layer2, list) { in get_channel4id()
60 mutex_unlock(&st->lmutex); in get_channel4id()
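
get_channel4id() is the lookup used by the routing code below: it walks the stack's layer-2 channel list under st->lmutex and returns the entry whose channel number matches the requested id. A sketch of that lookup, not the original function; the ch->nr field matches its use at line 599, everything else follows the fragments above:

/* Find a layer-2 channel by number; the walk over st->layer2 is
 * serialized by st->lmutex (sketch). */
static struct mISDNchannel *lookup_channel(struct mISDNstack *st, u_int id)
{
        struct mISDNchannel *ch;

        mutex_lock(&st->lmutex);
        list_for_each_entry(ch, &st->layer2, list) {
                if (ch->nr == id)
                        goto unlock;
        }
        ch = NULL;              /* ran off the end: no such channel */
unlock:
        mutex_unlock(&st->lmutex);
        return ch;
}
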
89 send_layer2(struct mISDNstack *st, struct sk_buff *skb) in send_layer2() argument
96 if (!st) in send_layer2()
98 mutex_lock(&st->lmutex); in send_layer2()
100 list_for_each_entry(ch, &st->layer2, list) { in send_layer2()
101 if (list_is_last(&ch->list, &st->layer2)) { in send_layer2()
125 list_for_each_entry(ch, &st->layer2, list) { in send_layer2()
133 ret = st->dev->teimgr->ctrl(st->dev->teimgr, CHECK_DATA, skb); in send_layer2()
142 mutex_unlock(&st->lmutex); in send_layer2()
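
Line 101 hints at how send_layer2() broadcasts: when a message is meant for every layer-2 channel, each channel except the last gets its own copy of the skb and the last one consumes the original, so no copy is wasted. Line 133 shows the other path: an addressed message that no channel claimed is offered to the TEI manager via ctrl(..., CHECK_DATA, skb). A sketch of the broadcast half only; broadcast_layer2 is my name and the error handling is reduced to freeing refused skbs:

/* Deliver one skb to every channel on st->layer2: copy it for all but the
 * last entry, hand the original to the last one (pattern behind line 101). */
static void broadcast_layer2(struct mISDNstack *st, struct sk_buff *skb)
{
        struct mISDNchannel *ch;
        struct sk_buff *cskb;

        mutex_lock(&st->lmutex);
        list_for_each_entry(ch, &st->layer2, list) {
                if (list_is_last(&ch->list, &st->layer2)) {
                        cskb = skb;             /* last receiver takes ownership */
                        skb = NULL;
                } else {
                        cskb = skb_copy(skb, GFP_KERNEL);
                }
                if (cskb && ch->send(ch, cskb))
                        dev_kfree_skb(cskb);    /* receiver refused it */
        }
        mutex_unlock(&st->lmutex);
        if (skb)                                /* empty layer-2 list */
                dev_kfree_skb(skb);
}
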
148 send_msg_to_layer(struct mISDNstack *st, struct sk_buff *skb) in send_msg_to_layer() argument
159 if (!hlist_empty(&st->l1sock.head)) { in send_msg_to_layer()
161 send_socklist(&st->l1sock, skb); in send_msg_to_layer()
163 return st->layer1->send(st->layer1, skb); in send_msg_to_layer()
165 if (!hlist_empty(&st->l1sock.head)) in send_msg_to_layer()
166 send_socklist(&st->l1sock, skb); in send_msg_to_layer()
167 send_layer2(st, skb); in send_msg_to_layer()
170 ch = get_channel4id(st, hh->id); in send_msg_to_layer()
176 __func__, dev_name(&st->dev->dev), hh->prim, in send_msg_to_layer()
180 ch = get_channel4id(st, hh->id); in send_msg_to_layer()
186 __func__, dev_name(&st->dev->dev), hh->prim, in send_msg_to_layer()
191 __func__, dev_name(&st->dev->dev), hh->prim); in send_msg_to_layer()
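
send_msg_to_layer() is the dispatcher fed by the worker thread: layer-1 messages are mirrored to any raw sockets on st->l1sock and passed to st->layer1->send(), layer-2 messages additionally go through send_layer2(), and messages addressed to a single channel are resolved with get_channel4id(st, hh->id), with a warning naming the device and primitive when no channel matches. A compressed sketch of that dispatch; layer_of() is a hypothetical helper standing in for the prim-to-layer decoding that is not visible in the matches:

/* Route one dequeued message to its target layer (sketch).
 * layer_of() is hypothetical: it stands for extracting the layer from hh->prim.
 * send_socklist() is the stack's raw-socket broadcast helper (lines 161, 166). */
static int dispatch(struct mISDNstack *st, struct sk_buff *skb)
{
        struct mISDNhead *hh = mISDN_HEAD_P(skb);
        struct mISDNchannel *ch;

        switch (layer_of(hh->prim)) {
        case 1:                                 /* physical layer */
                if (!hlist_empty(&st->l1sock.head))
                        send_socklist(&st->l1sock, skb);
                return st->layer1->send(st->layer1, skb);
        case 2:                                 /* data link layer */
                if (!hlist_empty(&st->l1sock.head))
                        send_socklist(&st->l1sock, skb);
                send_layer2(st, skb);
                return 0;
        default:                                /* addressed to one channel */
                ch = get_channel4id(st, hh->id);
                if (ch)
                        return ch->send(ch, skb);
                printk(KERN_WARNING "%s: dev(%s) prim(%x): no channel for id(%x)\n",
                       __func__, dev_name(&st->dev->dev), hh->prim, hh->id);
                return -ESRCH;
        }
}
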
197 do_clear_stack(struct mISDNstack *st) in do_clear_stack() argument
204 struct mISDNstack *st = data; in mISDNStackd() local
213 dev_name(&st->dev->dev)); in mISDNStackd()
215 if (st->notify != NULL) { in mISDNStackd()
216 complete(st->notify); in mISDNStackd()
217 st->notify = NULL; in mISDNStackd()
223 if (unlikely(test_bit(mISDN_STACK_STOPPED, &st->status))) { in mISDNStackd()
224 test_and_clear_bit(mISDN_STACK_WORK, &st->status); in mISDNStackd()
225 test_and_clear_bit(mISDN_STACK_RUNNING, &st->status); in mISDNStackd()
227 test_and_set_bit(mISDN_STACK_RUNNING, &st->status); in mISDNStackd()
228 while (test_bit(mISDN_STACK_WORK, &st->status)) { in mISDNStackd()
229 skb = skb_dequeue(&st->msgq); in mISDNStackd()
232 &st->status); in mISDNStackd()
234 skb = skb_dequeue(&st->msgq); in mISDNStackd()
238 &st->status); in mISDNStackd()
241 st->msg_cnt++; in mISDNStackd()
243 err = send_msg_to_layer(st, skb); in mISDNStackd()
249 __func__, dev_name(&st->dev->dev), in mISDNStackd()
256 &st->status))) { in mISDNStackd()
258 &st->status); in mISDNStackd()
260 &st->status); in mISDNStackd()
264 if (test_bit(mISDN_STACK_CLEARING, &st->status)) { in mISDNStackd()
265 test_and_set_bit(mISDN_STACK_STOPPED, &st->status); in mISDNStackd()
266 test_and_clear_bit(mISDN_STACK_RUNNING, &st->status); in mISDNStackd()
267 do_clear_stack(st); in mISDNStackd()
268 test_and_clear_bit(mISDN_STACK_CLEARING, &st->status); in mISDNStackd()
269 test_and_set_bit(mISDN_STACK_RESTART, &st->status); in mISDNStackd()
271 if (test_and_clear_bit(mISDN_STACK_RESTART, &st->status)) { in mISDNStackd()
272 test_and_clear_bit(mISDN_STACK_STOPPED, &st->status); in mISDNStackd()
273 test_and_set_bit(mISDN_STACK_RUNNING, &st->status); in mISDNStackd()
274 if (!skb_queue_empty(&st->msgq)) in mISDNStackd()
276 &st->status); in mISDNStackd()
278 if (test_bit(mISDN_STACK_ABORT, &st->status)) in mISDNStackd()
280 if (st->notify != NULL) { in mISDNStackd()
281 complete(st->notify); in mISDNStackd()
282 st->notify = NULL; in mISDNStackd()
285 st->sleep_cnt++; in mISDNStackd()
287 test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status); in mISDNStackd()
288 wait_event_interruptible(st->workq, (st->status & in mISDNStackd()
292 __func__, dev_name(&st->dev->dev), st->status); in mISDNStackd()
293 test_and_set_bit(mISDN_STACK_ACTIVE, &st->status); in mISDNStackd()
295 test_and_clear_bit(mISDN_STACK_WAKEUP, &st->status); in mISDNStackd()
297 if (test_bit(mISDN_STACK_STOPPED, &st->status)) { in mISDNStackd()
298 test_and_clear_bit(mISDN_STACK_RUNNING, &st->status); in mISDNStackd()
300 st->stopped_cnt++; in mISDNStackd()
307 dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt, in mISDNStackd()
308 st->stopped_cnt); in mISDNStackd()
309 task_cputime(st->thread, &utime, &stime); in mISDNStackd()
312 dev_name(&st->dev->dev), utime, stime); in mISDNStackd()
315 dev_name(&st->dev->dev), st->thread->nvcsw, st->thread->nivcsw); in mISDNStackd()
317 dev_name(&st->dev->dev)); in mISDNStackd()
319 test_and_set_bit(mISDN_STACK_KILLED, &st->status); in mISDNStackd()
320 test_and_clear_bit(mISDN_STACK_RUNNING, &st->status); in mISDNStackd()
321 test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status); in mISDNStackd()
322 test_and_clear_bit(mISDN_STACK_ABORT, &st->status); in mISDNStackd()
323 skb_queue_purge(&st->msgq); in mISDNStackd()
324 st->thread = NULL; in mISDNStackd()
325 if (st->notify != NULL) { in mISDNStackd()
326 complete(st->notify); in mISDNStackd()
327 st->notify = NULL; in mISDNStackd()
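
Read together, the mISDNStackd() matches describe the per-stack worker thread. It first completes st->notify so its creator knows it is running, then loops: while the WORK bit is set it drains st->msgq and hands every skb to send_msg_to_layer(); a set CLEARING bit triggers do_clear_stack(), ABORT ends the loop, and once the queue is empty the thread clears ACTIVE and sleeps in wait_event_interruptible() on st->workq until new work, an abort or an explicit wakeup arrives; on exit it marks the stack KILLED, purges the queue and completes st->notify for delete_stack(). A condensed sketch of that life cycle; the STOPPED/RESTART bookkeeping (lines 223-227, 265-276, 297-300) and the statistics printout (lines 307-315) are left out, and the wake-up mask is an assumption since the condition at line 288 is cut off in the matches:

static int stack_worker(void *data)
{
        struct mISDNstack *st = data;
        struct sk_buff *skb;

        if (st->notify) {                       /* tell the creator we are up */
                complete(st->notify);
                st->notify = NULL;
        }
        for (;;) {
                while (test_bit(mISDN_STACK_WORK, &st->status)) {
                        skb = skb_dequeue(&st->msgq);
                        if (!skb) {
                                /* queue drained: drop WORK, then re-check to
                                 * close the race with the queue-and-wake side */
                                test_and_clear_bit(mISDN_STACK_WORK, &st->status);
                                skb = skb_dequeue(&st->msgq);
                                if (!skb)
                                        continue;
                                test_and_set_bit(mISDN_STACK_WORK, &st->status);
                        }
                        st->msg_cnt++;
                        if (send_msg_to_layer(st, skb))
                                dev_kfree_skb(skb);     /* nobody took it */
                }
                if (test_bit(mISDN_STACK_CLEARING, &st->status)) {
                        do_clear_stack(st);
                        test_and_clear_bit(mISDN_STACK_CLEARING, &st->status);
                }
                if (test_bit(mISDN_STACK_ABORT, &st->status))
                        break;
                if (st->notify) {               /* e.g. delete_stack() is waiting */
                        complete(st->notify);
                        st->notify = NULL;
                }
                st->sleep_cnt++;
                test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status);
                wait_event_interruptible(st->workq, (st->status &
                        ((1UL << mISDN_STACK_WORK) |
                         (1UL << mISDN_STACK_ABORT) |
                         (1UL << mISDN_STACK_WAKEUP))));
                test_and_set_bit(mISDN_STACK_ACTIVE, &st->status);
                test_and_clear_bit(mISDN_STACK_WAKEUP, &st->status);
        }
        /* teardown path (lines 319-327) */
        test_and_set_bit(mISDN_STACK_KILLED, &st->status);
        test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
        test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status);
        test_and_clear_bit(mISDN_STACK_ABORT, &st->status);
        skb_queue_purge(&st->msgq);
        st->thread = NULL;
        if (st->notify) {
                complete(st->notify);
                st->notify = NULL;
        }
        return 0;
}
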
335 if (!ch->st) in l1_receive()
338 _queue_message(ch->st, skb); in l1_receive()
349 __add_layer2(struct mISDNchannel *ch, struct mISDNstack *st) in __add_layer2() argument
351 list_add_tail(&ch->list, &st->layer2); in __add_layer2()
355 add_layer2(struct mISDNchannel *ch, struct mISDNstack *st) in add_layer2() argument
357 mutex_lock(&st->lmutex); in add_layer2()
358 __add_layer2(ch, st); in add_layer2()
359 mutex_unlock(&st->lmutex); in add_layer2()
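
add_layer2() is the locked wrapper around __add_layer2(): registering a layer-2 channel is a plain list_add_tail() onto st->layer2, taken under the same st->lmutex that protects the list walks in send_layer2() and get_channel4id(). A sketch (register_layer2 is my name):

/* Register a layer-2 channel on the stack; st->lmutex serializes this
 * against the list walks in send_layer2() and get_channel4id(). */
static void register_layer2(struct mISDNchannel *ch, struct mISDNstack *st)
{
        mutex_lock(&st->lmutex);
        list_add_tail(&ch->list, &st->layer2);
        mutex_unlock(&st->lmutex);
}
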
365 if (!ch->st || !ch->st->layer1) in st_own_ctrl()
367 return ch->st->layer1->ctrl(ch->st->layer1, cmd, arg); in st_own_ctrl()
389 dev->D.st = newst; in create_stack()
398 dev->teimgr->st = newst; in create_stack()
402 newst->own.st = newst; in create_stack()
443 ch->peer = &dev->D.st->own; in connect_layer1()
444 ch->st = dev->D.st; in connect_layer1()
452 write_lock_bh(&dev->D.st->l1sock.lock); in connect_layer1()
453 sk_add_node(&msk->sk, &dev->D.st->l1sock.head); in connect_layer1()
454 write_unlock_bh(&dev->D.st->l1sock.lock); in connect_layer1()
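
connect_layer1() wires a channel to the device's D-channel stack: ch->peer and ch->st are pointed at dev->D.st, and for raw layer-1 users the owning socket is hooked into st->l1sock under its bh-safe write lock, which is what lets send_msg_to_layer() mirror layer-1 traffic to those sockets (lines 159-166). A sketch of that registration step; struct mISDN_sock, its embedded ch and sk members and the container_of() step are assumptions, the matches only show msk->sk being added at line 453:

/* Attach a raw layer-1 socket channel to the D-channel stack so that
 * layer-1 traffic can be mirrored to it (sketch).
 * struct mISDN_sock and the container_of() step are assumptions. */
static void attach_l1_socket(struct mISDNdevice *dev, struct mISDNchannel *ch)
{
        struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch);

        ch->peer = &dev->D.st->own;     /* talk to the stack's own channel */
        ch->st = dev->D.st;

        write_lock_bh(&dev->D.st->l1sock.lock);
        sk_add_node(&msk->sk, &dev->D.st->l1sock.head);
        write_unlock_bh(&dev->D.st->l1sock.lock);
}
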
475 ch->st = dev->D.st; in connect_Bstack()
487 rq.ch->st = dev->D.st; in connect_Bstack()
500 rq2.ch->st = dev->D.st; in connect_Bstack()
512 rq.ch->st = dev->D.st; in connect_Bstack()
541 ch->peer = &dev->D.st->own; in create_l2entity()
542 ch->st = dev->D.st; in create_l2entity()
556 add_layer2(rq.ch, dev->D.st); in create_l2entity()
558 rq.ch->peer = &dev->D.st->own; in create_l2entity()
574 if (!ch->st) { in delete_channel()
580 dev_name(&ch->st->dev->dev), ch->protocol); in delete_channel()
593 write_lock_bh(&ch->st->l1sock.lock); in delete_channel()
595 write_unlock_bh(&ch->st->l1sock.lock); in delete_channel()
596 ch->st->dev->D.ctrl(&ch->st->dev->D, CLOSE_CHANNEL, NULL); in delete_channel()
599 pch = get_channel4id(ch->st, ch->nr); in delete_channel()
601 mutex_lock(&ch->st->lmutex); in delete_channel()
603 mutex_unlock(&ch->st->lmutex); in delete_channel()
605 pch = ch->st->dev->teimgr; in delete_channel()
612 pch = ch->st->dev->teimgr; in delete_channel()
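
delete_channel() undoes the registrations above: for a layer-1 socket it takes the l1sock write lock (lines 593-595), presumably to drop the socket, and closes the channel (line 596); for a layer-2 channel it re-resolves the entry with get_channel4id() and unlinks it under st->lmutex (lines 599-603), and it also fetches the TEI manager channel (lines 605, 612) for its own teardown. A sketch of the layer-2 removal only; unregister_layer2 is my name:

/* Unlink a layer-2 channel from its stack (sketch of lines 599-603). */
static void unregister_layer2(struct mISDNchannel *ch)
{
        struct mISDNstack *st = ch->st;
        struct mISDNchannel *pch = get_channel4id(st, ch->nr);

        if (pch) {
                mutex_lock(&st->lmutex);
                list_del(&pch->list);
                mutex_unlock(&st->lmutex);
        }
}
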
628 struct mISDNstack *st = dev->D.st; in delete_stack() local
633 dev_name(&st->dev->dev)); in delete_stack()
636 if (st->thread) { in delete_stack()
637 if (st->notify) { in delete_stack()
640 complete(st->notify); in delete_stack()
642 st->notify = &done; in delete_stack()
643 test_and_set_bit(mISDN_STACK_ABORT, &st->status); in delete_stack()
644 test_and_set_bit(mISDN_STACK_WAKEUP, &st->status); in delete_stack()
645 wake_up_interruptible(&st->workq); in delete_stack()
648 if (!list_empty(&st->layer2)) in delete_stack()
651 if (!hlist_empty(&st->l1sock.head)) in delete_stack()
654 kfree(st); in delete_stack()
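
delete_stack() ends with the shutdown handshake against the worker thread: any pending st->notify is completed first (lines 637-640), then a fresh completion is installed, ABORT and WAKEUP are set, the thread is woken through st->workq, and the caller presumably waits on that completion until the thread's teardown path (lines 319-327) signals it; leftover layer-2 or layer-1 socket registrations are only warned about before the stack is freed. A sketch of that handshake; shutdown_stack is my name and the warning texts are placeholders:

/* Ask the stack thread to exit, wait for it, then free the stack (sketch). */
static void shutdown_stack(struct mISDNstack *st)
{
        DECLARE_COMPLETION_ONSTACK(done);

        if (st->thread) {
                st->notify = &done;             /* completed in the thread's teardown */
                test_and_set_bit(mISDN_STACK_ABORT, &st->status);
                test_and_set_bit(mISDN_STACK_WAKEUP, &st->status);
                wake_up_interruptible(&st->workq);
                wait_for_completion(&done);
        }
        if (!list_empty(&st->layer2))
                printk(KERN_WARNING "%s: layer2 list not empty\n", __func__);
        if (!hlist_empty(&st->l1sock.head))
                printk(KERN_WARNING "%s: layer1 sockets still registered\n", __func__);
        kfree(st);
}
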