Lines matching +full:part +full:- +full:number (drivers/misc/sgi-xp/xpc_main.c)
7 * Copyright (c) 2004-2009 Silicon Graphics, Inc. All Rights Reserved.
11 * Cross Partition Communication (XPC) support - standard version.
16 *     partition   This part detects the presence/absence of other
20 *     channel     This part manages the channels and sends/receives
68 .init_name = "", /* set to "part" at xpc_init() time */
130 /* non-zero if any remote partition disengage was timed out */
169 struct xpc_partition *part = from_timer(part, t, disengage_timer); in xpc_timeout_partition_disengage() local
171 DBUG_ON(time_is_after_jiffies(part->disengage_timeout)); in xpc_timeout_partition_disengage()
173 xpc_partition_disengaged_from_timer(part); in xpc_timeout_partition_disengage()
175 DBUG_ON(part->disengage_timeout != 0); in xpc_timeout_partition_disengage()
176 DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part))); in xpc_timeout_partition_disengage()
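
The disengage-timeout fragment above relies on the generic kernel timer API: timer_setup() binds a callback to a timer_list embedded in a structure, and from_timer() recovers the containing structure inside the callback. A minimal sketch of that pairing, with illustrative struct and field names rather than XPC's:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct demo_part {
        unsigned long deadline;         /* expiry time, in jiffies */
        struct timer_list demo_timer;
    };

    static void demo_timeout(struct timer_list *t)
    {
        /* Recover the containing object from the embedded timer. */
        struct demo_part *dp = from_timer(dp, t, demo_timer);

        dp->deadline = 0;               /* handle the expiry */
    }

    static void demo_arm(struct demo_part *dp, unsigned long secs)
    {
        timer_setup(&dp->demo_timer, demo_timeout, 0);
        dp->deadline = jiffies + secs * HZ;
        mod_timer(&dp->demo_timer, dp->deadline);
    }
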
218 struct xpc_partition *part; in xpc_check_remote_hb() local
230 part = &xpc_partitions[partid]; in xpc_check_remote_hb()
232 if (part->act_state == XPC_P_AS_INACTIVE || in xpc_check_remote_hb()
233 part->act_state == XPC_P_AS_DEACTIVATING) { in xpc_check_remote_hb()
237 ret = xpc_arch_ops.get_remote_heartbeat(part); in xpc_check_remote_hb()
239 XPC_DEACTIVATE_PARTITION(part, ret); in xpc_check_remote_hb()
264 (int)(xpc_hb_check_timeout - jiffies), in xpc_hb_checker()
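
The (int)(xpc_hb_check_timeout - jiffies) arithmetic above is the standard wraparound-safe way to compute a remaining interval; ordering comparisons should likewise go through the time_after() family rather than raw operators. A short sketch, with demo_timeout standing in for the real deadline variable:

    #include <linux/jiffies.h>

    static unsigned long demo_timeout;

    static bool demo_hb_due(void)
    {
        /* time_after() is safe across jiffies wraparound;
         * a raw "jiffies > demo_timeout" comparison is not. */
        return time_after(jiffies, demo_timeout);
    }

    static void demo_hb_rearm(int interval_secs)
    {
        demo_timeout = jiffies + interval_secs * HZ;
    }
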
328 * channels has the required number of assigned kthreads to get the work done.
331 xpc_channel_mgr(struct xpc_partition *part) in xpc_channel_mgr() argument
333 while (part->act_state != XPC_P_AS_DEACTIVATING || in xpc_channel_mgr()
334 atomic_read(&part->nchannels_active) > 0 || in xpc_channel_mgr()
335 !xpc_partition_disengaged(part)) { in xpc_channel_mgr()
337 xpc_process_sent_chctl_flags(part); in xpc_channel_mgr()
352 atomic_dec(&part->channel_mgr_requests); in xpc_channel_mgr()
353 (void)wait_event_interruptible(part->channel_mgr_wq, in xpc_channel_mgr()
354 (atomic_read(&part->channel_mgr_requests) > 0 || in xpc_channel_mgr()
355 part->chctl.all_flags != 0 || in xpc_channel_mgr()
356 (part->act_state == XPC_P_AS_DEACTIVATING && in xpc_channel_mgr()
357 atomic_read(&part->nchannels_active) == 0 && in xpc_channel_mgr()
358 xpc_partition_disengaged(part)))); in xpc_channel_mgr()
359 atomic_set(&part->channel_mgr_requests, 1); in xpc_channel_mgr()
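
The wait loop above pairs an atomic request counter with a waitqueue: the channel manager decrements the counter, sleeps until it is positive again (or another exit condition holds), then resets it to 1. Because wait_event_interruptible() re-tests its condition before sleeping, a wakeup posted between the decrement and the sleep is never lost. A reduced sketch with illustrative names:

    #include <linux/atomic.h>
    #include <linux/wait.h>

    static atomic_t demo_requests = ATOMIC_INIT(1);
    static DECLARE_WAIT_QUEUE_HEAD(demo_wq);

    static void demo_mgr_wait(void)
    {
        atomic_dec(&demo_requests);
        (void)wait_event_interruptible(demo_wq,
                                       atomic_read(&demo_requests) > 0);
        atomic_set(&demo_requests, 1);
    }

    static void demo_mgr_kick(void)     /* caller side: post a request */
    {
        atomic_inc(&demo_requests);
        wake_up(&demo_wq);
    }
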
392 xpc_setup_ch_structures(struct xpc_partition *part) in xpc_setup_ch_structures() argument
397 short partid = XPC_PARTID(part); in xpc_setup_ch_structures()
403 DBUG_ON(part->channels != NULL); in xpc_setup_ch_structures()
404 part->channels = kcalloc(XPC_MAX_NCHANNELS, in xpc_setup_ch_structures()
407 if (part->channels == NULL) { in xpc_setup_ch_structures()
414 part->remote_openclose_args = in xpc_setup_ch_structures()
416 GFP_KERNEL, &part-> in xpc_setup_ch_structures()
418 if (part->remote_openclose_args == NULL) { in xpc_setup_ch_structures()
424 part->chctl.all_flags = 0; in xpc_setup_ch_structures()
425 spin_lock_init(&part->chctl_lock); in xpc_setup_ch_structures()
427 atomic_set(&part->channel_mgr_requests, 1); in xpc_setup_ch_structures()
428 init_waitqueue_head(&part->channel_mgr_wq); in xpc_setup_ch_structures()
430 part->nchannels = XPC_MAX_NCHANNELS; in xpc_setup_ch_structures()
432 atomic_set(&part->nchannels_active, 0); in xpc_setup_ch_structures()
433 atomic_set(&part->nchannels_engaged, 0); in xpc_setup_ch_structures()
435 for (ch_number = 0; ch_number < part->nchannels; ch_number++) { in xpc_setup_ch_structures()
436 ch = &part->channels[ch_number]; in xpc_setup_ch_structures()
438 ch->partid = partid; in xpc_setup_ch_structures()
439 ch->number = ch_number; in xpc_setup_ch_structures()
440 ch->flags = XPC_C_DISCONNECTED; in xpc_setup_ch_structures()
442 atomic_set(&ch->kthreads_assigned, 0); in xpc_setup_ch_structures()
443 atomic_set(&ch->kthreads_idle, 0); in xpc_setup_ch_structures()
444 atomic_set(&ch->kthreads_active, 0); in xpc_setup_ch_structures()
446 atomic_set(&ch->references, 0); in xpc_setup_ch_structures()
447 atomic_set(&ch->n_to_notify, 0); in xpc_setup_ch_structures()
449 spin_lock_init(&ch->lock); in xpc_setup_ch_structures()
450 init_completion(&ch->wdisconnect_wait); in xpc_setup_ch_structures()
452 atomic_set(&ch->n_on_msg_allocate_wq, 0); in xpc_setup_ch_structures()
453 init_waitqueue_head(&ch->msg_allocate_wq); in xpc_setup_ch_structures()
454 init_waitqueue_head(&ch->idle_wq); in xpc_setup_ch_structures()
457 ret = xpc_arch_ops.setup_ch_structures(part); in xpc_setup_ch_structures()
465 part->setup_state = XPC_P_SS_SETUP; in xpc_setup_ch_structures()
471 kfree(part->remote_openclose_args_base); in xpc_setup_ch_structures()
472 part->remote_openclose_args = NULL; in xpc_setup_ch_structures()
474 kfree(part->channels); in xpc_setup_ch_structures()
475 part->channels = NULL; in xpc_setup_ch_structures()
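
The tail of xpc_setup_ch_structures() above is its error path: allocations are undone in reverse order and the pointers cleared, reached through goto labels. The same unwind idiom in isolation, with placeholder buffers:

    #include <linux/slab.h>
    #include <linux/errno.h>

    static int demo_setup(void **a, void **b)
    {
        *a = kcalloc(8, sizeof(long), GFP_KERNEL);
        if (*a == NULL)
            return -ENOMEM;

        *b = kzalloc(64, GFP_KERNEL);
        if (*b == NULL)
            goto out_free_a;            /* unwind in reverse order */

        return 0;

    out_free_a:
        kfree(*a);
        *a = NULL;
        return -ENOMEM;
    }
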
484 xpc_teardown_ch_structures(struct xpc_partition *part) in xpc_teardown_ch_structures() argument
486 DBUG_ON(atomic_read(&part->nchannels_engaged) != 0); in xpc_teardown_ch_structures()
487 DBUG_ON(atomic_read(&part->nchannels_active) != 0); in xpc_teardown_ch_structures()
494 DBUG_ON(part->setup_state != XPC_P_SS_SETUP); in xpc_teardown_ch_structures()
495 part->setup_state = XPC_P_SS_WTEARDOWN; in xpc_teardown_ch_structures()
497 wait_event(part->teardown_wq, (atomic_read(&part->references) == 0)); in xpc_teardown_ch_structures()
501 xpc_arch_ops.teardown_ch_structures(part); in xpc_teardown_ch_structures()
503 kfree(part->remote_openclose_args_base); in xpc_teardown_ch_structures()
504 part->remote_openclose_args = NULL; in xpc_teardown_ch_structures()
505 kfree(part->channels); in xpc_teardown_ch_structures()
506 part->channels = NULL; in xpc_teardown_ch_structures()
508 part->setup_state = XPC_P_SS_TORNDOWN; in xpc_teardown_ch_structures()
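
Teardown above first flips setup_state to XPC_P_SS_WTEARDOWN, then blocks until the reference count drains to zero; the matching deref side (compare xpc_part_deref()) must wake the waiter when the last reference is dropped. In outline, with illustrative names:

    #include <linux/atomic.h>
    #include <linux/wait.h>

    static atomic_t demo_refs = ATOMIC_INIT(0);
    static DECLARE_WAIT_QUEUE_HEAD(demo_teardown_wq);

    static void demo_deref(void)
    {
        if (atomic_dec_return(&demo_refs) == 0)
            wake_up(&demo_teardown_wq);
    }

    static void demo_teardown(void)
    {
        /* Block until every outstanding reference is dropped. */
        wait_event(demo_teardown_wq, atomic_read(&demo_refs) == 0);
    }
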
526 struct xpc_partition *part = &xpc_partitions[partid]; in xpc_activating() local
531 spin_lock_irqsave(&part->act_lock, irq_flags); in xpc_activating()
533 if (part->act_state == XPC_P_AS_DEACTIVATING) { in xpc_activating()
534 part->act_state = XPC_P_AS_INACTIVE; in xpc_activating()
535 spin_unlock_irqrestore(&part->act_lock, irq_flags); in xpc_activating()
536 part->remote_rp_pa = 0; in xpc_activating()
541 DBUG_ON(part->act_state != XPC_P_AS_ACTIVATION_REQ); in xpc_activating()
542 part->act_state = XPC_P_AS_ACTIVATING; in xpc_activating()
544 XPC_SET_REASON(part, 0, 0); in xpc_activating()
545 spin_unlock_irqrestore(&part->act_lock, irq_flags); in xpc_activating()
551 if (xpc_setup_ch_structures(part) == xpSuccess) { in xpc_activating()
552 (void)xpc_part_ref(part); /* this will always succeed */ in xpc_activating()
554 if (xpc_arch_ops.make_first_contact(part) == xpSuccess) { in xpc_activating()
555 xpc_mark_partition_active(part); in xpc_activating()
556 xpc_channel_mgr(part); in xpc_activating()
560 xpc_part_deref(part); in xpc_activating()
561 xpc_teardown_ch_structures(part); in xpc_activating()
565 xpc_mark_partition_inactive(part); in xpc_activating()
567 if (part->reason == xpReactivating) { in xpc_activating()
569 xpc_arch_ops.request_partition_reactivation(part); in xpc_activating()
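
xpc_activating() opens with a locked state transition: under act_lock it either backs out (a deactivation request won the race) or commits to ACTIVATING. The shape of that check-and-transition, with placeholder enum and field names:

    #include <linux/spinlock.h>

    enum demo_state { DEMO_INACTIVE, DEMO_ACTIVATION_REQ,
                      DEMO_ACTIVATING, DEMO_DEACTIVATING };

    struct demo_activation {
        spinlock_t act_lock;        /* assumed spin_lock_init()'d at setup */
        enum demo_state act_state;
    };

    static bool demo_begin_activation(struct demo_activation *da)
    {
        unsigned long irq_flags;

        spin_lock_irqsave(&da->act_lock, irq_flags);
        if (da->act_state == DEMO_DEACTIVATING) {
            da->act_state = DEMO_INACTIVE;      /* request canceled */
            spin_unlock_irqrestore(&da->act_lock, irq_flags);
            return false;
        }
        da->act_state = DEMO_ACTIVATING;
        spin_unlock_irqrestore(&da->act_lock, irq_flags);
        return true;
    }
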
576 xpc_activate_partition(struct xpc_partition *part) in xpc_activate_partition() argument
578 short partid = XPC_PARTID(part); in xpc_activate_partition()
582 spin_lock_irqsave(&part->act_lock, irq_flags); in xpc_activate_partition()
584 DBUG_ON(part->act_state != XPC_P_AS_INACTIVE); in xpc_activate_partition()
586 part->act_state = XPC_P_AS_ACTIVATION_REQ; in xpc_activate_partition()
587 XPC_SET_REASON(part, xpCloneKThread, __LINE__); in xpc_activate_partition()
589 spin_unlock_irqrestore(&part->act_lock, irq_flags); in xpc_activate_partition()
594 spin_lock_irqsave(&part->act_lock, irq_flags); in xpc_activate_partition()
595 part->act_state = XPC_P_AS_INACTIVE; in xpc_activate_partition()
596 XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__); in xpc_activate_partition()
597 spin_unlock_irqrestore(&part->act_lock, irq_flags); in xpc_activate_partition()
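
The activation request above is handed off to a fresh kernel thread; on kthread_run() failure the state is rolled back under the lock. A minimal sketch of that create-or-rollback step (thread body and rollback details elided):

    #include <linux/kthread.h>
    #include <linux/err.h>

    static int demo_thread_fn(void *data)
    {
        return 0;       /* thread body elided */
    }

    static int demo_spawn(int id)
    {
        struct task_struct *kthread;

        kthread = kthread_run(demo_thread_fn, NULL, "demo%02d", id);
        if (IS_ERR(kthread)) {
            /* roll back any state set before the attempt */
            return PTR_ERR(kthread);
        }
        return 0;
    }
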
604 int idle = atomic_read(&ch->kthreads_idle); in xpc_activate_kthreads()
605 int assigned = atomic_read(&ch->kthreads_assigned); in xpc_activate_kthreads()
612 needed -= wakeup; in xpc_activate_kthreads()
615 "channel=%d\n", wakeup, ch->partid, ch->number); in xpc_activate_kthreads()
617 /* only wakeup the requested number of kthreads */ in xpc_activate_kthreads()
618 wake_up_nr(&ch->idle_wq, wakeup); in xpc_activate_kthreads()
624 if (needed + assigned > ch->kthreads_assigned_limit) { in xpc_activate_kthreads()
625 needed = ch->kthreads_assigned_limit - assigned; in xpc_activate_kthreads()
631 needed, ch->partid, ch->number); in xpc_activate_kthreads()
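
xpc_activate_kthreads() first wakes up to the needed number of already-idle workers, then caps any remainder by kthreads_assigned_limit before creating new ones; wake_up_nr() wakes at most that many exclusive waiters. Reduced to a sketch with illustrative names:

    #include <linux/kernel.h>
    #include <linux/wait.h>
    #include <linux/atomic.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_idle_wq);
    static atomic_t demo_idle = ATOMIC_INIT(0);

    static int demo_dispatch(int needed)
    {
        int wakeup = min(needed, atomic_read(&demo_idle));

        if (wakeup > 0) {
            wake_up_nr(&demo_idle_wq, wakeup);
            needed -= wakeup;
        }
        return needed;      /* remainder must be covered by new kthreads */
    }
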
640 xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch) in xpc_kthread_waitmsgs() argument
649 !(ch->flags & XPC_C_DISCONNECTING)) { in xpc_kthread_waitmsgs()
653 if (atomic_inc_return(&ch->kthreads_idle) > in xpc_kthread_waitmsgs()
654 ch->kthreads_idle_limit) { in xpc_kthread_waitmsgs()
656 atomic_dec(&ch->kthreads_idle); in xpc_kthread_waitmsgs()
663 (void)wait_event_interruptible_exclusive(ch->idle_wq, in xpc_kthread_waitmsgs()
665 (ch->flags & XPC_C_DISCONNECTING))); in xpc_kthread_waitmsgs()
667 atomic_dec(&ch->kthreads_idle); in xpc_kthread_waitmsgs()
669 } while (!(ch->flags & XPC_C_DISCONNECTING)); in xpc_kthread_waitmsgs()
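
The idle loop above admits a worker to the idle pool only while kthreads_idle stays within its limit, and parks it as an exclusive waiter so wake_up_nr() can rouse exactly as many workers as there are messages. In outline, with placeholder names and a trivial work test:

    #include <linux/wait.h>
    #include <linux/atomic.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_msg_wq);
    static atomic_t demo_nidle = ATOMIC_INIT(0);
    static atomic_t demo_pending = ATOMIC_INIT(0);
    static int demo_idle_limit = 4;

    static bool demo_have_work(void)
    {
        return atomic_read(&demo_pending) > 0;
    }

    static void demo_worker_idle(void)
    {
        if (atomic_inc_return(&demo_nidle) > demo_idle_limit) {
            atomic_dec(&demo_nidle);    /* pool full: exit instead */
            return;
        }
        /* Exclusive waiters are woken one (or wake_up_nr() many) at a time. */
        (void)wait_event_interruptible_exclusive(demo_msg_wq,
                                                 demo_have_work());
        atomic_dec(&demo_nidle);
    }
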
677 struct xpc_partition *part = &xpc_partitions[partid]; in xpc_kthread_start() local
687 ch = &part->channels[ch_number]; in xpc_kthread_start()
689 if (!(ch->flags & XPC_C_DISCONNECTING)) { in xpc_kthread_start()
693 spin_lock_irqsave(&ch->lock, irq_flags); in xpc_kthread_start()
694 if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) { in xpc_kthread_start()
695 ch->flags |= XPC_C_CONNECTEDCALLOUT; in xpc_kthread_start()
696 spin_unlock_irqrestore(&ch->lock, irq_flags); in xpc_kthread_start()
700 spin_lock_irqsave(&ch->lock, irq_flags); in xpc_kthread_start()
701 ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE; in xpc_kthread_start()
702 spin_unlock_irqrestore(&ch->lock, irq_flags); in xpc_kthread_start()
711 n_needed = n_of_deliverable_payloads(ch) - 1; in xpc_kthread_start()
712 if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING)) in xpc_kthread_start()
716 spin_unlock_irqrestore(&ch->lock, irq_flags); in xpc_kthread_start()
719 xpc_kthread_waitmsgs(part, ch); in xpc_kthread_start()
724 spin_lock_irqsave(&ch->lock, irq_flags); in xpc_kthread_start()
725 if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && in xpc_kthread_start()
726 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { in xpc_kthread_start()
727 ch->flags |= XPC_C_DISCONNECTINGCALLOUT; in xpc_kthread_start()
728 spin_unlock_irqrestore(&ch->lock, irq_flags); in xpc_kthread_start()
732 spin_lock_irqsave(&ch->lock, irq_flags); in xpc_kthread_start()
733 ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE; in xpc_kthread_start()
735 spin_unlock_irqrestore(&ch->lock, irq_flags); in xpc_kthread_start()
737 if (atomic_dec_return(&ch->kthreads_assigned) == 0 && in xpc_kthread_start()
738 atomic_dec_return(&part->nchannels_engaged) == 0) { in xpc_kthread_start()
739 xpc_arch_ops.indicate_partition_disengaged(part); in xpc_kthread_start()
747 xpc_part_deref(part); in xpc_kthread_start()
768 u64 args = XPC_PACK_ARGS(ch->partid, ch->number); in xpc_create_kthreads()
769 struct xpc_partition *part = &xpc_partitions[ch->partid]; in xpc_create_kthreads() local
774 while (needed-- > 0) { in xpc_create_kthreads()
782 if (!atomic_inc_not_zero(&ch->kthreads_assigned)) { in xpc_create_kthreads()
784 BUG_ON(!(ch->flags & in xpc_create_kthreads()
789 } else if (ch->flags & XPC_C_DISCONNECTING) { in xpc_create_kthreads()
792 } else if (atomic_inc_return(&ch->kthreads_assigned) == 1 && in xpc_create_kthreads()
793 atomic_inc_return(&part->nchannels_engaged) == 1) { in xpc_create_kthreads()
794 xpc_arch_ops.indicate_partition_engaged(part); in xpc_create_kthreads()
796 (void)xpc_part_ref(part); in xpc_create_kthreads()
800 "xpc%02dc%d", ch->partid, ch->number); in xpc_create_kthreads()
806 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true, in xpc_create_kthreads()
814 if (atomic_dec_return(&ch->kthreads_assigned) == 0 && in xpc_create_kthreads()
815 atomic_dec_return(&part->nchannels_engaged) == 0) { in xpc_create_kthreads()
816 indicate_partition_disengaged(part); in xpc_create_kthreads()
819 xpc_part_deref(part); in xpc_create_kthreads()
821 if (atomic_read(&ch->kthreads_assigned) < in xpc_create_kthreads()
822 ch->kthreads_idle_limit) { in xpc_create_kthreads()
828 spin_lock_irqsave(&ch->lock, irq_flags); in xpc_create_kthreads()
831 spin_unlock_irqrestore(&ch->lock, irq_flags); in xpc_create_kthreads()
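
xpc_create_kthreads() distinguishes two paths with its assigned counter: a woken idle thread may only rejoin a still-live set (atomic_inc_not_zero()), while the creation path uses atomic_inc_return() so that the 0 -> 1 transition, and its one-time "partition engaged" side effect, happens exactly once. The counter idiom with placeholder names:

    #include <linux/atomic.h>

    static atomic_t demo_assigned = ATOMIC_INIT(0);

    static bool demo_join(bool must_already_be_live)
    {
        if (must_already_be_live) {
            /* fails (returns false) once the count has dropped to zero */
            return atomic_inc_not_zero(&demo_assigned);
        }
        if (atomic_inc_return(&demo_assigned) == 1) {
            /* 0 -> 1 transition: do the one-time "engaged" work here */
        }
        return true;
    }
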
843 struct xpc_partition *part; in xpc_disconnect_wait() local
849 part = &xpc_partitions[partid]; in xpc_disconnect_wait()
851 if (!xpc_part_ref(part)) in xpc_disconnect_wait()
854 ch = &part->channels[ch_number]; in xpc_disconnect_wait()
856 if (!(ch->flags & XPC_C_WDISCONNECT)) { in xpc_disconnect_wait()
857 xpc_part_deref(part); in xpc_disconnect_wait()
861 wait_for_completion(&ch->wdisconnect_wait); in xpc_disconnect_wait()
863 spin_lock_irqsave(&ch->lock, irq_flags); in xpc_disconnect_wait()
864 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED)); in xpc_disconnect_wait()
867 if (ch->delayed_chctl_flags) { in xpc_disconnect_wait()
868 if (part->act_state != XPC_P_AS_DEACTIVATING) { in xpc_disconnect_wait()
869 spin_lock(&part->chctl_lock); in xpc_disconnect_wait()
870 part->chctl.flags[ch->number] |= in xpc_disconnect_wait()
871 ch->delayed_chctl_flags; in xpc_disconnect_wait()
872 spin_unlock(&part->chctl_lock); in xpc_disconnect_wait()
875 ch->delayed_chctl_flags = 0; in xpc_disconnect_wait()
878 ch->flags &= ~XPC_C_WDISCONNECT; in xpc_disconnect_wait()
879 spin_unlock_irqrestore(&ch->lock, irq_flags); in xpc_disconnect_wait()
882 xpc_wakeup_channel_mgr(part); in xpc_disconnect_wait()
884 xpc_part_deref(part); in xpc_disconnect_wait()
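
The wdisconnect_wait handshake above is a standard completion: the waiter blocks in wait_for_completion() until the disconnect path calls complete(). A minimal sketch with illustrative names:

    #include <linux/completion.h>

    static DECLARE_COMPLETION(demo_done);

    static void demo_waiter(void)
    {
        wait_for_completion(&demo_done);    /* sleeps until complete() */
    }

    static void demo_finisher(void)
    {
        complete(&demo_done);               /* releases one waiter */
    }
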
892 struct xpc_partition *part; in xpc_setup_partitions() local
899 return -ENOMEM; in xpc_setup_partitions()
911 part = &xpc_partitions[partid]; in xpc_setup_partitions()
913 DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part)); in xpc_setup_partitions()
915 part->activate_IRQ_rcvd = 0; in xpc_setup_partitions()
916 spin_lock_init(&part->act_lock); in xpc_setup_partitions()
917 part->act_state = XPC_P_AS_INACTIVE; in xpc_setup_partitions()
918 XPC_SET_REASON(part, 0, 0); in xpc_setup_partitions()
920 timer_setup(&part->disengage_timer, in xpc_setup_partitions()
923 part->setup_state = XPC_P_SS_UNSET; in xpc_setup_partitions()
924 init_waitqueue_head(&part->teardown_wq); in xpc_setup_partitions()
925 atomic_set(&part->references, 0); in xpc_setup_partitions()
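
The DBUG_ON() above asserts that every xpc_partitions[] entry starts on an L1 cacheline boundary, which avoids false sharing between adjacent entries. One way to express that invariant, assuming a struct padded with ____cacheline_aligned:

    #include <linux/cache.h>

    struct demo_entry {
        int state;
    } ____cacheline_aligned;    /* size padded to a cacheline multiple */

    static bool demo_entry_aligned(struct demo_entry *e)
    {
        return (unsigned long)e == L1_CACHE_ALIGN((unsigned long)e);
    }
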
943 struct xpc_partition *part; in xpc_do_exit() local
975 part = &xpc_partitions[partid]; in xpc_do_exit()
977 if (xpc_partition_disengaged(part) && in xpc_do_exit()
978 part->act_state == XPC_P_AS_INACTIVE) { in xpc_do_exit()
984 XPC_DEACTIVATE_PARTITION(part, reason); in xpc_do_exit()
986 if (part->disengage_timeout > disengage_timeout) in xpc_do_exit()
987 disengage_timeout = part->disengage_timeout; in xpc_do_exit()
994 "%ld seconds\n", (disengage_timeout - in xpc_do_exit()
1080 struct xpc_partition *part; in xpc_die_deactivate() local
1095 part = &xpc_partitions[partid]; in xpc_die_deactivate()
1098 part->act_state != XPC_P_AS_INACTIVE) { in xpc_die_deactivate()
1099 xpc_arch_ops.request_partition_deactivation(part); in xpc_die_deactivate()
1100 xpc_arch_ops.indicate_partition_disengaged(part); in xpc_die_deactivate()
1109 * Given that one iteration through the following while-loop takes in xpc_die_deactivate()
1123 if (!keep_waiting--) { in xpc_die_deactivate()
1135 if (!wait_to_print--) { in xpc_die_deactivate()
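
The comment above bounds the post-crash wait by iteration count: since one pass takes a roughly known time (about 200 microseconds, per the source comment), keep_waiting and wait_to_print convert the time limit and the progress-message interval into countdowns. In die context the code cannot sleep, so it busy-waits. The shape of that bounded poll, with illustrative names:

    #include <linux/delay.h>
    #include <linux/atomic.h>

    static atomic_t demo_engaged = ATOMIC_INIT(1);

    static void demo_wait_disengage(int limit_iters, int print_iters)
    {
        int keep_waiting = limit_iters;
        int wait_to_print = print_iters;

        while (atomic_read(&demo_engaged) != 0) {
            if (!keep_waiting--)
                break;                          /* time limit exceeded */
            if (!wait_to_print--)
                wait_to_print = print_iters;    /* periodic progress point */
            udelay(200);                        /* ~200 us per iteration */
        }
    }
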
1192 if (die_args->trapnr == X86_TRAP_DF) in xpc_system_die()
1195 if (((die_args->trapnr == X86_TRAP_MF) || in xpc_system_die()
1196 (die_args->trapnr == X86_TRAP_XF)) && in xpc_system_die()
1197 !user_mode(die_args->regs)) in xpc_system_die()
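
xpc_system_die() is a die-notifier callback: it receives a struct die_args and filters on trapnr before reacting. Registering such a callback looks like the following sketch (handler body illustrative):

    #include <linux/notifier.h>
    #include <linux/kdebug.h>

    static int demo_die_handler(struct notifier_block *nb,
                                unsigned long event, void *data)
    {
        struct die_args *die_args = data;

        (void)die_args;     /* inspect die_args->trapnr / ->regs here */
        return NOTIFY_DONE;
    }

    static struct notifier_block demo_die_block = {
        .notifier_call = demo_die_handler,
    };

    static int __init demo_register(void)
    {
        return register_die_notifier(&demo_die_block);
    }
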
1220 dev_set_name(xpc_part, "part"); in xpc_init()
1227 ret = -ENODEV; in xpc_init()
1264 * The real work-horse behind xpc. This processes incoming in xpc_init()
1270 ret = -EBUSY; in xpc_init()
1284 /* mark this new thread as a non-starter */ in xpc_init()
1288 return -EBUSY; in xpc_init()
1332 MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
1336 MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
1340 MODULE_PARM_DESC(xpc_disengage_timelimit, "Number of seconds to wait "
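
Each MODULE_PARM_DESC() above pairs with a module_param() declaration elsewhere in the file. The pattern, with an illustrative parameter rather than XPC's actual variables and defaults:

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static int demo_interval = 5;       /* default, overridable at load */
    module_param(demo_interval, int, 0);
    MODULE_PARM_DESC(demo_interval, "Number of seconds between demo events.");

At load time such a parameter is set as, for example, "modprobe xpc xpc_hb_interval=5", using the parameter names as declared in the source.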