
Lines matching references to q (struct sem_queue *) in ipc/sem.c

260 struct sem_queue *q, *tq; in unmerge_queues() local
270 list_for_each_entry_safe(q, tq, &sma->pending_alter, list) { in unmerge_queues()
272 curr = &sma->sems[q->sops[0].sem_num]; in unmerge_queues()
274 list_add_tail(&q->list, &curr->pending_alter); in unmerge_queues()
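
The unmerge_queues() matches above show a pattern that recurs throughout this file: walk a pending list with list_for_each_entry_safe() so that the current entry can be moved elsewhere mid-iteration; here each queued simple operation is re-filed from the array-wide pending_alter list onto the per-semaphore pending_alter list of the one semaphore its sops[0] targets. Below is a minimal userspace sketch of that "iterate safely while re-filing nodes" idea; the node type, the push() helper and the singly linked lists are simplified stand-ins, not the kernel's <linux/list.h> API.

#include <stdio.h>

struct node {
    int sem_num;              /* which per-semaphore list this entry belongs on */
    struct node *next;
};

/* prepend onto a singly linked list; the kernel appends with list_add_tail() */
static void push(struct node **head, struct node *n)
{
    n->next = *head;
    *head = n;
}

int main(void)
{
    struct node entries[] = {
        { .sem_num = 0 }, { .sem_num = 2 }, { .sem_num = 0 }, { .sem_num = 1 },
    };
    struct node *pending = NULL;            /* array-wide pending_alter stand-in */
    struct node *per_sem[3] = { NULL };     /* one pending_alter list per semaphore */

    for (int i = 0; i < 4; i++)
        push(&pending, &entries[i]);

    /* "safe" walk: cache the next pointer before re-filing the current node */
    for (struct node *q = pending, *tq; q != NULL; q = tq) {
        tq = q->next;
        push(&per_sem[q->sem_num], q);
    }
    pending = NULL;

    for (int s = 0; s < 3; s++) {
        printf("sem %d:", s);
        for (struct node *q = per_sem[s]; q != NULL; q = q->next)
            printf(" entry[%ld]", (long)(q - entries));
        printf("\n");
    }
    return 0;
}
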
625 static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q) in perform_atomic_semop_slow() argument
634 sops = q->sops; in perform_atomic_semop_slow()
635 nsops = q->nsops; in perform_atomic_semop_slow()
636 un = q->undo; in perform_atomic_semop_slow()
665 pid = q->pid; in perform_atomic_semop_slow()
678 q->blocking = sop; in perform_atomic_semop_slow()
698 static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q) in perform_atomic_semop() argument
706 sops = q->sops; in perform_atomic_semop()
707 nsops = q->nsops; in perform_atomic_semop()
708 un = q->undo; in perform_atomic_semop()
710 if (unlikely(q->dupsop)) in perform_atomic_semop()
711 return perform_atomic_semop_slow(sma, q); in perform_atomic_semop()
756 ipc_update_pid(&curr->sempid, q->pid); in perform_atomic_semop()
762 q->blocking = sop; in perform_atomic_semop()
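
perform_atomic_semop_slow() and perform_atomic_semop() both walk q->sops and apply the batch atomically: either every operation succeeds or the semaphore values are left untouched, and the fast path falls back to the slow path only when q->dupsop marks duplicate semaphore indices within one batch. The userspace sketch below shows the all-or-nothing apply-with-rollback idea; try_semop(), its return codes and the struct names are illustrative assumptions, and the real routines additionally handle SEM_UNDO adjustments, the SEMVMX range check and the IPC_NOWAIT/sleep distinction.

#include <stdio.h>

struct sembuf_lite { int sem_num; int sem_op; };   /* simplified struct sembuf */

/* Tentatively apply every op; roll the whole batch back if any op would
 * block.  Returns 0 if the batch was applied, 1 if the caller should sleep. */
static int try_semop(int *semval, const struct sembuf_lite *sops, int nsops)
{
    int i;

    for (i = 0; i < nsops; i++) {
        int *v = &semval[sops[i].sem_num];
        int result = *v + sops[i].sem_op;

        if (sops[i].sem_op == 0 && *v != 0)
            goto would_block;               /* wait-for-zero not satisfied */
        if (result < 0)
            goto would_block;               /* decrement would go negative */
        *v = result;                        /* tentatively apply */
    }
    return 0;

would_block:
    while (--i >= 0)                        /* undo everything applied so far */
        semval[sops[i].sem_num] -= sops[i].sem_op;
    return 1;
}

int main(void)
{
    int semval[2] = { 1, 0 };
    struct sembuf_lite batch1[] = { { 0, -1 }, { 1, +1 } };   /* succeeds */
    struct sembuf_lite batch2[] = { { 1, -1 }, { 0, -1 } };   /* second op blocks */

    printf("batch1 -> %d, semval = { %d, %d }\n",
           try_semop(semval, batch1, 2), semval[0], semval[1]);
    printf("batch2 -> %d, semval = { %d, %d }\n",
           try_semop(semval, batch2, 2), semval[0], semval[1]);
    return 0;
}

In the example, batch2 blocks on its second operation, so the effect of its first operation is undone before returning. The kernel's fast path avoids this rollback by dry-running the whole batch before applying it, which is only a valid shortcut when no semaphore index repeats within the batch; q->dupsop flags the batches where it is not.
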
766 static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error, in wake_up_sem_queue_prepare() argument
769 wake_q_add(wake_q, q->sleeper); in wake_up_sem_queue_prepare()
777 WRITE_ONCE(q->status, error); in wake_up_sem_queue_prepare()
780 static void unlink_queue(struct sem_array *sma, struct sem_queue *q) in unlink_queue() argument
782 list_del(&q->list); in unlink_queue()
783 if (q->nsops > 1) in unlink_queue()
797 static inline int check_restart(struct sem_array *sma, struct sem_queue *q) in check_restart() argument
804 if (q->nsops > 1) in check_restart()
838 struct sem_queue *q, *tmp; in wake_const_ops() local
847 list_for_each_entry_safe(q, tmp, pending_list, list) { in wake_const_ops()
848 int error = perform_atomic_semop(sma, q); in wake_const_ops()
853 unlink_queue(sma, q); in wake_const_ops()
855 wake_up_sem_queue_prepare(q, error, wake_q); in wake_const_ops()
932 struct sem_queue *q, *tmp; in update_queue() local
942 list_for_each_entry_safe(q, tmp, pending_list, list) { in update_queue()
955 error = perform_atomic_semop(sma, q); in update_queue()
961 unlink_queue(sma, q); in update_queue()
967 do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q); in update_queue()
968 restart = check_restart(sma, q); in update_queue()
971 wake_up_sem_queue_prepare(q, error, wake_q); in update_queue()
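
wake_const_ops() and update_queue() implement the same rescan: after a semaphore value changes, walk the matching pending list, retry each queued entry with perform_atomic_semop(), and for every entry that no longer blocks unlink it and queue its sleeper on the wake_q; update_queue() additionally kicks the wait-for-zero scan (do_smart_wakeup_zero) and asks check_restart() whether the whole pass must start over. A compressed userspace sketch of that retry-and-wake loop follows, restricted to single-operation waiters and with a plain status field standing in for the deferred wake_q; rescan() and struct waiter are inventions for the sketch.

#include <stdio.h>
#include <stdbool.h>

struct waiter {
    int sem_op;        /* 0 = wait-for-zero, negative = blocked decrement */
    bool pending;      /* still on the pending list? */
    int status;        /* "wakeup reason" once pending turns false */
};

/* Retry every pending waiter against the current value; return true if any
 * waiter was satisfied, i.e. the value changed and another pass may help. */
static bool rescan(int *semval, struct waiter *w, int n)
{
    bool progress = false;

    for (int i = 0; i < n; i++) {
        if (!w[i].pending)
            continue;
        if (w[i].sem_op == 0 && *semval != 0)
            continue;                      /* still waiting for zero */
        if (*semval + w[i].sem_op < 0)
            continue;                      /* decrement still blocks */
        *semval += w[i].sem_op;            /* the op succeeds now */
        w[i].pending = false;              /* like unlink_queue() */
        w[i].status = 0;                   /* like wake_up_sem_queue_prepare(q, 0, ...) */
        progress = true;
    }
    return progress;
}

int main(void)
{
    int semval = 0;
    struct waiter w[] = {
        { .sem_op = -2, .pending = true },
        { .sem_op =  0, .pending = true },   /* wait-for-zero */
        { .sem_op = -1, .pending = true },
    };

    semval += 3;                             /* someone released three units */
    while (rescan(&semval, w, 3))            /* crude stand-in for restarting the scan */
        ;

    for (int i = 0; i < 3; i++)
        printf("waiter %d: %s\n", i, w[i].pending ? "still blocked" : "woken");
    printf("semval = %d\n", semval);
    return 0;
}

Note that the wait-for-zero waiter only succeeds on the second pass, once the two decrements have drained the value to zero; in the kernel that follow-on wakeup is what do_smart_wakeup_zero() and the check_restart() result take care of.
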
1053 static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q, in check_qop() argument
1056 struct sembuf *sop = q->blocking; in check_qop()
1091 struct sem_queue *q; in count_semcnt() local
1101 list_for_each_entry(q, l, list) { in count_semcnt()
1109 list_for_each_entry(q, &sma->pending_alter, list) { in count_semcnt()
1110 semcnt += check_qop(sma, semnum, q, count_zero); in count_semcnt()
1113 list_for_each_entry(q, &sma->pending_const, list) { in count_semcnt()
1114 semcnt += check_qop(sma, semnum, q, count_zero); in count_semcnt()
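
check_qop() and count_semcnt() back the semctl() GETNCNT and GETZCNT queries: they scan the pending lists and count how many queued tasks are blocked on a given semaphore, either waiting for its value to increase or, with count_zero set, waiting for it to reach zero. Below is a small userspace sketch of that counting, assuming single-operation waiters; count_semcnt_lite() and struct blocked are names made up for the sketch.

#include <stdio.h>
#include <stdbool.h>

struct blocked {
    int sem_num;   /* semaphore the blocking op targets */
    int sem_op;    /* 0 = wait-for-zero, negative = blocked decrement */
};

static int count_semcnt_lite(const struct blocked *b, int n,
                             int semnum, bool count_zero)
{
    int semcnt = 0;

    for (int i = 0; i < n; i++) {
        if (b[i].sem_num != semnum)
            continue;
        if (count_zero && b[i].sem_op == 0)
            semcnt++;                      /* waiting for the value to become 0 */
        else if (!count_zero && b[i].sem_op < 0)
            semcnt++;                      /* waiting for the value to increase */
    }
    return semcnt;
}

int main(void)
{
    struct blocked pending[] = {
        { 0, -1 }, { 0, 0 }, { 1, -2 }, { 0, -3 },
    };

    printf("GETNCNT(sem 0) = %d\n", count_semcnt_lite(pending, 4, 0, false)); /* 2 */
    printf("GETZCNT(sem 0) = %d\n", count_semcnt_lite(pending, 4, 0, true));  /* 1 */
    return 0;
}
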
1127 struct sem_queue *q, *tq; in freeary() local
1144 list_for_each_entry_safe(q, tq, &sma->pending_const, list) { in freeary()
1145 unlink_queue(sma, q); in freeary()
1146 wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); in freeary()
1149 list_for_each_entry_safe(q, tq, &sma->pending_alter, list) { in freeary()
1150 unlink_queue(sma, q); in freeary()
1151 wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); in freeary()
1155 list_for_each_entry_safe(q, tq, &sem->pending_const, list) { in freeary()
1156 unlink_queue(sma, q); in freeary()
1157 wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); in freeary()
1159 list_for_each_entry_safe(q, tq, &sem->pending_alter, list) { in freeary()
1160 unlink_queue(sma, q); in freeary()
1161 wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); in freeary()
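
Finally, the freeary() matches show the IPC_RMID teardown: every entry on the array-wide pending_const/pending_alter lists and on each per-semaphore list is unlinked and its sleeper is queued for wakeup with -EIDRM, so blocked semop() callers return "identifier removed" rather than sleeping on a dead array. Here is a compact userspace sketch of that "drain every list and fail every waiter" step; struct waiter, drain() and the array layout are stand-ins for the kernel's lists and wake_q.

#include <errno.h>
#include <stdio.h>

struct waiter {
    int woken;                 /* 0 = still on a pending list */
    int status;                /* error delivered at wakeup */
};

/* take every still-queued waiter off its list and record the wakeup error */
static void drain(struct waiter *list, int n, int error)
{
    for (int i = 0; i < n; i++) {
        if (list[i].woken)
            continue;
        list[i].status = error;    /* like WRITE_ONCE(q->status, -EIDRM) */
        list[i].woken = 1;         /* like unlink_queue() plus the later wake_q flush */
    }
}

int main(void)
{
    struct waiter pending_const[2] = { 0 };   /* array-wide wait-for-zero waiters */
    struct waiter pending_alter[3] = { 0 };   /* array-wide alter waiters */
    struct waiter per_sem[2][2]    = { 0 };   /* per-semaphore pending lists */

    drain(pending_const, 2, -EIDRM);
    drain(pending_alter, 3, -EIDRM);
    for (int s = 0; s < 2; s++)
        drain(per_sem[s], 2, -EIDRM);

    printf("every waiter now has status %d (-EIDRM)\n", pending_alter[0].status);
    return 0;
}

In the kernel the actual wakeups happen only after the locks are dropped, when the collected wake_q is flushed with wake_up_q().
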