Lines Matching +full:foo +full:- +full:queue
13 * - object w/ a bit
14 * - free list
18 * - explicit stack instead of recursion
19 * - tail recurse on first born instead of immediate push/pop
20 * - we gather the stuff that should not be killed into tree
25 * - don't just push entire root set; process in place
37 * of foo to bar and vice versa. Current code chokes on that.
43 * upon the beginning and unmark non-junk ones.
61 * parents (->gc_tree).
63 * Damn. Added missing check for ->dead in listen queues scanning.
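The "descriptor of foo to bar and vice versa" note above is about reference cycles built with SCM_RIGHTS: once two AF_UNIX sockets hold references to each other only inside queued messages, no close() from user space can free them, which is why this garbage collector exists. A minimal user-space sketch of building such a cycle (illustrative only, not part of garbage.c; error handling trimmed):

/*
 * Build the cycle: send each end of a socketpair over the pair via
 * SCM_RIGHTS, then close both ends.  The only remaining references to
 * the two sockets are the in-flight ones in their receive queues.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

static void send_fd(int via, int fd_to_pass)
{
        char dummy = 'x';
        struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
        union {
                struct cmsghdr align;
                char buf[CMSG_SPACE(sizeof(int))];
        } u;
        struct msghdr msg = {
                .msg_iov = &iov,
                .msg_iovlen = 1,
                .msg_control = u.buf,
                .msg_controllen = sizeof(u.buf),
        };
        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
        sendmsg(via, &msg, 0);
}

int main(void)
{
        int sv[2];

        if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sv) < 0)
                return 1;
        send_fd(sv[0], sv[1]);  /* a reference to sv[1] is now in flight */
        send_fd(sv[1], sv[0]);  /* ... and vice versa */
        close(sv[0]);
        close(sv[1]);           /* no user-space fd left */
        return 0;
}

Each queued descriptor is what unix_inflight() below accounts for; once both ends are closed, only unix_gc() can reclaim the pair.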
104 if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { in unix_get_socket()
106 struct sock *s = sock->sk; in unix_get_socket()
109 if (s && sock->ops && sock->ops->family == PF_UNIX) in unix_get_socket()
128 if (atomic_long_inc_return(&u->inflight) == 1) { in unix_inflight()
129 BUG_ON(!list_empty(&u->link)); in unix_inflight()
130 list_add_tail(&u->link, &gc_inflight_list); in unix_inflight()
132 BUG_ON(list_empty(&u->link)); in unix_inflight()
136 user->unix_inflight++; in unix_inflight()
149 BUG_ON(!atomic_long_read(&u->inflight)); in unix_notinflight()
150 BUG_ON(list_empty(&u->link)); in unix_notinflight()
152 if (atomic_long_dec_and_test(&u->inflight)) in unix_notinflight()
153 list_del_init(&u->link); in unix_notinflight()
154 unix_tot_inflight--; in unix_notinflight()
156 user->unix_inflight--; in unix_notinflight()
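unix_inflight() and unix_notinflight() track, per socket, how many references to it are currently travelling inside queued SCM_RIGHTS messages: a socket sits on gc_inflight_list exactly while that count is non-zero, unix_tot_inflight mirrors the total, and user->unix_inflight feeds the per-user limit on in-flight files. A toy model of the list/count invariant (hypothetical names, assertions in place of BUG_ON, per-user counter left out; not kernel code):

#include <assert.h>
#include <stdbool.h>

struct toy_usock {
        long inflight;          /* refs held by queued SCM_RIGHTS messages */
        bool on_gc_list;        /* stands in for !list_empty(&u->link) */
};

static long toy_tot_inflight;   /* mirrors unix_tot_inflight */

static void toy_inflight(struct toy_usock *u)
{
        if (++u->inflight == 1) {       /* 0 -> 1: join the inflight list */
                assert(!u->on_gc_list);
                u->on_gc_list = true;
        } else {
                assert(u->on_gc_list);
        }
        toy_tot_inflight++;
}

static void toy_notinflight(struct toy_usock *u)
{
        assert(u->inflight > 0 && u->on_gc_list);
        if (--u->inflight == 0)         /* last in-flight ref gone */
                u->on_gc_list = false;
        toy_tot_inflight--;
}

int main(void)
{
        struct toy_usock s = { 0 };

        toy_inflight(&s);       /* fd queued on one socket */
        toy_inflight(&s);       /* ... and on another */
        toy_notinflight(&s);
        toy_notinflight(&s);
        assert(!s.on_gc_list && toy_tot_inflight == 0);
        return 0;
}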
166 spin_lock(&x->sk_receive_queue.lock); in scan_inflight()
167 skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { in scan_inflight()
172 int nfd = UNIXCB(skb).fp->count; in scan_inflight()
173 struct file **fp = UNIXCB(skb).fp->fp; in scan_inflight()
175 while (nfd--) { in scan_inflight()
182 /* Ignore non-candidates, they could in scan_inflight()
186 if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) { in scan_inflight()
194 __skb_unlink(skb, &x->sk_receive_queue); in scan_inflight()
199 spin_unlock(&x->sk_receive_queue.lock); in scan_inflight()
205 if (x->sk_state != TCP_LISTEN) { in scan_children()
216 spin_lock(&x->sk_receive_queue.lock); in scan_children()
217 skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { in scan_children()
218 u = unix_sk(skb->sk); in scan_children()
220 /* An embryo cannot be in-flight, so it's safe in scan_children()
223 BUG_ON(!list_empty(&u->link)); in scan_children()
224 list_add_tail(&u->link, &embryos); in scan_children()
226 spin_unlock(&x->sk_receive_queue.lock); in scan_children()
230 scan_inflight(&u->sk, func, hitlist); in scan_children()
231 list_del_init(&u->link); in scan_children()
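scan_inflight() walks one socket's receive queue, applies func to every passed descriptor that is a GC candidate, and, when a hitlist is supplied, detaches any skb that referenced a candidate so its references can be dropped in one later sweep; scan_children() repeats that scan for the embryos still parked in a listening socket's queue. A compact sketch of the common pattern with toy types (the embryo walk is omitted; these are not the kernel's structures):

#include <stdbool.h>
#include <stddef.h>

struct toy_usock {
        bool gc_candidate;              /* UNIX_GC_CANDIDATE stand-in */
};

struct toy_msg {
        struct toy_msg *next;
        struct toy_usock **passed;      /* sockets carried via SCM_RIGHTS */
        int npassed;
};

struct toy_queue {
        struct toy_msg *head;
};

typedef void (*scan_func_t)(struct toy_usock *child);

/* Run func on every passed GC candidate; move messages that referenced
 * at least one candidate onto the hitlist when one is supplied. */
static void toy_scan_queue(struct toy_queue *q, scan_func_t func,
                           struct toy_queue *hitlist)
{
        struct toy_msg **link = &q->head;

        while (*link) {
                struct toy_msg *msg = *link;
                bool hit = false;

                for (int i = 0; i < msg->npassed; i++) {
                        if (msg->passed[i]->gc_candidate) {
                                hit = true;
                                func(msg->passed[i]);
                        }
                }

                if (hit && hitlist) {
                        *link = msg->next;              /* unlink from queue */
                        msg->next = hitlist->head;      /* push onto hitlist */
                        hitlist->head = msg;
                } else {
                        link = &msg->next;
                }
        }
}

static int hits;

static void count_hit(struct toy_usock *child)
{
        (void)child;
        hits++;
}

int main(void)
{
        struct toy_usock a = { .gc_candidate = true };
        struct toy_usock b = { .gc_candidate = false };
        struct toy_usock *passed[] = { &a, &b };
        struct toy_msg msg = { .passed = passed, .npassed = 2 };
        struct toy_queue q = { .head = &msg };
        struct toy_queue hitlist = { 0 };

        toy_scan_queue(&q, count_hit, &hitlist);
        /* a was a candidate: func ran once, the message moved to hitlist */
        return !(hits == 1 && q.head == NULL && hitlist.head == &msg);
}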
238 atomic_long_dec(&usk->inflight); in dec_inflight()
243 atomic_long_inc(&usk->inflight); in inc_inflight()
248 atomic_long_inc(&u->inflight); in inc_inflight_move_tail()
253 if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags)) in inc_inflight_move_tail()
254 list_move_tail(&u->link, &gc_candidates); in inc_inflight_move_tail()
287 * in-flight sockets are considered, and from those only ones in unix_gc()
298 * added to queue, so we must make sure only to touch in unix_gc()
305 total_refs = file_count(u->sk.sk_socket->file); in unix_gc()
306 inflight_refs = atomic_long_read(&u->inflight); in unix_gc()
311 list_move_tail(&u->link, &gc_candidates); in unix_gc()
312 __set_bit(UNIX_GC_CANDIDATE, &u->gc_flags); in unix_gc()
313 __set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags); in unix_gc()
317 /* Now remove all internal in-flight reference to children of in unix_gc()
321 scan_children(&u->sk, dec_inflight, NULL); in unix_gc()
335 list_move(&cursor, &u->link); in unix_gc()
337 if (atomic_long_read(&u->inflight) > 0) { in unix_gc()
338 list_move_tail(&u->link, &not_cycle_list); in unix_gc()
339 __clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags); in unix_gc()
340 scan_children(&u->sk, inc_inflight_move_tail, NULL); in unix_gc()
351 scan_children(&u->sk, inc_inflight, &hitlist); in unix_gc()
358 __clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags); in unix_gc()
359 list_move_tail(&u->link, &gc_inflight_list); in unix_gc()
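Taken together, the unix_gc() fragments above form a three-phase cycle detector: sockets whose every file reference is itself in flight become candidates, the references candidates hold on each other are subtracted, anything still referenced from outside is restored together with everything it references, and what remains is queued on the hitlist and purged. A self-contained toy of the same mark-and-restore idea over an explicit reference graph (illustrative names, not the kernel's data structures):

#include <stdbool.h>
#include <stdio.h>

#define N 3

static int edge[N][N];  /* edge[i][j]: refs to socket j queued on socket i */
static int inflight[N]; /* like u->inflight: in-flight references only */
static int refs[N];     /* like file_count(): every reference */

int main(void)
{
        int user_refs[N] = { 0, 0, 1 };         /* open fds in user space */
        bool candidate[N] = { false };
        bool maybe_cycle[N] = { false };
        int work[N];

        /* Example graph: 0 and 1 reference each other only from their
         * queues (a pure cycle); 2 is in flight but also has a user fd. */
        edge[0][1] = edge[1][0] = 1;
        edge[0][2] = 1;

        for (int j = 0; j < N; j++) {
                for (int i = 0; i < N; i++)
                        inflight[j] += edge[i][j];
                refs[j] = user_refs[j] + inflight[j];
                work[j] = inflight[j];

                /* Phase 1: candidates live only through in-flight refs. */
                if (inflight[j] && inflight[j] == refs[j])
                        candidate[j] = maybe_cycle[j] = true;
        }

        /* Phase 2a: forget the references candidates hold on each other
         * (the dec_inflight pass). */
        for (int i = 0; i < N; i++)
                if (candidate[i])
                        for (int j = 0; j < N; j++)
                                if (candidate[j])
                                        work[j] -= edge[i][j];

        /* Phase 2b: anything still referenced is reachable from outside the
         * candidate set; restore it and whatever it references, repeating
         * until stable (the inc_inflight_move_tail re-scan). */
        for (bool changed = true; changed; ) {
                changed = false;
                for (int i = 0; i < N; i++) {
                        if (!maybe_cycle[i] || work[i] <= 0)
                                continue;
                        maybe_cycle[i] = false;         /* "not_cycle_list" */
                        for (int j = 0; j < N; j++)
                                if (candidate[j])
                                        work[j] += edge[i][j];
                        changed = true;
                }
        }

        /* Phase 3: what still looks like a pure cycle goes on the hitlist. */
        for (int i = 0; i < N; i++)
                printf("socket %d: %s\n", i,
                       maybe_cycle[i] ? "garbage (hitlist)" : "kept");
        return 0;
}

With this graph it reports sockets 0 and 1, the pure cycle, as garbage and keeps socket 2, which still has an open fd in user space.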