Lines matching references to fl (identifier cross-reference of net/ipv6/ip6_flowlabel.c):
60 #define for_each_fl_rcu(hash, fl) \ argument
61 for (fl = rcu_dereference_bh(fl_ht[(hash)]); \
62 fl != NULL; \
63 fl = rcu_dereference_bh(fl->next))
64 #define for_each_fl_continue_rcu(fl) \ argument
65 for (fl = rcu_dereference_bh(fl->next); \
66 fl != NULL; \
67 fl = rcu_dereference_bh(fl->next))
76 struct ip6_flowlabel *fl; in __fl_lookup() local
78 for_each_fl_rcu(FL_HASH(label), fl) { in __fl_lookup()
79 if (fl->label == label && net_eq(fl->fl_net, net)) in __fl_lookup()
80 return fl; in __fl_lookup()
87 struct ip6_flowlabel *fl; in fl_lookup() local
90 fl = __fl_lookup(net, label); in fl_lookup()
91 if (fl && !atomic_inc_not_zero(&fl->users)) in fl_lookup()
92 fl = NULL; in fl_lookup()
94 return fl; in fl_lookup()
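Taken together, lines 60-94 are the RCU lookup path: for_each_fl_rcu() walks one bucket of the fl_ht hash table, __fl_lookup() filters it by label and network namespace, and fl_lookup() additionally takes a reference. A hedged reconstruction of the two functions; the rcu_read_lock_bh() pair and the NULL fall-through are not part of the listed lines and are filled in from context:

static struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	/* Walk one hash bucket; caller holds rcu_read_lock_bh() or the writer lock. */
	for_each_fl_rcu(FL_HASH(label), fl) {
		if (fl->label == label && net_eq(fl->fl_net, net))
			return fl;
	}
	return NULL;
}

static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	rcu_read_lock_bh();
	fl = __fl_lookup(net, label);
	/* Only take a reference if the label is not already dying (users == 0). */
	if (fl && !atomic_inc_not_zero(&fl->users))
		fl = NULL;
	rcu_read_unlock_bh();
	return fl;
}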
99 struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu); in fl_free_rcu() local
101 if (fl->share == IPV6_FL_S_PROCESS) in fl_free_rcu()
102 put_pid(fl->owner.pid); in fl_free_rcu()
103 kfree(fl->opt); in fl_free_rcu()
104 kfree(fl); in fl_free_rcu()
108 static void fl_free(struct ip6_flowlabel *fl) in fl_free() argument
110 if (fl) in fl_free()
111 call_rcu(&fl->rcu, fl_free_rcu); in fl_free()
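Lines 99-111 are the matching tear-down: fl_free() defers the actual kfree() through call_rcu() so that readers still inside for_each_fl_rcu() never see freed memory. A minimal annotated reconstruction; the function signatures are inferred from the container_of()/call_rcu() usage:

static void fl_free_rcu(struct rcu_head *head)
{
	struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu);

	/* Runs after a grace period, so no RCU reader can still hold fl. */
	if (fl->share == IPV6_FL_S_PROCESS)
		put_pid(fl->owner.pid);
	kfree(fl->opt);
	kfree(fl);
}

static void fl_free(struct ip6_flowlabel *fl)
{
	if (fl)
		call_rcu(&fl->rcu, fl_free_rcu);
}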
114 static void fl_release(struct ip6_flowlabel *fl) in fl_release() argument
118 fl->lastuse = jiffies; in fl_release()
119 if (atomic_dec_and_test(&fl->users)) { in fl_release()
120 unsigned long ttd = fl->lastuse + fl->linger; in fl_release()
121 if (time_after(ttd, fl->expires)) in fl_release()
122 fl->expires = ttd; in fl_release()
123 ttd = fl->expires; in fl_release()
124 if (fl->opt && fl->share == IPV6_FL_S_EXCL) { in fl_release()
125 struct ipv6_txoptions *opt = fl->opt; in fl_release()
126 fl->opt = NULL; in fl_release()
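fl_release() (lines 114-126) drops one reference and, when it was the last, arranges for garbage collection no earlier than lastuse + linger. A hedged sketch of the whole function; the ip6_fl_lock spinlock, the kfree() of the detached exclusive options, and the ip6_fl_gc_timer re-arm do not appear in this refs:fl listing and are filled in from context:

static void fl_release(struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_fl_lock);

	fl->lastuse = jiffies;
	if (atomic_dec_and_test(&fl->users)) {
		/* Keep the now-unused label around for at least fl->linger. */
		unsigned long ttd = fl->lastuse + fl->linger;

		if (time_after(ttd, fl->expires))
			fl->expires = ttd;
		ttd = fl->expires;

		/* Exclusive labels drop their packet options right away. */
		if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
			struct ipv6_txoptions *opt = fl->opt;
			fl->opt = NULL;
			kfree(opt);
		}
		/* Make sure the GC timer fires no later than the new deadline. */
		if (!timer_pending(&ip6_fl_gc_timer) ||
		    time_after(ip6_fl_gc_timer.expires, ttd))
			mod_timer(&ip6_fl_gc_timer, ttd);
	}
	spin_unlock_bh(&ip6_fl_lock);
}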
145 struct ip6_flowlabel *fl; in ip6_fl_gc() local
149 while ((fl = rcu_dereference_protected(*flp, in ip6_fl_gc()
151 if (atomic_read(&fl->users) == 0) { in ip6_fl_gc()
152 unsigned long ttd = fl->lastuse + fl->linger; in ip6_fl_gc()
153 if (time_after(ttd, fl->expires)) in ip6_fl_gc()
154 fl->expires = ttd; in ip6_fl_gc()
155 ttd = fl->expires; in ip6_fl_gc()
157 *flp = fl->next; in ip6_fl_gc()
158 fl_free(fl); in ip6_fl_gc()
165 flp = &fl->next; in ip6_fl_gc()
182 struct ip6_flowlabel *fl; in ip6_fl_purge() local
186 while ((fl = rcu_dereference_protected(*flp, in ip6_fl_purge()
188 if (net_eq(fl->fl_net, net) && in ip6_fl_purge()
189 atomic_read(&fl->users) == 0) { in ip6_fl_purge()
190 *flp = fl->next; in ip6_fl_purge()
191 fl_free(fl); in ip6_fl_purge()
195 flp = &fl->next; in ip6_fl_purge()
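ip6_fl_gc() (lines 145-165) and ip6_fl_purge() (lines 182-195) share the same writer-side walk: every bucket of fl_ht is traversed with rcu_dereference_protected(), and entries whose user count has reached zero are unlinked and handed to fl_free(). A hedged sketch of the GC variant; the bucket loop bound, locking, timer handling and the timer-callback signature are assumed from context, and the global size accounting is omitted:

static void ip6_fl_gc(struct timer_list *unused)	/* signature assumed */
{
	unsigned long now = jiffies;
	unsigned long sched = 0;
	int i;

	spin_lock(&ip6_fl_lock);
	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp = &fl_ht[i];

		while ((fl = rcu_dereference_protected(*flp,
				lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (atomic_read(&fl->users) == 0) {
				unsigned long ttd = fl->lastuse + fl->linger;

				if (time_after(ttd, fl->expires))
					fl->expires = ttd;
				ttd = fl->expires;
				if (time_after_eq(now, ttd)) {
					/* Expired and unreferenced: unlink, free after grace period. */
					*flp = fl->next;
					fl_free(fl);
					continue;
				}
				/* Still lingering: remember the earliest deadline. */
				if (!sched || time_before(ttd, sched))
					sched = ttd;
			}
			flp = &fl->next;
		}
	}
	if (sched)
		mod_timer(&ip6_fl_gc_timer, sched);
	spin_unlock(&ip6_fl_lock);
}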
202 struct ip6_flowlabel *fl, __be32 label) in fl_intern()
206 fl->label = label & IPV6_FLOWLABEL_MASK; in fl_intern()
211 fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK; in fl_intern()
212 if (fl->label) { in fl_intern()
213 lfl = __fl_lookup(net, fl->label); in fl_intern()
227 lfl = __fl_lookup(net, fl->label); in fl_intern()
235 fl->lastuse = jiffies; in fl_intern()
236 fl->next = fl_ht[FL_HASH(fl->label)]; in fl_intern()
237 rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl); in fl_intern()
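fl_intern() (lines 202-237) publishes a freshly created label into the hash table. A hedged sketch of the whole function: with label == 0 it draws random 20-bit values until one is free in this namespace, otherwise it reuses an existing entry if the label reappeared in the meantime. The return convention (NULL for a fresh insert, the existing entry when reusing) and the locking are inferred from the caller at line 677, and the size accounting is omitted:

static struct ip6_flowlabel *fl_intern(struct net *net,
				       struct ip6_flowlabel *fl, __be32 label)
{
	struct ip6_flowlabel *lfl;

	fl->label = label & IPV6_FLOWLABEL_MASK;

	spin_lock_bh(&ip6_fl_lock);
	if (label == 0) {
		for (;;) {
			/* Draw random 20-bit labels until an unused one is found. */
			fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK;
			if (fl->label) {
				lfl = __fl_lookup(net, fl->label);
				if (!lfl)
					break;
			}
		}
	} else {
		/* Caller-chosen label: another socket may have created it
		 * in the meantime, so recheck and reuse that entry. */
		lfl = __fl_lookup(net, fl->label);
		if (lfl) {
			atomic_inc(&lfl->users);
			spin_unlock_bh(&ip6_fl_lock);
			return lfl;
		}
	}

	/* Publish at the head of the bucket; readers walk the list under RCU. */
	fl->lastuse = jiffies;
	fl->next = fl_ht[FL_HASH(fl->label)];
	rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
	spin_unlock_bh(&ip6_fl_lock);
	return NULL;
}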
256 struct ip6_flowlabel *fl = sfl->fl; in fl6_sock_lookup() local
258 if (fl->label == label && atomic_inc_not_zero(&fl->users)) { in fl6_sock_lookup()
259 fl->lastuse = jiffies; in fl6_sock_lookup()
261 return fl; in fl6_sock_lookup()
283 fl_release(sfl->fl); in fl6_free_socklist()
301 struct ip6_flowlabel *fl, in fl6_merge_options() argument
304 struct ipv6_txoptions *fl_opt = fl->opt; in fl6_merge_options()
338 static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires) in fl6_renew() argument
348 fl->lastuse = jiffies; in fl6_renew()
349 if (time_before(fl->linger, linger)) in fl6_renew()
350 fl->linger = linger; in fl6_renew()
351 if (time_before(expires, fl->linger)) in fl6_renew()
352 expires = fl->linger; in fl6_renew()
353 if (time_before(fl->expires, fl->lastuse + expires)) in fl6_renew()
354 fl->expires = fl->lastuse + expires; in fl6_renew()
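fl6_renew() (lines 338-354) only ever extends a label's lifetime: linger never shrinks, and the expiry is pushed to at least now + max(expires, linger). A hedged sketch; the validation and seconds-to-jiffies conversion of the incoming values, and the locking, are not among the listed lines and are only summarized in the comment:

static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger,
		     unsigned long expires)
{
	/* (Not shown: the incoming values are validated and converted to
	 *  jiffies; exceeding the unprivileged maximum without
	 *  CAP_NET_ADMIN fails with -EPERM.) */

	spin_lock_bh(&ip6_fl_lock);
	fl->lastuse = jiffies;
	if (time_before(fl->linger, linger))
		fl->linger = linger;		/* linger never shrinks */
	if (time_before(expires, fl->linger))
		expires = fl->linger;		/* expiry is at least the linger time */
	if (time_before(fl->expires, fl->lastuse + expires))
		fl->expires = fl->lastuse + expires;
	spin_unlock_bh(&ip6_fl_lock);

	return 0;
}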
364 struct ip6_flowlabel *fl = NULL; in fl_create() local
375 fl = kzalloc(sizeof(*fl), GFP_KERNEL); in fl_create()
376 if (!fl) in fl_create()
386 fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL); in fl_create()
387 if (!fl->opt) in fl_create()
390 memset(fl->opt, 0, sizeof(*fl->opt)); in fl_create()
391 fl->opt->tot_len = sizeof(*fl->opt) + olen; in fl_create()
393 if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen)) in fl_create()
397 msg.msg_control = (void *)(fl->opt+1); in fl_create()
400 ipc6.opt = fl->opt; in fl_create()
405 if (fl->opt->opt_flen) in fl_create()
407 if (fl->opt->opt_nflen == 0) { in fl_create()
408 kfree(fl->opt); in fl_create()
409 fl->opt = NULL; in fl_create()
413 fl->fl_net = net; in fl_create()
414 fl->expires = jiffies; in fl_create()
415 err = fl6_renew(fl, freq->flr_linger, freq->flr_expires); in fl_create()
418 fl->share = freq->flr_share; in fl_create()
425 fl->dst = freq->flr_dst; in fl_create()
426 atomic_set(&fl->users, 1); in fl_create()
427 switch (fl->share) { in fl_create()
432 fl->owner.pid = get_task_pid(current, PIDTYPE_PID); in fl_create()
435 fl->owner.uid = current_euid(); in fl_create()
441 return fl; in fl_create()
444 fl_free(fl); in fl_create()
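fl_create() (lines 364-444) allocates the label, copies any IPV6_PKTOPTIONS-style ancillary data into fl->opt, seeds the timers through fl6_renew(), and records the owner according to the requested share mode. A hedged sketch of the ownership tail of the function only; the option parsing and the done/error cleanup label it jumps to are not shown here:

	fl->fl_net = net;
	fl->expires = jiffies;
	err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
	if (err)
		goto done;
	fl->share = freq->flr_share;
	fl->dst = freq->flr_dst;
	atomic_set(&fl->users, 1);

	switch (fl->share) {
	case IPV6_FL_S_EXCL:
	case IPV6_FL_S_ANY:
		break;				/* no owner to record */
	case IPV6_FL_S_PROCESS:
		/* shared within one process: pin its struct pid */
		fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
		break;
	case IPV6_FL_S_USER:
		/* shared within one user: remember the effective uid */
		fl->owner.uid = current_euid();
		break;
	default:
		err = -EINVAL;
		goto done;
	}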
474 struct ip6_flowlabel *fl) in fl_link() argument
477 sfl->fl = fl; in fl_link()
502 if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) { in ipv6_flowlabel_opt_get()
504 freq->flr_label = sfl->fl->label; in ipv6_flowlabel_opt_get()
505 freq->flr_dst = sfl->fl->dst; in ipv6_flowlabel_opt_get()
506 freq->flr_share = sfl->fl->share; in ipv6_flowlabel_opt_get()
507 freq->flr_expires = (sfl->fl->expires - jiffies) / HZ; in ipv6_flowlabel_opt_get()
508 freq->flr_linger = sfl->fl->linger / HZ; in ipv6_flowlabel_opt_get()
529 struct ip6_flowlabel *fl, *fl1 = NULL; in ipv6_flowlabel_opt() local
554 if (sfl->fl->label == freq.flr_label) { in ipv6_flowlabel_opt()
559 fl_release(sfl->fl); in ipv6_flowlabel_opt()
570 if (sfl->fl->label == freq.flr_label) { in ipv6_flowlabel_opt()
571 err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires); in ipv6_flowlabel_opt()
580 fl = fl_lookup(net, freq.flr_label); in ipv6_flowlabel_opt()
581 if (fl) { in ipv6_flowlabel_opt()
582 err = fl6_renew(fl, freq.flr_linger, freq.flr_expires); in ipv6_flowlabel_opt()
583 fl_release(fl); in ipv6_flowlabel_opt()
611 fl = fl_create(net, sk, &freq, optval, optlen, &err); in ipv6_flowlabel_opt()
612 if (!fl) in ipv6_flowlabel_opt()
620 if (sfl->fl->label == freq.flr_label) { in ipv6_flowlabel_opt()
625 fl1 = sfl->fl; in ipv6_flowlabel_opt()
642 fl1->share != fl->share || in ipv6_flowlabel_opt()
644 (fl1->owner.pid != fl->owner.pid)) || in ipv6_flowlabel_opt()
646 !uid_eq(fl1->owner.uid, fl->owner.uid))) in ipv6_flowlabel_opt()
652 if (fl->linger > fl1->linger) in ipv6_flowlabel_opt()
653 fl1->linger = fl->linger; in ipv6_flowlabel_opt()
654 if ((long)(fl->expires - fl1->expires) > 0) in ipv6_flowlabel_opt()
655 fl1->expires = fl->expires; in ipv6_flowlabel_opt()
657 fl_free(fl); in ipv6_flowlabel_opt()
677 fl1 = fl_intern(net, fl, freq.flr_label); in ipv6_flowlabel_opt()
683 &fl->label, sizeof(fl->label))) { in ipv6_flowlabel_opt()
688 fl_link(np, sfl1, fl); in ipv6_flowlabel_opt()
696 fl_free(fl); in ipv6_flowlabel_opt()
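All of this is driven from setsockopt(IPV6_FLOWLABEL_MGR), which ipv6_flowlabel_opt() services (lines 529-696): IPV6_FL_A_PUT releases a lease, IPV6_FL_A_RENEW calls fl6_renew(), and IPV6_FL_A_GET attaches an existing label or creates one via fl_create()/fl_intern(). A hedged userspace sketch of the GET+CREATE case; it assumes <linux/in6.h> supplies struct in6_flowlabel_req and the IPV6_FL_*/IPV6_FLOWLABEL_MGR constants (header arrangements vary across libc versions, and the include order may matter):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <linux/in6.h>

static int request_flowlabel(int sk, const struct in6_addr *dst)
{
	struct in6_flowlabel_req freq;

	memset(&freq, 0, sizeof(freq));
	freq.flr_dst     = *dst;
	freq.flr_label   = 0;			/* 0: kernel picks one (fl_intern() random loop) */
	freq.flr_action  = IPV6_FL_A_GET;	/* dispatched by ipv6_flowlabel_opt() */
	freq.flr_flags   = IPV6_FL_F_CREATE;	/* create if no matching label exists yet */
	freq.flr_share   = IPV6_FL_S_EXCL;	/* exclusive lease */
	freq.flr_linger  = 60;			/* seconds; becomes fl->linger via fl6_renew() */
	freq.flr_expires = 60;

	if (setsockopt(sk, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR,
		       &freq, sizeof(freq)) < 0) {
		perror("IPV6_FLOWLABEL_MGR");
		return -1;
	}
	/* On success the chosen label has been copied back into freq.flr_label
	 * (the copy_to_user of &fl->label at line 683 of the listing). */
	printf("got flow label %05x\n", (unsigned int)ntohl(freq.flr_label));
	return 0;
}

To actually put the label on outgoing packets, the application then places it in sin6_flowinfo of the destination address (and, depending on kernel policy, enables IPV6_FLOWINFO_SEND) when sending on the same socket.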
713 struct ip6_flowlabel *fl = NULL; in ip6fl_get_first() local
718 for_each_fl_rcu(state->bucket, fl) { in ip6fl_get_first()
719 if (net_eq(fl->fl_net, net)) in ip6fl_get_first()
723 fl = NULL; in ip6fl_get_first()
725 return fl; in ip6fl_get_first()
728 static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl) in ip6fl_get_next() argument
733 for_each_fl_continue_rcu(fl) { in ip6fl_get_next()
734 if (net_eq(fl->fl_net, net)) in ip6fl_get_next()
740 for_each_fl_rcu(state->bucket, fl) { in ip6fl_get_next()
741 if (net_eq(fl->fl_net, net)) in ip6fl_get_next()
746 fl = NULL; in ip6fl_get_next()
749 return fl; in ip6fl_get_next()
754 struct ip6_flowlabel *fl = ip6fl_get_first(seq); in ip6fl_get_idx() local
755 if (fl) in ip6fl_get_idx()
756 while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL) in ip6fl_get_idx()
758 return pos ? NULL : fl; in ip6fl_get_idx()
770 struct ip6_flowlabel *fl; in ip6fl_seq_next() local
773 fl = ip6fl_get_first(seq); in ip6fl_seq_next()
775 fl = ip6fl_get_next(seq, v); in ip6fl_seq_next()
777 return fl; in ip6fl_seq_next()
792 struct ip6_flowlabel *fl = v; in ip6fl_seq_show() local
795 (unsigned int)ntohl(fl->label), in ip6fl_seq_show()
796 fl->share, in ip6fl_seq_show()
797 ((fl->share == IPV6_FL_S_PROCESS) ? in ip6fl_seq_show()
798 pid_nr_ns(fl->owner.pid, state->pid_ns) : in ip6fl_seq_show()
799 ((fl->share == IPV6_FL_S_USER) ? in ip6fl_seq_show()
800 from_kuid_munged(seq_user_ns(seq), fl->owner.uid) : in ip6fl_seq_show()
802 atomic_read(&fl->users), in ip6fl_seq_show()
803 fl->linger/HZ, in ip6fl_seq_show()
804 (long)(fl->expires - jiffies)/HZ, in ip6fl_seq_show()
805 &fl->dst, in ip6fl_seq_show()
806 fl->opt ? fl->opt->opt_nflen : 0); in ip6fl_seq_show()
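The seq_show fragment (lines 792-806) is the per-entry printer for /proc/net/ip6_flowlabel; the fields appear in the order shown: label (hex), share mode, owner (pid or munged uid, 0 otherwise), reference count, linger and remaining expiry in seconds, destination address, and the length of the attached options. The header row printed for SEQ_START_TOKEN is not part of this refs:fl listing; it names the columns roughly as Label, S, Owner, Users, Linger, Expires, Dst, Opt.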