Lines Matching defs:fl (net/ipv6/ip6_flowlabel.c, Linux IPv6 flow-label management)

60 #define for_each_fl_rcu(hash, fl)				\
61 for (fl = rcu_dereference(fl_ht[(hash)]); \
62 fl != NULL; \
63 fl = rcu_dereference(fl->next))
64 #define for_each_fl_continue_rcu(fl) \
65 for (fl = rcu_dereference(fl->next); \
66 fl != NULL; \
67 fl = rcu_dereference(fl->next))
76 struct ip6_flowlabel *fl;
78 for_each_fl_rcu(FL_HASH(label), fl) {
79 if (fl->label == label && net_eq(fl->fl_net, net))
80 return fl;
87 struct ip6_flowlabel *fl;
90 fl = __fl_lookup(net, label);
91 if (fl && !atomic_inc_not_zero(&fl->users))
92 fl = NULL;
94 return fl;
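
Taken together, the excerpts above form the lockless lookup path: for_each_fl_rcu() walks one bucket of the fl_ht hash table under RCU, __fl_lookup() matches on label and network namespace, and fl_lookup() only hands an entry back once atomic_inc_not_zero() has succeeded, so an entry whose user count has already reached zero is never returned. A minimal sketch of the calling pattern (the rcu_read_lock()/rcu_read_unlock() bracketing is assumed; those lines do not mention fl and are therefore not in this listing):

    rcu_read_lock();
    fl = __fl_lookup(net, label);               /* RCU walk of fl_ht[FL_HASH(label)] */
    if (fl && !atomic_inc_not_zero(&fl->users)) /* skip entries already on their way out */
        fl = NULL;
    rcu_read_unlock();
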
97 static bool fl_shared_exclusive(struct ip6_flowlabel *fl)
99 return fl->share == IPV6_FL_S_EXCL ||
100 fl->share == IPV6_FL_S_PROCESS ||
101 fl->share == IPV6_FL_S_USER;
106 struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu);
108 if (fl->share == IPV6_FL_S_PROCESS)
109 put_pid(fl->owner.pid);
110 kfree(fl->opt);
111 kfree(fl);
115 static void fl_free(struct ip6_flowlabel *fl)
117 if (!fl)
120 if (fl_shared_exclusive(fl) || fl->opt)
123 call_rcu(&fl->rcu, fl_free_rcu);
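
Freeing is deferred through call_rcu(): readers may still be inside a for_each_fl_rcu() walk of the bucket, so fl_free_rcu(), which drops the process-share pid reference and frees fl->opt, only runs after a grace period. That same deferral is what makes the atomic_inc_not_zero() in fl_lookup() safe against a concurrent release.
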
126 static void fl_release(struct ip6_flowlabel *fl)
130 fl->lastuse = jiffies;
131 if (atomic_dec_and_test(&fl->users)) {
132 unsigned long ttd = fl->lastuse + fl->linger;
133 if (time_after(ttd, fl->expires))
134 fl->expires = ttd;
135 ttd = fl->expires;
136 if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
137 struct ipv6_txoptions *opt = fl->opt;
138 fl->opt = NULL;
157 struct ip6_flowlabel *fl;
161 while ((fl = rcu_dereference_protected(*flp,
163 if (atomic_read(&fl->users) == 0) {
164 unsigned long ttd = fl->lastuse + fl->linger;
165 if (time_after(ttd, fl->expires))
166 fl->expires = ttd;
167 ttd = fl->expires;
169 *flp = fl->next;
170 fl_free(fl);
177 flp = &fl->next;
194 struct ip6_flowlabel *fl;
198 while ((fl = rcu_dereference_protected(*flp,
200 if (net_eq(fl->fl_net, net) &&
201 atomic_read(&fl->users) == 0) {
202 *flp = fl->next;
203 fl_free(fl);
207 flp = &fl->next;
214 struct ip6_flowlabel *fl, __be32 label)
218 fl->label = label & IPV6_FLOWLABEL_MASK;
224 fl->label = htonl(get_random_u32())&IPV6_FLOWLABEL_MASK;
225 if (fl->label) {
226 lfl = __fl_lookup(net, fl->label);
240 lfl = __fl_lookup(net, fl->label);
249 fl->lastuse = jiffies;
250 fl->next = fl_ht[FL_HASH(fl->label)];
251 rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
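
fl_intern() publishes a new entry: a caller-supplied label is masked down to the 20-bit flow-label field, while a label of 0 makes the kernel loop over random candidates, rejecting collisions via __fl_lookup(), before linking the entry into its bucket with rcu_assign_pointer(). The label is kept in network byte order; a small standalone illustration of the masking (IPV6_FLOWLABEL_MASK equals htonl(0x000FFFFF); the random value below is just an example):

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
        uint32_t mask  = htonl(0x000FFFFF);   /* IPV6_FLOWLABEL_MASK */
        uint32_t raw   = 0xdeadbeef;          /* stand-in for get_random_u32() */
        uint32_t label = htonl(raw) & mask;   /* mirrors the expression in fl_intern() */

        /* only the low 20 bits survive: 0xdeadbeef -> 0xdbeef */
        printf("label = 0x%05x\n", (unsigned)ntohl(label));
        return 0;
    }
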
271 struct ip6_flowlabel *fl = sfl->fl;
273 if (fl->label == label && atomic_inc_not_zero(&fl->users)) {
274 fl->lastuse = jiffies;
276 return fl;
298 fl_release(sfl->fl);
316 struct ip6_flowlabel *fl,
319 struct ipv6_txoptions *fl_opt = fl->opt;
353 static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
363 fl->lastuse = jiffies;
364 if (time_before(fl->linger, linger))
365 fl->linger = linger;
366 if (time_before(expires, fl->linger))
367 expires = fl->linger;
368 if (time_before(fl->expires, fl->lastuse + expires))
369 fl->expires = fl->lastuse + expires;
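
fl6_renew() only ever extends state: linger can grow but not shrink, the requested expiry is raised to at least the linger time, and fl->expires never moves backwards. Both fields are held in jiffies, as the / HZ conversions further down in the listing show. A worked example, assuming the requested values have already been scaled to jiffies:

    /*
     * before:   fl->linger = 60*HZ
     * request:  linger = 30*HZ, expires = 10*HZ
     *
     * time_before(fl->linger, linger)         -> false: linger stays 60*HZ
     * time_before(expires, fl->linger)        -> true:  expires is raised to 60*HZ
     * time_before(fl->expires, now + expires) -> fl->expires ends up >= jiffies + 60*HZ
     */
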
379 struct ip6_flowlabel *fl = NULL;
390 fl = kzalloc(sizeof(*fl), GFP_KERNEL);
391 if (!fl)
400 fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
401 if (!fl->opt)
404 memset(fl->opt, 0, sizeof(*fl->opt));
405 fl->opt->tot_len = sizeof(*fl->opt) + olen;
407 if (copy_from_sockptr_offset(fl->opt + 1, optval,
412 msg.msg_control = (void *)(fl->opt+1);
415 ipc6.opt = fl->opt;
420 if (fl->opt->opt_flen)
422 if (fl->opt->opt_nflen == 0) {
423 kfree(fl->opt);
424 fl->opt = NULL;
428 fl->fl_net = net;
429 fl->expires = jiffies;
430 err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
433 fl->share = freq->flr_share;
440 fl->dst = freq->flr_dst;
441 atomic_set(&fl->users, 1);
442 switch (fl->share) {
447 fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
450 fl->owner.uid = current_euid();
456 if (fl_shared_exclusive(fl) || fl->opt) {
460 return fl;
463 if (fl) {
464 kfree(fl->opt);
465 kfree(fl);
496 struct ip6_flowlabel *fl)
499 sfl->fl = fl;
524 if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) {
526 freq->flr_label = sfl->fl->label;
527 freq->flr_dst = sfl->fl->dst;
528 freq->flr_share = sfl->fl->share;
529 freq->flr_expires = (sfl->fl->expires - jiffies) / HZ;
530 freq->flr_linger = sfl->fl->linger / HZ;
565 if (sfl->fl->label == freq->flr_label)
575 fl_release(sfl->fl);
589 if (sfl->fl->label == freq->flr_label) {
590 err = fl6_renew(sfl->fl, freq->flr_linger,
600 struct ip6_flowlabel *fl = fl_lookup(net, freq->flr_label);
602 if (fl) {
603 err = fl6_renew(fl, freq->flr_linger,
605 fl_release(fl);
616 struct ip6_flowlabel *fl, *fl1 = NULL;
639 fl = fl_create(net, sk, freq, optval, optlen, &err);
640 if (!fl)
649 if (sfl->fl->label == freq->flr_label) {
654 fl1 = sfl->fl;
671 fl1->share != fl->share ||
673 (fl1->owner.pid != fl->owner.pid)) ||
675 !uid_eq(fl1->owner.uid, fl->owner.uid)))
681 if (fl->linger > fl1->linger)
682 fl1->linger = fl->linger;
683 if ((long)(fl->expires - fl1->expires) > 0)
684 fl1->expires = fl->expires;
686 fl_free(fl);
706 fl1 = fl_intern(net, fl, freq->flr_label);
713 if (copy_to_sockptr_offset(optval, offset, &fl->label,
714 sizeof(fl->label))) {
719 fl_link(np, sfl1, fl);
722 fl_free(fl);
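
fl_create() and the ipv6_flowlabel_opt() excerpts above are the kernel side of the IPV6_FLOWLABEL_MGR socket option. A hedged userspace sketch of leasing a label through it follows; the struct and constants come from the UAPI header <linux/in6.h>, and depending on the libc you may have to reconcile that header with <netinet/in.h> or declare struct in6_flowlabel_req yourself:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <arpa/inet.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/in6.h>   /* struct in6_flowlabel_req, IPV6_FL_*, IPV6_FLOWLABEL_MGR */

    int main(void)
    {
        struct in6_flowlabel_req frq;
        int fd = socket(AF_INET6, SOCK_DGRAM, 0);

        memset(&frq, 0, sizeof(frq));
        frq.flr_action  = IPV6_FL_A_GET;      /* lease a label */
        frq.flr_flags   = IPV6_FL_F_CREATE;   /* create it if it does not exist yet */
        frq.flr_share   = IPV6_FL_S_EXCL;     /* do not share it with other owners */
        frq.flr_label   = 0;                  /* 0: let fl_intern() pick a random label */
        frq.flr_linger  = 6;                  /* seconds the label lingers after release */
        frq.flr_expires = 60;                 /* seconds until it needs renewing */
        inet_pton(AF_INET6, "2001:db8::1", &frq.flr_dst);

        if (setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR, &frq, sizeof(frq)) < 0)
            perror("IPV6_FLOWLABEL_MGR");
        else
            /* the kernel copies the chosen label back into flr_label */
            printf("leased flow label 0x%05x\n", (unsigned)ntohl(frq.flr_label));

        close(fd);
        return 0;
    }

To actually stamp outgoing packets with the leased label, the application would typically enable IPV6_FLOWINFO_SEND and place the label in sin6_flowinfo of the destination address; that per-socket lookup is what the __fl6_sock_lookup() excerpt above serves.
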
760 struct ip6_flowlabel *fl = NULL;
765 for_each_fl_rcu(state->bucket, fl) {
766 if (net_eq(fl->fl_net, net))
770 fl = NULL;
772 return fl;
775 static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
780 for_each_fl_continue_rcu(fl) {
781 if (net_eq(fl->fl_net, net))
787 for_each_fl_rcu(state->bucket, fl) {
788 if (net_eq(fl->fl_net, net))
793 fl = NULL;
796 return fl;
801 struct ip6_flowlabel *fl = ip6fl_get_first(seq);
802 if (fl)
803 while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
805 return pos ? NULL : fl;
821 struct ip6_flowlabel *fl;
824 fl = ip6fl_get_first(seq);
826 fl = ip6fl_get_next(seq, v);
828 return fl;
843 struct ip6_flowlabel *fl = v;
846 (unsigned int)ntohl(fl->label),
847 fl->share,
848 ((fl->share == IPV6_FL_S_PROCESS) ?
849 pid_nr_ns(fl->owner.pid, state->pid_ns) :
850 ((fl->share == IPV6_FL_S_USER) ?
851 from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
853 atomic_read(&fl->users),
854 fl->linger/HZ,
855 (long)(fl->expires - jiffies)/HZ,
856 &fl->dst,
857 fl->opt ? fl->opt->opt_nflen : 0);
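
Each argument of this final seq_printf() becomes one column of an entry in /proc/net/ip6_flowlabel: the 20-bit label in hex, the share mode, the owner (a pid for process-shared labels, a uid for user-shared ones), the current user count, the linger time and remaining lifetime in seconds, the destination address, and the length of any attached non-fragmentable extension-header options (opt_nflen).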