/* Linux multicast routing support
 * Common logic shared by IPv4 [ipmr] and IPv6 [ip6mr] implementations
 */

#include <linux/rhashtable.h>
#include <linux/mroute_base.h>

/* Sets everything common except 'dev', since that is done under locking */
void vif_device_init(struct vif_device *v,
		     struct net_device *dev,
		     unsigned long rate_limit,
		     unsigned char threshold,
		     unsigned short flags,
		     unsigned short get_iflink_mask)
{
	v->dev = NULL;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->rate_limit = rate_limit;
	v->flags = flags;
	v->threshold = threshold;
	if (v->flags & get_iflink_mask)
		v->link = dev_get_iflink(dev);
	else
		v->link = dev->ifindex;
}
EXPORT_SYMBOL(vif_device_init);
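
/* Usage sketch (illustrative; 'mrt_lock' stands in for the family's
 * VIF table lock): callers fill in the common fields first and only
 * publish 'dev' afterwards, under that lock:
 *
 *	vif_device_init(v, dev, rate_limit, threshold, flags, mask);
 *	...
 *	write_lock_bh(&mrt_lock);
 *	v->dev = dev;
 *	write_unlock_bh(&mrt_lock);
 *
 * 'mask' names the flag bits for which the VIF reports
 * dev_get_iflink(dev) rather than dev->ifindex as its link.
 */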

struct mr_table *
mr_table_alloc(struct net *net, u32 id,
	       struct mr_table_ops *ops,
	       void (*expire_func)(struct timer_list *t),
	       void (*table_set)(struct mr_table *mrt,
				 struct net *net))
{
	struct mr_table *mrt;
	int err;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return ERR_PTR(-ENOMEM);
	mrt->id = id;
	write_pnet(&mrt->net, net);

	mrt->ops = *ops;
	err = rhltable_init(&mrt->mfc_hash, mrt->ops.rht_params);
	if (err) {
		kfree(mrt);
		return ERR_PTR(err);
	}
	INIT_LIST_HEAD(&mrt->mfc_cache_list);
	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	timer_setup(&mrt->ipmr_expire_timer, expire_func, 0);

	mrt->mroute_reg_vif_num = -1;
	table_set(mrt, net);
	return mrt;
}
EXPORT_SYMBOL(mr_table_alloc);
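
/* Usage sketch (illustrative; all 'my_*' names are hypothetical): a
 * family allocates its table with its own ops, expire timer callback,
 * and a table_set() hook that links the table into its per-netns list:
 *
 *	static void my_table_set(struct mr_table *mrt, struct net *net)
 *	{
 *		list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
 *	}
 *
 *	mrt = mr_table_alloc(net, RT_TABLE_DEFAULT, &my_mr_table_ops,
 *			     my_expire_process, my_table_set);
 *	if (IS_ERR(mrt))
 *		return PTR_ERR(mrt);
 */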

void *mr_mfc_find_parent(struct mr_table *mrt, void *hasharg, int parent)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c;

	list = rhltable_lookup(&mrt->mfc_hash, hasharg, *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (parent == -1 || parent == c->mfc_parent)
			return c;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_find_parent);
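
/* Lookup sketch (illustrative): the rhltable walk above is an RCU
 * lookup, so callers are expected to be in an RCU read-side section:
 *
 *	rcu_read_lock();
 *	c = mr_mfc_find_parent(mrt, &arg, -1);	// -1 matches any parent
 *	...
 *	rcu_read_unlock();
 *
 * 'hasharg' is the family-specific hash key ('arg' here is assumed to
 * be e.g. an (origin, group) pair on the IPv4 side).
 */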

void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c;

	list = rhltable_lookup(&mrt->mfc_hash, mrt->ops.cmparg_any,
			       *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_find_any_parent);

void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c, *proxy;

	list = rhltable_lookup(&mrt->mfc_hash, hasharg, *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode) {
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

		/* It's ok if the vifi is part of the static tree */
		proxy = mr_mfc_find_any_parent(mrt, c->mfc_parent);
		if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
			return c;
	}

	return mr_mfc_find_any_parent(mrt, vifi);
}
EXPORT_SYMBOL(mr_mfc_find_any);
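
/* Note on the ttls[] checks above: mfc_un.res.ttls[] is assumed to be
 * initialized to 255 by the family code, so ttls[vifi] < 255 means
 * 'vifi' is in the entry's output set. mr_mfc_find_any() thus returns
 * an entry that forwards out of 'vifi' either directly or via a (*,*)
 * proxy entry.
 */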

#ifdef CONFIG_PROC_FS
void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
EXPORT_SYMBOL(mr_vif_seq_idx);

void *mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
EXPORT_SYMBOL(mr_vif_seq_next);
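
/* Wiring sketch (illustrative; the 'my_*' callbacks are hypothetical,
 * family-specific code): the two helpers above slot into a
 * seq_operations table roughly like
 *
 *	static const struct seq_operations my_vif_seq_ops = {
 *		.start = my_vif_seq_start,	// calls mr_vif_seq_idx()
 *		.next  = mr_vif_seq_next,
 *		.stop  = my_vif_seq_stop,
 *		.show  = my_vif_seq_show,
 *	};
 */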

void *mr_mfc_seq_idx(struct net *net,
		     struct mr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mr_mfc *mfc;

	rcu_read_lock();
	it->cache = &mrt->mfc_cache_list;
	list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
		if (pos-- == 0)
			return mfc;
	rcu_read_unlock();

	spin_lock_bh(it->lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(it->lock);

	it->cache = NULL;
	return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_idx);

void *mr_mfc_seq_next(struct seq_file *seq, void *v,
		      loff_t *pos)
{
	struct mr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;
	struct mr_mfc *c = v;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return mr_mfc_seq_idx(net, seq->private, 0);

	if (c->list.next != it->cache)
		return list_entry(c->list.next, struct mr_mfc, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;

	spin_lock_bh(it->lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mr_mfc, list);

end_of_list:
	spin_unlock_bh(it->lock);
	it->cache = NULL;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_next);
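
/* Locking note for the two helpers above: when they return a resolved
 * entry the RCU read lock is held, and when they return an unresolved
 * entry it->lock is held; it->cache records which list is active. A
 * family's .stop callback is expected to drop whichever is still held,
 * e.g. (illustrative):
 *
 *	static void my_mfc_seq_stop(struct seq_file *seq, void *v)
 *	{
 *		struct mr_mfc_iter *it = seq->private;
 *
 *		if (it->cache == &it->mrt->mfc_unres_queue)
 *			spin_unlock_bh(it->lock);
 *		else if (it->cache == &it->mrt->mfc_cache_list)
 *			rcu_read_unlock();
 *	}
 */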
#endif

int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
		   struct mr_mfc *c, struct rtmsg *rtm)
{
	struct rta_mfc_stats mfcs;
	struct nlattr *mp_attr;
	struct rtnexthop *nhp;
	unsigned long lastuse;
	int ct;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS) {
		rtm->rtm_flags |= RTNH_F_UNRESOLVED;
		return -ENOENT;
	}

	if (VIF_EXISTS(mrt, c->mfc_parent) &&
	    nla_put_u32(skb, RTA_IIF,
			mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
		return -EMSGSIZE;

	if (c->mfc_flags & MFC_OFFLOAD)
		rtm->rtm_flags |= RTNH_F_OFFLOAD;

	mp_attr = nla_nest_start_noflag(skb, RTA_MULTIPATH);
	if (!mp_attr)
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			struct vif_device *vif;

			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
			if (!nhp) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			vif = &mrt->vif_table[ct];
			nhp->rtnh_ifindex = vif->dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
			      RTA_PAD))
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}
EXPORT_SYMBOL(mr_fill_mroute);
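
/* Attribute layout (informational, derived from the code above): on
 * success the skb carries, inside the caller's route message,
 *
 *	RTA_IIF        input ifindex (if the parent VIF exists)
 *	RTA_MULTIPATH  one struct rtnexthop per output VIF, with
 *	               rtnh_hops carrying the TTL threshold
 *	RTA_MFC_STATS  packet/byte/wrong-interface counters
 *	RTA_EXPIRES    time since the route was last used, in clock_t
 *
 * and rtm_type is set to RTN_MULTICAST.
 */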

static bool mr_mfc_uses_dev(const struct mr_table *mrt,
			    const struct mr_mfc *c,
			    const struct net_device *dev)
{
	int ct;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			const struct vif_device *vif;

			vif = &mrt->vif_table[ct];
			if (vif->dev == dev)
				return true;
		}
	}
	return false;
}

int mr_table_dump(struct mr_table *mrt, struct sk_buff *skb,
		  struct netlink_callback *cb,
		  int (*fill)(struct mr_table *mrt, struct sk_buff *skb,
			      u32 portid, u32 seq, struct mr_mfc *c,
			      int cmd, int flags),
		  spinlock_t *lock, struct fib_dump_filter *filter)
{
	unsigned int e = 0, s_e = cb->args[1];
	unsigned int flags = NLM_F_MULTI;
	struct mr_mfc *mfc;
	int err;

	if (filter->filter_set)
		flags |= NLM_F_DUMP_FILTERED;

	list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
		if (e < s_e)
			goto next_entry;
		if (filter->dev &&
		    !mr_mfc_uses_dev(mrt, mfc, filter->dev))
			goto next_entry;

		err = fill(mrt, skb, NETLINK_CB(cb->skb).portid,
			   cb->nlh->nlmsg_seq, mfc, RTM_NEWROUTE, flags);
		if (err < 0)
			goto out;
next_entry:
		e++;
	}

	spin_lock_bh(lock);
	list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
		if (e < s_e)
			goto next_entry2;
		if (filter->dev &&
		    !mr_mfc_uses_dev(mrt, mfc, filter->dev))
			goto next_entry2;

		err = fill(mrt, skb, NETLINK_CB(cb->skb).portid,
			   cb->nlh->nlmsg_seq, mfc, RTM_NEWROUTE, flags);
		if (err < 0) {
			spin_unlock_bh(lock);
			goto out;
		}
next_entry2:
		e++;
	}
	spin_unlock_bh(lock);
	err = 0;
out:
	cb->args[1] = e;
	return err;
}
EXPORT_SYMBOL(mr_table_dump);
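
/* Resumption note (informational): cb->args[1] records how many entries
 * have been dumped, so when the dump stops on a full skb the next call
 * skips the first s_e entries and resumes where it left off. The
 * resolved-list walk uses list_for_each_entry_rcu(), so the caller is
 * assumed to hold rcu_read_lock(), as mr_rtm_dumproute() below does.
 */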

int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
		     struct mr_table *(*iter)(struct net *net,
					      struct mr_table *mrt),
		     int (*fill)(struct mr_table *mrt,
				 struct sk_buff *skb,
				 u32 portid, u32 seq, struct mr_mfc *c,
				 int cmd, int flags),
		     spinlock_t *lock, struct fib_dump_filter *filter)
{
	unsigned int t = 0, s_t = cb->args[0];
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	int err;

	/* multicast does not track protocol or have route type other
	 * than RTN_MULTICAST
	 */
	if (filter->filter_set) {
		if (filter->protocol || filter->flags ||
		    (filter->rt_type && filter->rt_type != RTN_MULTICAST))
			return skb->len;
	}

	rcu_read_lock();
	for (mrt = iter(net, NULL); mrt; mrt = iter(net, mrt)) {
		if (t < s_t)
			goto next_table;

		err = mr_table_dump(mrt, skb, cb, fill, lock, filter);
		if (err < 0)
			break;
		cb->args[1] = 0;
next_table:
		t++;
	}
	rcu_read_unlock();

	cb->args[0] = t;

	return skb->len;
}
EXPORT_SYMBOL(mr_rtm_dumproute);
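
/* Usage sketch (illustrative; 'my_*' names are hypothetical): a
 * family's RTM_GETROUTE dump handler typically delegates here with its
 * table iterator and fill routine:
 *
 *	static int my_rtm_dumproute(struct sk_buff *skb,
 *				    struct netlink_callback *cb)
 *	{
 *		struct fib_dump_filter filter = {};
 *		...
 *		return mr_rtm_dumproute(skb, cb, my_iter_table,
 *					my_fill_mroute_msg,
 *					&my_unres_lock, &filter);
 *	}
 */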

int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
	    int (*rules_dump)(struct net *net,
			      struct notifier_block *nb,
			      struct netlink_ext_ack *extack),
	    struct mr_table *(*mr_iter)(struct net *net,
					struct mr_table *mrt),
	    rwlock_t *mrt_lock,
	    struct netlink_ext_ack *extack)
{
	struct mr_table *mrt;
	int err;

	err = rules_dump(net, nb, extack);
	if (err)
		return err;

	for (mrt = mr_iter(net, NULL); mrt; mrt = mr_iter(net, mrt)) {
		struct vif_device *v = &mrt->vif_table[0];
		struct mr_mfc *mfc;
		int vifi;

		/* Notify on table VIF entries */
		read_lock(mrt_lock);
		for (vifi = 0; vifi < mrt->maxvif; vifi++, v++) {
			if (!v->dev)
				continue;

			err = mr_call_vif_notifier(nb, family,
						   FIB_EVENT_VIF_ADD,
						   v, vifi, mrt->id, extack);
			if (err)
				break;
		}
		read_unlock(mrt_lock);

		if (err)
			return err;

		/* Notify on table MFC entries */
		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
			err = mr_call_mfc_notifier(nb, family,
						   FIB_EVENT_ENTRY_ADD,
						   mfc, mrt->id, extack);
			if (err)
				return err;
		}
	}

	return 0;
}
EXPORT_SYMBOL(mr_dump);
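
/* Registration sketch (illustrative; 'my_*' names are hypothetical):
 * mr_dump() replays FIB_EVENT_VIF_ADD / FIB_EVENT_ENTRY_ADD for the
 * existing VIFs and cache entries so that a newly registered FIB
 * notifier starts out with a coherent view of multicast state:
 *
 *	static int my_mr_dump(struct net *net, struct notifier_block *nb,
 *			      struct netlink_ext_ack *extack)
 *	{
 *		return mr_dump(net, nb, RTNL_FAMILY_IPMR, my_rules_dump,
 *			       my_mr_iter, &my_mrt_lock, extack);
 *	}
 */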