/*
 * xt_quota2 - enhanced xt_quota that can count upwards and in packets
 * as a minimal accounting match.
 * by Jan Engelhardt <jengelh@medozas.de>, 2008
 *
 * Originally based on xt_quota.c:
 * 	netfilter module to enforce network quotas
 * 	Sam Johnston <samj@samj.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License; either
 * version 2 of the License, as published by the Free Software Foundation.
 */
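/*
 * Rough usage sketch with the companion iptables extension (option names
 * assume the standard libxt_quota2 userspace module and are illustrative):
 *
 *	iptables -A OUTPUT -m quota2 --name web --quota 5000000 -j ACCEPT
 *	iptables -A INPUT  -m quota2 --name rxcount --grow -j RETURN
 *
 * Named counters appear under /proc/net/xt_quota/ (created in
 * quota_mt2_init() below) and can be read or rewritten from userspace.
 */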
#include <linux/list.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <net/netlink.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_quota2.h>

#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
/* For compatibility, these definitions are copied from the
 * deprecated header file <linux/netfilter_ipv4/ipt_ULOG.h> */
#define ULOG_MAC_LEN	80
#define ULOG_PREFIX_LEN	32

/* Format of the ULOG packets passed through netlink */
typedef struct ulog_packet_msg {
	unsigned long mark;
	long timestamp_sec;
	long timestamp_usec;
	unsigned int hook;
	char indev_name[IFNAMSIZ];
	char outdev_name[IFNAMSIZ];
	size_t data_len;
	char prefix[ULOG_PREFIX_LEN];
	unsigned char mac_len;
	unsigned char mac[ULOG_MAC_LEN];
	unsigned char payload[0];
} ulog_packet_msg_t;
#endif

/**
 * struct xt_quota_counter - quota state, possibly shared by several rules
 * @quota:	remaining byte/packet budget, or the running total in grow mode
 * @lock:	lock to protect quota writers from each other
 * @list:	entry in counter_list (named counters only)
 * @ref:	number of rules referencing this counter (named counters only)
 * @name:	counter name as given in the rule
 * @procfs_entry: /proc/net/xt_quota/<name> entry (named counters only)
 */
struct xt_quota_counter {
	u_int64_t quota;
	spinlock_t lock;
	struct list_head list;
	atomic_t ref;
	char name[sizeof(((struct xt_quota_mtinfo2 *)NULL)->name)];
	struct proc_dir_entry *procfs_entry;
};

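/*
 * Counters come in two flavours: anonymous ones (the rule has an empty
 * name), which are private to a single rule and only use @quota and @lock,
 * and named ones, which are shared between rules, refcounted via @ref,
 * linked into counter_list below and exported through @procfs_entry.
 */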
#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
/* Harald's favorite number +1 :D From ipt_ULOG.c */
static unsigned int qlog_nl_event = 112;
module_param_named(event_num, qlog_nl_event, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(event_num,
		 "Event number for NETLINK_NFLOG message. 0 disables log. "
		 "111 is what ipt_ULOG uses.");
static struct sock *nflognl;
#endif

static LIST_HEAD(counter_list);
static DEFINE_SPINLOCK(counter_list_lock);

static struct proc_dir_entry *proc_xt_quota;
static unsigned int quota_list_perms = S_IRUGO | S_IWUSR;
static kuid_t quota_list_uid = KUIDT_INIT(0);
static kgid_t quota_list_gid = KGIDT_INIT(0);
module_param_named(perms, quota_list_perms, uint, S_IRUGO | S_IWUSR);

#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
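/*
 * quota2_log - broadcast a ULOG-format notification on NETLINK_NFLOG
 * (multicast group 1) when a named quota runs out.  Only metadata is sent:
 * hook number, in/out device names and the counter name as prefix;
 * data_len stays 0, so no packet payload is copied.
 */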
static void quota2_log(unsigned int hooknum,
		       const struct sk_buff *skb,
		       const struct net_device *in,
		       const struct net_device *out,
		       const char *prefix)
{
	ulog_packet_msg_t *pm;
	struct sk_buff *log_skb;
	size_t size;
	struct nlmsghdr *nlh;

	if (!qlog_nl_event)
		return;

	size = NLMSG_SPACE(sizeof(*pm));
	size = max(size, (size_t)NLMSG_GOODSIZE);
	log_skb = alloc_skb(size, GFP_ATOMIC);
	if (!log_skb) {
		pr_err("xt_quota2: cannot alloc skb for logging\n");
		return;
	}

	nlh = nlmsg_put(log_skb, /*pid*/0, /*seq*/0, qlog_nl_event,
			sizeof(*pm), 0);
	if (!nlh) {
		pr_err("xt_quota2: nlmsg_put failed\n");
		kfree_skb(log_skb);
		return;
	}
	pm = nlmsg_data(nlh);
	/* Zero the whole message so no uninitialized slab memory (mark,
	 * timestamps, MAC fields) is leaked to netlink listeners. */
	memset(pm, 0, sizeof(*pm));
	if (skb->tstamp.tv64 == 0)
		__net_timestamp((struct sk_buff *)skb);
	pm->data_len = 0;
	pm->hook = hooknum;
	if (prefix != NULL)
		strlcpy(pm->prefix, prefix, sizeof(pm->prefix));
	else
		*(pm->prefix) = '\0';
	if (in)
		strlcpy(pm->indev_name, in->name, sizeof(pm->indev_name));
	else
		pm->indev_name[0] = '\0';

	if (out)
		strlcpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
	else
		pm->outdev_name[0] = '\0';

	NETLINK_CB(log_skb).dst_group = 1;
	pr_debug("throwing 1 packet to netlink group 1\n");
	netlink_broadcast(nflognl, log_skb, 0, 1, GFP_ATOMIC);
}
#else
static void quota2_log(unsigned int hooknum,
		       const struct sk_buff *skb,
		       const struct net_device *in,
		       const struct net_device *out,
		       const char *prefix)
{
}
#endif  /* if+else CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG */

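/*
 * /proc/net/xt_quota/<name> handlers: reading returns the counter's current
 * value (remaining bytes/packets, or the running total in grow mode);
 * writing replaces it, e.g. to reset or top up a quota from userspace.
 * A sketch, assuming a counter named "web" already exists:
 *
 *	cat /proc/net/xt_quota/web
 *	echo 5000000 > /proc/net/xt_quota/web
 */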
static ssize_t quota_proc_read(struct file *file, char __user *buf,
			       size_t size, loff_t *ppos)
{
	struct xt_quota_counter *e = PDE_DATA(file_inode(file));
	char tmp[24];
	size_t tmp_size;

	spin_lock_bh(&e->lock);
	tmp_size = scnprintf(tmp, sizeof(tmp), "%llu\n", e->quota);
	spin_unlock_bh(&e->lock);
	return simple_read_from_buffer(buf, size, ppos, tmp, tmp_size);
}

static ssize_t quota_proc_write(struct file *file, const char __user *input,
				size_t size, loff_t *ppos)
{
	struct xt_quota_counter *e = PDE_DATA(file_inode(file));
	char buf[sizeof("18446744073709551615")];

	/* Leave room for the terminator so a short write is never parsed
	 * past the bytes actually copied from userspace. */
	if (size > sizeof(buf) - 1)
		size = sizeof(buf) - 1;
	if (copy_from_user(buf, input, size) != 0)
		return -EFAULT;
	buf[size] = '\0';

	spin_lock_bh(&e->lock);
	e->quota = simple_strtoull(buf, NULL, 0);
	spin_unlock_bh(&e->lock);
	return size;
}

static const struct file_operations q2_counter_fops = {
	.read	= quota_proc_read,
	.write	= quota_proc_write,
	.llseek	= default_llseek,
};

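/*
 * q2_new_counter - allocate a counter initialized from the match info.
 * Anonymous counters are allocated truncated at offsetof(list), since they
 * never go on counter_list, are not refcounted and get no procfs entry.
 */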
static struct xt_quota_counter *
q2_new_counter(const struct xt_quota_mtinfo2 *q, bool anon)
{
	struct xt_quota_counter *e;
	unsigned int size;

	/* Do not need all the procfs things for anonymous counters. */
	size = anon ? offsetof(typeof(*e), list) : sizeof(*e);
	e = kmalloc(size, GFP_KERNEL);
	if (e == NULL)
		return NULL;

	e->quota = q->quota;
	spin_lock_init(&e->lock);
	if (!anon) {
		INIT_LIST_HEAD(&e->list);
		atomic_set(&e->ref, 1);
		strlcpy(e->name, q->name, sizeof(e->name));
	}
	return e;
}

/**
 * q2_get_counter - get a reference to the named counter, or create a new one
 * @q:	quota match info carrying the counter name and initial quota
 */
static struct xt_quota_counter *
q2_get_counter(const struct xt_quota_mtinfo2 *q)
{
	struct proc_dir_entry *p;
	struct xt_quota_counter *e = NULL;
	struct xt_quota_counter *new_e;

	if (*q->name == '\0')
		return q2_new_counter(q, true);

	/* No need to hold a lock while getting a new counter */
	new_e = q2_new_counter(q, false);
	if (new_e == NULL)
		goto out;

	spin_lock_bh(&counter_list_lock);
	list_for_each_entry(e, &counter_list, list)
		if (strcmp(e->name, q->name) == 0) {
			atomic_inc(&e->ref);
			spin_unlock_bh(&counter_list_lock);
			kfree(new_e);
			pr_debug("xt_quota2: old counter name=%s", e->name);
			return e;
		}
	e = new_e;
	pr_debug("xt_quota2: new_counter name=%s", e->name);
	list_add_tail(&e->list, &counter_list);
	/* An entry with a refcount of 1 cannot be destroyed yet: this
	 * function has not returned the new entry, so iptables holds no
	 * reference it could use to destroy it.  Another rule could only
	 * destroy it after re-invoking this function and taking a fresh
	 * reference on the same named quota.  Nobody will access
	 * e->procfs_entry either, so it is safe to drop the lock here. */
	spin_unlock_bh(&counter_list_lock);

	/* proc_create_data() is not spin_lock happy */
	p = e->procfs_entry = proc_create_data(e->name, quota_list_perms,
	                      proc_xt_quota, &q2_counter_fops, e);

	if (IS_ERR_OR_NULL(p)) {
		spin_lock_bh(&counter_list_lock);
		list_del(&e->list);
		spin_unlock_bh(&counter_list_lock);
		goto out;
	}
	proc_set_user(p, quota_list_uid, quota_list_gid);
	return e;

 out:
	kfree(e);
	return NULL;
}

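/*
 * quota_mt2_check - checkentry hook: reject unknown flags and names that
 * could escape the procfs directory ('/' anywhere or a leading '.'),
 * then attach a (possibly shared) counter to q->master.
 */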
static int quota_mt2_check(const struct xt_mtchk_param *par)
{
	struct xt_quota_mtinfo2 *q = par->matchinfo;

	pr_debug("xt_quota2: check() flags=0x%04x", q->flags);

	if (q->flags & ~XT_QUOTA_MASK)
		return -EINVAL;

	q->name[sizeof(q->name)-1] = '\0';
	if (*q->name == '.' || strchr(q->name, '/') != NULL) {
		printk(KERN_ERR "xt_quota.3: illegal name\n");
		return -EINVAL;
	}

	q->master = q2_get_counter(q);
	if (q->master == NULL) {
		printk(KERN_ERR "xt_quota.3: memory alloc failure\n");
		return -ENOMEM;
	}

	return 0;
}

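/*
 * quota_mt2_destroy - drop the rule's reference.  When the last reference
 * to a named counter goes away, unlink it, remove its procfs entry and
 * free it; anonymous counters are simply freed.
 */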
static void quota_mt2_destroy(const struct xt_mtdtor_param *par)
{
	struct xt_quota_mtinfo2 *q = par->matchinfo;
	struct xt_quota_counter *e = q->master;

	if (*q->name == '\0') {
		kfree(e);
		return;
	}

	spin_lock_bh(&counter_list_lock);
	if (!atomic_dec_and_test(&e->ref)) {
		spin_unlock_bh(&counter_list_lock);
		return;
	}

	list_del(&e->list);
	remove_proc_entry(e->name, proc_xt_quota);
	spin_unlock_bh(&counter_list_lock);
	kfree(e);
}

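/*
 * quota_mt2 - the match itself.  In grow mode the counter is incremented
 * (by one packet or by skb->len) and the match always returns true.  In
 * countdown mode the match returns true while the budget covers the packet
 * and decrements it; once the budget is exceeded, the transition is logged
 * once and the counter is pinned at 0, after which the non-inverted match
 * fails for every packet.  XT_QUOTA_INVERT flips the countdown result and
 * XT_QUOTA_NO_CHANGE makes the match read-only.
 */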
static bool
quota_mt2(const struct sk_buff *skb, struct xt_action_param *par)
{
	struct xt_quota_mtinfo2 *q = (void *)par->matchinfo;
	struct xt_quota_counter *e = q->master;
	bool ret = q->flags & XT_QUOTA_INVERT;

	spin_lock_bh(&e->lock);
	if (q->flags & XT_QUOTA_GROW) {
		/*
		 * While no_change is pointless in "grow" mode, we will
		 * implement it here simply to have a consistent behavior.
		 */
		if (!(q->flags & XT_QUOTA_NO_CHANGE)) {
			e->quota += (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
		}
		ret = true;
	} else {
		if (e->quota >= skb->len) {
			if (!(q->flags & XT_QUOTA_NO_CHANGE))
				e->quota -= (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
			ret = !ret;
		} else {
			/* We are transitioning, log that fact. */
			if (e->quota) {
				quota2_log(par->hooknum,
					   skb,
					   par->in,
					   par->out,
					   q->name);
			}
			/* we do not allow even small packets from now on */
			e->quota = 0;
		}
	}
	spin_unlock_bh(&e->lock);
	return ret;
}

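/* Register the match for both IPv4 and IPv6; userspace must request
 * revision 3 of "quota2" to match the xt_quota_mtinfo2 layout. */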
static struct xt_match quota_mt2_reg[] __read_mostly = {
	{
		.name       = "quota2",
		.revision   = 3,
		.family     = NFPROTO_IPV4,
		.checkentry = quota_mt2_check,
		.match      = quota_mt2,
		.destroy    = quota_mt2_destroy,
		.matchsize  = sizeof(struct xt_quota_mtinfo2),
		.me         = THIS_MODULE,
	},
	{
		.name       = "quota2",
		.revision   = 3,
		.family     = NFPROTO_IPV6,
		.checkentry = quota_mt2_check,
		.match      = quota_mt2,
		.destroy    = quota_mt2_destroy,
		.matchsize  = sizeof(struct xt_quota_mtinfo2),
		.me         = THIS_MODULE,
	},
};

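/*
 * Module init/exit: optionally create the NETLINK_NFLOG kernel socket used
 * by quota2_log(), create the /proc/net/xt_quota directory that holds the
 * per-counter files, and register the match.  Exit tears all of this down.
 */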
static int __init quota_mt2_init(void)
{
	int ret;
	pr_debug("xt_quota2: init()");

#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
	nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, NULL);
	if (!nflognl)
		return -ENOMEM;
#endif

	proc_xt_quota = proc_mkdir("xt_quota", init_net.proc_net);
	if (proc_xt_quota == NULL) {
		ret = -EACCES;
		goto out_netlink;
	}

	ret = xt_register_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
	if (ret < 0)
		goto out_proc;
	pr_debug("xt_quota2: init() %d", ret);
	return ret;

out_proc:
	remove_proc_entry("xt_quota", init_net.proc_net);
out_netlink:
#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
	netlink_kernel_release(nflognl);
#endif
	return ret;
}

static void __exit quota_mt2_exit(void)
{
	xt_unregister_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
	remove_proc_entry("xt_quota", init_net.proc_net);
#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
	/* Release the NETLINK_NFLOG socket created in quota_mt2_init(). */
	netlink_kernel_release(nflognl);
#endif
}

module_init(quota_mt2_init);
module_exit(quota_mt2_exit);
MODULE_DESCRIPTION("Xtables: countdown quota match; up counter");
MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_quota2");
MODULE_ALIAS("ip6t_quota2");