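/*
 * The IP fragmentation functionality: reassembly of fragmented IPv4
 * datagrams (ip_defrag() and its helpers), built on top of the generic
 * inet_frag infrastructure.
 */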
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>

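/*
 * Upper bound on how many fragments from the same source may arrive between
 * two fragments of one datagram before the queue is considered stale
 * (see ip_frag_too_far() below).  Zero disables the check.
 */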
static int sysctl_ipfrag_max_dist __read_mostly = 64;

struct ipfrag_skb_cb
{
        struct inet_skb_parm h;
        int offset;
};

#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))

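/* Describes an entry in the "incomplete datagrams" queue. */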
struct ipq {
        struct inet_frag_queue q;

        u32 user;
        __be32 saddr;
        __be32 daddr;
        __be16 id;
        u8 protocol;
        int iif;
        unsigned int rid;
        struct inet_peer *peer;
};

static struct inet_frags ip4_frags;

int ip_frag_nqueues(struct net *net)
{
        return net->ipv4.frags.nqueues;
}

int ip_frag_mem(struct net *net)
{
        return atomic_read(&net->ipv4.frags.mem);
}

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                         struct net_device *dev);

struct ip4_create_arg {
        struct iphdr *iph;
        u32 user;
};

static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
        return jhash_3words((__force u32)id << 16 | prot,
                            (__force u32)saddr, (__force u32)daddr,
                            ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
}

static unsigned int ip4_hashfn(struct inet_frag_queue *q)
{
        struct ipq *ipq;

        ipq = container_of(q, struct ipq, q);
        return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
}

static int ip4_frag_match(struct inet_frag_queue *q, void *a)
{
        struct ipq *qp;
        struct ip4_create_arg *arg = a;

        qp = container_of(q, struct ipq, q);
        return (qp->id == arg->iph->id &&
                qp->saddr == arg->iph->saddr &&
                qp->daddr == arg->iph->daddr &&
                qp->protocol == arg->iph->protocol &&
                qp->user == arg->user);
}

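/* Memory tracking functions. */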
static __inline__ void frag_kfree_skb(struct netns_frags *nf,
                                      struct sk_buff *skb, int *work)
{
        if (work)
                *work -= skb->truesize;
        atomic_sub(skb->truesize, &nf->mem);
        kfree_skb(skb);
}

static void ip4_frag_init(struct inet_frag_queue *q, void *a)
{
        struct ipq *qp = container_of(q, struct ipq, q);
        struct ip4_create_arg *arg = a;

        qp->protocol = arg->iph->protocol;
        qp->id = arg->iph->id;
        qp->saddr = arg->iph->saddr;
        qp->daddr = arg->iph->daddr;
        qp->user = arg->user;
        qp->peer = sysctl_ipfrag_max_dist ?
                   inet_getpeer(arg->iph->saddr, 1) : NULL;
}

static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
{
        struct ipq *qp;

        qp = container_of(q, struct ipq, q);
        if (qp->peer)
                inet_putpeer(qp->peer);
}

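/* Destruction primitives. */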
static __inline__ void ipq_put(struct ipq *ipq)
{
        inet_frag_put(&ipq->q, &ip4_frags);
}

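/* Kill ipq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference to it.
 */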
static void ipq_kill(struct ipq *ipq)
{
        inet_frag_kill(&ipq->q, &ip4_frags);
}

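/* Memory limiting on fragments. The evictor trashes the oldest fragment
 * queues until we are back under the high threshold.
 */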
static void ip_evictor(struct net *net)
{
        int evicted;

        evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags);
        if (evicted)
                IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted);
}

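/*
 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
 */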
static void ip_expire(unsigned long arg)
{
        struct ipq *qp;
        struct net *net;

        qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
        net = container_of(qp->q.net, struct net, ipv4.frags);

        spin_lock(&qp->q.lock);

        if (qp->q.last_in & INET_FRAG_COMPLETE)
                goto out;

        ipq_kill(qp);

        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);

        if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
                struct sk_buff *head = qp->q.fragments;

                /* Send an ICMP "Fragment Reassembly Timeout" message. */
                if ((head->dev = dev_get_by_index(net, qp->iif)) != NULL) {
                        icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
                        dev_put(head->dev);
                }
        }
out:
        spin_unlock(&qp->q.lock);
        ipq_put(qp);
}

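/* Find the correct entry in the "incomplete datagrams" queue for this IP
 * datagram, and create a new queue entry if nothing is found.
 */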
static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
{
        struct inet_frag_queue *q;
        struct ip4_create_arg arg;
        unsigned int hash;

        arg.iph = iph;
        arg.user = user;

        read_lock(&ip4_frags.lock);
        hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);

        q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
        if (q == NULL)
                goto out_nomem;

        return container_of(q, struct ipq, q);

out_nomem:
        LIMIT_NETDEBUG(KERN_ERR "ip_frag_create: no memory left !\n");
        return NULL;
}

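/* Is the fragment too far ahead to be part of ipq? */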
static inline int ip_frag_too_far(struct ipq *qp)
{
        struct inet_peer *peer = qp->peer;
        unsigned int max = sysctl_ipfrag_max_dist;
        unsigned int start, end;

        int rc;

        if (!peer || !max)
                return 0;

        start = qp->rid;
        end = atomic_inc_return(&peer->rid);
        qp->rid = end;

        rc = qp->q.fragments && (end - start) > max;

        if (rc) {
                struct net *net;

                net = container_of(qp->q.net, struct net, ipv4.frags);
                IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
        }

        return rc;
}

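/* Flush all queued fragments and reset the queue so it can accept a fresh
 * train of fragments for the same (saddr, daddr, id, protocol) tuple.
 */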
static int ip_frag_reinit(struct ipq *qp)
{
        struct sk_buff *fp;

        if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
                atomic_inc(&qp->q.refcnt);
                return -ETIMEDOUT;
        }

        fp = qp->q.fragments;
        do {
                struct sk_buff *xp = fp->next;
                frag_kfree_skb(qp->q.net, fp, NULL);
                fp = xp;
        } while (fp);

        qp->q.last_in = 0;
        qp->q.len = 0;
        qp->q.meat = 0;
        qp->q.fragments = NULL;
        qp->iif = 0;

        return 0;
}

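/* Add new segment to existing queue. */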
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
        struct sk_buff *prev, *next;
        struct net_device *dev;
        int flags, offset;
        int ihl, end;
        int err = -ENOENT;

        if (qp->q.last_in & INET_FRAG_COMPLETE)
                goto err;

        if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
            unlikely(ip_frag_too_far(qp)) &&
            unlikely(err = ip_frag_reinit(qp))) {
                ipq_kill(qp);
                goto err;
        }

        offset = ntohs(ip_hdr(skb)->frag_off);
        flags = offset & ~IP_OFFSET;
        offset &= IP_OFFSET;
        offset <<= 3;		/* offset is in 8-byte chunks */
        ihl = ip_hdrlen(skb);

        /* Determine the position of this fragment. */
        end = offset + skb->len - ihl;
        err = -EINVAL;

        /* Is this the final fragment? */
        if ((flags & IP_MF) == 0) {
                /* If we already have some bits beyond end
                 * or have a different end, the segment is corrupted.
                 */
                if (end < qp->q.len ||
                    ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
                        goto err;
                qp->q.last_in |= INET_FRAG_LAST_IN;
                qp->q.len = end;
        } else {
                if (end & 7) {
                        end &= ~7;
                        if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                                skb->ip_summed = CHECKSUM_NONE;
                }
                if (end > qp->q.len) {
                        /* Some bits beyond end -> corruption. */
                        if (qp->q.last_in & INET_FRAG_LAST_IN)
                                goto err;
                        qp->q.len = end;
                }
        }
        if (end == offset)
                goto err;

        err = -ENOMEM;
        if (pskb_pull(skb, ihl) == NULL)
                goto err;

        err = pskb_trim_rcsum(skb, end - offset);
        if (err)
                goto err;

        /* Find out which fragments are in front of and behind us
         * in the chain of fragments so far, so we know where to
         * put this fragment.
         */
        prev = NULL;
        for (next = qp->q.fragments; next != NULL; next = next->next) {
                if (FRAG_CB(next)->offset >= offset)
                        break;
                prev = next;
        }

        /* We found where to put this one.  Check for overlap with the
         * preceding fragment and, if needed, align things so that any
         * overlap is eliminated.
         */
        if (prev) {
                int i = (FRAG_CB(prev)->offset + prev->len) - offset;

                if (i > 0) {
                        offset += i;
                        err = -EINVAL;
                        if (end <= offset)
                                goto err;
                        err = -ENOMEM;
                        if (!pskb_pull(skb, i))
                                goto err;
                        if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                                skb->ip_summed = CHECKSUM_NONE;
                }
        }

        err = -ENOMEM;

        /* Look for overlap with succeeding segments and trim or drop them. */
        while (next && FRAG_CB(next)->offset < end) {
                int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

                if (i < next->len) {
                        /* Eat the head of the next overlapped fragment
                         * and leave the loop.  The following ones cannot overlap.
                         */
                        if (!pskb_pull(next, i))
                                goto err;
                        FRAG_CB(next)->offset += i;
                        qp->q.meat -= i;
                        if (next->ip_summed != CHECKSUM_UNNECESSARY)
                                next->ip_summed = CHECKSUM_NONE;
                        break;
                } else {
                        struct sk_buff *free_it = next;

                        /* The old fragment is completely overridden by
                         * the new one, drop it.
                         */
                        next = next->next;

                        if (prev)
                                prev->next = next;
                        else
                                qp->q.fragments = next;

                        qp->q.meat -= free_it->len;
                        frag_kfree_skb(qp->q.net, free_it, NULL);
                }
        }

        FRAG_CB(skb)->offset = offset;

        /* Insert this fragment in the chain of fragments. */
        skb->next = next;
        if (prev)
                prev->next = skb;
        else
                qp->q.fragments = skb;

        dev = skb->dev;
        if (dev) {
                qp->iif = dev->ifindex;
                skb->dev = NULL;
        }
        qp->q.stamp = skb->tstamp;
        qp->q.meat += skb->len;
        atomic_add(skb->truesize, &qp->q.net->mem);
        if (offset == 0)
                qp->q.last_in |= INET_FRAG_FIRST_IN;

        if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
            qp->q.meat == qp->q.len)
                return ip_frag_reasm(qp, prev, dev);

        write_lock(&ip4_frags.lock);
        list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
        write_unlock(&ip4_frags.lock);
        return -EINPROGRESS;

err:
        kfree_skb(skb);
        return err;
}

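/* Build a new IP datagram from all its fragments. */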
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                         struct net_device *dev)
{
        struct iphdr *iph;
        struct sk_buff *fp, *head = qp->q.fragments;
        int len;
        int ihlen;
        int err;

        ipq_kill(qp);

        /* Make the one we just received the head. */
        if (prev) {
                head = prev->next;
                fp = skb_clone(head, GFP_ATOMIC);
                if (!fp)
                        goto out_nomem;

                fp->next = head->next;
                prev->next = fp;

                skb_morph(head, qp->q.fragments);
                head->next = qp->q.fragments->next;

                kfree_skb(qp->q.fragments);
                qp->q.fragments = head;
        }

        WARN_ON(head == NULL);
        WARN_ON(FRAG_CB(head)->offset != 0);

        /* Allocate a new buffer for the datagram. */
        ihlen = ip_hdrlen(head);
        len = ihlen + qp->q.len;

        err = -E2BIG;
        if (len > 65535)
                goto out_oversize;

        /* Head of list must not be cloned. */
        if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
                goto out_nomem;

        /* If the first fragment is fragmented itself, split it into two
         * chunks: the first with the data and paged part, and the second
         * holding only the fragments.
         */
        if (skb_shinfo(head)->frag_list) {
                struct sk_buff *clone;
                int i, plen = 0;

                if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
                        goto out_nomem;
                clone->next = head->next;
                head->next = clone;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_shinfo(head)->frag_list = NULL;
                for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
                        plen += skb_shinfo(head)->frags[i].size;
                clone->len = clone->data_len = head->data_len - plen;
                head->data_len -= clone->len;
                head->len -= clone->len;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
                atomic_add(clone->truesize, &qp->q.net->mem);
        }

        skb_shinfo(head)->frag_list = head->next;
        skb_push(head, head->data - skb_network_header(head));
        atomic_sub(head->truesize, &qp->q.net->mem);

        for (fp = head->next; fp; fp = fp->next) {
                head->data_len += fp->len;
                head->len += fp->len;
                if (head->ip_summed != fp->ip_summed)
                        head->ip_summed = CHECKSUM_NONE;
                else if (head->ip_summed == CHECKSUM_COMPLETE)
                        head->csum = csum_add(head->csum, fp->csum);
                head->truesize += fp->truesize;
                atomic_sub(fp->truesize, &qp->q.net->mem);
        }

        head->next = NULL;
        head->dev = dev;
        head->tstamp = qp->q.stamp;

        iph = ip_hdr(head);
        iph->frag_off = 0;
        iph->tot_len = htons(len);
        IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMOKS);
        qp->q.fragments = NULL;
        return 0;

out_nomem:
        LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
                        "queue %p\n", qp);
        err = -ENOMEM;
        goto out_fail;
out_oversize:
        if (net_ratelimit())
                printk(KERN_INFO
                        "Oversized IP packet from " NIPQUAD_FMT ".\n",
                        NIPQUAD(qp->saddr));
out_fail:
        IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMFAILS);
        return err;
}

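/* Process an incoming IP datagram fragment. */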
int ip_defrag(struct sk_buff *skb, u32 user)
{
        struct ipq *qp;
        struct net *net;

        net = skb->dev ? dev_net(skb->dev) : dev_net(skb->dst->dev);
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);

        /* Start by cleaning up the memory. */
        if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
                ip_evictor(net);

        /* Lookup (or create) queue header */
        if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
                int ret;

                spin_lock(&qp->q.lock);

                ret = ip_frag_queue(qp, skb);

                spin_unlock(&qp->q.lock);
                ipq_put(qp);
                return ret;
        }

        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -ENOMEM;
}

#ifdef CONFIG_SYSCTL
static int zero;

static struct ctl_table ip4_frags_ns_ctl_table[] = {
        {
                .ctl_name	= NET_IPV4_IPFRAG_HIGH_THRESH,
                .procname	= "ipfrag_high_thresh",
                .data		= &init_net.ipv4.frags.high_thresh,
                .maxlen		= sizeof(int),
                .mode		= 0644,
                .proc_handler	= &proc_dointvec
        },
        {
                .ctl_name	= NET_IPV4_IPFRAG_LOW_THRESH,
                .procname	= "ipfrag_low_thresh",
                .data		= &init_net.ipv4.frags.low_thresh,
                .maxlen		= sizeof(int),
                .mode		= 0644,
                .proc_handler	= &proc_dointvec
        },
        {
                .ctl_name	= NET_IPV4_IPFRAG_TIME,
                .procname	= "ipfrag_time",
                .data		= &init_net.ipv4.frags.timeout,
                .maxlen		= sizeof(int),
                .mode		= 0644,
                .proc_handler	= &proc_dointvec_jiffies,
                .strategy	= &sysctl_jiffies
        },
        { }
};

static struct ctl_table ip4_frags_ctl_table[] = {
        {
                .ctl_name	= NET_IPV4_IPFRAG_SECRET_INTERVAL,
                .procname	= "ipfrag_secret_interval",
                .data		= &ip4_frags.secret_interval,
                .maxlen		= sizeof(int),
                .mode		= 0644,
                .proc_handler	= &proc_dointvec_jiffies,
                .strategy	= &sysctl_jiffies
        },
        {
                .procname	= "ipfrag_max_dist",
                .data		= &sysctl_ipfrag_max_dist,
                .maxlen		= sizeof(int),
                .mode		= 0644,
                .proc_handler	= &proc_dointvec_minmax,
                .extra1		= &zero
        },
        { }
};

static int ip4_frags_ns_ctl_register(struct net *net)
{
        struct ctl_table *table;
        struct ctl_table_header *hdr;

        table = ip4_frags_ns_ctl_table;
        if (net != &init_net) {
                table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
                if (table == NULL)
                        goto err_alloc;

                table[0].data = &net->ipv4.frags.high_thresh;
                table[1].data = &net->ipv4.frags.low_thresh;
                table[2].data = &net->ipv4.frags.timeout;
        }

        hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
        if (hdr == NULL)
                goto err_reg;

        net->ipv4.frags_hdr = hdr;
        return 0;

err_reg:
        if (net != &init_net)
                kfree(table);
err_alloc:
        return -ENOMEM;
}

static void ip4_frags_ns_ctl_unregister(struct net *net)
{
        struct ctl_table *table;

        table = net->ipv4.frags_hdr->ctl_table_arg;
        unregister_net_sysctl_table(net->ipv4.frags_hdr);
        kfree(table);
}

static void ip4_frags_ctl_register(void)
{
        register_net_sysctl_rotable(net_ipv4_ctl_path, ip4_frags_ctl_table);
}
#else
static inline int ip4_frags_ns_ctl_register(struct net *net)
{
        return 0;
}

static inline void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static inline void ip4_frags_ctl_register(void)
{
}
#endif

static int ipv4_frags_init_net(struct net *net)
{
        /*
         * Fragment cache limits. We will commit 256K at one time. Should we
         * cross that limit we will prune down to 192K. This should cope with
         * even the most extreme cases without allowing an attacker to
         * measurably harm machine performance.
         */
        net->ipv4.frags.high_thresh = 256 * 1024;
        net->ipv4.frags.low_thresh = 192 * 1024;

        /*
         * A queue lives for at most IP_FRAG_TIME after it is created; the
         * timer is deliberately not refreshed as new fragments arrive.
         */
        net->ipv4.frags.timeout = IP_FRAG_TIME;

        inet_frags_init_net(&net->ipv4.frags);

        return ip4_frags_ns_ctl_register(net);
}

static void ipv4_frags_exit_net(struct net *net)
{
        ip4_frags_ns_ctl_unregister(net);
        inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
}

static struct pernet_operations ip4_frags_ops = {
        .init = ipv4_frags_init_net,
        .exit = ipv4_frags_exit_net,
};

void __init ipfrag_init(void)
{
        ip4_frags_ctl_register();
        register_pernet_subsys(&ip4_frags_ops);
        ip4_frags.hashfn = ip4_hashfn;
        ip4_frags.constructor = ip4_frag_init;
        ip4_frags.destructor = ip4_frag_free;
        ip4_frags.skb_free = NULL;
        ip4_frags.qsize = sizeof(struct ipq);
        ip4_frags.match = ip4_frag_match;
        ip4_frags.frag_expire = ip_expire;
        ip4_frags.secret_interval = 10 * 60 * HZ;
        inet_frags_init(&ip4_frags);
}

EXPORT_SYMBOL(ip_defrag);