VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/ip_input.c@27843

Last change on this file since 27843 was 26404, checked in by vboxsync, 15 years ago

NAT: applied patch from xtracker 3993 (use BSD mbufs)

  • Property svn:eol-style set to native
File size: 16.3 KB
 
/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)ip_input.c  8.2 (Berkeley) 1/4/94
 * ip_input.c,v 1.11 1994/11/16 10:17:08 jkh Exp
 */

/*
 * Changes and additions relating to SLiRP are
 * Copyright (c) 1995 Danny Gasparovski.
 *
 * Please read the file COPYRIGHT for the
 * terms and conditions of the copyright.
 */

#include <slirp.h>
#include "ip_icmp.h"
#include "alias.h"


/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(PNATState pData)
{
    int i = 0;
    for (i = 0; i < IPREASS_NHASH; ++i)
        TAILQ_INIT(&ipq[i]);
    maxnipq = 100; /* ??? */
    maxfragsperpacket = 16;
    nipq = 0;
    ip_currid = tt.tv_sec & 0xffff;
    udp_init(pData);
    tcp_init(pData);
}

static struct libalias *select_alias(PNATState pData, struct mbuf* m)
{
    struct libalias *la = pData->proxy_alias;
    struct udphdr *udp = NULL;
    struct ip *pip = NULL;

#ifndef VBOX_WITH_SLIRP_BSD_MBUF
    if (m->m_la)
        return m->m_la;
#else
    struct m_tag *t;
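    /* With BSD mbufs the per-packet libalias reference travels with the mbuf
     * as a packet tag; the alias data is kept in the tag payload that starts
     * immediately after the m_tag header, hence the &t[1] cast below. */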
    if ((t = m_tag_find(m, PACKET_TAG_ALIAS, NULL)) != 0)
        return (struct libalias *)&t[1];
#endif

    return la;
}

/*
 * Ip input routine.  Checksum and byte swap header.  If fragmented
 * try to reassemble.  Process options.  Pass to next level.
 */
void
ip_input(PNATState pData, struct mbuf *m)
{
    register struct ip *ip;
    int hlen = 0;
    int mlen = 0;

    STAM_PROFILE_START(&pData->StatIP_input, a);

    DEBUG_CALL("ip_input");
    DEBUG_ARG("m = %lx", (long)m);
    ip = mtod(m, struct ip *);
    Log2(("ip_dst=%R[IP4](len:%d) m_len = %d\n", &ip->ip_dst, RT_N2H_U16(ip->ip_len), m->m_len));

    ipstat.ips_total++;
    {
        int rc;
        STAM_PROFILE_START(&pData->StatALIAS_input, b);
        rc = LibAliasIn(select_alias(pData, m), mtod(m, char *), m->m_len);
        STAM_PROFILE_STOP(&pData->StatALIAS_input, b);
        Log2(("NAT: LibAlias return %d\n", rc));
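        /* LibAlias may rewrite the datagram in place (e.g. addresses embedded
         * in FTP payloads) and change its length, so resync the mbuf length
         * with the value recorded in the IP header. */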
        if (m->m_len != RT_N2H_U16(ip->ip_len))
            m->m_len = RT_N2H_U16(ip->ip_len);
    }

    mlen = m->m_len;

    if (mlen < sizeof(struct ip))
    {
        ipstat.ips_toosmall++;
        STAM_PROFILE_STOP(&pData->StatIP_input, a);
        return;
    }

    ip = mtod(m, struct ip *);
    if (ip->ip_v != IPVERSION)
    {
        ipstat.ips_badvers++;
        goto bad;
    }

    hlen = ip->ip_hl << 2;
    if (   hlen < sizeof(struct ip)
        || hlen > m->m_len)
    {
        /* min header length */
        ipstat.ips_badhlen++;                   /* or packet too short */
        goto bad;
    }

    /* keep ip header intact for ICMP reply
     * ip->ip_sum = cksum(m, hlen);
     * if (ip->ip_sum) {
     */
    if (cksum(m, hlen))
    {
        ipstat.ips_badsum++;
        goto bad;
    }

    /*
     * Convert fields to host representation.
     */
    NTOHS(ip->ip_len);
    if (ip->ip_len < hlen)
    {
        ipstat.ips_badlen++;
        goto bad;
    }

    NTOHS(ip->ip_id);
    NTOHS(ip->ip_off);

    /*
     * Check that the amount of data in the buffers
     * is at least as much as the IP header would have us expect.
     * Trim mbufs if longer than we expect.
     * Drop packet if shorter than we expect.
     */
    if (mlen < ip->ip_len)
    {
        ipstat.ips_tooshort++;
        goto bad;
    }

    /* Should drop packet if mbuf too long? hmmm... */
    if (mlen > ip->ip_len)
        m_adj(m, ip->ip_len - m->m_len);

    /* check ip_ttl for a correct ICMP reply */
    if (ip->ip_ttl == 0 || ip->ip_ttl == 1)
    {
        icmp_error(pData, m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 0, "ttl");
        goto bad;
    }

    ip->ip_ttl--;
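    /* Note the order above: the TTL is tested before it is decremented, so a
     * datagram arriving with a TTL of 1 already draws the ICMP Time Exceeded
     * reply instead of being delivered onward with a TTL of 0. */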
    /*
     * If offset or IP_MF are set, must reassemble.
     * Otherwise, nothing need be done.
     * (We could look in the reassembly queue to see
     * if the packet was previously fragmented,
     * but it's not worth the time; just let them time out.)
     *
     */
    if (ip->ip_off & (IP_MF | IP_OFFMASK))
    {
        m = ip_reass(pData, m);
        if (m == NULL)
        {
            STAM_PROFILE_STOP(&pData->StatIP_input, a);
            return;
        }
        ip = mtod(m, struct ip *);
        hlen = ip->ip_hl << 2;
    }
    else
        ip->ip_len -= hlen;

    /*
     * Switch out to protocol's input routine.
     */
    ipstat.ips_delivered++;
    switch (ip->ip_p)
    {
        case IPPROTO_TCP:
            tcp_input(pData, m, hlen, (struct socket *)NULL);
            break;
        case IPPROTO_UDP:
            udp_input(pData, m, hlen);
            break;
        case IPPROTO_ICMP:
            icmp_input(pData, m, hlen);
            break;
        default:
            ipstat.ips_noproto++;
            m_free(pData, m);
    }
    STAM_PROFILE_STOP(&pData->StatIP_input, a);
    return;

bad:
    Log2(("NAT: IP datagram to %R[IP4] with size(%d) claimed as bad\n",
          &ip->ip_dst, ip->ip_len));
    m_freem(pData, m);
    STAM_PROFILE_STOP(&pData->StatIP_input, a);
    return;
}

struct mbuf *
ip_reass(PNATState pData, struct mbuf* m)
{
    struct ip *ip;
    struct mbuf *p, *q, *nq;
    struct ipq_t *fp = NULL;
    struct ipqhead *head;
    int i, hlen, next;
    u_short hash;

    /* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
    if (   maxnipq == 0
        || maxfragsperpacket == 0)
    {
        ipstat.ips_fragments++;
        ipstat.ips_fragdropped++;
        m_freem(pData, m);
        return (NULL);
    }

    ip = mtod(m, struct ip *);
    hlen = ip->ip_hl << 2;

    hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
    head = &ipq[hash];
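    /* Fragments are bucketed by a hash of the source address and the IP
     * identification field; the exact (id, src, dst, protocol) match that
     * defines a datagram is verified by the lookup loop below. */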

    /*
     * Look for queue of fragments
     * of this datagram.
     */
    TAILQ_FOREACH(fp, head, ipq_list)
        if (ip->ip_id == fp->ipq_id &&
            ip->ip_src.s_addr == fp->ipq_src.s_addr &&
            ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
            ip->ip_p == fp->ipq_p)
            goto found;

    fp = NULL;

    /*
     * Attempt to trim the number of allocated fragment queues if it
     * exceeds the administrative limit.
     */
    if ((nipq > maxnipq) && (maxnipq > 0))
    {
        /*
         * drop something from the tail of the current queue
         * before proceeding further
         */
        struct ipq_t *pHead = TAILQ_LAST(head, ipqhead);
        if (pHead == NULL)
        {
            /* gak */
            for (i = 0; i < IPREASS_NHASH; i++)
            {
                struct ipq_t *pTail = TAILQ_LAST(&ipq[i], ipqhead);
                if (pTail)
                {
                    ipstat.ips_fragtimeout += pTail->ipq_nfrags;
                    ip_freef(pData, &ipq[i], pTail);
                    break;
                }
            }
        }
        else
        {
            ipstat.ips_fragtimeout += pHead->ipq_nfrags;
            ip_freef(pData, head, pHead);
        }
    }

found:
    /*
     * Adjust ip_len to not reflect header,
     * convert offset of this to bytes.
     */
    ip->ip_len -= hlen;
    if (ip->ip_off & IP_MF)
    {
        /*
         * Make sure that fragments have a data length
         * that's a non-zero multiple of 8 bytes.
         */
        if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0)
        {
            ipstat.ips_toosmall++;               /* XXX */
            goto dropfrag;
        }
        m->m_flags |= M_FRAG;
    }
    else
        m->m_flags &= ~M_FRAG;
    ip->ip_off <<= 3;
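    /* The fragment offset field counts 8-byte units, so the shift converts
     * it to a byte offset: e.g. a field value of 185 means the fragment's
     * data starts at byte 185 * 8 = 1480 of the original datagram. */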


    /*
     * Attempt reassembly; if it succeeds, proceed.
     * ip_reass() will return a different mbuf.
     */
    ipstat.ips_fragments++;

    /* Previous ip_reass() started here. */
    /*
     * Presence of header sizes in mbufs
     * would confuse code below.
     */
    m->m_data += hlen;
    m->m_len -= hlen;

    /*
     * If first fragment to arrive, create a reassembly queue.
     */
    if (fp == NULL)
    {
        fp = RTMemAlloc(sizeof(struct ipq_t));
        if (fp == NULL)
            goto dropfrag;
        TAILQ_INSERT_HEAD(head, fp, ipq_list);
        nipq++;
        fp->ipq_nfrags = 1;
        fp->ipq_ttl = IPFRAGTTL;
        fp->ipq_p = ip->ip_p;
        fp->ipq_id = ip->ip_id;
        fp->ipq_src = ip->ip_src;
        fp->ipq_dst = ip->ip_dst;
        fp->ipq_frags = m;
        m->m_nextpkt = NULL;
        goto done;
    }
    else
    {
        fp->ipq_nfrags++;
    }

#ifndef VBOX_WITH_SLIRP_BSD_MBUF
#define GETIP(m) ((struct ip*)(MBUF_IP_HEADER(m)))
#else
#define GETIP(m) ((struct ip*)((m)->m_pkthdr.header))
#endif


    /*
     * Find a segment which begins after this one does.
     */
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
        if (GETIP(q)->ip_off > ip->ip_off)
            break;

    /*
     * If there is a preceding segment, it may provide some of
     * our data already.  If so, drop the data from the incoming
     * segment.  If it provides all of our data, drop us, otherwise
     * stick new segment in the proper place.
     *
     * If some of the data is dropped from the preceding
     * segment, then its checksum is invalidated.
     */
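    /* For example, if the preceding fragment covers bytes [0, 1480) and the
     * incoming fragment claims to start at byte 1000, the first 480 bytes of
     * the incoming fragment are duplicates and are trimmed off below. */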
    if (p)
    {
        i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
        if (i > 0)
        {
            if (i >= ip->ip_len)
                goto dropfrag;
            m_adj(m, i);
            ip->ip_off += i;
            ip->ip_len -= i;
        }
        m->m_nextpkt = p->m_nextpkt;
        p->m_nextpkt = m;
    }
    else
    {
        m->m_nextpkt = fp->ipq_frags;
        fp->ipq_frags = m;
    }

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     */
    for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
         q = nq)
    {
        i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
        if (i < GETIP(q)->ip_len)
        {
            GETIP(q)->ip_len -= i;
            GETIP(q)->ip_off += i;
            m_adj(q, i);
            break;
        }
        nq = q->m_nextpkt;
        m->m_nextpkt = nq;
        ipstat.ips_fragdropped++;
        fp->ipq_nfrags--;
        m_freem(pData, q);
    }

    /*
     * Check for complete reassembly and perform frag per packet
     * limiting.
     *
     * Frag limiting is performed here so that the nth frag has
     * a chance to complete the packet before we drop the packet.
     * As a result, n+1 frags are actually allowed per packet, but
     * only n will ever be stored. (n = maxfragsperpacket.)
     *
     */
    next = 0;
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
    {
        if (GETIP(q)->ip_off != next)
        {
            if (fp->ipq_nfrags > maxfragsperpacket)
            {
                ipstat.ips_fragdropped += fp->ipq_nfrags;
                ip_freef(pData, head, fp);
            }
            goto done;
        }
        next += GETIP(q)->ip_len;
    }
    /* Make sure the last packet didn't have the IP_MF flag */
    if (p->m_flags & M_FRAG)
    {
        if (fp->ipq_nfrags > maxfragsperpacket)
        {
            ipstat.ips_fragdropped += fp->ipq_nfrags;
            ip_freef(pData, head, fp);
        }
        goto done;
    }

    /*
     * Reassembly is complete.  Make sure the packet is a sane size.
     */
    q = fp->ipq_frags;
    ip = GETIP(q);
    hlen = ip->ip_hl << 2;
    if (next + hlen > IP_MAXPACKET)
    {
        ipstat.ips_fragdropped += fp->ipq_nfrags;
        ip_freef(pData, head, fp);
        goto done;
    }

    /*
     * Concatenate fragments.
     */
    m = q;
    nq = q->m_nextpkt;
    q->m_nextpkt = NULL;
    for (q = nq; q != NULL; q = nq)
    {
        nq = q->m_nextpkt;
        q->m_nextpkt = NULL;
        m_cat(pData, m, q);

        m->m_len += hlen;
        m->m_data -= hlen;
        ip = mtod(m, struct ip *); /* update ip pointer */
        hlen = ip->ip_hl << 2;
        m->m_len -= hlen;
        m->m_data += hlen;
    }
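    /* Each pass of the loop above briefly re-exposes the leading IP header so
     * that ip and hlen can be refreshed via mtod() (m_cat() may relocate the
     * underlying data), then hides it again before the next append. */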
    m->m_len += hlen;
    m->m_data -= hlen;

    /*
     * Create header for new ip packet by modifying header of first
     * packet;  dequeue and discard fragment reassembly header.
     * Make header visible.
     */

    ip->ip_len = next;
    ip->ip_src = fp->ipq_src;
    ip->ip_dst = fp->ipq_dst;
    TAILQ_REMOVE(head, fp, ipq_list);
    nipq--;
    RTMemFree(fp);

    Assert((ip->ip_len == next));
    /* some debugging cruft by sklower, below, will go away soon */
#if 0
    if (m->m_flags & M_PKTHDR) /* XXX this should be done elsewhere */
        m_fixhdr(m);
#endif
    ipstat.ips_reassembled++;
    return (m);

dropfrag:
    ipstat.ips_fragdropped++;
    if (fp != NULL)
        fp->ipq_nfrags--;
    m_freem(pData, m);

done:
    return NULL;

#undef GETIP
}

void
ip_freef(PNATState pData, struct ipqhead *fhp, struct ipq_t *fp)
{
    struct mbuf *q;

    while (fp->ipq_frags)
    {
        q = fp->ipq_frags;
        fp->ipq_frags = q->m_nextpkt;
        m_freem(pData, q);
    }
    TAILQ_REMOVE(fhp, fp, ipq_list);
    RTMemFree(fp);
    nipq--;
}

/*
 * IP timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
ip_slowtimo(PNATState pData)
{
    register struct ipq_t *fp;

    /* XXX: the fragment expiration logic is the same, but it requires an
     * additional loop (see ip_input.c in the FreeBSD tree).
     */
    int i;
    DEBUG_CALL("ip_slowtimo");
    for (i = 0; i < IPREASS_NHASH; i++)
    {
        for (fp = TAILQ_FIRST(&ipq[i]); fp;)
        {
            struct ipq_t *fpp;

            fpp = fp;
            fp = TAILQ_NEXT(fp, ipq_list);
            if (--fpp->ipq_ttl == 0)
            {
                ipstat.ips_fragtimeout += fpp->ipq_nfrags;
                ip_freef(pData, &ipq[i], fpp);
            }
        }
    }
    /*
     * If we are over the maximum number of fragments
     * (due to the limit being lowered), drain off
     * enough to get down to the new limit.
     */
    if (maxnipq >= 0 && nipq > maxnipq)
    {
        for (i = 0; i < IPREASS_NHASH; i++)
        {
            while (nipq > maxnipq && !TAILQ_EMPTY(&ipq[i]))
            {
                ipstat.ips_fragdropped += TAILQ_FIRST(&ipq[i])->ipq_nfrags;
                ip_freef(pData, &ipq[i], TAILQ_FIRST(&ipq[i]));
            }
        }
    }
}


/*
 * Strip out IP options, at higher
 * level protocol in the kernel.
 * Second argument is buffer to which options
 * will be moved, and return value is their length.
 * (XXX) should be deleted; last arg currently ignored.
 */
void
ip_stripoptions(struct mbuf *m, struct mbuf *mopt)
{
    register int i;
    struct ip *ip = mtod(m, struct ip *);
    register caddr_t opts;
    int olen;

    olen = (ip->ip_hl << 2) - sizeof(struct ip);
    opts = (caddr_t)(ip + 1);
    i = m->m_len - (sizeof(struct ip) + olen);
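    /* The options sit between the fixed header and the payload; the copy
     * below slides the payload down over them.  Source and destination
     * overlap whenever the payload is longer than the options, so memmove()
     * rather than memcpy() is required here. */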
    memmove(opts, opts + olen, (unsigned)i);
    m->m_len -= olen;

    ip->ip_hl = sizeof(struct ip) >> 2;
}