VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/ip_input.c@ 13710

最後變更 在這個檔案從13710是 13710,由 vboxsync 提交於 16 年 前

mid point in replacing on VBOX_SLIRP_LOCK/UNLOCK macroces
resolved deadlock

  • 屬性 svn:eol-style 設為 native
檔案大小: 18.4 KB
 
1/*
2 * Copyright (c) 1982, 1986, 1988, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94
34 * ip_input.c,v 1.11 1994/11/16 10:17:08 jkh Exp
35 */
36
37/*
38 * Changes and additions relating to SLiRP are
39 * Copyright (c) 1995 Danny Gasparovski.
40 *
41 * Please read the file COPYRIGHT for the
42 * terms and conditions of the copyright.
43 */
44
45#include <slirp.h>
46#include "ip_icmp.h"
47
48
49/*
50 * IP initialization: fill in IP protocol switch table.
51 * All protocols not implemented in kernel go to raw IP protocol handler.
52 */
53void
54ip_init(PNATState pData)
55{
56 ipq.next = ipq.prev = ptr_to_u32(pData, &ipq);
57 ip_currid = tt.tv_sec & 0xffff;
58 udp_init(pData);
59 tcp_init(pData);
60}
61
62/*
63 * Ip input routine. Checksum and byte swap header. If fragmented
64 * try to reassemble. Process options. Pass to next level.
65 */
66void
67ip_input(PNATState pData, struct mbuf *m)
68{
69 register struct ip *ip;
70 int hlen;
71#ifdef VBOX_WITH_SYNC_SLIRP
72 int rc;
73#endif
74
75 DEBUG_CALL("ip_input");
76 DEBUG_ARG("m = %lx", (long)m);
77 DEBUG_ARG("m_len = %d", m->m_len);
78
79 VBOX_SLIRP_LOCK(m->m_mutex);
80
81 ipstat.ips_total++;
82
83 if (m->m_len < sizeof (struct ip)) {
84 ipstat.ips_toosmall++;
85 VBOX_SLIRP_UNLOCK(m->m_mutex);
86 return;
87 }
88
89 ip = mtod(m, struct ip *);
90
91 if (ip->ip_v != IPVERSION) {
92 ipstat.ips_badvers++;
93 goto bad;
94 }
95
96 hlen = ip->ip_hl << 2;
97 if (hlen<sizeof(struct ip ) || hlen>m->m_len) {/* min header length */
98 ipstat.ips_badhlen++; /* or packet too short */
99 goto bad;
100 }
101
102 /* keep ip header intact for ICMP reply
103 * ip->ip_sum = cksum(m, hlen);
104 * if (ip->ip_sum) {
105 */
106 if(cksum(m,hlen)) {
107 ipstat.ips_badsum++;
108 goto bad;
109 }
110
111 /*
112 * Convert fields to host representation.
113 */
114 NTOHS(ip->ip_len);
115 if (ip->ip_len < hlen) {
116 ipstat.ips_badlen++;
117 goto bad;
118 }
119 NTOHS(ip->ip_id);
120 NTOHS(ip->ip_off);
121
122 /*
123 * Check that the amount of data in the buffers
124 * is as at least much as the IP header would have us expect.
125 * Trim mbufs if longer than we expect.
126 * Drop packet if shorter than we expect.
127 */
128 if (m->m_len < ip->ip_len) {
129 ipstat.ips_tooshort++;
130 goto bad;
131 }
132 /* Should drop packet if mbuf too long? hmmm... */
133 if (m->m_len > ip->ip_len)
134 m_adj(m, ip->ip_len - m->m_len);
135
136 /* check ip_ttl for a correct ICMP reply */
137 if(ip->ip_ttl==0 || ip->ip_ttl==1) {
138 icmp_error(pData, m, ICMP_TIMXCEED,ICMP_TIMXCEED_INTRANS, 0,"ttl");
139 goto bad;
140 }
141
142 /*
143 * Process options and, if not destined for us,
144 * ship it on. ip_dooptions returns 1 when an
145 * error was detected (causing an icmp message
146 * to be sent and the original packet to be freed).
147 */
148/* We do no IP options */
149/* if (hlen > sizeof (struct ip) && ip_dooptions(m))
150 * goto next;
151 */
152 /*
153 * If offset or IP_MF are set, must reassemble.
154 * Otherwise, nothing need be done.
155 * (We could look in the reassembly queue to see
156 * if the packet was previously fragmented,
157 * but it's not worth the time; just let them time out.)
158 *
159 * XXX This should fail, don't fragment yet
160 */
161 if (ip->ip_off &~ IP_DF) {
162 register struct ipq_t *fp;
163 /*
164 * Look for queue of fragments
165 * of this datagram.
166 */
167 for (fp = u32_to_ptr(pData, ipq.next, struct ipq_t *); fp != &ipq;
168 fp = u32_to_ptr(pData, fp->next, struct ipq_t *))
169 if (ip->ip_id == fp->ipq_id &&
170 ip->ip_src.s_addr == fp->ipq_src.s_addr &&
171 ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
172 ip->ip_p == fp->ipq_p)
173 goto found;
174 fp = 0;
175 found:
176
177 /*
178 * Adjust ip_len to not reflect header,
179 * set ip_mff if more fragments are expected,
180 * convert offset of this to bytes.
181 */
182 ip->ip_len -= hlen;
183 if (ip->ip_off & IP_MF)
184 ((struct ipasfrag *)ip)->ipf_mff |= 1;
185 else
186 ((struct ipasfrag *)ip)->ipf_mff &= ~1;
187
188 ip->ip_off <<= 3;
189
190 /*
191 * If datagram marked as having more fragments
192 * or if this is not the first fragment,
193 * attempt reassembly; if it succeeds, proceed.
194 */
195 if (((struct ipasfrag *)ip)->ipf_mff & 1 || ip->ip_off) {
196 ipstat.ips_fragments++;
197 ip = ip_reass(pData, (struct ipasfrag *)ip, fp);
198 if (ip == 0) {
199 VBOX_SLIRP_UNLOCK(m->m_mutex);
200 return;
201 }
202 ipstat.ips_reassembled++;
203 VBOX_SLIRP_UNLOCK(m->m_mutex);
204 m = dtom(pData, ip);
205 VBOX_SLIRP_LOCK(m->m_mutex);
206 } else
207 if (fp)
208 ip_freef(pData, fp);
209
210 } else
211 ip->ip_len -= hlen;
212
213 /*
214 * Switch out to protocol's input routine.
215 */
216 ipstat.ips_delivered++;
217 switch (ip->ip_p) {
218 case IPPROTO_TCP:
219 tcp_input(pData, m, hlen, (struct socket *)NULL);
220 break;
221 case IPPROTO_UDP:
222 udp_input(pData, m, hlen);
223 break;
224 case IPPROTO_ICMP:
225 icmp_input(pData, m, hlen);
226 break;
227 default:
228 ipstat.ips_noproto++;
229 m_free(pData, m);
230 }
231 if (m != NULL) {
232 VBOX_SLIRP_UNLOCK(m->m_mutex);
233 }
234 return;
235bad:
236 m_freem(pData, m);
237 if (m != NULL) {
238 VBOX_SLIRP_UNLOCK(m->m_mutex);
239 }
240 return;
241}
242
243/*
244 * Take incoming datagram fragment and try to
245 * reassemble it into whole datagram. If a chain for
246 * reassembly of this datagram already exists, then it
247 * is given as fp; otherwise have to make a chain.
248 */
/*
 * Take incoming datagram fragment and try to
 * reassemble it into whole datagram.  If a chain for
 * reassembly of this datagram already exists, then it
 * is given as fp; otherwise have to make a chain.
 *
 * Returns the reassembled datagram (as a struct ip *) once all pieces
 * have arrived, or NULL if the datagram is still incomplete (the
 * fragment has been queued) or the fragment was dropped.
 *
 * NOTE: chain links (ipq_next/ipf_next etc.) are stored as 32-bit
 * offsets and converted with u32_to_ptr()/ptr_to_u32().
 */
struct ip *
ip_reass(PNATState pData, register struct ipasfrag *ip, register struct ipq_t *fp)
{
    register struct mbuf *m = dtom(pData, ip);
    register struct ipasfrag *q;
    int hlen = ip->ip_hl << 2;
    int i, next;

    DEBUG_CALL("ip_reass");
    DEBUG_ARG("ip = %lx", (long)ip);
    DEBUG_ARG("fp = %lx", (long)fp);
    DEBUG_ARG("m = %lx", (long)m);

    /*
     * Presence of header sizes in mbufs
     * would confuse code below.
     * Fragment m_data is concatenated.
     */
    /* Hide the IP header so only payload bytes are concatenated. */
    m->m_data += hlen;
    m->m_len -= hlen;

    /*
     * If first fragment to arrive, create a reassembly queue.
     */
    if (fp == 0) {
        struct mbuf *t;
        /* The queue header itself lives in a freshly allocated mbuf. */
        if ((t = m_get(pData)) == NULL) goto dropfrag;
        fp = mtod(t, struct ipq_t *);
        insque_32(pData, fp, &ipq);
        fp->ipq_ttl = IPFRAGTTL;
        fp->ipq_p = ip->ip_p;
        fp->ipq_id = ip->ip_id;
        /* Empty fragment list: header points at itself. */
        fp->ipq_next = fp->ipq_prev = ptr_to_u32(pData, (struct ipasfrag *)fp);
        fp->ipq_src = ((struct ip *)ip)->ip_src;
        fp->ipq_dst = ((struct ip *)ip)->ip_dst;
        q = (struct ipasfrag *)fp;
        goto insert;
    }

    /*
     * Find a segment which begins after this one does.
     */
    for (q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *); q != (struct ipasfrag *)fp;
         q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *))
        if (q->ip_off > ip->ip_off)
            break;

    /*
     * If there is a preceding segment, it may provide some of
     * our data already.  If so, drop the data from the incoming
     * segment.  If it provides all of our data, drop us.
     */
    if (u32_to_ptr(pData, q->ipf_prev, struct ipq_t *) != fp) {
        /* i = overlap between the previous fragment and this one. */
        i = (u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *))->ip_off +
            (u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *))->ip_len - ip->ip_off;
        if (i > 0) {
            if (i >= ip->ip_len)
                goto dropfrag;   /* fully covered by existing data */
            m_adj(dtom(pData, ip), i);
            ip->ip_off += i;
            ip->ip_len -= i;
        }
    }

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     */
    while (q != (struct ipasfrag *)fp && ip->ip_off + ip->ip_len > q->ip_off) {
        i = (ip->ip_off + ip->ip_len) - q->ip_off;
        if (i < q->ip_len) {
            /* Partial overlap: trim the front of the queued fragment. */
            q->ip_len -= i;
            q->ip_off += i;
            m_adj(dtom(pData, q), i);
            break;
        }
        /* Fully covered: advance first, then free/unlink the old one. */
        q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
        m_freem(pData, dtom(pData, u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *)));
        ip_deq(pData, u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *));
    }

insert:
    /*
     * Stick new segment in its place;
     * check for complete reassembly.
     */
    ip_enq(pData, ip, u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *));
    next = 0;
    /* Walk the chain; any gap means we are still incomplete. */
    for (q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *); q != (struct ipasfrag *)fp;
         q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *)) {
        if (q->ip_off != next)
            return (0);
        next += q->ip_len;
    }
    /* Last fragment must have "more fragments" cleared. */
    if (u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *)->ipf_mff & 1)
        return (0);

    /*
     * Reassembly is complete; concatenate fragments.
     */
    q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *);
    m = dtom(pData, q);

    q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
    while (q != (struct ipasfrag *)fp) {
        struct mbuf *t;
        t = dtom(pData, q);
        q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
        m_cat(pData, m, t);
    }

    /*
     * Create header for new ip packet by
     * modifying header of first packet;
     * dequeue and discard fragment reassembly header.
     * Make header visible.
     */
    ip = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *);

    /*
     * If the fragments concatenated to an mbuf that's
     * bigger than the total size of the fragment, then and
     * m_ext buffer was alloced.  But fp->ipq_next points to
     * the old buffer (in the mbuf), so we must point ip
     * into the new buffer.
     */
    if (m->m_flags & M_EXT) {
        int delta;
        delta = (char *)ip - m->m_dat;
        ip = (struct ipasfrag *)(m->m_ext + delta);
    }

    /* DEBUG_ARG("ip = %lx", (long)ip);
     * ip=(struct ipasfrag *)m->m_data; */

    ip->ip_len = next;
    ip->ipf_mff &= ~1;
    ((struct ip *)ip)->ip_src = fp->ipq_src;
    ((struct ip *)ip)->ip_dst = fp->ipq_dst;
    remque_32(pData, fp);
    (void) m_free(pData, dtom(pData, fp));
    m = dtom(pData, ip);
    /* Re-expose the IP header hidden at function entry. */
    m->m_len += (ip->ip_hl << 2);
    m->m_data -= (ip->ip_hl << 2);

    return ((struct ip *)ip);

dropfrag:
    ipstat.ips_fragdropped++;
    m_freem(pData, m);
    return (0);
}
401
402/*
403 * Free a fragment reassembly header and all
404 * associated datagrams.
405 */
406void
407ip_freef(PNATState pData, struct ipq_t *fp)
408{
409 register struct ipasfrag *q, *p;
410
411 for (q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *); q != (struct ipasfrag *)fp;
412 q = p) {
413 p = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
414 ip_deq(pData, q);
415 m_freem(pData, dtom(pData, q));
416 }
417 remque_32(pData, fp);
418 (void) m_free(pData, dtom(pData, fp));
419}
420
421/*
422 * Put an ip fragment on a reassembly chain.
423 * Like insque, but pointers in middle of structure.
424 */
425void
426ip_enq(PNATState pData, register struct ipasfrag *p, register struct ipasfrag *prev)
427{
428 DEBUG_CALL("ip_enq");
429 DEBUG_ARG("prev = %lx", (long)prev);
430 p->ipf_prev = ptr_to_u32(pData, prev);
431 p->ipf_next = prev->ipf_next;
432 u32_to_ptr(pData, prev->ipf_next, struct ipasfrag *)->ipf_prev = ptr_to_u32(pData, p);
433 prev->ipf_next = ptr_to_u32(pData, p);
434}
435
436/*
437 * To ip_enq as remque is to insque.
438 */
439void
440ip_deq(PNATState pData, register struct ipasfrag *p)
441{
442 struct ipasfrag *prev = u32_to_ptr(pData, p->ipf_prev, struct ipasfrag *);
443 struct ipasfrag *next = u32_to_ptr(pData, p->ipf_next, struct ipasfrag *);
444 u32ptr_done(pData, prev->ipf_next, p);
445 prev->ipf_next = p->ipf_next;
446 next->ipf_prev = p->ipf_prev;
447}
448
449/*
450 * IP timer processing;
451 * if a timer expires on a reassembly
452 * queue, discard it.
453 */
454void
455ip_slowtimo(PNATState pData)
456{
457 register struct ipq_t *fp;
458
459 DEBUG_CALL("ip_slowtimo");
460
461 fp = u32_to_ptr(pData, ipq.next, struct ipq_t *);
462 if (fp == 0)
463 return;
464
465 while (fp != &ipq) {
466 --fp->ipq_ttl;
467 fp = u32_to_ptr(pData, fp->next, struct ipq_t *);
468 if (u32_to_ptr(pData, fp->prev, struct ipq_t *)->ipq_ttl == 0) {
469 ipstat.ips_fragtimeout++;
470 ip_freef(pData, u32_to_ptr(pData, fp->prev, struct ipq_t *));
471 }
472 }
473}
474
475/*
476 * Do option processing on a datagram,
477 * possibly discarding it if bad options are encountered,
478 * or forwarding it if source-routed.
479 * Returns 1 if packet has been forwarded/freed,
480 * 0 if the packet should be processed further.
481 */
482
483#ifdef notdef
484
485int
486ip_dooptions(m)
487 struct mbuf *m;
488{
489 register struct ip *ip = mtod(m, struct ip *);
490 register u_char *cp;
491 register struct ip_timestamp *ipt;
492 register struct in_ifaddr *ia;
493/* int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB, forward = 0; */
494 int opt, optlen, cnt, off, code, type, forward = 0;
495 struct in_addr *sin, dst;
496typedef u_int32_t n_time;
497 n_time ntime;
498
499 dst = ip->ip_dst;
500 cp = (u_char *)(ip + 1);
501 cnt = (ip->ip_hl << 2) - sizeof (struct ip);
502 for (; cnt > 0; cnt -= optlen, cp += optlen) {
503 opt = cp[IPOPT_OPTVAL];
504 if (opt == IPOPT_EOL)
505 break;
506 if (opt == IPOPT_NOP)
507 optlen = 1;
508 else {
509 optlen = cp[IPOPT_OLEN];
510 if (optlen <= 0 || optlen > cnt) {
511 code = &cp[IPOPT_OLEN] - (u_char *)ip;
512 goto bad;
513 }
514 }
515 switch (opt) {
516
517 default:
518 break;
519
520 /*
521 * Source routing with record.
522 * Find interface with current destination address.
523 * If none on this machine then drop if strictly routed,
524 * or do nothing if loosely routed.
525 * Record interface address and bring up next address
526 * component. If strictly routed make sure next
527 * address is on directly accessible net.
528 */
529 case IPOPT_LSRR:
530 case IPOPT_SSRR:
531 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
532 code = &cp[IPOPT_OFFSET] - (u_char *)ip;
533 goto bad;
534 }
535 ipaddr.sin_addr = ip->ip_dst;
536 ia = (struct in_ifaddr *)
537 ifa_ifwithaddr((struct sockaddr *)&ipaddr);
538 if (ia == 0) {
539 if (opt == IPOPT_SSRR) {
540 type = ICMP_UNREACH;
541 code = ICMP_UNREACH_SRCFAIL;
542 goto bad;
543 }
544 /*
545 * Loose routing, and not at next destination
546 * yet; nothing to do except forward.
547 */
548 break;
549 }
550 off--; / * 0 origin * /
551 if (off > optlen - sizeof(struct in_addr)) {
552 /*
553 * End of source route. Should be for us.
554 */
555 save_rte(cp, ip->ip_src);
556 break;
557 }
558 /*
559 * locate outgoing interface
560 */
561 bcopy((caddr_t)(cp + off), (caddr_t)&ipaddr.sin_addr,
562 sizeof(ipaddr.sin_addr));
563 if (opt == IPOPT_SSRR) {
564#define INA struct in_ifaddr *
565#define SA struct sockaddr *
566 if ((ia = (INA)ifa_ifwithdstaddr((SA)&ipaddr)) == 0)
567 ia = (INA)ifa_ifwithnet((SA)&ipaddr);
568 } else
569 ia = ip_rtaddr(ipaddr.sin_addr);
570 if (ia == 0) {
571 type = ICMP_UNREACH;
572 code = ICMP_UNREACH_SRCFAIL;
573 goto bad;
574 }
575 ip->ip_dst = ipaddr.sin_addr;
576 bcopy((caddr_t)&(IA_SIN(ia)->sin_addr),
577 (caddr_t)(cp + off), sizeof(struct in_addr));
578 cp[IPOPT_OFFSET] += sizeof(struct in_addr);
579 /*
580 * Let ip_intr's mcast routing check handle mcast pkts
581 */
582 forward = !IN_MULTICAST(ntohl(ip->ip_dst.s_addr));
583 break;
584
585 case IPOPT_RR:
586 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
587 code = &cp[IPOPT_OFFSET] - (u_char *)ip;
588 goto bad;
589 }
590 /*
591 * If no space remains, ignore.
592 */
593 off--; * 0 origin *
594 if (off > optlen - sizeof(struct in_addr))
595 break;
596 bcopy((caddr_t)(&ip->ip_dst), (caddr_t)&ipaddr.sin_addr,
597 sizeof(ipaddr.sin_addr));
598 /*
599 * locate outgoing interface; if we're the destination,
600 * use the incoming interface (should be same).
601 */
602 if ((ia = (INA)ifa_ifwithaddr((SA)&ipaddr)) == 0 &&
603 (ia = ip_rtaddr(ipaddr.sin_addr)) == 0) {
604 type = ICMP_UNREACH;
605 code = ICMP_UNREACH_HOST;
606 goto bad;
607 }
608 bcopy((caddr_t)&(IA_SIN(ia)->sin_addr),
609 (caddr_t)(cp + off), sizeof(struct in_addr));
610 cp[IPOPT_OFFSET] += sizeof(struct in_addr);
611 break;
612
613 case IPOPT_TS:
614 code = cp - (u_char *)ip;
615 ipt = (struct ip_timestamp *)cp;
616 if (ipt->ipt_len < 5)
617 goto bad;
618 if (ipt->ipt_ptr > ipt->ipt_len - sizeof (int32_t)) {
619 if (++ipt->ipt_oflw == 0)
620 goto bad;
621 break;
622 }
623 sin = (struct in_addr *)(cp + ipt->ipt_ptr - 1);
624 switch (ipt->ipt_flg) {
625
626 case IPOPT_TS_TSONLY:
627 break;
628
629 case IPOPT_TS_TSANDADDR:
630 if (ipt->ipt_ptr + sizeof(n_time) +
631 sizeof(struct in_addr) > ipt->ipt_len)
632 goto bad;
633 ipaddr.sin_addr = dst;
634 ia = (INA)ifaof_ i f p foraddr((SA)&ipaddr,
635 m->m_pkthdr.rcvif);
636 if (ia == 0)
637 continue;
638 bcopy((caddr_t)&IA_SIN(ia)->sin_addr,
639 (caddr_t)sin, sizeof(struct in_addr));
640 ipt->ipt_ptr += sizeof(struct in_addr);
641 break;
642
643 case IPOPT_TS_PRESPEC:
644 if (ipt->ipt_ptr + sizeof(n_time) +
645 sizeof(struct in_addr) > ipt->ipt_len)
646 goto bad;
647 bcopy((caddr_t)sin, (caddr_t)&ipaddr.sin_addr,
648 sizeof(struct in_addr));
649 if (ifa_ifwithaddr((SA)&ipaddr) == 0)
650 continue;
651 ipt->ipt_ptr += sizeof(struct in_addr);
652 break;
653
654 default:
655 goto bad;
656 }
657 ntime = iptime();
658 bcopy((caddr_t)&ntime, (caddr_t)cp + ipt->ipt_ptr - 1,
659 sizeof(n_time));
660 ipt->ipt_ptr += sizeof(n_time);
661 }
662 }
663 if (forward) {
664 ip_forward(m, 1);
665 return (1);
666 }
667 }
668 }
669 return (0);
670bad:
671 /* ip->ip_len -= ip->ip_hl << 2; XXX icmp_error adds in hdr length */
672
673/* Not yet */
674 icmp_error(m, type, code, 0, 0);
675
676 ipstat.ips_badoptions++;
677 return (1);
678}
679
680#endif /* notdef */
681
682/*
683 * Strip out IP options, at higher
684 * level protocol in the kernel.
685 * Second argument is buffer to which options
686 * will be moved, and return value is their length.
687 * (XXX) should be deleted; last arg currently ignored.
688 */
689void
690ip_stripoptions(m, mopt)
691 register struct mbuf *m;
692 struct mbuf *mopt;
693{
694 register int i;
695 struct ip *ip = mtod(m, struct ip *);
696 register caddr_t opts;
697 int olen;
698
699 olen = (ip->ip_hl<<2) - sizeof (struct ip);
700 opts = (caddr_t)(ip + 1);
701 i = m->m_len - (sizeof (struct ip) + olen);
702 memcpy(opts, opts + olen, (unsigned)i);
703 m->m_len -= olen;
704
705 ip->ip_hl = sizeof(struct ip) >> 2;
706}
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette