VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/tcp_input.c@27843

Last change on this file since 27843 was 27797, checked in by vboxsync, 15 years ago

misc compiler warning fixes, comment typos and other minor cleanups

  • Property svn:eol-style set to native
File size: 61.5 KB
 
1/*
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)tcp_input.c 8.5 (Berkeley) 4/10/94
34 * tcp_input.c,v 1.10 1994/10/13 18:36:32 wollman Exp
35 */
36
37/*
38 * Changes and additions relating to SLiRP
39 * Copyright (c) 1995 Danny Gasparovski.
40 *
41 * Please read the file COPYRIGHT for the
42 * terms and conditions of the copyright.
43 */
44
45#include <slirp.h>
46#include "ip_icmp.h"
47
48
49#define TCP_PAWS_IDLE (24 * 24 * 60 * 60 * PR_SLOWHZ)
50
51/* for modulo comparisons of timestamps */
52#define TSTMP_LT(a, b) ((int)((a)-(b)) < 0)
53#define TSTMP_GEQ(a, b) ((int)((a)-(b)) >= 0)
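/*
 * Worked example of the modulo comparison (illustrative only): with
 * a = 0x00000005 and b = 0xfffffffb, (int)((a)-(b)) == 10 > 0, so
 * TSTMP_GEQ(a, b) holds even though a < b as plain unsigned values --
 * the timestamp has simply wrapped around.  TCP_PAWS_IDLE above is
 * 24 days expressed in slow-timer ticks, e.g. 4147200 if PR_SLOWHZ is 2.
 */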
54
55#ifndef TCP_ACK_HACK
56#define DELAY_ACK(tp, ti) \
57 if (ti->ti_flags & TH_PUSH) \
58 tp->t_flags |= TF_ACKNOW; \
59 else \
60 tp->t_flags |= TF_DELACK;
61#else /* !TCP_ACK_HACK */
62#define DELAY_ACK(tp, ign) \
63 tp->t_flags |= TF_DELACK;
64#endif /* TCP_ACK_HACK */
65
66
67/*
68 * deps: netinet/tcp_reass.c
69 * tcp_reass_maxqlen = 48 (default)
70 * tcp_reass_maxseg = nmbclusters/16 (nmbclusters = 1024 + maxusers * 64 from kern/kern_mbuf.c; let's say 256)
71 */
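/*
 * For instance, plugging maxusers = 32 into the formula above would give
 * nmbclusters = 1024 + 32 * 64 = 3072 and hence tcp_reass_maxseg =
 * 3072 / 16 = 192 (illustrative numbers only).
 */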
72int
73tcp_reass(PNATState pData, struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
74{
75 struct tseg_qent *q;
76 struct tseg_qent *p = NULL;
77 struct tseg_qent *nq;
78 struct tseg_qent *te = NULL;
79 struct socket *so = tp->t_socket;
80 int flags;
81 STAM_PROFILE_START(&pData->StatTCP_reassamble, tcp_reassamble);
82
83 /*
84 * XXX: tcp_reass() is rather inefficient with its data structures
85 * and should be rewritten (see NetBSD for optimizations). While
86 * doing that it should move to its own file tcp_reass.c.
87 */
88
89 /*
90 * Call with th==NULL after becoming established to
91 * force pre-ESTABLISHED data up to user socket.
92 */
93 if (th == NULL)
94 goto present;
95
96 /*
97 * Limit the number of segments in the reassembly queue to prevent
98 * holding on to too many segments (and thus running out of mbufs).
99 * Make sure to let the missing segment through which caused this
100 * queue. Always keep one global queue entry spare to be able to
101 * process the missing segment.
102 */
103 if ( th->th_seq != tp->rcv_nxt
104 && ( tcp_reass_qsize + 1 >= tcp_reass_maxseg
105 || tp->t_segqlen >= tcp_reass_maxqlen))
106 {
107 tcp_reass_overflows++;
108 tcpstat.tcps_rcvmemdrop++;
109 m_freem(pData, m);
110 *tlenp = 0;
111 STAM_PROFILE_STOP(&pData->StatTCP_reassamble, tcp_reassamble);
112 return (0);
113 }
114
115 /*
116 * Allocate a new queue entry. If we can't, or hit the zone limit
117 * just drop the pkt.
118 */
119 te = RTMemAlloc(sizeof(struct tseg_qent));
120 if (te == NULL)
121 {
122 tcpstat.tcps_rcvmemdrop++;
123 m_freem(pData, m);
124 *tlenp = 0;
125 STAM_PROFILE_STOP(&pData->StatTCP_reassamble, tcp_reassamble);
126 return (0);
127 }
128 tp->t_segqlen++;
129 tcp_reass_qsize++;
130
131 /*
132 * Find a segment which begins after this one does.
133 */
134 LIST_FOREACH(q, &tp->t_segq, tqe_q)
135 {
136 if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
137 break;
138 p = q;
139 }
140
141 /*
142 * If there is a preceding segment, it may provide some of
143 * our data already. If so, drop the data from the incoming
144 * segment. If it provides all of our data, drop us.
145 */
146 if (p != NULL)
147 {
148 int i;
149 /* conversion to int (in i) handles seq wraparound */
150 i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
151 if (i > 0)
152 {
153 if (i >= *tlenp)
154 {
155 tcpstat.tcps_rcvduppack++;
156 tcpstat.tcps_rcvdupbyte += *tlenp;
157 m_freem(pData, m);
158 RTMemFree(te);
159 tp->t_segqlen--;
160 tcp_reass_qsize--;
161 /*
162 * Try to present any queued data
163 * at the left window edge to the user.
164 * This is needed after the 3-WHS
165 * completes.
166 */
167 goto present; /* ??? */
168 }
169 m_adj(m, i);
170 *tlenp -= i;
171 th->th_seq += i;
172 }
173 }
174 tcpstat.tcps_rcvoopack++;
175 tcpstat.tcps_rcvoobyte += *tlenp;
176
177 /*
178 * While we overlap succeeding segments trim them or,
179 * if they are completely covered, dequeue them.
180 */
181 while (q)
182 {
183 int i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
184 if (i <= 0)
185 break;
186 if (i < q->tqe_len)
187 {
188 q->tqe_th->th_seq += i;
189 q->tqe_len -= i;
190 m_adj(q->tqe_m, i);
191 break;
192 }
193
194 nq = LIST_NEXT(q, tqe_q);
195 LIST_REMOVE(q, tqe_q);
196 m_freem(pData, q->tqe_m);
197 RTMemFree(q);
198 tp->t_segqlen--;
199 tcp_reass_qsize--;
200 q = nq;
201 }
202
203 /* Insert the new segment queue entry into place. */
204 te->tqe_m = m;
205 te->tqe_th = th;
206 te->tqe_len = *tlenp;
207
208 if (p == NULL)
209 {
210 LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
211 }
212 else
213 {
214 LIST_INSERT_AFTER(p, te, tqe_q);
215 }
216
217present:
218 /*
219 * Present data to user, advancing rcv_nxt through
220 * completed sequence space.
221 */
222 if (!TCPS_HAVEESTABLISHED(tp->t_state))
223 {
224 STAM_PROFILE_STOP(&pData->StatTCP_reassamble, tcp_reassamble);
225 return (0);
226 }
227 q = LIST_FIRST(&tp->t_segq);
228 if (!q || q->tqe_th->th_seq != tp->rcv_nxt)
229 {
230 STAM_PROFILE_STOP(&pData->StatTCP_reassamble, tcp_reassamble);
231 return (0);
232 }
233 do
234 {
235 tp->rcv_nxt += q->tqe_len;
236 flags = q->tqe_th->th_flags & TH_FIN;
237 nq = LIST_NEXT(q, tqe_q);
238 LIST_REMOVE(q, tqe_q);
239 /* XXX: This place should be checked against the corresponding code in the
240 * original BSD Slirp; both it and current BSD use SS_FCANTRCVMORE here.
241 */
242 if (so->so_state & SS_FCANTSENDMORE)
243 m_freem(pData, q->tqe_m);
244 else
245 {
246 if (so->so_emu)
247 {
248 if (tcp_emu(pData, so, q->tqe_m))
249 sbappend(pData, so, q->tqe_m);
250 }
251 else
252 sbappend(pData, so, q->tqe_m);
253 }
254 RTMemFree(q);
255 tp->t_segqlen--;
256 tcp_reass_qsize--;
257 q = nq;
258 }
259 while (q && q->tqe_th->th_seq == tp->rcv_nxt);
260
261 STAM_PROFILE_STOP(&pData->StatTCP_reassamble, tcp_reassamble);
262 return flags;
263}
264
265/*
266 * TCP input routine, follows pages 65-76 of the
267 * protocol specification dated September, 1981 very closely.
268 */
269void
270tcp_input(PNATState pData, register struct mbuf *m, int iphlen, struct socket *inso)
271{
272 struct ip save_ip, *ip;
273 register struct tcpiphdr *ti;
274 caddr_t optp = NULL;
275 int optlen = 0;
276 int len, tlen, off;
277 register struct tcpcb *tp = 0;
278 register int tiflags;
279 struct socket *so = 0;
280 int todrop, acked, ourfinisacked, needoutput = 0;
281/* int dropsocket = 0; */
282 int iss = 0;
283 u_long tiwin;
284/* int ts_present = 0; */
285 STAM_PROFILE_START(&pData->StatTCP_input, counter_input);
286
287 DEBUG_CALL("tcp_input");
288 DEBUG_ARGS((dfd," m = %8lx iphlen = %2d inso = %lx\n",
289 (long )m, iphlen, (long )inso ));
290
291 if (inso != NULL)
292 {
293 QSOCKET_LOCK(tcb);
294 SOCKET_LOCK(inso);
295 QSOCKET_UNLOCK(tcb);
296 }
297 /*
298 * If called with m == 0, then we're continuing the connect
299 */
300 if (m == NULL)
301 {
302 so = inso;
303 Log4(("NAT: tcp_input: %R[natsock]\n", so));
304 /* Re-set a few variables */
305 tp = sototcpcb(so);
306 m = so->so_m;
307
308 so->so_m = 0;
309 ti = so->so_ti;
310
311 /** @todo (vvl) clarify why it might happen */
312 if (ti == NULL)
313 {
314 LogRel(("NAT: ti is null. can't do any reseting connection actions\n"));
315 /* mbuf should be cleared in sofree called from tcp_close */
316 tcp_close(pData, tp);
317 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
318 return;
319 }
320
321 tiwin = ti->ti_win;
322 tiflags = ti->ti_flags;
323
324 goto cont_conn;
325 }
326
327 tcpstat.tcps_rcvtotal++;
328 /*
329 * Get IP and TCP header together in first mbuf.
330 * Note: IP leaves IP header in first mbuf.
331 */
332 ti = mtod(m, struct tcpiphdr *);
333 if (iphlen > sizeof(struct ip ))
334 {
335 ip_stripoptions(m, (struct mbuf *)0);
336 iphlen = sizeof(struct ip );
337 }
338 /* XXX Check if too short */
339
340
341 /*
342 * Save a copy of the IP header in case we want to restore it
343 * for sending an ICMP error message in response.
344 */
345 ip = mtod(m, struct ip *);
346 save_ip = *ip;
347 save_ip.ip_len+= iphlen;
348
349 /*
350 * Checksum extended TCP header and data.
351 */
352 tlen = ((struct ip *)ti)->ip_len;
353 memset(ti->ti_x1, 0, 9);
354 ti->ti_len = RT_H2N_U16((u_int16_t)tlen);
355 len = sizeof(struct ip ) + tlen;
356 /* keep checksum for ICMP reply
357 * ti->ti_sum = cksum(m, len);
358 * if (ti->ti_sum) { */
359 if (cksum(m, len))
360 {
361 tcpstat.tcps_rcvbadsum++;
362 goto drop;
363 }
364
365 /*
366 * Check that TCP offset makes sense,
367 * pull out TCP options and adjust length. XXX
368 */
369 off = ti->ti_off << 2;
370 if ( off < sizeof (struct tcphdr)
371 || off > tlen)
372 {
373 tcpstat.tcps_rcvbadoff++;
374 goto drop;
375 }
376 tlen -= off;
377 ti->ti_len = tlen;
378 if (off > sizeof (struct tcphdr))
379 {
380 optlen = off - sizeof (struct tcphdr);
381 optp = mtod(m, caddr_t) + sizeof (struct tcpiphdr);
382
383 /*
384 * Do quick retrieval of timestamp options ("options
385 * prediction?"). If timestamp is the only option and it's
386 * formatted as recommended in RFC 1323 appendix A, we
387 * quickly get the values now and not bother calling
388 * tcp_dooptions(), etc.
389 */
390#if 0
391 if (( optlen == TCPOLEN_TSTAMP_APPA
392 || ( optlen > TCPOLEN_TSTAMP_APPA
393 && optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) &&
394 *(u_int32_t *)optp == RT_H2N_U32_C(TCPOPT_TSTAMP_HDR) &&
395 (ti->ti_flags & TH_SYN) == 0)
396 {
397 ts_present = 1;
398 ts_val = RT_N2H_U32(*(u_int32_t *)(optp + 4));
399 ts_ecr = RT_N2H_U32(*(u_int32_t *)(optp + 8));
400 optp = NULL; /* we have parsed the options */
401 }
402#endif
403 }
404 tiflags = ti->ti_flags;
405
406 /*
407 * Convert TCP protocol specific fields to host format.
408 */
409 NTOHL(ti->ti_seq);
410 NTOHL(ti->ti_ack);
411 NTOHS(ti->ti_win);
412 NTOHS(ti->ti_urp);
413
414 /*
415 * Drop TCP, IP headers and TCP options.
416 */
417 m->m_data += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
418 m->m_len -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
419
420 /*
421 * Locate pcb for segment.
422 */
423findso:
424 if (so != NULL && so != &tcb)
425 SOCKET_UNLOCK(so);
426 QSOCKET_LOCK(tcb);
427 so = tcp_last_so;
428 if ( so->so_fport != ti->ti_dport
429 || so->so_lport != ti->ti_sport
430 || so->so_laddr.s_addr != ti->ti_src.s_addr
431 || so->so_faddr.s_addr != ti->ti_dst.s_addr)
432 {
433#ifdef VBOX_WITH_SLIRP_MT
434 struct socket *sonxt;
435#endif
436 QSOCKET_UNLOCK(tcb);
437 /* @todo fix the SOLOOKUP macro definition to be usable here */
438#ifndef VBOX_WITH_SLIRP_MT
439 so = solookup(&tcb, ti->ti_src, ti->ti_sport,
440 ti->ti_dst, ti->ti_dport);
441#else
442 so = NULL;
443 QSOCKET_FOREACH(so, sonxt, tcp)
444 /* { */
445 if ( so->so_lport == ti->ti_sport
446 && so->so_laddr.s_addr == ti->ti_src.s_addr
447 && so->so_faddr.s_addr == ti->ti_dst.s_addr
448 && so->so_fport == ti->ti_dport
449 && so->so_deleted != 1)
450 {
451 break; /* so is locked here */
452 }
453 LOOP_LABEL(tcp, so, sonxt);
454 }
455 if (so == &tcb) {
456 so = NULL;
457 }
458#endif
459 if (so)
460 {
461 tcp_last_so = so;
462 }
463 ++tcpstat.tcps_socachemiss;
464 }
465 else
466 {
467 SOCKET_LOCK(so);
468 QSOCKET_UNLOCK(tcb);
469 }
470
471 /*
472 * If the state is CLOSED (i.e., TCB does not exist) then
473 * all data in the incoming segment is discarded.
474 * If the TCB exists but is in CLOSED state, it is embryonic,
475 * but should either do a listen or a connect soon.
476 *
477 * state == CLOSED means we've done socreate() but haven't
478 * attached it to a protocol yet...
479 *
480 * XXX If a TCB does not exist, and the TH_SYN flag is
481 * the only flag set, then create a session, mark it
482 * as if it was LISTENING, and continue...
483 */
484 if (so == 0)
485 {
486 if ((tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) != TH_SYN)
487 goto dropwithreset;
488
489 if ((so = socreate()) == NULL)
490 goto dropwithreset;
491 if (tcp_attach(pData, so) < 0)
492 {
493 RTMemFree(so); /* Not sofree (if it failed, it's not insqued) */
494 goto dropwithreset;
495 }
496 SOCKET_LOCK(so);
497 sbreserve(pData, &so->so_snd, tcp_sndspace);
498 sbreserve(pData, &so->so_rcv, tcp_rcvspace);
499
500/* tcp_last_so = so; */ /* XXX ? */
501/* tp = sototcpcb(so); */
502
503 so->so_laddr = ti->ti_src;
504 so->so_lport = ti->ti_sport;
505 so->so_faddr = ti->ti_dst;
506 so->so_fport = ti->ti_dport;
507
508 if ((so->so_iptos = tcp_tos(so)) == 0)
509 so->so_iptos = ((struct ip *)ti)->ip_tos;
510
511 tp = sototcpcb(so);
512 tp->t_state = TCPS_LISTEN;
513 }
514
515 /*
516 * If this is a still-connecting socket, this is probably
517 * a retransmit of the SYN. Whether it's a retransmitted SYN
518 * or something else, we nuke it.
519 */
520 if (so->so_state & SS_ISFCONNECTING)
521 {
522 goto drop;
523 }
524
525 tp = sototcpcb(so);
526
527 /* XXX Should never fail */
528 if (tp == 0)
529 goto dropwithreset;
530 if (tp->t_state == TCPS_CLOSED)
531 {
532 goto drop;
533 }
534
535 /* Unscale the window into a 32-bit value. */
536/* if ((tiflags & TH_SYN) == 0)
537 * tiwin = ti->ti_win << tp->snd_scale;
538 * else
539 */
540 tiwin = ti->ti_win;
541
542 /*
543 * Segment received on connection.
544 * Reset idle time and keep-alive timer.
545 */
546 tp->t_idle = 0;
547 if (so_options)
548 tp->t_timer[TCPT_KEEP] = tcp_keepintvl;
549 else
550 tp->t_timer[TCPT_KEEP] = tcp_keepidle;
551
552 /*
553 * Process options if not in LISTEN state,
554 * else do it below (after getting remote address).
555 */
556 if (optp && tp->t_state != TCPS_LISTEN)
557 tcp_dooptions(pData, tp, (u_char *)optp, optlen, ti);
558/* , */
559/* &ts_present, &ts_val, &ts_ecr); */
560
561 /*
562 * Header prediction: check for the two common cases
563 * of a uni-directional data xfer. If the packet has
564 * no control flags, is in-sequence, the window didn't
565 * change and we're not retransmitting, it's a
566 * candidate. If the length is zero and the ack moved
567 * forward, we're the sender side of the xfer. Just
568 * free the data acked & wake any higher level process
569 * that was blocked waiting for space. If the length
570 * is non-zero and the ack didn't move, we're the
571 * receiver side. If we're getting packets in-order
572 * (the reassembly queue is empty), add the data to
573 * the socket buffer and note that we need a delayed ack.
574 *
575 * XXX Some of these tests are not needed
576 * eg: the tiwin == tp->snd_wnd prevents many more
577 * predictions.. with no *real* advantage..
578 */
579 if ( tp->t_state == TCPS_ESTABLISHED
580 && (tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK
581/* && (!ts_present || TSTMP_GEQ(ts_val, tp->ts_recent)) */
582 && ti->ti_seq == tp->rcv_nxt
583 && tiwin && tiwin == tp->snd_wnd
584 && tp->snd_nxt == tp->snd_max)
585 {
586 /*
587 * If last ACK falls within this segment's sequence numbers,
588 * record the timestamp.
589 */
590#if 0
591 if (ts_present && SEQ_LEQ(ti->ti_seq, tp->last_ack_sent) &&
592 SEQ_LT(tp->last_ack_sent, ti->ti_seq + ti->ti_len))
593 {
594 tp->ts_recent_age = tcp_now;
595 tp->ts_recent = ts_val;
596 }
597#endif
598
599 if (ti->ti_len == 0)
600 {
601 if ( SEQ_GT(ti->ti_ack, tp->snd_una)
602 && SEQ_LEQ(ti->ti_ack, tp->snd_max)
603 && tp->snd_cwnd >= tp->snd_wnd)
604 {
605 /*
606 * this is a pure ack for outstanding data.
607 */
608 ++tcpstat.tcps_predack;
609#if 0
610 if (ts_present)
611 tcp_xmit_timer(tp, tcp_now-ts_ecr+1);
612 else
613#endif
614 if ( tp->t_rtt
615 && SEQ_GT(ti->ti_ack, tp->t_rtseq))
616 tcp_xmit_timer(pData, tp, tp->t_rtt);
617 acked = ti->ti_ack - tp->snd_una;
618 tcpstat.tcps_rcvackpack++;
619 tcpstat.tcps_rcvackbyte += acked;
620 sbdrop(&so->so_snd, acked);
621 tp->snd_una = ti->ti_ack;
622 m_freem(pData, m);
623
624 /*
625 * If all outstanding data are acked, stop
626 * retransmit timer, otherwise restart timer
627 * using current (possibly backed-off) value.
628 * If process is waiting for space,
629 * wakeup/selwakeup/signal. If data
630 * are ready to send, let tcp_output
631 * decide between more output or persist.
632 */
633 if (tp->snd_una == tp->snd_max)
634 tp->t_timer[TCPT_REXMT] = 0;
635 else if (tp->t_timer[TCPT_PERSIST] == 0)
636 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
637
638 /*
639 * There's room in so_snd, sowwakeup will read()
640 * from the socket if we can
641 */
642#if 0
643 if (so->so_snd.sb_flags & SB_NOTIFY)
644 sowwakeup(so);
645#endif
646 /*
647 * This is called because sowwakeup might have
648 * put data into so_snd. Since we don't do sowwakeup,
649 * we don't need this.. XXX???
650 */
651 if (so->so_snd.sb_cc)
652 (void) tcp_output(pData, tp);
653
654 SOCKET_UNLOCK(so);
655 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
656 return;
657 }
658 }
659 else if ( ti->ti_ack == tp->snd_una
660 && LIST_FIRST(&tp->t_segq)
661 && ti->ti_len <= sbspace(&so->so_rcv))
662 {
663 /*
664 * this is a pure, in-sequence data packet
665 * with nothing on the reassembly queue and
666 * we have enough buffer space to take it.
667 */
668 ++tcpstat.tcps_preddat;
669 tp->rcv_nxt += ti->ti_len;
670 tcpstat.tcps_rcvpack++;
671 tcpstat.tcps_rcvbyte += ti->ti_len;
672 /*
673 * Add data to socket buffer.
674 */
675 if (so->so_emu)
676 {
677 if (tcp_emu(pData, so, m))
678 sbappend(pData, so, m);
679 }
680 else
681 sbappend(pData, so, m);
682
683 /*
684 * XXX This is called when data arrives. Later, check
685 * if we can actually write() to the socket
686 * XXX Need to check? It should be NON_BLOCKING
687 */
688/* sorwakeup(so); */
689
690 /*
691 * If this is a short packet, then ACK now - with Nagle
692 * congestion avoidance sender won't send more until
693 * he gets an ACK.
694 *
695 * It is better to not delay acks at all to maximize
696 * TCP throughput. See RFC 2581.
697 */
698 tp->t_flags |= TF_ACKNOW;
699 tcp_output(pData, tp);
700 SOCKET_UNLOCK(so);
701 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
702 return;
703 }
704 } /* header prediction */
705 /*
706 * Calculate amount of space in receive window,
707 * and then do TCP input processing.
708 * Receive window is amount of space in rcv queue,
709 * but not less than advertised window.
710 */
711 {
712 int win;
713 win = sbspace(&so->so_rcv);
714 if (win < 0)
715 win = 0;
716 tp->rcv_wnd = max(win, (int)(tp->rcv_adv - tp->rcv_nxt));
717 }
718
719 switch (tp->t_state)
720 {
721 /*
722 * If the state is LISTEN then ignore segment if it contains an RST.
723 * If the segment contains an ACK then it is bad and send a RST.
724 * If it does not contain a SYN then it is not interesting; drop it.
725 * Don't bother responding if the destination was a broadcast.
726 * Otherwise initialize tp->rcv_nxt, and tp->irs, select an initial
727 * tp->iss, and send a segment:
728 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
729 * Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss.
730 * Fill in remote peer address fields if not previously specified.
731 * Enter SYN_RECEIVED state, and process any other fields of this
732 * segment in this state.
733 */
734 case TCPS_LISTEN:
735 {
736 if (tiflags & TH_RST) {
737 goto drop;
738 }
739 if (tiflags & TH_ACK)
740 goto dropwithreset;
741 if ((tiflags & TH_SYN) == 0)
742 {
743 goto drop;
744 }
745
746 /*
747 * This has way too many gotos...
748 * But a bit of spaghetti code never hurt anybody :)
749 */
750
751 if (so->so_emu & EMU_NOCONNECT)
752 {
753 so->so_emu &= ~EMU_NOCONNECT;
754 goto cont_input;
755 }
756
757 if ( (tcp_fconnect(pData, so) == -1)
758 && errno != EINPROGRESS
759 && errno != EWOULDBLOCK)
760 {
761 u_char code = ICMP_UNREACH_NET;
762 DEBUG_MISC((dfd," tcp fconnect errno = %d-%s\n",
763 errno, strerror(errno)));
764 if (errno == ECONNREFUSED)
765 {
766 /* ACK the SYN, send RST to refuse the connection */
767 tcp_respond(pData, tp, ti, m, ti->ti_seq+1, (tcp_seq)0,
768 TH_RST|TH_ACK);
769 }
770 else
771 {
772 if (errno == EHOSTUNREACH)
773 code = ICMP_UNREACH_HOST;
774 HTONL(ti->ti_seq); /* restore tcp header */
775 HTONL(ti->ti_ack);
776 HTONS(ti->ti_win);
777 HTONS(ti->ti_urp);
778 m->m_data -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
779 m->m_len += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
780 *ip = save_ip;
781 icmp_error(pData, m, ICMP_UNREACH, code, 0, strerror(errno));
782 tp->t_socket->so_m = NULL;
783 }
784 tp = tcp_close(pData, tp);
785 m_free(pData, m);
786 }
787 else
788 {
789 /*
790 * Haven't connected yet, save the current mbuf
791 * and ti, and return
792 * XXX Some OS's don't tell us whether the connect()
793 * succeeded or not. So we must time it out.
794 */
795 so->so_m = m;
796 so->so_ti = ti;
797 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
798 tp->t_state = TCPS_SYN_RECEIVED;
799 }
800 SOCKET_UNLOCK(so);
801 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
802 return;
803
804cont_conn:
805 /* m==NULL
806 * Check if the connect succeeded
807 */
808 if (so->so_state & SS_NOFDREF)
809 {
810 tp = tcp_close(pData, tp);
811 goto dropwithreset;
812 }
813cont_input:
814 tcp_template(tp);
815
816 if (optp)
817 tcp_dooptions(pData, tp, (u_char *)optp, optlen, ti);
818
819 if (iss)
820 tp->iss = iss;
821 else
822 tp->iss = tcp_iss;
823 tcp_iss += TCP_ISSINCR/2;
824 tp->irs = ti->ti_seq;
825 tcp_sendseqinit(tp);
826 tcp_rcvseqinit(tp);
827 tp->t_flags |= TF_ACKNOW;
828 tp->t_state = TCPS_SYN_RECEIVED;
829 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
830 tcpstat.tcps_accepts++;
831 goto trimthenstep6;
832 } /* case TCPS_LISTEN */
833
834 /*
835 * If the state is SYN_SENT:
836 * if seg contains an ACK, but not for our SYN, drop the input.
837 * if seg contains a RST, then drop the connection.
838 * if seg does not contain SYN, then drop it.
839 * Otherwise this is an acceptable SYN segment
840 * initialize tp->rcv_nxt and tp->irs
841 * if seg contains ack then advance tp->snd_una
842 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
843 * arrange for segment to be acked (eventually)
844 * continue processing rest of data/controls, beginning with URG
845 */
846 case TCPS_SYN_SENT:
847 if ( (tiflags & TH_ACK)
848 && ( SEQ_LEQ(ti->ti_ack, tp->iss)
849 || SEQ_GT(ti->ti_ack, tp->snd_max)))
850 goto dropwithreset;
851
852 if (tiflags & TH_RST)
853 {
854 if (tiflags & TH_ACK)
855 tp = tcp_drop(pData, tp, 0); /* XXX Check t_softerror! */
856 goto drop;
857 }
858
859 if ((tiflags & TH_SYN) == 0)
860 {
861 goto drop;
862 }
863 if (tiflags & TH_ACK)
864 {
865 tp->snd_una = ti->ti_ack;
866 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
867 tp->snd_nxt = tp->snd_una;
868 }
869
870 tp->t_timer[TCPT_REXMT] = 0;
871 tp->irs = ti->ti_seq;
872 tcp_rcvseqinit(tp);
873 tp->t_flags |= TF_ACKNOW;
874 if (tiflags & TH_ACK && SEQ_GT(tp->snd_una, tp->iss))
875 {
876 tcpstat.tcps_connects++;
877 soisfconnected(so);
878 tp->t_state = TCPS_ESTABLISHED;
879
880 /* Do window scaling on this connection? */
881#if 0
882 if (( tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE))
883 == (TF_RCVD_SCALE|TF_REQ_SCALE))
884 {
885 tp->snd_scale = tp->requested_s_scale;
886 tp->rcv_scale = tp->request_r_scale;
887 }
888#endif
889 (void) tcp_reass(pData, tp, (struct tcphdr *)0, NULL, (struct mbuf *)0);
890 /*
891 * if we didn't have to retransmit the SYN,
892 * use its rtt as our initial srtt & rtt var.
893 */
894 if (tp->t_rtt)
895 tcp_xmit_timer(pData, tp, tp->t_rtt);
896 }
897 else
898 tp->t_state = TCPS_SYN_RECEIVED;
899
900trimthenstep6:
901 /*
902 * Advance ti->ti_seq to correspond to first data byte.
903 * If data, trim to stay within window,
904 * dropping FIN if necessary.
905 */
906 ti->ti_seq++;
907 if (ti->ti_len > tp->rcv_wnd)
908 {
909 todrop = ti->ti_len - tp->rcv_wnd;
910 m_adj(m, -todrop);
911 ti->ti_len = tp->rcv_wnd;
912 tiflags &= ~TH_FIN;
913 tcpstat.tcps_rcvpackafterwin++;
914 tcpstat.tcps_rcvbyteafterwin += todrop;
915 }
916 tp->snd_wl1 = ti->ti_seq - 1;
917 tp->rcv_up = ti->ti_seq;
918 Log2(("hit6"));
919 goto step6;
920 } /* switch tp->t_state */
921 /*
922 * States other than LISTEN or SYN_SENT.
923 * First check timestamp, if present.
924 * Then check that at least some bytes of segment are within
925 * receive window. If segment begins before rcv_nxt,
926 * drop leading data (and SYN); if nothing left, just ack.
927 *
928 * RFC 1323 PAWS: If we have a timestamp reply on this segment
929 * and it's less than ts_recent, drop it.
930 */
931#if 0
932 if ( ts_present
933 && (tiflags & TH_RST) == 0
934 && tp->ts_recent
935 && TSTMP_LT(ts_val, tp->ts_recent))
936 {
937 /* Check to see if ts_recent is over 24 days old. */
938 if ((int)(tcp_now - tp->ts_recent_age) > TCP_PAWS_IDLE)
939 {
940 /*
941 * Invalidate ts_recent. If this segment updates
942 * ts_recent, the age will be reset later and ts_recent
943 * will get a valid value. If it does not, setting
944 * ts_recent to zero will at least satisfy the
945 * requirement that zero be placed in the timestamp
946 * echo reply when ts_recent isn't valid. The
947 * age isn't reset until we get a valid ts_recent
948 * because we don't want out-of-order segments to be
949 * dropped when ts_recent is old.
950 */
951 tp->ts_recent = 0;
952 }
953 else
954 {
955 tcpstat.tcps_rcvduppack++;
956 tcpstat.tcps_rcvdupbyte += ti->ti_len;
957 tcpstat.tcps_pawsdrop++;
958 goto dropafterack;
959 }
960 }
961#endif
962
963 todrop = tp->rcv_nxt - ti->ti_seq;
964 if (todrop > 0)
965 {
966 if (tiflags & TH_SYN)
967 {
968 tiflags &= ~TH_SYN;
969 ti->ti_seq++;
970 if (ti->ti_urp > 1)
971 ti->ti_urp--;
972 else
973 tiflags &= ~TH_URG;
974 todrop--;
975 }
976 /*
977 * Following if statement from Stevens, vol. 2, p. 960.
978 */
979 if ( todrop > ti->ti_len
980 || ( todrop == ti->ti_len
981 && (tiflags & TH_FIN) == 0))
982 {
983 /*
984 * Any valid FIN must be to the left of the window.
985 * At this point the FIN must be a duplicate or out
986 * of sequence; drop it.
987 */
988 tiflags &= ~TH_FIN;
989
990 /*
991 * Send an ACK to resynchronize and drop any data.
992 * But keep on processing for RST or ACK.
993 */
994 tp->t_flags |= TF_ACKNOW;
995 todrop = ti->ti_len;
996 tcpstat.tcps_rcvduppack++;
997 tcpstat.tcps_rcvdupbyte += todrop;
998 }
999 else
1000 {
1001 tcpstat.tcps_rcvpartduppack++;
1002 tcpstat.tcps_rcvpartdupbyte += todrop;
1003 }
1004 m_adj(m, todrop);
1005 ti->ti_seq += todrop;
1006 ti->ti_len -= todrop;
1007 if (ti->ti_urp > todrop)
1008 ti->ti_urp -= todrop;
1009 else
1010 {
1011 tiflags &= ~TH_URG;
1012 ti->ti_urp = 0;
1013 }
1014 }
1015 /*
1016 * If new data are received on a connection after the
1017 * user processes are gone, then RST the other end.
1018 */
1019 if ( (so->so_state & SS_NOFDREF)
1020 && tp->t_state > TCPS_CLOSE_WAIT && ti->ti_len)
1021 {
1022 tp = tcp_close(pData, tp);
1023 tcpstat.tcps_rcvafterclose++;
1024 goto dropwithreset;
1025 }
1026
1027 /*
1028 * If segment ends after window, drop trailing data
1029 * (and PUSH and FIN); if nothing left, just ACK.
1030 */
1031 todrop = (ti->ti_seq+ti->ti_len) - (tp->rcv_nxt+tp->rcv_wnd);
1032 if (todrop > 0)
1033 {
1034 tcpstat.tcps_rcvpackafterwin++;
1035 if (todrop >= ti->ti_len)
1036 {
1037 tcpstat.tcps_rcvbyteafterwin += ti->ti_len;
1038 /*
1039 * If a new connection request is received
1040 * while in TIME_WAIT, drop the old connection
1041 * and start over if the sequence numbers
1042 * are above the previous ones.
1043 */
1044 if ( tiflags & TH_SYN
1045 && tp->t_state == TCPS_TIME_WAIT
1046 && SEQ_GT(ti->ti_seq, tp->rcv_nxt))
1047 {
1048 iss = tp->rcv_nxt + TCP_ISSINCR;
1049 tp = tcp_close(pData, tp);
1050 SOCKET_UNLOCK(tp->t_socket);
1051 goto findso;
1052 }
1053 /*
1054 * If window is closed can only take segments at
1055 * window edge, and have to drop data and PUSH from
1056 * incoming segments. Continue processing, but
1057 * remember to ack. Otherwise, drop segment
1058 * and ack.
1059 */
1060 if (tp->rcv_wnd == 0 && ti->ti_seq == tp->rcv_nxt)
1061 {
1062 tp->t_flags |= TF_ACKNOW;
1063 tcpstat.tcps_rcvwinprobe++;
1064 }
1065 else
1066 goto dropafterack;
1067 }
1068 else
1069 tcpstat.tcps_rcvbyteafterwin += todrop;
1070 m_adj(m, -todrop);
1071 ti->ti_len -= todrop;
1072 tiflags &= ~(TH_PUSH|TH_FIN);
1073 }
1074
1075 /*
1076 * If last ACK falls within this segment's sequence numbers,
1077 * record its timestamp.
1078 */
1079#if 0
1080 if ( ts_present
1081 && SEQ_LEQ(ti->ti_seq, tp->last_ack_sent)
1082 && SEQ_LT(tp->last_ack_sent, ti->ti_seq + ti->ti_len + ((tiflags & (TH_SYN|TH_FIN)) != 0)))
1083 {
1084 tp->ts_recent_age = tcp_now;
1085 tp->ts_recent = ts_val;
1086 }
1087#endif
1088
1089 /*
1090 * If the RST bit is set examine the state:
1091 * SYN_RECEIVED STATE:
1092 * If passive open, return to LISTEN state.
1093 * If active open, inform user that connection was refused.
1094 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT2, CLOSE_WAIT STATES:
1095 * Inform user that connection was reset, and close tcb.
1096 * CLOSING, LAST_ACK, TIME_WAIT STATES
1097 * Close the tcb.
1098 */
1099 if (tiflags&TH_RST)
1100 switch (tp->t_state)
1101 {
1102 case TCPS_SYN_RECEIVED:
1103/* so->so_error = ECONNREFUSED; */
1104 goto close;
1105
1106 case TCPS_ESTABLISHED:
1107 case TCPS_FIN_WAIT_1:
1108 case TCPS_FIN_WAIT_2:
1109 case TCPS_CLOSE_WAIT:
1110/* so->so_error = ECONNRESET; */
1111close:
1112 tp->t_state = TCPS_CLOSED;
1113 tcpstat.tcps_drops++;
1114 tp = tcp_close(pData, tp);
1115 goto drop;
1116
1117 case TCPS_CLOSING:
1118 case TCPS_LAST_ACK:
1119 case TCPS_TIME_WAIT:
1120 tp = tcp_close(pData, tp);
1121 goto drop;
1122 }
1123
1124 /*
1125 * If a SYN is in the window, then this is an
1126 * error and we send an RST and drop the connection.
1127 */
1128 if (tiflags & TH_SYN)
1129 {
1130 tp = tcp_drop(pData, tp, 0);
1131 goto dropwithreset;
1132 }
1133
1134 /*
1135 * If the ACK bit is off we drop the segment and return.
1136 */
1137 if ((tiflags & TH_ACK) == 0)
1138 {
1139 goto drop;
1140 }
1141
1142 /*
1143 * Ack processing.
1144 */
1145 switch (tp->t_state)
1146 {
1147 /*
1148 * In SYN_RECEIVED state if the ack ACKs our SYN then enter
1149 * ESTABLISHED state and continue processing, otherwise
1150 * send an RST. una<=ack<=max
1151 */
1152 case TCPS_SYN_RECEIVED:
1153 if ( SEQ_GT(tp->snd_una, ti->ti_ack)
1154 || SEQ_GT(ti->ti_ack, tp->snd_max))
1155 goto dropwithreset;
1156 tcpstat.tcps_connects++;
1157 tp->t_state = TCPS_ESTABLISHED;
1158 /*
1159 * The sent SYN is ack'ed with our sequence number +1
1160 * The first data byte already in the buffer will get
1161 * lost if no correction is made. This is only needed for
1162 * SS_CTL since the buffer is empty otherwise.
1163 * tp->snd_una++; or:
1164 */
1165 tp->snd_una = ti->ti_ack;
1166 soisfconnected(so);
1167
1168 /* Do window scaling? */
1169#if 0
1170 if ( (tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE))
1171 == (TF_RCVD_SCALE|TF_REQ_SCALE))
1172 {
1173 tp->snd_scale = tp->requested_s_scale;
1174 tp->rcv_scale = tp->request_r_scale;
1175 }
1176#endif
1177 (void) tcp_reass(pData, tp, (struct tcphdr *)0, (int *)0, (struct mbuf *)0);
1178 tp->snd_wl1 = ti->ti_seq - 1;
1179 /* Avoid ack processing; snd_una==ti_ack => dup ack */
1180 goto synrx_to_est;
1181 /* fall into ... */
1182
1183 /*
1184 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
1185 * ACKs. If the ack is in the range
1186 * tp->snd_una < ti->ti_ack <= tp->snd_max
1187 * then advance tp->snd_una to ti->ti_ack and drop
1188 * data from the retransmission queue. If this ACK reflects
1189 * more up to date window information we update our window information.
1190 */
1191 case TCPS_ESTABLISHED:
1192 case TCPS_FIN_WAIT_1:
1193 case TCPS_FIN_WAIT_2:
1194 case TCPS_CLOSE_WAIT:
1195 case TCPS_CLOSING:
1196 case TCPS_LAST_ACK:
1197 case TCPS_TIME_WAIT:
1198 if (SEQ_LEQ(ti->ti_ack, tp->snd_una))
1199 {
1200 if (ti->ti_len == 0 && tiwin == tp->snd_wnd)
1201 {
1202 tcpstat.tcps_rcvdupack++;
1203 DEBUG_MISC((dfd," dup ack m = %lx so = %lx \n",
1204 (long )m, (long )so));
1205 /*
1206 * If we have outstanding data (other than
1207 * a window probe), this is a completely
1208 * duplicate ack (ie, window info didn't
1209 * change), the ack is the biggest we've
1210 * seen and we've seen exactly our rexmt
1211 * threshold of them, assume a packet
1212 * has been dropped and retransmit it.
1213 * Kludge snd_nxt & the congestion
1214 * window so we send only this one
1215 * packet.
1216 *
1217 * We know we're losing at the current
1218 * window size so do congestion avoidance
1219 * (set ssthresh to half the current window
1220 * and pull our congestion window back to
1221 * the new ssthresh).
1222 *
1223 * Dup acks mean that packets have left the
1224 * network (they're now cached at the receiver)
1225 * so bump cwnd by the amount in the receiver
1226 * to keep a constant cwnd packets in the
1227 * network.
1228 */
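 /*
  * Rough illustration, assuming the usual tcprexmtthresh of 3 and
  * t_maxseg = 1460: on the third duplicate ACK ssthresh becomes half the
  * current flight (but at least 2 segments), the presumed-lost segment is
  * retransmitted with cwnd temporarily forced to one segment, and cwnd is
  * then set to ssthresh + 3 * 1460 so that each further duplicate ACK can
  * clock one new segment into the network until the hole is filled.
  */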
1229 if ( tp->t_timer[TCPT_REXMT] == 0
1230 || ti->ti_ack != tp->snd_una)
1231 tp->t_dupacks = 0;
1232 else if (++tp->t_dupacks == tcprexmtthresh)
1233 {
1234 tcp_seq onxt = tp->snd_nxt;
1235 u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
1236 if (win < 2)
1237 win = 2;
1238 tp->snd_ssthresh = win * tp->t_maxseg;
1239 tp->t_timer[TCPT_REXMT] = 0;
1240 tp->t_rtt = 0;
1241 tp->snd_nxt = ti->ti_ack;
1242 tp->snd_cwnd = tp->t_maxseg;
1243 (void) tcp_output(pData, tp);
1244 tp->snd_cwnd = tp->snd_ssthresh +
1245 tp->t_maxseg * tp->t_dupacks;
1246 if (SEQ_GT(onxt, tp->snd_nxt))
1247 tp->snd_nxt = onxt;
1248 goto drop;
1249 }
1250 else if (tp->t_dupacks > tcprexmtthresh)
1251 {
1252 tp->snd_cwnd += tp->t_maxseg;
1253 (void) tcp_output(pData, tp);
1254 goto drop;
1255 }
1256 }
1257 else
1258 tp->t_dupacks = 0;
1259 break;
1260 }
1261synrx_to_est:
1262 /*
1263 * If the congestion window was inflated to account
1264 * for the other side's cached packets, retract it.
1265 */
1266 if ( tp->t_dupacks > tcprexmtthresh
1267 && tp->snd_cwnd > tp->snd_ssthresh)
1268 tp->snd_cwnd = tp->snd_ssthresh;
1269 tp->t_dupacks = 0;
1270 if (SEQ_GT(ti->ti_ack, tp->snd_max))
1271 {
1272 tcpstat.tcps_rcvacktoomuch++;
1273 goto dropafterack;
1274 }
1275 acked = ti->ti_ack - tp->snd_una;
1276 tcpstat.tcps_rcvackpack++;
1277 tcpstat.tcps_rcvackbyte += acked;
1278
1279 /*
1280 * If we have a timestamp reply, update smoothed
1281 * round trip time. If no timestamp is present but
1282 * transmit timer is running and timed sequence
1283 * number was acked, update smoothed round trip time.
1284 * Since we now have an rtt measurement, cancel the
1285 * timer backoff (cf., Phil Karn's retransmit alg.).
1286 * Recompute the initial retransmit timer.
1287 */
1288#if 0
1289 if (ts_present)
1290 tcp_xmit_timer(tp, tcp_now-ts_ecr+1);
1291 else
1292#endif
1293 if (tp->t_rtt && SEQ_GT(ti->ti_ack, tp->t_rtseq))
1294 tcp_xmit_timer(pData, tp, tp->t_rtt);
1295
1296 /*
1297 * If all outstanding data is acked, stop retransmit
1298 * timer and remember to restart (more output or persist).
1299 * If there is more data to be acked, restart retransmit
1300 * timer, using current (possibly backed-off) value.
1301 */
1302 if (ti->ti_ack == tp->snd_max)
1303 {
1304 tp->t_timer[TCPT_REXMT] = 0;
1305 needoutput = 1;
1306 }
1307 else if (tp->t_timer[TCPT_PERSIST] == 0)
1308 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
1309 /*
1310 * When new data is acked, open the congestion window.
1311 * If the window gives us less than ssthresh packets
1312 * in flight, open exponentially (maxseg per packet).
1313 * Otherwise open linearly: maxseg per window
1314 * (maxseg^2 / cwnd per packet).
1315 */
1316 {
1317 register u_int cw = tp->snd_cwnd;
1318 register u_int incr = tp->t_maxseg;
1319
1320 if (cw > tp->snd_ssthresh)
1321 incr = incr * incr / cw;
1322 tp->snd_cwnd = min(cw + incr, TCP_MAXWIN<<tp->snd_scale);
1323 }
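 /*
  * Illustrative numbers: with t_maxseg = 1460 and cwnd = 5840 (four
  * segments) below ssthresh, each ACK adds a full 1460 (slow start).
  * Once cwnd = 14600 exceeds ssthresh, the increment per ACK becomes
  * 1460 * 1460 / 14600 = 146, i.e. roughly one segment per window
  * (congestion avoidance), capped at TCP_MAXWIN << snd_scale.
  */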
1324 if (acked > so->so_snd.sb_cc)
1325 {
1326 tp->snd_wnd -= so->so_snd.sb_cc;
1327 sbdrop(&so->so_snd, (int )so->so_snd.sb_cc);
1328 ourfinisacked = 1;
1329 }
1330 else
1331 {
1332 sbdrop(&so->so_snd, acked);
1333 tp->snd_wnd -= acked;
1334 ourfinisacked = 0;
1335 }
1336 /*
1337 * XXX sowwakeup is called when data is acked and there's room
1338 * for more data... it should read() the socket
1339 */
1340#if 0
1341 if (so->so_snd.sb_flags & SB_NOTIFY)
1342 sowwakeup(so);
1343#endif
1344 tp->snd_una = ti->ti_ack;
1345 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
1346 tp->snd_nxt = tp->snd_una;
1347
1348 switch (tp->t_state)
1349 {
1350 /*
1351 * In FIN_WAIT_1 STATE in addition to the processing
1352 * for the ESTABLISHED state if our FIN is now acknowledged
1353 * then enter FIN_WAIT_2.
1354 */
1355 case TCPS_FIN_WAIT_1:
1356 if (ourfinisacked)
1357 {
1358 /*
1359 * If we can't receive any more
1360 * data, then closing user can proceed.
1361 * Starting the timer is contrary to the
1362 * specification, but if we don't get a FIN
1363 * we'll hang forever.
1364 */
1365 if (so->so_state & SS_FCANTRCVMORE)
1366 {
1367 soisfdisconnected(so);
1368 tp->t_timer[TCPT_2MSL] = tcp_maxidle;
1369 }
1370 tp->t_state = TCPS_FIN_WAIT_2;
1371 }
1372 break;
1373
1374 /*
1375 * In CLOSING STATE in addition to the processing for
1376 * the ESTABLISHED state if the ACK acknowledges our FIN
1377 * then enter the TIME-WAIT state, otherwise ignore
1378 * the segment.
1379 */
1380 case TCPS_CLOSING:
1381 if (ourfinisacked)
1382 {
1383 tp->t_state = TCPS_TIME_WAIT;
1384 tcp_canceltimers(tp);
1385 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1386 soisfdisconnected(so);
1387 }
1388 break;
1389
1390 /*
1391 * In LAST_ACK, we may still be waiting for data to drain
1392 * and/or to be acked, as well as for the ack of our FIN.
1393 * If our FIN is now acknowledged, delete the TCB,
1394 * enter the closed state and return.
1395 */
1396 case TCPS_LAST_ACK:
1397 if (ourfinisacked)
1398 {
1399 tp = tcp_close(pData, tp);
1400 goto drop;
1401 }
1402 break;
1403
1404 /*
1405 * In TIME_WAIT state the only thing that should arrive
1406 * is a retransmission of the remote FIN. Acknowledge
1407 * it and restart the finack timer.
1408 */
1409 case TCPS_TIME_WAIT:
1410 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1411 goto dropafterack;
1412 }
1413 } /* switch(tp->t_state) */
1414
1415step6:
1416 /*
1417 * Update window information.
1418 * Don't look at window if no ACK: TAC's send garbage on first SYN.
1419 */
1420 if ( (tiflags & TH_ACK)
1421 && ( SEQ_LT(tp->snd_wl1, ti->ti_seq)
1422 || ( tp->snd_wl1 == ti->ti_seq
1423 && ( SEQ_LT(tp->snd_wl2, ti->ti_ack)
1424 || ( tp->snd_wl2 == ti->ti_ack
1425 && tiwin > tp->snd_wnd)))))
1426 {
1427 /* keep track of pure window updates */
1428 if ( ti->ti_len == 0
1429 && tp->snd_wl2 == ti->ti_ack
1430 && tiwin > tp->snd_wnd)
1431 tcpstat.tcps_rcvwinupd++;
1432 tp->snd_wnd = tiwin;
1433 tp->snd_wl1 = ti->ti_seq;
1434 tp->snd_wl2 = ti->ti_ack;
1435 if (tp->snd_wnd > tp->max_sndwnd)
1436 tp->max_sndwnd = tp->snd_wnd;
1437 needoutput = 1;
1438 }
1439
1440 /*
1441 * Process segments with URG.
1442 */
1443 if ((tiflags & TH_URG) && ti->ti_urp &&
1444 TCPS_HAVERCVDFIN(tp->t_state) == 0)
1445 {
1446 /*
1447 * This is a kludge, but if we receive and accept
1448 * random urgent pointers, we'll crash in
1449 * soreceive. It's hard to imagine someone
1450 * actually wanting to send this much urgent data.
1451 */
1452 if (ti->ti_urp + so->so_rcv.sb_cc > so->so_rcv.sb_datalen)
1453 {
1454 ti->ti_urp = 0;
1455 tiflags &= ~TH_URG;
1456 goto dodata;
1457 }
1458 /*
1459 * If this segment advances the known urgent pointer,
1460 * then mark the data stream. This should not happen
1461 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
1462 * a FIN has been received from the remote side.
1463 * In these states we ignore the URG.
1464 *
1465 * According to RFC961 (Assigned Protocols),
1466 * the urgent pointer points to the last octet
1467 * of urgent data. We continue, however,
1468 * to consider it to indicate the first octet
1469 * of data past the urgent section as the original
1470 * spec states (in one of two places).
1471 */
1472 if (SEQ_GT(ti->ti_seq+ti->ti_urp, tp->rcv_up))
1473 {
1474 tp->rcv_up = ti->ti_seq + ti->ti_urp;
1475 so->so_urgc = so->so_rcv.sb_cc +
1476 (tp->rcv_up - tp->rcv_nxt); /* -1; */
1477 tp->rcv_up = ti->ti_seq + ti->ti_urp;
1478 }
1479 }
1480 else
1481 /*
1482 * If no out of band data is expected,
1483 * pull receive urgent pointer along
1484 * with the receive window.
1485 */
1486 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
1487 tp->rcv_up = tp->rcv_nxt;
1488dodata:
1489
1490 /*
1491 * If this is a small packet, then ACK now - with Nagle
1492 * congestion avoidance sender won't send more until
1493 * he gets an ACK.
1494 *
1495 * See above.
1496 */
1497 if ( ti->ti_len
1498 && (unsigned)ti->ti_len <= 5
1499 && ((struct tcpiphdr_2 *)ti)->first_char == (char)27)
1500 {
1501 tp->t_flags |= TF_ACKNOW;
1502 }
1503
1504 /*
1505 * Process the segment text, merging it into the TCP sequencing queue,
1506 * and arranging for acknowledgment of receipt if necessary.
1507 * This process logically involves adjusting tp->rcv_wnd as data
1508 * is presented to the user (this happens in tcp_usrreq.c,
1509 * case PRU_RCVD). If a FIN has already been received on this
1510 * connection then we just ignore the text.
1511 */
1512 if ( (ti->ti_len || (tiflags&TH_FIN))
1513 && TCPS_HAVERCVDFIN(tp->t_state) == 0)
1514 {
1515 if ( ti->ti_seq == tp->rcv_nxt
1516 && LIST_EMPTY(&tp->t_segq)
1517 && tp->t_state == TCPS_ESTABLISHED)
1518 {
1519 DELAY_ACK(tp, ti); /* a little different from the BSD declaration, see netinet/tcp_input.c */
1520 tp->rcv_nxt += tlen;
1521 tiflags = ti->ti_t.th_flags & TH_FIN;
1522 tcpstat.tcps_rcvpack++;
1523 tcpstat.tcps_rcvbyte += tlen;
1524 if (so->so_state & SS_FCANTRCVMORE)
1525 m_freem(pData, m);
1526 else
1527 {
1528 if (so->so_emu)
1529 {
1530 if (tcp_emu(pData, so, m))
1531 sbappend(pData, so, m);
1532 }
1533 else
1534 sbappend(pData, so, m);
1535 }
1536 }
1537 else
1538 {
1539 tiflags = tcp_reass(pData, tp, &ti->ti_t, &tlen, m);
1540 tiflags |= TF_ACKNOW;
1541 }
1542 /*
1543 * Note the amount of data that peer has sent into
1544 * our window, in order to estimate the sender's
1545 * buffer size.
1546 */
1547 len = so->so_rcv.sb_datalen - (tp->rcv_adv - tp->rcv_nxt);
1548 }
1549 else
1550 {
1551 m_free(pData, m);
1552 tiflags &= ~TH_FIN;
1553 }
1554
1555 /*
1556 * If FIN is received ACK the FIN and let the user know
1557 * that the connection is closing.
1558 */
1559 if (tiflags & TH_FIN)
1560 {
1561 if (TCPS_HAVERCVDFIN(tp->t_state) == 0)
1562 {
1563 /*
1564 * If we receive a FIN we can't send more data,
1565 * so set SS_FDRAIN.
1566 * Shutdown the socket if there is no rx data in the
1567 * buffer.
1568 * soread() is called on completion of shutdown() and
1569 * will go to TCPS_LAST_ACK, and use tcp_output()
1570 * to send the FIN.
1571 */
1572/* sofcantrcvmore(so); */
1573 sofwdrain(so);
1574
1575 tp->t_flags |= TF_ACKNOW;
1576 tp->rcv_nxt++;
1577 }
1578 switch (tp->t_state)
1579 {
1580 /*
1581 * In SYN_RECEIVED and ESTABLISHED STATES
1582 * enter the CLOSE_WAIT state.
1583 */
1584 case TCPS_SYN_RECEIVED:
1585 case TCPS_ESTABLISHED:
1586 if(so->so_emu == EMU_CTL) /* no shutdown on socket */
1587 tp->t_state = TCPS_LAST_ACK;
1588 else
1589 tp->t_state = TCPS_CLOSE_WAIT;
1590 break;
1591
1592 /*
1593 * If still in FIN_WAIT_1 STATE FIN has not been acked so
1594 * enter the CLOSING state.
1595 */
1596 case TCPS_FIN_WAIT_1:
1597 tp->t_state = TCPS_CLOSING;
1598 break;
1599
1600 /*
1601 * In FIN_WAIT_2 state enter the TIME_WAIT state,
1602 * starting the time-wait timer, turning off the other
1603 * standard timers.
1604 */
1605 case TCPS_FIN_WAIT_2:
1606 tp->t_state = TCPS_TIME_WAIT;
1607 tcp_canceltimers(tp);
1608 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1609 soisfdisconnected(so);
1610 break;
1611
1612 /*
1613 * In TIME_WAIT state restart the 2 MSL time_wait timer.
1614 */
1615 case TCPS_TIME_WAIT:
1616 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1617 break;
1618 }
1619 }
1620
1621 /*
1622 * Return any desired output.
1623 */
1624 if (needoutput || (tp->t_flags & TF_ACKNOW))
1625 tcp_output(pData, tp);
1626
1627 SOCKET_UNLOCK(so);
1628 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
1629 return;
1630
1631dropafterack:
1632 Log2(("drop after ack\n"));
1633 /*
1634 * Generate an ACK dropping incoming segment if it occupies
1635 * sequence space, where the ACK reflects our state.
1636 */
1637 if (tiflags & TH_RST)
1638 goto drop;
1639 m_freem(pData, m);
1640 tp->t_flags |= TF_ACKNOW;
1641 (void) tcp_output(pData, tp);
1642 SOCKET_UNLOCK(so);
1643 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
1644 return;
1645
1646dropwithreset:
1647 /* reuses m if m!=NULL, m_free() unnecessary */
1648 if (tiflags & TH_ACK)
1649 tcp_respond(pData, tp, ti, m, (tcp_seq)0, ti->ti_ack, TH_RST);
1650 else
1651 {
1652 if (tiflags & TH_SYN) ti->ti_len++;
1653 tcp_respond(pData, tp, ti, m, ti->ti_seq+ti->ti_len, (tcp_seq)0,
1654 TH_RST|TH_ACK);
1655 }
1656
1657 if (so != &tcb)
1658 SOCKET_UNLOCK(so);
1659 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
1660 return;
1661
1662drop:
1663 /*
1664 * Drop space held by incoming segment and return.
1665 */
1666 m_free(pData, m);
1667
1668#ifdef VBOX_WITH_SLIRP_MT
1669 if (RTCritSectIsOwned(&so->so_mutex))
1670 {
1671 SOCKET_UNLOCK(so);
1672 }
1673#endif
1674
1675 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
1676 return;
1677}
1678
1679void
1680tcp_dooptions(PNATState pData, struct tcpcb *tp, u_char *cp, int cnt, struct tcpiphdr *ti)
1681{
1682 u_int16_t mss;
1683 int opt, optlen;
1684
1685 DEBUG_CALL("tcp_dooptions");
1686 DEBUG_ARGS((dfd," tp = %lx cnt=%i \n", (long )tp, cnt));
1687
1688 for (; cnt > 0; cnt -= optlen, cp += optlen)
1689 {
1690 opt = cp[0];
1691 if (opt == TCPOPT_EOL)
1692 break;
1693 if (opt == TCPOPT_NOP)
1694 optlen = 1;
1695 else
1696 {
1697 optlen = cp[1];
1698 if (optlen <= 0)
1699 break;
1700 }
1701 switch (opt)
1702 {
1703 default:
1704 continue;
1705
1706 case TCPOPT_MAXSEG:
1707 if (optlen != TCPOLEN_MAXSEG)
1708 continue;
1709 if (!(ti->ti_flags & TH_SYN))
1710 continue;
1711 memcpy((char *) &mss, (char *) cp + 2, sizeof(mss));
1712 NTOHS(mss);
1713 (void) tcp_mss(pData, tp, mss); /* sets t_maxseg */
1714 break;
1715
1716#if 0
1717 case TCPOPT_WINDOW:
1718 if (optlen != TCPOLEN_WINDOW)
1719 continue;
1720 if (!(ti->ti_flags & TH_SYN))
1721 continue;
1722 tp->t_flags |= TF_RCVD_SCALE;
1723 tp->requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT);
1724 break;
1725
1726 case TCPOPT_TIMESTAMP:
1727 if (optlen != TCPOLEN_TIMESTAMP)
1728 continue;
1729 *ts_present = 1;
1730 memcpy((char *) ts_val, (char *)cp + 2, sizeof(*ts_val));
1731 NTOHL(*ts_val);
1732 memcpy((char *) ts_ecr, (char *)cp + 6, sizeof(*ts_ecr));
1733 NTOHL(*ts_ecr);
1734
1735 /*
1736 * A timestamp received in a SYN makes
1737 * it ok to send timestamp requests and replies.
1738 */
1739 if (ti->ti_flags & TH_SYN)
1740 {
1741 tp->t_flags |= TF_RCVD_TSTMP;
1742 tp->ts_recent = *ts_val;
1743 tp->ts_recent_age = tcp_now;
1744 }
1745 break;
1746#endif
1747 }
1748 }
1749}
1750
1751
1752/*
1753 * Pull out of band byte out of a segment so
1754 * it doesn't appear in the user's data queue.
1755 * It is still reflected in the segment length for
1756 * sequencing purposes.
1757 */
1758
1759#if 0
1760void
1761tcp_pulloutofband(struct socket *so, struct tcpiphdr *ti, struct mbuf *m)
1762{
1763 int cnt = ti->ti_urp - 1;
1764
1765 while (cnt >= 0)
1766 {
1767 if (m->m_len > cnt)
1768 {
1769 char *cp = mtod(m, caddr_t) + cnt;
1770 struct tcpcb *tp = sototcpcb(so);
1771
1772 tp->t_iobc = *cp;
1773 tp->t_oobflags |= TCPOOB_HAVEDATA;
1774 memcpy(sp, cp+1, (unsigned)(m->m_len - cnt - 1));
1775 m->m_len--;
1776 return;
1777 }
1778 cnt -= m->m_len;
1779 m = m->m_next; /* XXX WRONG! Fix it! */
1780 if (m == 0)
1781 break;
1782 }
1783 panic("tcp_pulloutofband");
1784}
1785#endif
1786
1787/*
1788 * Collect new round-trip time estimate
1789 * and update averages and current timeout.
1790 */
1791
1792void
1793tcp_xmit_timer(PNATState pData, register struct tcpcb *tp, int rtt)
1794{
1795 register short delta;
1796
1797 DEBUG_CALL("tcp_xmit_timer");
1798 DEBUG_ARG("tp = %lx", (long)tp);
1799 DEBUG_ARG("rtt = %d", rtt);
1800
1801 tcpstat.tcps_rttupdated++;
1802 if (tp->t_srtt != 0)
1803 {
1804 /*
1805 * srtt is stored as fixed point with 3 bits after the
1806 * binary point (i.e., scaled by 8). The following magic
1807 * is equivalent to the smoothing algorithm in rfc793 with
1808 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
1809 * point). Adjust rtt to origin 0.
1810 */
1811 delta = rtt - 1 - (tp->t_srtt >> TCP_RTT_SHIFT);
1812 if ((tp->t_srtt += delta) <= 0)
1813 tp->t_srtt = 1;
1814 /*
1815 * We accumulate a smoothed rtt variance (actually, a
1816 * smoothed mean difference), then set the retransmit
1817 * timer to smoothed rtt + 4 times the smoothed variance.
1818 * rttvar is stored as fixed point with 2 bits after the
1819 * binary point (scaled by 4). The following is
1820 * equivalent to rfc793 smoothing with an alpha of .75
1821 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
1822 * rfc793's wired-in beta.
1823 */
1824 if (delta < 0)
1825 delta = -delta;
1826 delta -= (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
1827 if ((tp->t_rttvar += delta) <= 0)
1828 tp->t_rttvar = 1;
1829 }
1830 else
1831 {
1832 /*
1833 * No rtt measurement yet - use the unsmoothed rtt.
1834 * Set the variance to half the rtt (so our first
1835 * retransmit happens at 3*rtt).
1836 */
1837 tp->t_srtt = rtt << TCP_RTT_SHIFT;
1838 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
1839 }
1840 tp->t_rtt = 0;
1841 tp->t_rxtshift = 0;
1842
1843 /*
1844 * the retransmit should happen at rtt + 4 * rttvar.
1845 * Because of the way we do the smoothing, srtt and rttvar
1846 * will each average +1/2 tick of bias. When we compute
1847 * the retransmit timer, we want 1/2 tick of rounding and
1848 * 1 extra tick because of +-1/2 tick uncertainty in the
1849 * firing of the timer. The bias will give us exactly the
1850 * 1.5 tick we need. But, because the bias is
1851 * statistical, we have to test that we don't drop below
1852 * the minimum feasible timer (which is 2 ticks).
1853 */
1854 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
1855 (short)tp->t_rttmin, TCPTV_REXMTMAX); /* XXX */
1856
1857 /*
1858 * We received an ack for a packet that wasn't retransmitted;
1859 * it is probably safe to discard any error indications we've
1860 * received recently. This isn't quite right, but close enough
1861 * for now (a route might have failed after we sent a segment,
1862 * and the return path might not be symmetrical).
1863 */
1864 tp->t_softerror = 0;
1865}
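
/*
 * A minimal sketch of the fixed-point smoothing above, assuming the usual
 * TCP_RTT_SHIFT = 3 and TCP_RTTVAR_SHIFT = 2 scaling; demo_rtt_update() is
 * a hypothetical helper, not used anywhere in this file.  Example: with
 * srtt = 8 (one tick, scaled by 8), rttvar = 4 and a new sample rtt = 3,
 * delta = 3 - 1 - (8 >> 3) = 1, so srtt becomes 9 (1.125 ticks unscaled,
 * i.e. an eighth of the way towards the sample) and rttvar is unchanged.
 */
#if 0
static void demo_rtt_update(int *srtt, int *rttvar, int rtt)
{
    int delta = rtt - 1 - (*srtt >> 3);     /* sample minus current srtt, origin 0 */
    if ((*srtt += delta) <= 0)              /* srtt += delta/8 in scaled units */
        *srtt = 1;
    if (delta < 0)
        delta = -delta;
    delta -= (*rttvar >> 2);                /* |delta| minus current rttvar */
    if ((*rttvar += delta) <= 0)            /* rttvar += |delta|/4 in scaled units */
        *rttvar = 1;
}
#endif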
1866
1867/*
1868 * Determine a reasonable value for maxseg size.
1869 * If the route is known, check route for mtu.
1870 * If none, use an mss that can be handled on the outgoing
1871 * interface without forcing IP to fragment; if bigger than
1872 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
1873 * to utilize large mbufs. If no route is found, route has no mtu,
1874 * or the destination isn't local, use a default, hopefully conservative
1875 * size (usually 512 or the default IP max size, but no more than the mtu
1876 * of the interface), as we can't discover anything about intervening
1877 * gateways or networks. We also initialize the congestion/slow start
1878 * window to be a single segment if the destination isn't local.
1879 * While looking at the routing entry, we also initialize other path-dependent
1880 * parameters from pre-set or cached values in the routing entry.
1881 */
1882
1883int
1884tcp_mss(PNATState pData, register struct tcpcb *tp, u_int offer)
1885{
1886 struct socket *so = tp->t_socket;
1887 int mss;
1888
1889 DEBUG_CALL("tcp_mss");
1890 DEBUG_ARG("tp = %lx", (long)tp);
1891 DEBUG_ARG("offer = %d", offer);
1892
1893 mss = min(if_mtu, if_mru) - sizeof(struct tcpiphdr);
1894 if (offer)
1895 mss = min(mss, offer);
1896 mss = max(mss, 32);
1897 if (mss < tp->t_maxseg || offer != 0)
1898 tp->t_maxseg = mss;
1899
1900 tp->snd_cwnd = mss;
1901
1902 sbreserve(pData, &so->so_snd, tcp_sndspace+((tcp_sndspace%mss)?(mss-(tcp_sndspace%mss)):0));
1903 sbreserve(pData, &so->so_rcv, tcp_rcvspace+((tcp_rcvspace%mss)?(mss-(tcp_rcvspace%mss)):0));
1904
1905 DEBUG_MISC((dfd, " returning mss = %d\n", mss));
1906
1907 return mss;
1908}
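
/*
 * The sbreserve() calls in tcp_mss() round the configured buffer sizes up
 * to a whole number of segments.  Below is a hypothetical helper showing
 * the same expression (illustrative only): with space = 8192 and
 * mss = 1460, 8192 % 1460 = 892, so the reserved size becomes
 * 8192 + (1460 - 892) = 8760, i.e. exactly 6 * 1460.
 */
#if 0
static int round_up_to_mss(int space, int mss)
{
    /* Same arithmetic as the tcp_sndspace/tcp_rcvspace expressions above. */
    return space + ((space % mss) ? (mss - (space % mss)) : 0);
}
#endif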