/*
 * Copyright (c) 1995 Danny Gasparovski.
 *
 * Please read the file COPYRIGHT for the
 * terms and conditions of the copyright.
 */
7 |
|
---|
8 | #include <slirp.h>
|
---|
9 |
|
---|
10 |
|
---|
/* Make an mbuf the sole member of its per-session (ifs) circular list:
 * both links point back at the mbuf itself. */
#define ifs_init(ifm) ((ifm)->ifs_next = (ifm)->ifs_prev = (ifm))
12 |
|
---|
13 | static void ifs_insque(struct mbuf *ifm, struct mbuf *ifmhead)
|
---|
14 | {
|
---|
15 | ifm->ifs_next = ifmhead->ifs_next;
|
---|
16 | ifmhead->ifs_next = ifm;
|
---|
17 | ifm->ifs_prev = ifmhead;
|
---|
18 | ifm->ifs_next->ifs_prev = ifm;
|
---|
19 | }
|
---|
20 |
|
---|
21 | static void ifs_remque(struct mbuf *ifm)
|
---|
22 | {
|
---|
23 | ifm->ifs_prev->ifs_next = ifm->ifs_next;
|
---|
24 | ifm->ifs_next->ifs_prev = ifm->ifs_prev;
|
---|
25 | }
|
---|
26 |
|
---|
/*
 * if_init: one-time initialisation of the interface (output) layer.
 *
 * Sets the link-header reserve (if_maxlinkhdr), queueing threshold,
 * MTU/MRU, compression preference, and makes the two circular output
 * queues (if_fastq / if_batchq) empty by pointing them at themselves.
 * next_m — used by if_start() as the batchq round-robin cursor — is
 * parked on the batchq head, meaning "no previous session".
 */
void
if_init(PNATState pData)
{
#if 0
    /*
     * Set if_maxlinkhdr to 48 because it's 40 bytes for TCP/IP,
     * and 8 bytes for PPP, but need to have it on an 8byte boundary
     */
#ifdef USE_PPP
    if_maxlinkhdr = 48;
#else
    if_maxlinkhdr = 40;
#endif
#else
    /* 2 for alignment, 14 for ethernet, 40 for TCP/IP */
    if_maxlinkhdr = 2 + 14 + 40;
#endif
    if_queued = 0;          /* no packets waiting on either queue yet */
    if_thresh = 10;
    if_mtu = 1500;
    if_mru = 1500;
    if_comp = IF_AUTOCOMP;
    if_fastq.ifq_next = if_fastq.ifq_prev = &if_fastq;
#ifdef VBOX_WITH_SYNC_SLIRP
    /* Threaded build: one mutex guarding the queue links, one on the
     * queue-head mbuf itself (it is walked like any other mbuf). */
    RTSemMutexCreate(&pData->if_fastq_mutex);
    RTSemMutexCreate(&if_fastq.m_mutex);
#endif

    if_batchq.ifq_next = if_batchq.ifq_prev = &if_batchq;
#ifdef VBOX_WITH_SYNC_SLIRP
    RTSemMutexCreate(&pData->if_batchq_mutex);
    RTSemMutexCreate(&if_batchq.m_mutex);
#endif
    /* sl_compress_init(&comp_s); */
    next_m = &if_batchq;
}
63 |
|
---|
#if 0
/*
 * This shouldn't be needed since the modem is blocking and
 * we don't expect any signals, but what the hell..
 */
/*
 * writen: send() all n bytes of bptr to fd, looping over short writes.
 * Returns n on success, or send()'s 0 / negative return on EOF/error
 * (in which case fewer bytes may already have been written).
 * Dead code: compiled out by the surrounding #if 0.
 */
inline int
writen(fd, bptr, n)
    int fd;
    char *bptr;
    int n;
{
    int ret;
    int total;

    /* This should succeed most of the time */
    ret = send(fd, bptr, n,0);
    if (ret == n || ret <= 0)
        return ret;

    /* Didn't write everything, go into the loop */
    total = ret;
    while (n > total) {
        ret = send(fd, bptr+total, n-total,0);
        if (ret <= 0)
            return ret;
        total += ret;
    }
    return total;
}

|
---|
/*
 * if_input - read() the tty, do "top level" processing (ie: check for any escapes),
 * and pass onto (*ttyp->if_input)
 *
 * XXXXX Any zeros arriving by themselves are NOT placed into the arriving packet.
 *
 * Dead code: compiled out by the enclosing #if 0.
 */
#define INBUFF_SIZE 2048 /* XXX */
void
if_input(ttyp)
    struct ttys *ttyp;
{
    u_char if_inbuff[INBUFF_SIZE];
    int if_n;

    DEBUG_CALL("if_input");
    DEBUG_ARG("ttyp = %lx", (long)ttyp);

    if_n = recv(ttyp->fd, (char *)if_inbuff, INBUFF_SIZE,0);

    DEBUG_MISC((dfd, " read %d bytes\n", if_n));

    if (if_n <= 0) {
        /* EOF, or a hard error (anything but EINTR/EAGAIN): take the
         * link down and detach the tty.  Transient errors just return. */
        if (if_n == 0 || (errno != EINTR && errno != EAGAIN)) {
            if (ttyp->up)
                link_up--;
            tty_detached(ttyp, 0);
        }
        return;
    }
    /* Lone '0'/'1' bytes act as control indications: five consecutive
     * '0's exit slirp, five consecutive '1's detach this tty. */
    if (if_n == 1) {
        if (*if_inbuff == '0') {
            ttyp->ones = 0;
            if (++ttyp->zeros >= 5)
                slirp_exit(0);
            return;
        }
        if (*if_inbuff == '1') {
            ttyp->zeros = 0;
            if (++ttyp->ones >= 5)
                tty_detached(ttyp, 0);
            return;
        }
    }
    ttyp->ones = ttyp->zeros = 0;

    /* Hand the raw bytes to the per-tty input handler. */
    (*ttyp->if_input)(ttyp, if_inbuff, if_n);
}
#endif
142 |
|
---|
/*
 * if_output: Queue packet into an output queue.
 * There are 2 output queues, if_fastq and if_batchq.
 * Each output queue is a doubly linked list of doubly linked lists
 * of mbufs, each list belonging to one "session" (socket).  This
 * way, we can output packets fairly by sending one packet from each
 * session, instead of all the packets from one session, then all packets
 * from the next session, etc.  Packets on the if_fastq get absolute
 * priority, but if one session hogs the link, it gets "downgraded"
 * to the batchq until it runs out of packets, then it'll return
 * to the fastq (eg. if the user does an ls -alR in a telnet session,
 * it'll temporarily get downgraded to the batchq)
 */
/*
 * if_output: queue mbuf 'ifm' for transmission on behalf of session
 * 'so' ('so' may be NULL for sessionless traffic).  The packet joins an
 * existing per-session list on the batchq or fastq when one exists, or
 * founds a new single-entry session list otherwise.  Finally, if the
 * link is up, if_start() is kicked to drain the queues.
 */
void
if_output(PNATState pData, struct socket *so, struct mbuf *ifm)
{
    struct mbuf *ifq;
#ifdef VBOX_WITH_SYNC_SLIRP
    /* Saved before the batchq lock is dropped so the walk can resume
     * even if the list is modified while we hold only ifq->m_mutex. */
    struct mbuf *ifqprev;
#endif
    /* NOTE(review): on_fastq starts at 1 and is never cleared on the
     * batchq paths, so the "downgrade" block below can also fire for
     * sessions already on the batchq.  This matches historic slirp
     * behaviour — confirm before changing. */
    int on_fastq = 1;

    DEBUG_CALL("if_output");
    DEBUG_ARG("so = %lx", (long)so);
    DEBUG_ARG("ifm = %lx", (long)ifm);
#ifdef VBOX_WITH_SYNC_SLIRP
    if (so != NULL)
        RTSemMutexRequest(so->so_mutex, RT_INDEFINITE_WAIT);
#endif

    /*
     * First remove the mbuf from m_usedlist,
     * since we're gonna use m_next and m_prev ourselves
     * XXX Shouldn't need this, gotta change dtom() etc.
     */
#ifdef VBOX_WITH_SYNC_SLIRP
    RTSemMutexRequest(pData->m_usedlist_mutex, RT_INDEFINITE_WAIT);
    RTSemMutexRequest(ifm->m_mutex, RT_INDEFINITE_WAIT);
#endif
    if (ifm->m_flags & M_USEDLIST) {
        remque(pData, ifm);
        ifm->m_flags &= ~M_USEDLIST;
    }
#ifdef VBOX_WITH_SYNC_SLIRP
    RTSemMutexRelease(pData->m_usedlist_mutex);
#endif

    /*
     * See if there's already a batchq list for this session.
     * This can include an interactive session, which should go on fastq,
     * but gets too greedy... hence it'll be downgraded from fastq to batchq.
     * We mustn't put this packet back on the fastq (or we'll send it out of order)
     * XXX add cache here?
     *
     * In the sync build the for-loop is hand-unrolled into a while(1)
     * so that the queue lock can be swapped for the per-mbuf lock on
     * each step (lock order: queue mutex, then mbuf mutex).
     */
#ifndef VBOX_WITH_SYNC_SLIRP
    for (ifq = if_batchq.ifq_prev; ifq != &if_batchq; ifq = ifq->ifq_prev) {
#else
    RTSemMutexRequest(pData->if_batchq_mutex, RT_INDEFINITE_WAIT);
    ifq = if_batchq.ifq_prev;
    while(1){
        if (ifq == &if_batchq) {
            /* Wrapped around to the head: no session list found. */
            RTSemMutexRelease(pData->if_batchq_mutex);
            break;
        }
        ifqprev = ifq->ifq_prev;
        RTSemMutexRequest(ifq->m_mutex, RT_INDEFINITE_WAIT);
        RTSemMutexRelease(pData->if_batchq_mutex);
#endif
        if (so == ifq->ifq_so) {
            /* A match!  Append to this session's ifs list (ifq->m_mutex
             * stays held until after the diddit label). */
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
#ifdef VBOX_WITH_SYNC_SLIRP
        RTSemMutexRequest(pData->if_batchq_mutex, RT_INDEFINITE_WAIT);
        RTSemMutexRelease(ifq->m_mutex);
        ifq = ifqprev;
#endif
    }

    /* No match, check which queue to put it on */
    if (so && (so->so_iptos & IPTOS_LOWDELAY)) {
#ifdef VBOX_WITH_SYNC_SLIRP
        RTSemMutexRequest(pData->if_fastq_mutex, RT_INDEFINITE_WAIT);
#endif
        ifq = if_fastq.ifq_prev;
#ifdef VBOX_WITH_SYNC_SLIRP
        RTSemMutexRequest(ifq->m_mutex, RT_INDEFINITE_WAIT);
        RTSemMutexRelease(pData->if_fastq_mutex);
#endif
        on_fastq = 1;
        /*
         * Check if this packet is a part of the last
         * packet's session
         */
        if (ifq->ifq_so == so) {
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    }
    else {
#ifdef VBOX_WITH_SYNC_SLIRP
        RTSemMutexRequest(pData->if_batchq_mutex, RT_INDEFINITE_WAIT);
#endif
        ifq = if_batchq.ifq_prev;
#ifdef VBOX_WITH_SYNC_SLIRP
        RTSemMutexRequest(ifq->m_mutex, RT_INDEFINITE_WAIT);
        RTSemMutexRelease(pData->if_batchq_mutex);
#endif
    }

    /* Create a new doubly linked list for this session */
    ifm->ifq_so = so;
    ifs_init(ifm);
    insque(pData, ifm, ifq);

diddit:
    /* On every path reaching here, ifq->m_mutex and ifm->m_mutex are
     * held in the sync build; both are released at the end. */
#ifdef VBOX_WITH_SYNC_SLIRP
    RTSemMutexRequest(pData->if_queued_mutex, RT_INDEFINITE_WAIT);
#endif
    ++if_queued;
#ifdef VBOX_WITH_SYNC_SLIRP
    RTSemMutexRelease(pData->if_queued_mutex);
#endif

    if (so) {
        /* Update *_queued */
        so->so_queued++;
        so->so_nqueued++;
        /*
         * Check if the interactive session should be downgraded to
         * the batchq.  A session is downgraded if it has queued 6
         * packets without pausing, and at least 3 of those packets
         * have been sent over the link
         * (XXX These are arbitrary numbers, probably not optimal..)
         */
        if (on_fastq && ((so->so_nqueued >= 6) &&
                 (so->so_nqueued - so->so_queued) >= 3)) {

#ifdef VBOX_WITH_SYNC_SLIRP
            RTSemMutexRequest(pData->if_fastq_mutex, RT_INDEFINITE_WAIT);
#endif
            /* Remove from current queue...  (ifm->ifs_next heads the
             * remainder of this session's list) */
            remque(pData, ifm->ifs_next);
#ifdef VBOX_WITH_SYNC_SLIRP
            RTSemMutexRequest(pData->if_batchq_mutex, RT_INDEFINITE_WAIT);
#endif

            /* ...And insert in the new.  That'll teach ya! */
            insque(pData, ifm->ifs_next, &if_batchq);
#ifdef VBOX_WITH_SYNC_SLIRP
            RTSemMutexRelease(pData->if_fastq_mutex);
            RTSemMutexRelease(pData->if_batchq_mutex);
#endif
        }
#ifdef VBOX_WITH_SYNC_SLIRP
        RTSemMutexRelease(so->so_mutex);
#endif
    }
#ifdef VBOX_WITH_SYNC_SLIRP
    RTSemMutexRelease(ifq->m_mutex);
    RTSemMutexRelease(ifm->m_mutex);
#endif

#ifndef FULL_BOLT
    /*
     * This prevents us from malloc()ing too many mbufs
     */
    if (link_up) {
        /* if_start will check towrite */
        if_start(pData);
    }
#endif
}
319 |
|
---|
/*
 * Send a packet
 * We choose a packet based on its position in the output queues;
 * If there are packets on the fastq, they are sent FIFO, before
 * everything else.  Otherwise we choose the first packet from the
 * batchq and send it.  the next packet chosen will be from the session
 * after this one, then the session after that one, and so on..  So,
 * for example, if there are 3 ftp sessions fighting for bandwidth,
 * one packet will be sent from the first session, then one packet
 * from the second session, then one packet from the third, then back
 * to the first, etc. etc.
 */
332 | void
|
---|
333 | if_start(PNATState pData)
|
---|
334 | {
|
---|
335 | struct mbuf *ifm, *ifqt;
|
---|
336 | #ifdef VBOX_WITH_SYNC_SLIRP
|
---|
337 | int on_fast = 0; /*required for correctness */
|
---|
338 | struct mbuf *ifm_prev;
|
---|
339 | #endif
|
---|
340 |
|
---|
341 | DEBUG_CALL("if_start");
|
---|
342 |
|
---|
343 | #ifdef VBOX_WITH_SYNC_SLIRP
|
---|
344 | RTSemMutexRequest(pData->if_queued_mutex, RT_INDEFINITE_WAIT);
|
---|
345 | #endif
|
---|
346 | if (if_queued <= 0) {
|
---|
347 | #ifdef VBOX_WITH_SYNC_SLIRP
|
---|
348 | RTSemMutexRelease(pData->if_queued_mutex);
|
---|
349 | #endif
|
---|
350 | return; /* Nothing to do */
|
---|
351 | }
|
---|
352 |
|
---|
353 | again:
|
---|
354 | #ifdef VBOX_WITH_SYNC_SLIRP
|
---|
355 | RTSemMutexRelease(pData->if_queued_mutex);
|
---|
356 | #endif
|
---|
357 | /* check if we can really output */
|
---|
358 | if (!slirp_can_output(pData->pvUser))
|
---|
359 | return;
|
---|
360 |
|
---|
361 | /*
|
---|
362 | * See which queue to get next packet from
|
---|
363 | * If there's something in the fastq, select it immediately
|
---|
364 | */
|
---|
365 | #ifdef VBOX_WITH_SYNC_SLIRP
|
---|
366 | RTSemMutexRequest(pData->if_fastq_mutex, RT_INDEFINITE_WAIT);
|
---|
367 | #endif
|
---|
368 | if (if_fastq.ifq_next != &if_fastq) {
|
---|
369 | ifm = if_fastq.ifq_next;
|
---|
370 | #ifdef VBOX_WITH_SYNC_SLIRP
|
---|
371 | on_fast = 1;
|
---|
372 | RTSemMutexRequest(ifm->m_mutex, RT_INDEFINITE_WAIT);
|
---|
373 | #endif
|
---|
374 | } else {
|
---|
375 | #ifdef VBOX_WITH_SYNC_SLIRP
|
---|
376 | RTSemMutexRelease(pData->if_fastq_mutex);
|
---|
377 | RTSemMutexRequest(pData->next_m_mutex, RT_INDEFINITE_WAIT);
|
---|
378 | RTSemMutexRequest(pData->if_batchq_mutex, RT_INDEFINITE_WAIT);
|
---|
379 | #endif
|
---|
380 | /* Nothing on fastq, see if next_m is valid */
|
---|
381 | if (next_m != &if_batchq)
|
---|
382 | ifm = next_m;
|
---|
383 | else
|
---|
384 | ifm = if_batchq.ifq_next;
|
---|
385 |
|
---|
386 | /* Set which packet to send on next iteration */
|
---|
387 | next_m = ifm->ifq_next;
|
---|
388 | #ifdef VBOX_WITH_SYNC_SLIRP
|
---|
389 | RTSemMutexRelease(pData->next_m_mutex);
|
---|
390 | #endif
|
---|
391 | }
|
---|
392 | #ifdef VBOX_WITH_SYNC_SLIRP
|
---|
393 | RTSemMutexRequest(ifm->m_mutex, RT_INDEFINITE_WAIT);
|
---|
394 | RTSemMutexRequest(pData->if_queued_mutex, RT_INDEFINITE_WAIT);
|
---|
395 | #endif
|
---|
396 | /* Remove it from the queue */
|
---|
397 | ifqt = ifm->ifq_prev;
|
---|
398 | remque(pData, ifm);
|
---|
399 |
|
---|
400 | --if_queued;
|
---|
401 | #ifdef VBOX_WITH_SYNC_SLIRP
|
---|
402 | RTSemMutexRelease(pData->if_queued_mutex);
|
---|
403 | if (on_fast == 1) {
|
---|
404 | RTSemMutexRelease(pData->if_fastq_mutex);
|
---|
405 | }
|
---|
406 | else {
|
---|
407 | RTSemMutexRelease(pData->if_batchq_mutex);
|
---|
408 | }
|
---|
409 | #endif
|
---|
410 |
|
---|
411 | /* If there are more packets for this session, re-queue them */
|
---|
412 | if (ifm->ifs_next != /* ifm->ifs_prev != */ ifm) {
|
---|
413 | insque(pData, ifm->ifs_next, ifqt);
|
---|
414 | ifs_remque(ifm);
|
---|
415 | }
|
---|
416 |
|
---|
417 | /* Update so_queued */
|
---|
418 | if (ifm->ifq_so) {
|
---|
419 | #ifdef VBOX_WITH_SYNC_SLIRP
|
---|
420 | RTSemMutexRequest(ifm->ifq_so->so_mutex, RT_INDEFINITE_WAIT);
|
---|
421 | #endif
|
---|
422 | if (--ifm->ifq_so->so_queued == 0)
|
---|
423 | /* If there's no more queued, reset nqueued */
|
---|
424 | ifm->ifq_so->so_nqueued = 0;
|
---|
425 | #ifdef VBOX_WITH_SYNC_SLIRP
|
---|
426 | RTSemMutexRelease(ifm->ifq_so->so_mutex);
|
---|
427 | #endif
|
---|
428 | }
|
---|
429 |
|
---|
430 | /* Encapsulate the packet for sending */
|
---|
431 | if_encap(pData, (const uint8_t *)ifm->m_data, ifm->m_len);
|
---|
432 | #ifdef VBOX_WITH_SYNC_SLIRP
|
---|
433 | RTSemMutexRelease(ifm->m_mutex);
|
---|
434 | #endif
|
---|
435 |
|
---|
436 | m_free(pData, ifm);
|
---|
437 |
|
---|
438 | #ifdef VBOX_WITH_SYNC_SLIRP
|
---|
439 | RTSemMutexRequest(pData->if_queued_mutex, RT_INDEFINITE_WAIT);
|
---|
440 | /*We release if_queued_mutex after again label and before return*/
|
---|
441 | #endif
|
---|
442 | if (if_queued > 0)
|
---|
443 | goto again;
|
---|
444 | #ifdef VBOX_WITH_SYNC_SLIRP
|
---|
445 | RTSemMutexRelease(pData->if_queued_mutex);
|
---|
446 | #endif
|
---|
447 | }
|
---|