VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/if.c@ 13604

最後變更 在這個檔案從13604是 13604,由 vboxsync 提交於 16 年 前

Synchronized slirp was introduced

  • 屬性 svn:eol-style 設為 native
檔案大小: 11.9 KB
 
1/*
2 * Copyright (c) 1995 Danny Gasparovski.
3 *
4 * Please read the file COPYRIGHT for the
5 * terms and conditions of the copyright.
6 */
7
8#include <slirp.h>
9
10
/* Turn ifm into a singleton per-session ("ifs") list: both links point at itself. */
#define ifs_init(ifm) ((ifm)->ifs_next = (ifm)->ifs_prev = (ifm))
12
13static void ifs_insque(struct mbuf *ifm, struct mbuf *ifmhead)
14{
15 ifm->ifs_next = ifmhead->ifs_next;
16 ifmhead->ifs_next = ifm;
17 ifm->ifs_prev = ifmhead;
18 ifm->ifs_next->ifs_prev = ifm;
19}
20
21static void ifs_remque(struct mbuf *ifm)
22{
23 ifm->ifs_prev->ifs_next = ifm->ifs_next;
24 ifm->ifs_next->ifs_prev = ifm->ifs_prev;
25}
26
/*
 * Initialise the interface layer: link-header reservation, MTU/MRU,
 * and the two output queues (if_fastq / if_batchq) as empty circular
 * lists, plus the round-robin cursor next_m used by if_start().
 * With VBOX_WITH_SYNC_SLIRP, also creates the mutexes guarding the queues.
 */
void
if_init(PNATState pData)
{
#if 0
    /*
     * Set if_maxlinkhdr to 48 because it's 40 bytes for TCP/IP,
     * and 8 bytes for PPP, but need to have it on an 8byte boundary
     */
#ifdef USE_PPP
    if_maxlinkhdr = 48;
#else
    if_maxlinkhdr = 40;
#endif
#else
    /* 2 for alignment, 14 for ethernet, 40 for TCP/IP */
    if_maxlinkhdr = 2 + 14 + 40;
#endif
    if_queued = 0;          /* no packets queued yet; counted in if_output/if_start */
    if_thresh = 10;         /* NOTE(review): not referenced elsewhere in this file */
    if_mtu = 1500;
    if_mru = 1500;
    if_comp = IF_AUTOCOMP;
    /* Empty circular queue: head links point back at the head itself. */
    if_fastq.ifq_next = if_fastq.ifq_prev = &if_fastq;
#ifdef VBOX_WITH_SYNC_SLIRP
    RTSemMutexCreate(&pData->if_fastq_mutex);
    RTSemMutexCreate(&if_fastq.m_mutex);
#endif

    if_batchq.ifq_next = if_batchq.ifq_prev = &if_batchq;
#ifdef VBOX_WITH_SYNC_SLIRP
    RTSemMutexCreate(&pData->if_batchq_mutex);
    RTSemMutexCreate(&if_batchq.m_mutex);
#endif
    /* sl_compress_init(&comp_s); */
    /* Round-robin cursor starts at the (empty) batchq head. */
    next_m = &if_batchq;
}
63
#if 0
/* NOTE(review): this entire region is compiled out.  It is the serial-line
 * (tty/modem) I/O path inherited from the original Slirp sources, kept for
 * reference only; VirtualBox feeds packets in through other entry points. */

/*
 * This shouldn't be needed since the modem is blocking and
 * we don't expect any signals, but what the hell..
 */
inline int
writen(fd, bptr, n)
    int fd;
    char *bptr;
    int n;
{
    int ret;    /* result of the most recent send() */
    int total;  /* bytes successfully written so far */

    /* This should succeed most of the time */
    ret = send(fd, bptr, n,0);
    if (ret == n || ret <= 0)
        return ret;

    /* Didn't write everything, go into the loop */
    total = ret;
    while (n > total) {
        ret = send(fd, bptr+total, n-total,0);
        if (ret <= 0)
            return ret;
        total += ret;
    }
    return total;
}

/*
 * if_input - read() the tty, do "top level" processing (ie: check for any escapes),
 * and pass onto (*ttyp->if_input)
 *
 * XXXXX Any zeros arriving by themselves are NOT placed into the arriving packet.
 */
#define INBUFF_SIZE 2048 /* XXX */
void
if_input(ttyp)
    struct ttys *ttyp;
{
    u_char if_inbuff[INBUFF_SIZE];
    int if_n;

    DEBUG_CALL("if_input");
    DEBUG_ARG("ttyp = %lx", (long)ttyp);

    if_n = recv(ttyp->fd, (char *)if_inbuff, INBUFF_SIZE,0);

    DEBUG_MISC((dfd, " read %d bytes\n", if_n));

    if (if_n <= 0) {
        /* EOF, or a hard error (not EINTR/EAGAIN): drop the link and detach */
        if (if_n == 0 || (errno != EINTR && errno != EAGAIN)) {
            if (ttyp->up)
                link_up--;
            tty_detached(ttyp, 0);
        }
        return;
    }
    if (if_n == 1) {
        /* Lone '0'/'1' bytes are escape signals, not packet data:
         * five consecutive '0's exit slirp, five '1's detach the tty. */
        if (*if_inbuff == '0') {
            ttyp->ones = 0;
            if (++ttyp->zeros >= 5)
                slirp_exit(0);
            return;
        }
        if (*if_inbuff == '1') {
            ttyp->zeros = 0;
            if (++ttyp->ones >= 5)
                tty_detached(ttyp, 0);
            return;
        }
    }
    ttyp->ones = ttyp->zeros = 0;

    (*ttyp->if_input)(ttyp, if_inbuff, if_n);
}
#endif
142
143/*
144 * if_output: Queue packet into an output queue.
145 * There are 2 output queue's, if_fastq and if_batchq.
146 * Each output queue is a doubly linked list of double linked lists
147 * of mbufs, each list belonging to one "session" (socket). This
148 * way, we can output packets fairly by sending one packet from each
149 * session, instead of all the packets from one session, then all packets
150 * from the next session, etc. Packets on the if_fastq get absolute
151 * priority, but if one session hogs the link, it gets "downgraded"
152 * to the batchq until it runs out of packets, then it'll return
153 * to the fastq (eg. if the user does an ls -alR in a telnet session,
154 * it'll temporarily get downgraded to the batchq)
155 */
156void
157if_output(PNATState pData, struct socket *so, struct mbuf *ifm)
158{
159 struct mbuf *ifq;
160#ifdef VBOX_WITH_SYNC_SLIRP
161 struct mbuf *ifqprev;
162#endif
163 int on_fastq = 1;
164
165 DEBUG_CALL("if_output");
166 DEBUG_ARG("so = %lx", (long)so);
167 DEBUG_ARG("ifm = %lx", (long)ifm);
168#ifdef VBOX_WITH_SYNC_SLIRP
169 if (so != NULL)
170 RTSemMutexRequest(so->so_mutex, RT_INDEFINITE_WAIT);
171#endif
172
173 /*
174 * First remove the mbuf from m_usedlist,
175 * since we're gonna use m_next and m_prev ourselves
176 * XXX Shouldn't need this, gotta change dtom() etc.
177 */
178#ifdef VBOX_WITH_SYNC_SLIRP
179 RTSemMutexRequest(pData->m_usedlist_mutex, RT_INDEFINITE_WAIT);
180 RTSemMutexRequest(ifm->m_mutex, RT_INDEFINITE_WAIT);
181#endif
182 if (ifm->m_flags & M_USEDLIST) {
183 remque(pData, ifm);
184 ifm->m_flags &= ~M_USEDLIST;
185 }
186#ifdef VBOX_WITH_SYNC_SLIRP
187 RTSemMutexRelease(pData->m_usedlist_mutex);
188#endif
189
190 /*
191 * See if there's already a batchq list for this session.
192 * This can include an interactive session, which should go on fastq,
193 * but gets too greedy... hence it'll be downgraded from fastq to batchq.
194 * We mustn't put this packet back on the fastq (or we'll send it out of order)
195 * XXX add cache here?
196 */
197#ifndef VBOX_WITH_SYNC_SLIRP
198 for (ifq = if_batchq.ifq_prev; ifq != &if_batchq; ifq = ifq->ifq_prev) {
199#else
200 RTSemMutexRequest(pData->if_batchq_mutex, RT_INDEFINITE_WAIT);
201 ifq = if_batchq.ifq_prev;
202 while(1){
203 if (ifq == &if_batchq) {
204 RTSemMutexRelease(pData->if_batchq_mutex);
205 break;
206 }
207 ifqprev = ifq->ifq_prev;
208 RTSemMutexRequest(ifq->m_mutex, RT_INDEFINITE_WAIT);
209 RTSemMutexRelease(pData->if_batchq_mutex);
210#endif
211 if (so == ifq->ifq_so) {
212 /* A match! */
213 ifm->ifq_so = so;
214 ifs_insque(ifm, ifq->ifs_prev);
215 goto diddit;
216 }
217#ifdef VBOX_WITH_SYNC_SLIRP
218 RTSemMutexRequest(pData->if_batchq_mutex, RT_INDEFINITE_WAIT);
219 RTSemMutexRelease(ifq->m_mutex);
220 ifq = ifqprev;
221#endif
222 }
223
224 /* No match, check which queue to put it on */
225 if (so && (so->so_iptos & IPTOS_LOWDELAY)) {
226#ifdef VBOX_WITH_SYNC_SLIRP
227 RTSemMutexRequest(pData->if_fastq_mutex, RT_INDEFINITE_WAIT);
228#endif
229 ifq = if_fastq.ifq_prev;
230#ifdef VBOX_WITH_SYNC_SLIRP
231 RTSemMutexRequest(ifq->m_mutex, RT_INDEFINITE_WAIT);
232 RTSemMutexRelease(pData->if_fastq_mutex);
233#endif
234 on_fastq = 1;
235 /*
236 * Check if this packet is a part of the last
237 * packet's session
238 */
239 if (ifq->ifq_so == so) {
240 ifm->ifq_so = so;
241 ifs_insque(ifm, ifq->ifs_prev);
242 goto diddit;
243 }
244 }
245 else {
246#ifdef VBOX_WITH_SYNC_SLIRP
247 RTSemMutexRequest(pData->if_batchq_mutex, RT_INDEFINITE_WAIT);
248#endif
249 ifq = if_batchq.ifq_prev;
250#ifdef VBOX_WITH_SYNC_SLIRP
251 RTSemMutexRequest(ifq->m_mutex, RT_INDEFINITE_WAIT);
252 RTSemMutexRelease(pData->if_batchq_mutex);
253#endif
254 }
255
256 /* Create a new doubly linked list for this session */
257 ifm->ifq_so = so;
258 ifs_init(ifm);
259 insque(pData, ifm, ifq);
260
261diddit:
262#ifdef VBOX_WITH_SYNC_SLIRP
263 RTSemMutexRequest(pData->if_queued_mutex, RT_INDEFINITE_WAIT);
264#endif
265 ++if_queued;
266#ifdef VBOX_WITH_SYNC_SLIRP
267 RTSemMutexRelease(pData->if_queued_mutex);
268#endif
269
270 if (so) {
271 /* Update *_queued */
272 so->so_queued++;
273 so->so_nqueued++;
274 /*
275 * Check if the interactive session should be downgraded to
276 * the batchq. A session is downgraded if it has queued 6
277 * packets without pausing, and at least 3 of those packets
278 * have been sent over the link
279 * (XXX These are arbitrary numbers, probably not optimal..)
280 */
281 if (on_fastq && ((so->so_nqueued >= 6) &&
282 (so->so_nqueued - so->so_queued) >= 3)) {
283
284#ifdef VBOX_WITH_SYNC_SLIRP
285 RTSemMutexRequest(pData->if_fastq_mutex, RT_INDEFINITE_WAIT);
286#endif
287 /* Remove from current queue... */
288 remque(pData, ifm->ifs_next);
289#ifdef VBOX_WITH_SYNC_SLIRP
290 RTSemMutexRequest(pData->if_batchq_mutex, RT_INDEFINITE_WAIT);
291#endif
292
293 /* ...And insert in the new. That'll teach ya! */
294 insque(pData, ifm->ifs_next, &if_batchq);
295#ifdef VBOX_WITH_SYNC_SLIRP
296 RTSemMutexRelease(pData->if_fastq_mutex);
297 RTSemMutexRelease(pData->if_batchq_mutex);
298#endif
299 }
300 RTSemMutexRelease(so->so_mutex);
301 }
302#ifdef VBOX_WITH_SYNC_SLIRP
303 RTSemMutexRelease(ifq->m_mutex);
304 RTSemMutexRelease(ifm->m_mutex);
305#endif
306
307#ifndef FULL_BOLT
308 /*
309 * This prevents us from malloc()ing too many mbufs
310 */
311 if (link_up) {
312 /* if_start will check towrite */
313 if_start(pData);
314 }
315#endif
316}
317
318/*
319 * Send a packet
320 * We choose a packet based on it's position in the output queues;
321 * If there are packets on the fastq, they are sent FIFO, before
322 * everything else. Otherwise we choose the first packet from the
323 * batchq and send it. the next packet chosen will be from the session
324 * after this one, then the session after that one, and so on.. So,
325 * for example, if there are 3 ftp session's fighting for bandwidth,
326 * one packet will be sent from the first session, then one packet
327 * from the second session, then one packet from the third, then back
328 * to the first, etc. etc.
329 */
330void
331if_start(PNATState pData)
332{
333 struct mbuf *ifm, *ifqt;
334#ifdef VBOX_WITH_SYNC_SLIRP
335 int on_fast = 0; /*required for correctness */
336 struct mbuf *ifm_prev;
337#endif
338
339 DEBUG_CALL("if_start");
340
341#ifdef VBOX_WITH_SYNC_SLIRP
342 RTSemMutexRequest(pData->if_queued_mutex, RT_INDEFINITE_WAIT);
343#endif
344 if (if_queued <= 0) {
345#ifdef VBOX_WITH_SYNC_SLIRP
346 RTSemMutexRelease(pData->if_queued_mutex);
347#endif
348 return; /* Nothing to do */
349 }
350
351 again:
352#ifdef VBOX_WITH_SYNC_SLIRP
353 RTSemMutexRelease(pData->if_queued_mutex);
354#endif
355 /* check if we can really output */
356 if (!slirp_can_output(pData->pvUser))
357 return;
358
359 /*
360 * See which queue to get next packet from
361 * If there's something in the fastq, select it immediately
362 */
363#ifdef VBOX_WITH_SYNC_SLIRP
364 RTSemMutexRequest(pData->if_fastq_mutex, RT_INDEFINITE_WAIT);
365#endif
366 if (if_fastq.ifq_next != &if_fastq) {
367 ifm = if_fastq.ifq_next;
368#ifdef VBOX_WITH_SYNC_SLIRP
369 on_fast = 1;
370 RTSemMutexRequest(ifm->m_mutex, RT_INDEFINITE_WAIT);
371#endif
372 } else {
373#ifdef VBOX_WITH_SYNC_SLIRP
374 RTSemMutexRelease(pData->if_fastq_mutex);
375 RTSemMutexRequest(pData->next_m_mutex, RT_INDEFINITE_WAIT);
376 RTSemMutexRequest(pData->if_batchq_mutex, RT_INDEFINITE_WAIT);
377#endif
378 /* Nothing on fastq, see if next_m is valid */
379 if (next_m != &if_batchq)
380 ifm = next_m;
381 else
382 ifm = if_batchq.ifq_next;
383
384 /* Set which packet to send on next iteration */
385 next_m = ifm->ifq_next;
386#ifdef VBOX_WITH_SYNC_SLIRP
387 RTSemMutexRelease(pData->next_m_mutex);
388#endif
389 }
390#ifdef VBOX_WITH_SYNC_SLIRP
391 RTSemMutexRequest(ifm->m_mutex, RT_INDEFINITE_WAIT);
392 RTSemMutexRequest(pData->if_queued_mutex, RT_INDEFINITE_WAIT);
393#endif
394 /* Remove it from the queue */
395 ifqt = ifm->ifq_prev;
396 ifqt = ifm->ifq_prev;
397 remque(pData, ifm);
398
399 --if_queued;
400#ifdef VBOX_WITH_SYNC_SLIRP
401 RTSemMutexRelease(pData->if_queued_mutex);
402 if (on_fast == 1) {
403 RTSemMutexRelease(pData->if_fastq_mutex);
404 }
405 else {
406 RTSemMutexRelease(pData->if_batchq_mutex);
407 }
408#endif
409
410 /* If there are more packets for this session, re-queue them */
411 if (ifm->ifs_next != /* ifm->ifs_prev != */ ifm) {
412 insque(pData, ifm->ifs_next, ifqt);
413 ifs_remque(ifm);
414 }
415
416 /* Update so_queued */
417 if (ifm->ifq_so) {
418#ifndef VBOX_WITH_SYNC_SLIRP
419 RTSemMutexRequest(ifm->ifq_so->so_mutex, RT_INDEFINITE_WAIT);
420#endif
421 if (--ifm->ifq_so->so_queued == 0)
422 /* If there's no more queued, reset nqueued */
423 ifm->ifq_so->so_nqueued = 0;
424#ifndef VBOX_WITH_SYNC_SLIRP
425 RTSemMutexRelease(ifm->ifq_so->so_mutex);
426#endif
427 }
428
429 /* Encapsulate the packet for sending */
430 if_encap(pData, (const uint8_t *)ifm->m_data, ifm->m_len);
431#ifdef VBOX_WITH_SYNC_SLIRP
432 RTSemMutexRelease(ifm->m_mutex);
433#endif
434
435 m_free(pData, ifm);
436
437#ifdef VBOX_WITH_SYNC_SLIRP
438 RTSemMutexRequest(pData->if_queued_mutex, RT_INDEFINITE_WAIT);
439 /*We release if_queued_mutex after again label and before return*/
440#endif
441 if (if_queued > 0)
442 goto again;
443#ifdef VBOX_WITH_SYNC_SLIRP
444 RTSemMutexRelease(pData->if_queued_mutex);
445#endif
446}
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette