VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/misc.c@26594

Last change on this file since 26594 was 26404, checked in by vboxsync, 15 years ago

NAT: applied patch from xtracker 3993 (use BSD mbufs)

  • Property svn:eol-style set to native
File size: 9.2 KB
 
/*
 * Copyright (c) 1995 Danny Gasparovski.
 *
 * Please read the file COPYRIGHT for the
 * terms and conditions of the copyright.
 */

#define WANT_SYS_IOCTL_H
#include <slirp.h>

#ifndef HAVE_INET_ATON
int
inet_aton(const char *cp, struct in_addr *ia)
{
    u_int32_t addr = inet_addr(cp);
    if (addr == 0xffffffff)
        return 0;
    ia->s_addr = addr;
    return 1;
}
#endif

/*
 * Get our IP address and put it in our_addr
 */
void
getouraddr(PNATState pData)
{
    our_addr.s_addr = loopback_addr.s_addr;
}

struct quehead
{
    struct quehead *qh_link;
    struct quehead *qh_rlink;
};

void
insque(PNATState pData, void *a, void *b)
{
    register struct quehead *element = (struct quehead *) a;
    register struct quehead *head = (struct quehead *) b;
    element->qh_link = head->qh_link;
    head->qh_link = (struct quehead *)element;
    element->qh_rlink = (struct quehead *)head;
    ((struct quehead *)(element->qh_link))->qh_rlink = (struct quehead *)element;
}

void
remque(PNATState pData, void *a)
{
    register struct quehead *element = (struct quehead *) a;
    ((struct quehead *)(element->qh_link))->qh_rlink = element->qh_rlink;
    ((struct quehead *)(element->qh_rlink))->qh_link = element->qh_link;
    element->qh_rlink = NULL;
    /* element->qh_link = NULL;  TCP FIN1 crashes if you do this.  Why? */
}

int
add_exec(struct ex_list **ex_ptr, int do_pty, char *exec, int addr, int port)
{
    struct ex_list *tmp_ptr;

    /* First, check if the port is "bound" */
    for (tmp_ptr = *ex_ptr; tmp_ptr; tmp_ptr = tmp_ptr->ex_next)
    {
        if (port == tmp_ptr->ex_fport && addr == tmp_ptr->ex_addr)
            return -1;
    }

    tmp_ptr = *ex_ptr;
    *ex_ptr = (struct ex_list *)RTMemAlloc(sizeof(struct ex_list));
    (*ex_ptr)->ex_fport = port;
    (*ex_ptr)->ex_addr = addr;
    (*ex_ptr)->ex_pty = do_pty;
    (*ex_ptr)->ex_exec = RTStrDup(exec);
    (*ex_ptr)->ex_next = tmp_ptr;
    return 0;
}


/*
 * Set fd blocking and non-blocking
 */
void
fd_nonblock(int fd)
{
#ifdef FIONBIO
    int opt = 1;

    ioctlsocket(fd, FIONBIO, &opt);
#else
    int opt;

    opt = fcntl(fd, F_GETFL, 0);
    opt |= O_NONBLOCK;
    fcntl(fd, F_SETFL, opt);
#endif
}

void
fd_block(int fd)
{
#ifdef FIONBIO
    int opt = 0;

    ioctlsocket(fd, FIONBIO, &opt);
#else
    int opt;

    opt = fcntl(fd, F_GETFL, 0);
    opt &= ~O_NONBLOCK;
    fcntl(fd, F_SETFL, opt);
#endif
}

#ifdef VBOX_WITH_SLIRP_BSD_MBUF
#define ITEM_MAGIC 0xdead0001
struct item
{
    uint32_t magic;
    uma_zone_t zone;
    uint32_t ref_count;
    LIST_ENTRY(item) list;
};

#define ZONE_MAGIC 0xdead0002
struct uma_zone
{
    uint32_t magic;
    PNATState pData; /* to minimize changes in the rest of UMA emulation code */
    RTCRITSECT csZone;
    const char *name;
    size_t size; /* item size */
    ctor_t pfCtor;
    dtor_t pfDtor;
    zinit_t pfInit;
    zfini_t pfFini;
    uma_alloc_t pfAlloc;
    uma_free_t pfFree;
    int max_items;
    int cur_items;
    LIST_HEAD(RT_NOTHING, item) used_items;
    LIST_HEAD(RT_NOTHING, item) free_items;
    uma_zone_t master_zone;
};


static void *slirp_uma_alloc(uma_zone_t zone,
                             int size, uint8_t *pflags, int wait)
{
    struct item *it;
    RTCritSectEnter(&zone->csZone);
    if (   (zone->max_items != 0 && zone->cur_items >= zone->max_items)
        || (zone->max_items == 0 && !LIST_EMPTY(&zone->free_items)))
    {
        /*
         * @todo (r=vvl) there should be some accounting of extra items
         * here, in case we break through the barrier
         */
        if (LIST_EMPTY(&zone->free_items))
        {
            RTCritSectLeave(&zone->csZone);
            return NULL;
        }
        it = LIST_FIRST(&zone->free_items);
        LIST_REMOVE(it, list);
        LIST_INSERT_HEAD(&zone->used_items, it, list);
        goto allocated;
    }

    /* @todo whether to use the zeroing ('Z') variant should depend on the flags */
    it = RTMemAllocZ(sizeof(struct item) + zone->size);
    if (it == NULL)
    {
        Log(("NAT: uma no memory"));
        RTCritSectLeave(&zone->csZone);
        return NULL;
    }
    it->magic = ITEM_MAGIC;
    LIST_INSERT_HEAD(&zone->used_items, it, list);
    zone->cur_items++;
    it->zone = zone;

allocated:
    if (zone->pfInit)
        zone->pfInit(zone->pData, (void *)&it[1], zone->size, M_DONTWAIT);
    RTCritSectLeave(&zone->csZone);
    return (void *)&it[1];
}

static void slirp_uma_free(void *item, int size, uint8_t flags)
{
    struct item *it;
    uma_zone_t zone;
    Assert(item);
    it = &((struct item *)item)[-1];
    Assert(it->magic == ITEM_MAGIC);
    zone = it->zone;
    RTCritSectEnter(&zone->csZone);
    Assert(zone->magic == ZONE_MAGIC);
    LIST_REMOVE(it, list);
    LIST_INSERT_HEAD(&zone->free_items, it, list);
    zone->cur_items--;
    RTCritSectLeave(&zone->csZone);
}

uma_zone_t uma_zcreate(PNATState pData, char *name, size_t size,
                       ctor_t ctor, dtor_t dtor, zinit_t init, zfini_t fini, int flags1, int flags2)
{
    uma_zone_t zone = RTMemAllocZ(sizeof(struct uma_zone) + size);
    Assert((pData));
    zone->magic = ZONE_MAGIC;
    zone->pData = pData;
    zone->name = name;
    zone->size = size;
    zone->pfCtor = ctor;
    zone->pfDtor = dtor;
    zone->pfInit = init;
    zone->pfFini = fini;
    zone->pfAlloc = slirp_uma_alloc;
    zone->pfFree = slirp_uma_free;
    RTCritSectInit(&zone->csZone);
    return zone;
}

uma_zone_t uma_zsecond_create(char *name, ctor_t ctor,
                              dtor_t dtor, zinit_t init, zfini_t fini, uma_zone_t master)
{
    uma_zone_t zone;
#if 0
    if (master->pfAlloc != NULL)
        zone = (uma_zone_t)master->pfAlloc(master, sizeof(struct uma_zone), NULL, 0);
#endif
    zone = RTMemAllocZ(sizeof(struct uma_zone));
    if (zone == NULL)
    {
        return NULL;
    }
    Assert((master && master->pData));
    zone->magic = ZONE_MAGIC;
    zone->pData = master->pData;
    zone->name = name;
    zone->pfCtor = ctor;
    zone->pfDtor = dtor;
    zone->pfInit = init;
    zone->pfFini = fini;
    zone->pfAlloc = slirp_uma_alloc;
    zone->pfFree = slirp_uma_free;
    zone->size = master->size;
    RTCritSectInit(&zone->csZone);
    return zone;
}

void uma_zone_set_max(uma_zone_t zone, int max)
{
    zone->max_items = max;
}

void uma_zone_set_allocf(uma_zone_t zone, uma_alloc_t pfAlloc)
{
    zone->pfAlloc = pfAlloc;
}

void uma_zone_set_freef(uma_zone_t zone, uma_free_t pfFree)
{
    zone->pfFree = pfFree;
}

uint32_t *uma_find_refcnt(uma_zone_t zone, void *mem)
{
    /* @todo (vvl) this function is supposed to work with a special zone
     * that stores reference counters */
    struct item *it = (struct item *)mem; /* 1st element */
    Assert(mem != NULL);
    Assert(zone->magic == ZONE_MAGIC);
    /* to return a pointer to the counter we need to get at element 0 */
    Assert(it[-1].magic == ITEM_MAGIC);
    return &it[-1].ref_count;
}

void *uma_zalloc_arg(uma_zone_t zone, void *args, int how)
{
    void *mem;
    Assert(zone->magic == ZONE_MAGIC);
    if (zone->pfAlloc == NULL)
        return NULL;
    RTCritSectEnter(&zone->csZone);
    mem = zone->pfAlloc(zone, zone->size, NULL, 0);
    if (zone->pfCtor)
        zone->pfCtor(zone->pData, mem, zone->size, args, M_DONTWAIT);
    RTCritSectLeave(&zone->csZone);
    return mem;
}

void uma_zfree(uma_zone_t zone, void *item)
{
    uma_zfree_arg(zone, item, NULL);
}

void uma_zfree_arg(uma_zone_t zone, void *mem, void *flags)
{
    struct item *it;
    Assert(zone->magic == ZONE_MAGIC);
    if (zone->pfFree == NULL)
        return;
    Assert((mem));
    RTCritSectEnter(&zone->csZone);
    it = &((struct item *)mem)[-1];
    if (it->magic != ITEM_MAGIC)
    {
        Log(("NAT:UMA: %p seems to be allocated on heap ... freeing\n", mem));
        RTMemFree(mem);
        RTCritSectLeave(&zone->csZone);
        return;
    }
    Assert((zone->magic == ZONE_MAGIC && zone == it->zone));

    if (zone->pfDtor)
        zone->pfDtor(zone->pData, mem, zone->size, flags);
    zone->pfFree(mem, 0, 0);
    RTCritSectLeave(&zone->csZone);
}

int uma_zone_exhausted_nolock(uma_zone_t zone)
{
    return 0;
}

void zone_drain(uma_zone_t zone)
{
}

void slirp_null_arg_free(void *mem, void *arg)
{
    /* @todo (r=vvl) make this smarter */
    Assert(mem);
    RTMemFree(mem);
}

void *uma_zalloc(uma_zone_t zone, int len)
{
    return NULL;
}

void *slirp_ext_m_get(PNATState pData, uint8_t *pkt, size_t pkt_len)
{
    struct mbuf *m;
    /* pick the smallest supported mbuf buffer size that fits the packet */
    size_t size = MCLBYTES;
    if (pkt_len < MSIZE)
        size = MCLBYTES;
    else if (pkt_len < MCLBYTES)
        size = MCLBYTES;
    else if (pkt_len < MJUM9BYTES)
        size = MJUM9BYTES;
    else if (pkt_len < MJUM16BYTES)
        size = MJUM16BYTES;
    else
        AssertMsgFailed(("Unsupported size"));

    m = m_getjcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR, size);
    m->m_len = pkt_len;
    memcpy(m->m_data, pkt, pkt_len);
    return (void *)m;
}

void slirp_ext_m_free(PNATState pData, void *arg)
{
    struct mbuf *m = (struct mbuf *)arg;
    m_free(pData, m);
}

static void zone_destroy(uma_zone_t zone)
{
    RTCritSectDelete(&zone->csZone);
    RTMemFree(zone);
}

void m_fini(PNATState pData)
{
    zone_destroy(pData->zone_mbuf);
    zone_destroy(pData->zone_clust);
    zone_destroy(pData->zone_pack);
    zone_destroy(pData->zone_jumbop);
    zone_destroy(pData->zone_jumbo9);
    zone_destroy(pData->zone_jumbo16);
    /* @todo do finalization here. */
}
#endif /* VBOX_WITH_SLIRP_BSD_MBUF */
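
Usage note (not part of misc.c): the commit above switches the NAT engine to BSD-style mbufs backed by the UMA-zone emulation in this file. The sketch below shows how that API is typically driven, under stated assumptions: it assumes <slirp.h> is on the include path and that pData is an already initialized PNATState; the function name example_uma_roundtrip and the zone name "example" are hypothetical and only for illustration.

/* Minimal usage sketch of the zone and external-mbuf helpers defined above.
 * Assumptions: pData is a valid, initialized PNATState; example_uma_roundtrip
 * and the zone name "example" are illustrative only. */
#define WANT_SYS_IOCTL_H
#include <slirp.h>

static void example_uma_roundtrip(PNATState pData)
{
    uma_zone_t zone;
    void *item;
    void *m;
    uint8_t abFrame[64] = { 0 };

    /* Create a zone of 256-byte items with no ctor/dtor/init/fini hooks. */
    zone = uma_zcreate(pData, (char *)"example", 256,
                       NULL, NULL, NULL, NULL, 0, 0);
    if (!zone)
        return;

    /* Cap the zone at 32 items; once the cap is reached slirp_uma_alloc
     * recycles entries from the zone's free list instead of allocating. */
    uma_zone_set_max(zone, 32);

    /* Allocate one item (args/how are forwarded to the ctor, if any)
     * and hand it back, which parks it on the free list. */
    item = uma_zalloc_arg(zone, NULL, 0);
    if (item)
        uma_zfree(zone, item);

    /* Wrap a raw frame into an mbuf sized by slirp_ext_m_get, then free it. */
    m = slirp_ext_m_get(pData, abFrame, sizeof(abFrame));
    if (m)
        slirp_ext_m_free(pData, m);
}

Note that zone_destroy() is static to misc.c and only reachable through m_fini(), so a zone created this way stays alive for the lifetime of the NAT instance.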