VirtualBox

source: vbox/trunk/src/VBox/NetworkServices/NAT/proxy_pollmgr.c@ 51574

Last change on this file since 51574 was 51574, checked in by vboxsync, 11 years ago

NAT/Net: #define LOG_GROUP LOG_GROUP_NAT_SERVICE

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
檔案大小: 17.0 KB
 
1/* -*- indent-tabs-mode: nil; -*- */
2#define LOG_GROUP LOG_GROUP_NAT_SERVICE
3
4#include "winutils.h"
5
6#include "proxy_pollmgr.h"
7#include "proxy.h"
8
9#ifndef RT_OS_WINDOWS
10#include <sys/socket.h>
11#include <netinet/in.h>
12#include <err.h>
13#include <errno.h>
14#include <poll.h>
15#include <stdio.h>
16#include <stdlib.h>
17#include <string.h>
18#include <time.h>
19#include <unistd.h>
20#else
21#include <iprt/err.h>
22#include <stdlib.h>
23#include <string.h>
24#include "winpoll.h"
25#endif
26
27#define POLLMGR_GARBAGE (-1)
28
29struct pollmgr {
30 struct pollfd *fds;
31 struct pollmgr_handler **handlers;
32 nfds_t capacity; /* allocated size of the arrays */
33 nfds_t nfds; /* part of the arrays in use */
34
35 /* channels (socketpair) for static slots */
36 SOCKET chan[POLLMGR_SLOT_STATIC_COUNT][2];
37#define POLLMGR_CHFD_RD 0 /* - pollmgr side */
38#define POLLMGR_CHFD_WR 1 /* - client side */
39} pollmgr;
40
41
42static void pollmgr_loop(void);
43
44static void pollmgr_add_at(int, struct pollmgr_handler *, SOCKET, int);
45static void pollmgr_refptr_delete(struct pollmgr_refptr *);
46
47
/*
 * We cannot portably peek at the length of the incoming datagram and
 * pre-allocate pbuf chain to recvmsg() directly to it. On Linux it's
 * possible to recv with MSG_PEEK|MSG_TRUNC, but extra syscall is
 * probably more expensive (haven't measured) than doing an extra copy
 * of data, since typical UDP datagrams are small enough to avoid
 * fragmentation.
 *
 * We can use shared buffer here since we read from sockets
 * sequentially in a loop over pollfd.
 */
u8_t pollmgr_udpbuf[64 * 1024]; /* max UDP payload (64K) - shared scratch buffer */
60
61
62int
63pollmgr_init(void)
64{
65 struct pollfd *newfds;
66 struct pollmgr_handler **newhdls;
67 nfds_t newcap;
68 int status;
69 nfds_t i;
70
71 pollmgr.fds = NULL;
72 pollmgr.handlers = NULL;
73 pollmgr.capacity = 0;
74 pollmgr.nfds = 0;
75
76 for (i = 0; i < POLLMGR_SLOT_STATIC_COUNT; ++i) {
77 pollmgr.chan[i][POLLMGR_CHFD_RD] = -1;
78 pollmgr.chan[i][POLLMGR_CHFD_WR] = -1;
79 }
80
81 for (i = 0; i < POLLMGR_SLOT_STATIC_COUNT; ++i) {
82#ifndef RT_OS_WINDOWS
83 status = socketpair(PF_LOCAL, SOCK_DGRAM, 0, pollmgr.chan[i]);
84 if (status < 0) {
85 perror("socketpair");
86 goto cleanup_close;
87 }
88#else
89 status = RTWinSocketPair(PF_INET, SOCK_DGRAM, 0, pollmgr.chan[i]);
90 AssertRCReturn(status, -1);
91
92 if (RT_FAILURE(status)) {
93 perror("socketpair");
94 goto cleanup_close;
95 }
96#endif
97 }
98
99
100 newcap = 16; /* XXX: magic */
101 LWIP_ASSERT1(newcap >= POLLMGR_SLOT_STATIC_COUNT);
102
103 newfds = (struct pollfd *)
104 malloc(newcap * sizeof(*pollmgr.fds));
105 if (newfds == NULL) {
106 perror("calloc");
107 goto cleanup_close;
108 }
109
110 newhdls = (struct pollmgr_handler **)
111 malloc(newcap * sizeof(*pollmgr.handlers));
112 if (newhdls == NULL) {
113 perror("malloc");
114 free(newfds);
115 goto cleanup_close;
116 }
117
118 pollmgr.capacity = newcap;
119 pollmgr.fds = newfds;
120 pollmgr.handlers = newhdls;
121
122 pollmgr.nfds = POLLMGR_SLOT_STATIC_COUNT;
123
124 for (i = 0; i < pollmgr.capacity; ++i) {
125 pollmgr.fds[i].fd = INVALID_SOCKET;
126 pollmgr.fds[i].events = 0;
127 pollmgr.fds[i].revents = 0;
128 }
129
130 return 0;
131
132 cleanup_close:
133 for (i = 0; i < POLLMGR_SLOT_STATIC_COUNT; ++i) {
134 SOCKET *chan = pollmgr.chan[i];
135 if (chan[POLLMGR_CHFD_RD] >= 0) {
136 closesocket(chan[POLLMGR_CHFD_RD]);
137 closesocket(chan[POLLMGR_CHFD_WR]);
138 }
139 }
140
141 return -1;
142}
143
144
145/*
146 * Must be called before pollmgr loop is started, so no locking.
147 */
148SOCKET
149pollmgr_add_chan(int slot, struct pollmgr_handler *handler)
150{
151 if (slot >= POLLMGR_SLOT_FIRST_DYNAMIC) {
152 handler->slot = -1;
153 return -1;
154 }
155
156 pollmgr_add_at(slot, handler, pollmgr.chan[slot][POLLMGR_CHFD_RD], POLLIN);
157 return pollmgr.chan[slot][POLLMGR_CHFD_WR];
158}
159
160
/*
 * Register "handler" for "fd" in a new dynamic slot, polling for
 * "events".  Returns the slot number (also stored in handler->slot),
 * or -1 on allocation failure.
 *
 * Must be called from pollmgr loop (via callbacks), so no locking.
 */
int
pollmgr_add(struct pollmgr_handler *handler, SOCKET fd, int events)
{
    int slot;

    DPRINTF2(("%s: new fd %d\n", __func__, fd));

    /* grow both arrays if full; capacity is only bumped after BOTH
     * reallocs succeed so a partial failure leaves consistent state */
    if (pollmgr.nfds == pollmgr.capacity) {
        struct pollfd *newfds;
        struct pollmgr_handler **newhdls;
        nfds_t newcap;
        nfds_t i;

        newcap = pollmgr.capacity * 2;

        newfds = (struct pollfd *)
            realloc(pollmgr.fds, newcap * sizeof(*pollmgr.fds));
        if (newfds == NULL) {
            perror("realloc");
            handler->slot = -1;
            return -1;
        }

        pollmgr.fds = newfds; /* don't crash/leak if realloc(handlers) fails */
        /* but don't update capacity yet! */

        newhdls = (struct pollmgr_handler **)
            realloc(pollmgr.handlers, newcap * sizeof(*pollmgr.handlers));
        if (newhdls == NULL) {
            perror("realloc");
            /* if we failed to realloc here, then fds points to the
             * new array, but we pretend we still have the old capacity */
            handler->slot = -1;
            return -1;
        }

        pollmgr.handlers = newhdls;
        pollmgr.capacity = newcap;

        /* initialize the newly added tail of both arrays */
        for (i = pollmgr.nfds; i < newcap; ++i) {
            newfds[i].fd = INVALID_SOCKET;
            newfds[i].events = 0;
            newfds[i].revents = 0;
            newhdls[i] = NULL;
        }
    }

    /* take the first unused slot */
    slot = pollmgr.nfds;
    ++pollmgr.nfds;

    pollmgr_add_at(slot, handler, fd, events);
    return slot;
}
217
218
219static void
220pollmgr_add_at(int slot, struct pollmgr_handler *handler, SOCKET fd, int events)
221{
222 pollmgr.fds[slot].fd = fd;
223 pollmgr.fds[slot].events = events;
224 pollmgr.fds[slot].revents = 0;
225 pollmgr.handlers[slot] = handler;
226
227 handler->slot = slot;
228}
229
230
231ssize_t
232pollmgr_chan_send(int slot, void *buf, size_t nbytes)
233{
234 SOCKET fd;
235 ssize_t nsent;
236
237 if (slot >= POLLMGR_SLOT_FIRST_DYNAMIC) {
238 return -1;
239 }
240
241 fd = pollmgr.chan[slot][POLLMGR_CHFD_WR];
242 nsent = send(fd, buf, (int)nbytes, 0);
243 if (nsent == SOCKET_ERROR) {
244 warn("send on chan %d", slot);
245 return -1;
246 }
247 else if ((size_t)nsent != nbytes) {
248 warnx("send on chan %d: datagram truncated to %u bytes",
249 slot, (unsigned int)nsent);
250 return -1;
251 }
252
253 return nsent;
254}
255
256
257/**
258 * Receive a pointer sent over poll manager channel.
259 */
260void *
261pollmgr_chan_recv_ptr(struct pollmgr_handler *handler, SOCKET fd, int revents)
262{
263 void *ptr;
264 ssize_t nread;
265
266 if (revents & POLLNVAL) {
267 errx(EXIT_FAILURE, "chan %d: fd invalid", (int)handler->slot);
268 /* NOTREACHED */
269 }
270
271 if (revents & (POLLERR | POLLHUP)) {
272 errx(EXIT_FAILURE, "chan %d: fd error", (int)handler->slot);
273 /* NOTREACHED */
274 }
275
276 LWIP_ASSERT1(revents & POLLIN);
277 nread = recv(fd, (char *)&ptr, sizeof(ptr), 0);
278
279 if (nread == SOCKET_ERROR) {
280 err(EXIT_FAILURE, "chan %d: recv", (int)handler->slot);
281 /* NOTREACHED */
282 }
283 if (nread != sizeof(ptr)) {
284 errx(EXIT_FAILURE, "chan %d: recv: read %d bytes",
285 (int)handler->slot, (int)nread);
286 /* NOTREACHED */
287 }
288
289 return ptr;
290}
291
292
293void
294pollmgr_update_events(int slot, int events)
295{
296 LWIP_ASSERT1(slot >= POLLMGR_SLOT_FIRST_DYNAMIC);
297 LWIP_ASSERT1((nfds_t)slot < pollmgr.nfds);
298
299 pollmgr.fds[slot].events = events;
300}
301
302
303void
304pollmgr_del_slot(int slot)
305{
306 LWIP_ASSERT1(slot >= POLLMGR_SLOT_FIRST_DYNAMIC);
307
308 DPRINTF2(("%s(%d): fd %d ! DELETED\n",
309 __func__, slot, pollmgr.fds[slot].fd));
310
311 pollmgr.fds[slot].fd = INVALID_SOCKET; /* see poll loop */
312}
313
314
/*
 * Thread entry point: runs the poll loop forever.
 */
void
pollmgr_thread(void *ignored)
{
    LWIP_UNUSED_ARG(ignored);
    pollmgr_loop();
}
321
322
/*
 * The poll manager main loop: poll all registered fds forever,
 * dispatch ready events to the slot handlers, then garbage-collect
 * and compact slots whose handlers requested deletion.
 *
 * Handler callbacks return the new events mask for their slot, or a
 * negative value to request deletion of the slot.
 */
static void
pollmgr_loop(void)
{
    int nready;                 /* fds with pending events, from poll() */
    SOCKET delfirst;            /* head of the garbage list (slot index) */
    SOCKET *pdelprev;           /* where to link the next garbage slot */
    int i;

    for (;;) {
#ifndef RT_OS_WINDOWS
        nready = poll(pollmgr.fds, pollmgr.nfds, -1);
#else
        int rc = RTWinPoll(pollmgr.fds, pollmgr.nfds,RT_INDEFINITE_WAIT, &nready);
        if (RT_FAILURE(rc)) {
            err(EXIT_FAILURE, "poll"); /* XXX: what to do on error? */
            /* NOTREACHED*/
        }
#endif

        DPRINTF2(("%s: ready %d fd%s\n",
                  __func__, nready, (nready == 1 ? "" : "s")));

        if (nready < 0) {
            if (errno == EINTR) {
                continue;
            }

            err(EXIT_FAILURE, "poll"); /* XXX: what to do on error? */
            /* NOTREACHED*/
        }
        else if (nready == 0) { /* cannot happen, we wait forever (-1) */
            continue;           /* - but be defensive */
        }


        /* garbage list starts empty */
        delfirst = INVALID_SOCKET;
        pdelprev = &delfirst;

        /* dispatch events; stop early once all ready fds are handled */
        for (i = 0; (nfds_t)i < pollmgr.nfds && nready > 0; ++i) {
            struct pollmgr_handler *handler;
            SOCKET fd;
            int revents, nevents;

            fd = pollmgr.fds[i].fd;
            revents = pollmgr.fds[i].revents;

            /*
             * Channel handlers can request deletion of dynamic slots
             * by calling pollmgr_del_slot() that clobbers slot's fd.
             */
            if (fd == INVALID_SOCKET && i >= POLLMGR_SLOT_FIRST_DYNAMIC) {
                /* adjust count if events were pending for that slot */
                if (revents != 0) {
                    --nready;
                }

                /* pretend that slot handler requested deletion */
                nevents = -1;
                goto update_events;
            }

            if (revents == 0) {
                continue; /* next fd */
            }
            --nready;

            handler = pollmgr.handlers[i];

            if (handler != NULL && handler->callback != NULL) {
#if LWIP_PROXY_DEBUG /* DEBUG */
                if (i < POLLMGR_SLOT_FIRST_DYNAMIC) {
                    if (revents == POLLIN) {
                        DPRINTF2(("%s: ch %d\n", __func__, i));
                    }
                    else {
                        DPRINTF2(("%s: ch %d @ revents 0x%x!\n",
                                  __func__, i, revents));
                    }
                }
                else {
                    DPRINTF2(("%s: fd %d @ revents 0x%x\n",
                              __func__, fd, revents));
                }
#endif /* DEBUG */
                /* callback returns new events mask, or < 0 to delete */
                nevents = (*handler->callback)(handler, fd, revents);
            }
            else {
                DPRINTF0(("%s: invalid handler for fd %d: ", __func__, fd));
                if (handler == NULL) {
                    DPRINTF0(("NULL\n"));
                }
                else {
                    DPRINTF0(("%p (callback = NULL)\n", (void *)handler));
                }
                nevents = -1; /* delete it */
            }

          update_events:
            if (nevents >= 0) {
                if (nevents != pollmgr.fds[i].events) {
                    DPRINTF2(("%s: fd %d ! nevents 0x%x\n",
                              __func__, fd, nevents));
                }
                pollmgr.fds[i].events = nevents;
            }
            else if (i < POLLMGR_SLOT_FIRST_DYNAMIC) {
                /* Don't garbage-collect channels. */
                DPRINTF2(("%s: fd %d ! DELETED (channel %d)\n",
                          __func__, fd, i));
                pollmgr.fds[i].fd = INVALID_SOCKET;
                pollmgr.fds[i].events = 0;
                pollmgr.fds[i].revents = 0;
                pollmgr.handlers[i] = NULL;
            }
            else {
                DPRINTF2(("%s: fd %d ! DELETED\n", __func__, fd));

                /* schedule for deletion (see g/c loop for details) */
                *pdelprev = i;  /* make previous entry point to us */
                pdelprev = &pollmgr.fds[i].fd;

                pollmgr.fds[i].fd = INVALID_SOCKET; /* end of list (for now) */
                pollmgr.fds[i].events = POLLMGR_GARBAGE;
                pollmgr.fds[i].revents = 0;
                pollmgr.handlers[i] = NULL;
            }
        } /* processing loop */


        /*
         * Garbage collect and compact the array.
         *
         * We overload pollfd::fd of garbage entries to store the
         * index of the next garbage entry.  The garbage list is
         * co-directional with the fds array.  The index of the first
         * entry is in "delfirst", the last entry "points to"
         * INVALID_SOCKET.
         *
         * See update_events code for nevents < 0 at the end of the
         * processing loop above.
         */
        while (delfirst != INVALID_SOCKET) {
            const int last = pollmgr.nfds - 1;

            /*
             * We want a live entry in the last slot to swap into the
             * freed slot, so make sure we have one.
             */
            if (pollmgr.fds[last].events == POLLMGR_GARBAGE /* garbage */
                || pollmgr.fds[last].fd == INVALID_SOCKET) /* or killed */
            {
                /* drop garbage entry at the end of the array */
                --pollmgr.nfds;

                if (delfirst == last) {
                    /* congruent to delnext >= pollmgr.nfds test below */
                    delfirst = INVALID_SOCKET; /* done */
                }
            }
            else {
                /* fd of a garbage entry holds the next garbage index */
                const SOCKET delnext = pollmgr.fds[delfirst].fd;

                /* copy live entry at the end to the first slot being freed */
                pollmgr.fds[delfirst] = pollmgr.fds[last]; /* struct copy */
                pollmgr.handlers[delfirst] = pollmgr.handlers[last];
                pollmgr.handlers[delfirst]->slot = (int)delfirst;
                --pollmgr.nfds;

                if ((nfds_t)delnext >= pollmgr.nfds) {
                    delfirst = INVALID_SOCKET; /* done */
                }
                else {
                    delfirst = delnext;
                }
            }

            /* scrub the vacated last slot */
            pollmgr.fds[last].fd = INVALID_SOCKET;
            pollmgr.fds[last].events = 0;
            pollmgr.fds[last].revents = 0;
            pollmgr.handlers[last] = NULL;
        }
    } /* poll loop */
}
506
507
508/**
509 * Create strongly held refptr.
510 */
511struct pollmgr_refptr *
512pollmgr_refptr_create(struct pollmgr_handler *ptr)
513{
514 struct pollmgr_refptr *rp;
515
516 LWIP_ASSERT1(ptr != NULL);
517
518 rp = (struct pollmgr_refptr *)malloc(sizeof (*rp));
519 if (rp == NULL) {
520 return NULL;
521 }
522
523 sys_mutex_new(&rp->lock);
524 rp->ptr = ptr;
525 rp->strong = 1;
526 rp->weak = 0;
527
528 return rp;
529}
530
531
532static void
533pollmgr_refptr_delete(struct pollmgr_refptr *rp)
534{
535 if (rp == NULL) {
536 return;
537 }
538
539 LWIP_ASSERT1(rp->strong == 0);
540 LWIP_ASSERT1(rp->weak == 0);
541
542 sys_mutex_free(&rp->lock);
543 free(rp);
544}
545
546
547/**
548 * Add weak reference before "rp" is sent over a poll manager channel.
549 */
550void
551pollmgr_refptr_weak_ref(struct pollmgr_refptr *rp)
552{
553 sys_mutex_lock(&rp->lock);
554
555 LWIP_ASSERT1(rp->ptr != NULL);
556 LWIP_ASSERT1(rp->strong > 0);
557
558 ++rp->weak;
559
560 sys_mutex_unlock(&rp->lock);
561}
562
563
/**
 * Try to get the pointer from the implicitly weak reference we've got
 * from a channel.
 *
 * If we detect that the object is still strongly referenced, but no
 * longer registered with the poll manager we abort strengthening
 * conversion here b/c lwip thread callback is already scheduled to
 * destruct the object.
 *
 * Always consumes one weak reference; may return NULL.
 */
struct pollmgr_handler *
pollmgr_refptr_get(struct pollmgr_refptr *rp)
{
    struct pollmgr_handler *handler;
    size_t weak;

    sys_mutex_lock(&rp->lock);

    /* consume the weak reference added by pollmgr_refptr_weak_ref() */
    LWIP_ASSERT1(rp->weak > 0);
    weak = --rp->weak;

    handler = rp->ptr;
    if (handler == NULL) {
        /* object already destroyed; if we dropped the last weak
         * reference, the refptr itself can go too */
        LWIP_ASSERT1(rp->strong == 0);
        sys_mutex_unlock(&rp->lock);
        if (weak == 0) {
            pollmgr_refptr_delete(rp);
        }
        return NULL;
    }

    LWIP_ASSERT1(rp->strong == 1);

    /*
     * Here we would do:
     *
     *   ++rp->strong;
     *
     * and then, after channel handler is done, we would decrement it
     * back.
     *
     * Instead we check that the object is still registered with poll
     * manager. If it is, there's no race with lwip thread trying to
     * drop its strong reference, as lwip thread callback to destruct
     * the object is always scheduled by its poll manager callback.
     *
     * Conversely, if we detect that the object is no longer registered
     * with poll manager, we immediately abort. Since channel handler
     * can't do anything useful anyway and would have to return
     * immediately.
     *
     * Since channel handler would always find rp->strong as it had
     * left it, just elide extra strong reference creation to avoid
     * the whole back-and-forth.
     */

    if (handler->slot < 0) { /* no longer polling */
        sys_mutex_unlock(&rp->lock);
        return NULL;
    }

    sys_mutex_unlock(&rp->lock);
    return handler;
}
627
628
/**
 * Remove (the only) strong reference.
 *
 * If it were real strong/weak pointers, we should also call the
 * destructor for the referenced object here; but the handler object
 * is owned and destroyed elsewhere (see pollmgr_refptr_get()
 * comments), so we only drop the pointer.  The refptr itself is
 * freed here if no weak references remain.
 */
void
pollmgr_refptr_unref(struct pollmgr_refptr *rp)
{
    sys_mutex_lock(&rp->lock);

    LWIP_ASSERT1(rp->strong == 1);
    --rp->strong;

    if (rp->strong > 0) {
        sys_mutex_unlock(&rp->lock);
    }
    else {
        size_t weak;

        /* void *ptr = rp->ptr; */
        rp->ptr = NULL;

        /* delete ptr; // see doc comment */

        /* snapshot under the lock; delete outside it if unreferenced */
        weak = rp->weak;
        sys_mutex_unlock(&rp->lock);
        if (weak == 0) {
            pollmgr_refptr_delete(rp);
        }
    }
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette