VirtualBox

source: vbox/trunk/src/VBox/NetworkServices/NAT/proxy_pollmgr.c@ 52493

Last change on this file since 52493 was 51597, checked in by vboxsync, 11 years ago

NAT/Net: convert perror/warn/warnx to DPRINTFs.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 17.2 KB
 
1/* -*- indent-tabs-mode: nil; -*- */
2#define LOG_GROUP LOG_GROUP_NAT_SERVICE
3
4#include "winutils.h"
5
6#include "proxy_pollmgr.h"
7#include "proxy.h"
8
9#ifndef RT_OS_WINDOWS
10#include <sys/socket.h>
11#include <netinet/in.h>
12#include <err.h>
13#include <errno.h>
14#include <poll.h>
15#include <stdio.h>
16#include <stdlib.h>
17#include <string.h>
18#include <time.h>
19#include <unistd.h>
20#else
21#include <iprt/err.h>
22#include <stdlib.h>
23#include <string.h>
24#include "winpoll.h"
25#endif
26
27#define POLLMGR_GARBAGE (-1)
28
/*
 * Global poll manager state: a pollfd array mirrored by a parallel
 * array of handler pointers, plus one socketpair "channel" per static
 * slot for other threads to talk to the poll thread.
 */
struct pollmgr {
    struct pollfd *fds;                /* poll(2) array, parallel to handlers */
    struct pollmgr_handler **handlers; /* handler for each fds[] slot */
    nfds_t capacity;            /* allocated size of the arrays */
    nfds_t nfds;                /* part of the arrays in use */

    /* channels (socketpair) for static slots */
    SOCKET chan[POLLMGR_SLOT_STATIC_COUNT][2];
#define POLLMGR_CHFD_RD 0       /* - pollmgr side */
#define POLLMGR_CHFD_WR 1       /* - client side */
} pollmgr;
40
41
42static void pollmgr_loop(void);
43
44static void pollmgr_add_at(int, struct pollmgr_handler *, SOCKET, int);
45static void pollmgr_refptr_delete(struct pollmgr_refptr *);
46
47
/*
 * We cannot portably peek at the length of the incoming datagram and
 * pre-allocate pbuf chain to recvmsg() directly to it.  On Linux it's
 * possible to recv with MSG_PEEK|MSG_TRUNC, but an extra syscall is
 * probably more expensive (haven't measured) than doing an extra copy
 * of data, since typical UDP datagrams are small enough to avoid
 * fragmentation.
 *
 * We can use a shared buffer here since we read from sockets
 * sequentially in a loop over pollfd (single poll manager thread).
 */
u8_t pollmgr_udpbuf[64 * 1024];
60
61
62int
63pollmgr_init(void)
64{
65 struct pollfd *newfds;
66 struct pollmgr_handler **newhdls;
67 nfds_t newcap;
68 int status;
69 nfds_t i;
70
71 pollmgr.fds = NULL;
72 pollmgr.handlers = NULL;
73 pollmgr.capacity = 0;
74 pollmgr.nfds = 0;
75
76 for (i = 0; i < POLLMGR_SLOT_STATIC_COUNT; ++i) {
77 pollmgr.chan[i][POLLMGR_CHFD_RD] = -1;
78 pollmgr.chan[i][POLLMGR_CHFD_WR] = -1;
79 }
80
81 for (i = 0; i < POLLMGR_SLOT_STATIC_COUNT; ++i) {
82#ifndef RT_OS_WINDOWS
83 status = socketpair(PF_LOCAL, SOCK_DGRAM, 0, pollmgr.chan[i]);
84 if (status < 0) {
85 DPRINTF(("socketpair: %R[sockerr]\n", SOCKERRNO()));
86 goto cleanup_close;
87 }
88#else
89 status = RTWinSocketPair(PF_INET, SOCK_DGRAM, 0, pollmgr.chan[i]);
90 if (RT_FAILURE(status)) {
91 goto cleanup_close;
92 }
93#endif
94 }
95
96
97 newcap = 16; /* XXX: magic */
98 LWIP_ASSERT1(newcap >= POLLMGR_SLOT_STATIC_COUNT);
99
100 newfds = (struct pollfd *)
101 malloc(newcap * sizeof(*pollmgr.fds));
102 if (newfds == NULL) {
103 DPRINTF(("%s: Failed to allocate fds array\n", __func__));
104 goto cleanup_close;
105 }
106
107 newhdls = (struct pollmgr_handler **)
108 malloc(newcap * sizeof(*pollmgr.handlers));
109 if (newhdls == NULL) {
110 DPRINTF(("%s: Failed to allocate handlers array\n", __func__));
111 free(newfds);
112 goto cleanup_close;
113 }
114
115 pollmgr.capacity = newcap;
116 pollmgr.fds = newfds;
117 pollmgr.handlers = newhdls;
118
119 pollmgr.nfds = POLLMGR_SLOT_STATIC_COUNT;
120
121 for (i = 0; i < pollmgr.capacity; ++i) {
122 pollmgr.fds[i].fd = INVALID_SOCKET;
123 pollmgr.fds[i].events = 0;
124 pollmgr.fds[i].revents = 0;
125 }
126
127 return 0;
128
129 cleanup_close:
130 for (i = 0; i < POLLMGR_SLOT_STATIC_COUNT; ++i) {
131 SOCKET *chan = pollmgr.chan[i];
132 if (chan[POLLMGR_CHFD_RD] >= 0) {
133 closesocket(chan[POLLMGR_CHFD_RD]);
134 closesocket(chan[POLLMGR_CHFD_WR]);
135 }
136 }
137
138 return -1;
139}
140
141
142/*
143 * Must be called before pollmgr loop is started, so no locking.
144 */
145SOCKET
146pollmgr_add_chan(int slot, struct pollmgr_handler *handler)
147{
148 if (slot >= POLLMGR_SLOT_FIRST_DYNAMIC) {
149 handler->slot = -1;
150 return -1;
151 }
152
153 pollmgr_add_at(slot, handler, pollmgr.chan[slot][POLLMGR_CHFD_RD], POLLIN);
154 return pollmgr.chan[slot][POLLMGR_CHFD_WR];
155}
156
157
/*
 * Register "handler" to poll "fd" for "events" in a new dynamic slot.
 * Grows the arrays (doubling capacity) when full.
 *
 * Must be called from pollmgr loop (via callbacks), so no locking.
 *
 * Returns the slot index, or -1 (and handler->slot = -1) if the
 * arrays could not be grown.
 */
int
pollmgr_add(struct pollmgr_handler *handler, SOCKET fd, int events)
{
    int slot;

    DPRINTF2(("%s: new fd %d\n", __func__, fd));

    if (pollmgr.nfds == pollmgr.capacity) {
        struct pollfd *newfds;
        struct pollmgr_handler **newhdls;
        nfds_t newcap;
        nfds_t i;

        /* double; capacity starts at 16 in pollmgr_init(), never 0 */
        newcap = pollmgr.capacity * 2;

        newfds = (struct pollfd *)
            realloc(pollmgr.fds, newcap * sizeof(*pollmgr.fds));
        if (newfds == NULL) {
            DPRINTF(("%s: Failed to reallocate fds array\n", __func__));
            handler->slot = -1;
            return -1;
        }

        pollmgr.fds = newfds; /* don't crash/leak if realloc(handlers) fails */
        /* but don't update capacity yet! */

        newhdls = (struct pollmgr_handler **)
            realloc(pollmgr.handlers, newcap * sizeof(*pollmgr.handlers));
        if (newhdls == NULL) {
            DPRINTF(("%s: Failed to reallocate handlers array\n", __func__));
            /* if we failed to realloc here, then fds points to the
             * new array, but we pretend we still have the old capacity */
            handler->slot = -1;
            return -1;
        }

        /* both reallocs succeeded - now it is safe to commit */
        pollmgr.handlers = newhdls;
        pollmgr.capacity = newcap;

        /* initialize the newly added tail of both arrays */
        for (i = pollmgr.nfds; i < newcap; ++i) {
            newfds[i].fd = INVALID_SOCKET;
            newfds[i].events = 0;
            newfds[i].revents = 0;
            newhdls[i] = NULL;
        }
    }

    slot = pollmgr.nfds;
    ++pollmgr.nfds;

    pollmgr_add_at(slot, handler, fd, events);
    return slot;
}
214
215
216static void
217pollmgr_add_at(int slot, struct pollmgr_handler *handler, SOCKET fd, int events)
218{
219 pollmgr.fds[slot].fd = fd;
220 pollmgr.fds[slot].events = events;
221 pollmgr.fds[slot].revents = 0;
222 pollmgr.handlers[slot] = handler;
223
224 handler->slot = slot;
225}
226
227
228ssize_t
229pollmgr_chan_send(int slot, void *buf, size_t nbytes)
230{
231 SOCKET fd;
232 ssize_t nsent;
233
234 if (slot >= POLLMGR_SLOT_FIRST_DYNAMIC) {
235 return -1;
236 }
237
238 fd = pollmgr.chan[slot][POLLMGR_CHFD_WR];
239 nsent = send(fd, buf, (int)nbytes, 0);
240 if (nsent == SOCKET_ERROR) {
241 DPRINTF(("send on chan %d: %R[sockerr]\n", slot, SOCKERRNO()));
242 return -1;
243 }
244 else if ((size_t)nsent != nbytes) {
245 DPRINTF(("send on chan %d: datagram truncated to %u bytes",
246 slot, (unsigned int)nsent));
247 return -1;
248 }
249
250 return nsent;
251}
252
253
/**
 * Receive a pointer sent over poll manager channel.
 *
 * Channels are internal plumbing, so any error on them is treated as
 * fatal to the process (err/errx below call exit(3)).
 */
void *
pollmgr_chan_recv_ptr(struct pollmgr_handler *handler, SOCKET fd, int revents)
{
    void *ptr;
    ssize_t nread;

    if (revents & POLLNVAL) {
        errx(EXIT_FAILURE, "chan %d: fd invalid", (int)handler->slot);
        /* NOTREACHED */
    }

    if (revents & (POLLERR | POLLHUP)) {
        errx(EXIT_FAILURE, "chan %d: fd error", (int)handler->slot);
        /* NOTREACHED */
    }

    LWIP_ASSERT1(revents & POLLIN);
    /* the sender transmits a raw pointer value as one datagram */
    nread = recv(fd, (char *)&ptr, sizeof(ptr), 0);

    if (nread == SOCKET_ERROR) {
        err(EXIT_FAILURE, "chan %d: recv", (int)handler->slot);
        /* NOTREACHED */
    }
    if (nread != sizeof(ptr)) {
        /* partial datagram: should be impossible for SOCK_DGRAM, but fatal */
        errx(EXIT_FAILURE, "chan %d: recv: read %d bytes",
             (int)handler->slot, (int)nread);
        /* NOTREACHED */
    }

    return ptr;
}
288
289
290void
291pollmgr_update_events(int slot, int events)
292{
293 LWIP_ASSERT1(slot >= POLLMGR_SLOT_FIRST_DYNAMIC);
294 LWIP_ASSERT1((nfds_t)slot < pollmgr.nfds);
295
296 pollmgr.fds[slot].events = events;
297}
298
299
300void
301pollmgr_del_slot(int slot)
302{
303 LWIP_ASSERT1(slot >= POLLMGR_SLOT_FIRST_DYNAMIC);
304
305 DPRINTF2(("%s(%d): fd %d ! DELETED\n",
306 __func__, slot, pollmgr.fds[slot].fd));
307
308 pollmgr.fds[slot].fd = INVALID_SOCKET; /* see poll loop */
309}
310
311
/*
 * Poll manager thread entry point; runs the poll loop and never
 * returns.
 */
void
pollmgr_thread(void *ignored)
{
    LWIP_UNUSED_ARG(ignored);
    pollmgr_loop();
}
318
319
/*
 * The poll manager main loop: poll(2) forever, dispatch ready fds to
 * their handlers, then garbage-collect and compact slots whose
 * handlers requested deletion.
 */
static void
pollmgr_loop(void)
{
    int nready;
    SOCKET delfirst;    /* head of the intrusive garbage list (slot index) */
    SOCKET *pdelprev;   /* where to store the next garbage slot index */
    int i;

    for (;;) {
#ifndef RT_OS_WINDOWS
        nready = poll(pollmgr.fds, pollmgr.nfds, -1);
#else
        int rc = RTWinPoll(pollmgr.fds, pollmgr.nfds,RT_INDEFINITE_WAIT, &nready);
        if (RT_FAILURE(rc)) {
            err(EXIT_FAILURE, "poll"); /* XXX: what to do on error? */
            /* NOTREACHED*/
        }
#endif

        DPRINTF2(("%s: ready %d fd%s\n",
                  __func__, nready, (nready == 1 ? "" : "s")));

        if (nready < 0) {
            if (errno == EINTR) {
                continue;   /* interrupted by a signal; just re-poll */
            }

            err(EXIT_FAILURE, "poll"); /* XXX: what to do on error? */
            /* NOTREACHED*/
        }
        else if (nready == 0) { /* cannot happen, we wait forever (-1) */
            continue;           /* - but be defensive */
        }


        delfirst = INVALID_SOCKET;  /* empty garbage list */
        pdelprev = &delfirst;

        /* dispatch: stop early once all ready fds are accounted for */
        for (i = 0; (nfds_t)i < pollmgr.nfds && nready > 0; ++i) {
            struct pollmgr_handler *handler;
            SOCKET fd;
            int revents, nevents;

            fd = pollmgr.fds[i].fd;
            revents = pollmgr.fds[i].revents;

            /*
             * Channel handlers can request deletion of dynamic slots
             * by calling pollmgr_del_slot() that clobbers slot's fd.
             */
            if (fd == INVALID_SOCKET && i >= POLLMGR_SLOT_FIRST_DYNAMIC) {
                /* adjust count if events were pending for that slot */
                if (revents != 0) {
                    --nready;
                }

                /* pretend that slot handler requested deletion */
                nevents = -1;
                goto update_events;
            }

            if (revents == 0) {
                continue; /* next fd */
            }
            --nready;

            handler = pollmgr.handlers[i];

            if (handler != NULL && handler->callback != NULL) {
#if LWIP_PROXY_DEBUG /* DEBUG */
                if (i < POLLMGR_SLOT_FIRST_DYNAMIC) {
                    if (revents == POLLIN) {
                        DPRINTF2(("%s: ch %d\n", __func__, i));
                    }
                    else {
                        DPRINTF2(("%s: ch %d @ revents 0x%x!\n",
                                  __func__, i, revents));
                    }
                }
                else {
                    DPRINTF2(("%s: fd %d @ revents 0x%x\n",
                              __func__, fd, revents));
                }
#endif /* DEBUG */
                /* callback returns new events mask, or -1 to delete slot */
                nevents = (*handler->callback)(handler, fd, revents);
            }
            else {
                DPRINTF0(("%s: invalid handler for fd %d: ", __func__, fd));
                if (handler == NULL) {
                    DPRINTF0(("NULL\n"));
                }
                else {
                    DPRINTF0(("%p (callback = NULL)\n", (void *)handler));
                }
                nevents = -1; /* delete it */
            }

          update_events:
            if (nevents >= 0) {
                /* keep polling this slot, possibly with a new mask */
                if (nevents != pollmgr.fds[i].events) {
                    DPRINTF2(("%s: fd %d ! nevents 0x%x\n",
                              __func__, fd, nevents));
                }
                pollmgr.fds[i].events = nevents;
            }
            else if (i < POLLMGR_SLOT_FIRST_DYNAMIC) {
                /* Don't garbage-collect channels. */
                DPRINTF2(("%s: fd %d ! DELETED (channel %d)\n",
                          __func__, fd, i));
                pollmgr.fds[i].fd = INVALID_SOCKET;
                pollmgr.fds[i].events = 0;
                pollmgr.fds[i].revents = 0;
                pollmgr.handlers[i] = NULL;
            }
            else {
                DPRINTF2(("%s: fd %d ! DELETED\n", __func__, fd));

                /* schedule for deletion (see g/c loop for details) */
                *pdelprev = i;  /* make previous entry point to us */
                pdelprev = &pollmgr.fds[i].fd;

                pollmgr.fds[i].fd = INVALID_SOCKET; /* end of list (for now) */
                pollmgr.fds[i].events = POLLMGR_GARBAGE;
                pollmgr.fds[i].revents = 0;
                pollmgr.handlers[i] = NULL;
            }
        } /* processing loop */


        /*
         * Garbage collect and compact the array.
         *
         * We overload pollfd::fd of garbage entries to store the
         * index of the next garbage entry.  The garbage list is
         * co-directional with the fds array.  The index of the first
         * entry is in "delfirst", the last entry "points to"
         * INVALID_SOCKET.
         *
         * See update_events code for nevents < 0 at the end of the
         * processing loop above.
         */
        while (delfirst != INVALID_SOCKET) {
            const int last = pollmgr.nfds - 1;

            /*
             * We want a live entry in the last slot to swap into the
             * freed slot, so make sure we have one.
             */
            if (pollmgr.fds[last].events == POLLMGR_GARBAGE /* garbage */
                || pollmgr.fds[last].fd == INVALID_SOCKET)  /* or killed */
            {
                /* drop garbage entry at the end of the array */
                --pollmgr.nfds;

                if (delfirst == last) {
                    /* congruent to delnext >= pollmgr.nfds test below */
                    delfirst = INVALID_SOCKET; /* done */
                }
            }
            else {
                const SOCKET delnext = pollmgr.fds[delfirst].fd;

                /* copy live entry at the end to the first slot being freed */
                pollmgr.fds[delfirst] = pollmgr.fds[last]; /* struct copy */
                pollmgr.handlers[delfirst] = pollmgr.handlers[last];
                pollmgr.handlers[delfirst]->slot = (int)delfirst;
                --pollmgr.nfds;

                if ((nfds_t)delnext >= pollmgr.nfds) {
                    delfirst = INVALID_SOCKET; /* done */
                }
                else {
                    delfirst = delnext;
                }
            }

            /* scrub the vacated tail slot */
            pollmgr.fds[last].fd = INVALID_SOCKET;
            pollmgr.fds[last].events = 0;
            pollmgr.fds[last].revents = 0;
            pollmgr.handlers[last] = NULL;
        }
    } /* poll loop */
}
503
504
505/**
506 * Create strongly held refptr.
507 */
508struct pollmgr_refptr *
509pollmgr_refptr_create(struct pollmgr_handler *ptr)
510{
511 struct pollmgr_refptr *rp;
512
513 LWIP_ASSERT1(ptr != NULL);
514
515 rp = (struct pollmgr_refptr *)malloc(sizeof (*rp));
516 if (rp == NULL) {
517 return NULL;
518 }
519
520 sys_mutex_new(&rp->lock);
521 rp->ptr = ptr;
522 rp->strong = 1;
523 rp->weak = 0;
524
525 return rp;
526}
527
528
529static void
530pollmgr_refptr_delete(struct pollmgr_refptr *rp)
531{
532 if (rp == NULL) {
533 return;
534 }
535
536 LWIP_ASSERT1(rp->strong == 0);
537 LWIP_ASSERT1(rp->weak == 0);
538
539 sys_mutex_free(&rp->lock);
540 free(rp);
541}
542
543
544/**
545 * Add weak reference before "rp" is sent over a poll manager channel.
546 */
547void
548pollmgr_refptr_weak_ref(struct pollmgr_refptr *rp)
549{
550 sys_mutex_lock(&rp->lock);
551
552 LWIP_ASSERT1(rp->ptr != NULL);
553 LWIP_ASSERT1(rp->strong > 0);
554
555 ++rp->weak;
556
557 sys_mutex_unlock(&rp->lock);
558}
559
560
/**
 * Try to get the pointer from an implicitly weak reference we've got
 * from a channel.
 *
 * If we detect that the object is still strongly referenced, but no
 * longer registered with the poll manager, we abort the strengthening
 * conversion here b/c lwip thread callback is already scheduled to
 * destruct the object.
 */
struct pollmgr_handler *
pollmgr_refptr_get(struct pollmgr_refptr *rp)
{
    struct pollmgr_handler *handler;
    size_t weak;

    sys_mutex_lock(&rp->lock);

    /* we are consuming the weak reference added by weak_ref() */
    LWIP_ASSERT1(rp->weak > 0);
    weak = --rp->weak;

    handler = rp->ptr;
    if (handler == NULL) {
        /* target already destroyed; reclaim refptr on last weak ref */
        LWIP_ASSERT1(rp->strong == 0);
        sys_mutex_unlock(&rp->lock);
        if (weak == 0) {
            pollmgr_refptr_delete(rp);
        }
        return NULL;
    }

    LWIP_ASSERT1(rp->strong == 1);

    /*
     * Here we would do:
     *
     *   ++rp->strong;
     *
     * and then, after channel handler is done, we would decrement it
     * back.
     *
     * Instead we check that the object is still registered with poll
     * manager.  If it is, there's no race with lwip thread trying to
     * drop its strong reference, as lwip thread callback to destruct
     * the object is always scheduled by its poll manager callback.
     *
     * Conversely, if we detect that the object is no longer
     * registered with poll manager, we immediately abort.  Since
     * channel handler can't do anything useful anyway and would have
     * to return immediately.
     *
     * Since channel handler would always find rp->strong as it had
     * left it, just elide extra strong reference creation to avoid
     * the whole back-and-forth.
     */

    if (handler->slot < 0) { /* no longer polling */
        sys_mutex_unlock(&rp->lock);
        return NULL;
    }

    sys_mutex_unlock(&rp->lock);
    return handler;
}
624
625
/**
 * Remove (the only) strong reference.
 *
 * If these were real strong/weak pointers we would also run the
 * destructor for the referenced object here; instead we only sever
 * the link (the object's destruction is driven elsewhere - see the
 * commented-out delete below) and free the refptr itself once the
 * last weak reference is also gone.
 */
void
pollmgr_refptr_unref(struct pollmgr_refptr *rp)
{
    sys_mutex_lock(&rp->lock);

    LWIP_ASSERT1(rp->strong == 1);
    --rp->strong;

    if (rp->strong > 0) {
        sys_mutex_unlock(&rp->lock);
    }
    else {
        size_t weak;

        /* void *ptr = rp->ptr; */
        rp->ptr = NULL;     /* weak holders will now observe NULL */

        /* delete ptr; // see doc comment */

        /* sample weak count before dropping the lock */
        weak = rp->weak;
        sys_mutex_unlock(&rp->lock);
        if (weak == 0) {
            pollmgr_refptr_delete(rp);  /* no references left; reclaim */
        }
    }
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette