VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/VBoxNetFlt/linux/VBoxNetFlt-linux.c@ 14665

最後變更 在這個檔案從14665是 14551,由 vboxsync 提交於 16 年 前

Linux hostif: Pre-2.6.10 kernel fix.

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Author Date Id Revision
檔案大小: 33.3 KB
 
1/* $Id: VBoxNetFlt-linux.c 14551 2008-11-24 22:36:17Z vboxsync $ */
2/** @file
3 * VBoxNetFlt - Network Filter Driver (Host), Linux Specific Code.
4 */
5
6/*
7 * Copyright (C) 2006-2008 Sun Microsystems, Inc.
8 *
9 * Sun Microsystems, Inc. confidential
10 * All rights reserved
11 */
12
13/*******************************************************************************
14* Header Files *
15*******************************************************************************/
16#include "the-linux-kernel.h"
17#include "version-generated.h"
18#include <linux/netdevice.h>
19#include <linux/etherdevice.h>
20#include <linux/rtnetlink.h>
21
22#define LOG_GROUP LOG_GROUP_NET_FLT_DRV
23#include <VBox/log.h>
24#include <VBox/err.h>
25#include <iprt/alloca.h>
26#include <iprt/assert.h>
27#include <iprt/spinlock.h>
28#include <iprt/semaphore.h>
29#include <iprt/initterm.h>
30#include <iprt/process.h>
31#include <iprt/mem.h>
32#include <iprt/log.h>
33#include <iprt/mp.h>
34#include <iprt/mem.h>
35#include <iprt/time.h>
36
37#define VBOXNETFLT_OS_SPECFIC 1
38#include "../VBoxNetFltInternal.h"
39
/** @name Instance pointer recovery macros.
 * Recover the owning VBOXNETFLTINS instance from a pointer to one of its
 * embedded members (container_of-style pointer arithmetic).
 * @{ */
#define VBOX_FLT_NB_TO_INST(pNB) ((PVBOXNETFLTINS)((uint8_t *)pNB - \
                                  RT_OFFSETOF(VBOXNETFLTINS, u.s.Notifier)))
#define VBOX_FLT_PT_TO_INST(pPT) ((PVBOXNETFLTINS)((uint8_t *)pPT - \
                                  RT_OFFSETOF(VBOXNETFLTINS, u.s.PacketType)))
#define VBOX_FLT_XT_TO_INST(pXT) ((PVBOXNETFLTINS)((uint8_t *)pXT - \
                                  RT_OFFSETOF(VBOXNETFLTINS, u.s.XmitTask)))
/** @} */

/** The current promiscuous reference count of a net_device. */
#define VBOX_GET_PCOUNT(pDev) (pDev->promiscuity)

/* In 2.6.22 the raw sk_buff header pointers (h.raw/nh.raw/mac.raw) were
 * replaced by the *_header members; hide the difference behind macros. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
# define VBOX_SKB_TRANSPORT_HDR(skb) skb->transport_header
# define VBOX_SKB_NETWORK_HDR(skb) skb->network_header
# define VBOX_SKB_MAC_HDR(skb) skb->mac_header
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) */
# define VBOX_SKB_TRANSPORT_HDR(skb) skb->h.raw
# define VBOX_SKB_NETWORK_HDR(skb) skb->nh.raw
# define VBOX_SKB_MAC_HDR(skb) skb->mac.raw
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) */

/* skb_checksum_help() signature differs per kernel version: from 2.6.19 it
 * takes just the sk_buff; before 2.6.10 it took a struct sk_buff ** plus an
 * extra argument.  CHECKSUM_PARTIAL was named CHECKSUM_HW before 2.6.19. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) */
# define CHECKSUM_PARTIAL CHECKSUM_HW
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
#  define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb, 0)
# else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10) */
#  define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(&skb, 0)
# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10) */
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) */

/* Generic segmentation offload appeared in 2.6.18; older kernels never hand
 * us GSO packets, so the test compiles away to false. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
# define VBOX_SKB_IS_GSO(skb) skb_is_gso(skb)
  /* No features, very dumb device */
# define VBOX_SKB_GSO_SEGMENT(skb) skb_gso_segment(skb, 0)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
# define VBOX_SKB_IS_GSO(skb) false
# define VBOX_SKB_GSO_SEGMENT(skb) NULL
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
78
79/*******************************************************************************
80* Internal Functions *
81*******************************************************************************/
82static int VBoxNetFltLinuxInit(void);
83static void VBoxNetFltLinuxUnload(void);
84
85
86/*******************************************************************************
87* Global Variables *
88*******************************************************************************/
89/**
90 * The (common) global data.
91 */
92static VBOXNETFLTGLOBALS g_VBoxNetFltGlobals;
93
94module_init(VBoxNetFltLinuxInit);
95module_exit(VBoxNetFltLinuxUnload);
96
97MODULE_AUTHOR("Sun Microsystems, Inc.");
98MODULE_DESCRIPTION("VirtualBox Network Filter Driver");
99MODULE_LICENSE("GPL");
100#ifdef MODULE_VERSION
101# define xstr(s) str(s)
102# define str(s) #s
103MODULE_VERSION(VBOX_VERSION_STRING " (" xstr(INTNETTRUNKIFPORT_VERSION) ")");
104#endif
105
106/**
107 * The (common) global data.
108 */
109static VBOXNETFLTGLOBALS g_VBoxNetFltGlobals;
110
111
112/**
113 * Initialize module.
114 *
115 * @returns appropriate status code.
116 */
117static int __init VBoxNetFltLinuxInit(void)
118{
119 int rc;
120 Log(("VBoxNetFltLinuxInit\n"));
121
122 /*
123 * Initialize IPRT.
124 */
125 rc = RTR0Init(0);
126 if (RT_SUCCESS(rc))
127 {
128 /*
129 * Initialize the globals and connect to the support driver.
130 *
131 * This will call back vboxNetFltOsOpenSupDrv (and maybe vboxNetFltOsCloseSupDrv)
132 * for establishing the connect to the support driver.
133 */
134 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
135 rc = vboxNetFltInitGlobals(&g_VBoxNetFltGlobals);
136 if (RT_SUCCESS(rc))
137 {
138 LogRel(("VBoxNetFlt: Successfully started.\n"));
139 return 0;
140 }
141
142 LogRel(("VBoxNetFlt: failed to initialize device extension (rc=%d)\n", rc));
143 RTR0Term();
144 }
145 else
146 LogRel(("VBoxNetFlt: failed to initialize IPRT (rc=%d)\n", rc));
147
148 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
149 return RTErrConvertToErrno(rc);
150}
151
152
153/**
154 * Unload the module.
155 *
156 * @todo We have to prevent this if we're busy!
157 */
158static void __exit VBoxNetFltLinuxUnload(void)
159{
160 int rc;
161 Log(("VBoxNetFltLinuxUnload\n"));
162 Assert(vboxNetFltCanUnload(&g_VBoxNetFltGlobals));
163
164 /*
165 * Undo the work done during start (in reverse order).
166 */
167 rc = vboxNetFltTryDeleteGlobals(&g_VBoxNetFltGlobals);
168 AssertRC(rc); NOREF(rc);
169
170 RTR0Term();
171
172 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
173
174 Log(("VBoxNetFltLinuxUnload - done\n"));
175}
176
177
178/**
179 * Reads and retains the host interface handle.
180 *
181 * @returns The handle, NULL if detached.
182 * @param pThis
183 */
184DECLINLINE(struct net_device *) vboxNetFltLinuxRetainNetDev(PVBOXNETFLTINS pThis)
185{
186#if 0
187 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
188 struct net_device *pDev = NULL;
189
190 Log(("vboxNetFltLinuxRetainNetDev\n"));
191 /*
192 * Be careful here to avoid problems racing the detached callback.
193 */
194 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
195 if (!ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost))
196 {
197 pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
198 if (pDev)
199 {
200 dev_hold(pDev);
201 Log(("vboxNetFltLinuxRetainNetDev: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
202 }
203 }
204 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
205
206 Log(("vboxNetFltLinuxRetainNetDev - done\n"));
207 return pDev;
208#else
209 return (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
210#endif
211}
212
213
214/**
215 * Release the host interface handle previously retained
216 * by vboxNetFltLinuxRetainNetDev.
217 *
218 * @param pThis The instance.
219 * @param pDev The vboxNetFltLinuxRetainNetDev
220 * return value, NULL is fine.
221 */
222DECLINLINE(void) vboxNetFltLinuxReleaseNetDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
223{
224#if 0
225 Log(("vboxNetFltLinuxReleaseNetDev\n"));
226 NOREF(pThis);
227 if (pDev)
228 {
229 dev_put(pDev);
230 Log(("vboxNetFltLinuxReleaseNetDev: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
231 }
232 Log(("vboxNetFltLinuxReleaseNetDev - done\n"));
233#endif
234}
235
/** Magic tag stored in the sk_buff control buffer of packets we inject. */
#define VBOXNETFLT_CB_TAG 0xA1C9D7C3
/** Accesses the first 32 bits of the sk_buff control buffer (skb->cb). */
#define VBOXNETFLT_SKB_CB(skb) (*(uint32_t*)&((skb)->cb[0]))

/**
 * Checks whether this is an mbuf created by vboxNetFltLinuxMBufFromSG,
 * i.e. a buffer which we're pushing and should be ignored by the filter callbacks.
 *
 * @returns true / false accordingly.
 * @param   pBuf    The sk_buff.
 */
DECLINLINE(bool) vboxNetFltLinuxSkBufIsOur(struct sk_buff *pBuf)
{
    /* Our own buffers are marked in the control buffer by vboxNetFltLinuxSkBufFromSG. */
    return VBOXNETFLT_SKB_CB(pBuf) == VBOXNETFLT_CB_TAG ;
}
250
251
252/**
253 * Internal worker that create a linux sk_buff for a
254 * (scatter/)gather list.
255 *
256 * @returns Pointer to the sk_buff.
257 * @param pThis The instance.
258 * @param pSG The (scatter/)gather list.
259 */
260static struct sk_buff *vboxNetFltLinuxSkBufFromSG(PVBOXNETFLTINS pThis, PINTNETSG pSG, bool fDstWire)
261{
262 struct sk_buff *pPkt;
263 struct net_device *pDev;
264 /*
265 * Because we're lazy, we will ASSUME that all SGs coming from INTNET
266 * will only contain one single segment.
267 */
268 if (pSG->cSegsUsed != 1 || pSG->cbTotal != pSG->aSegs[0].cb)
269 {
270 LogRel(("VBoxNetFlt: Dropped multi-segment(%d) packet coming from internal network.\n", pSG->cSegsUsed));
271 return NULL;
272 }
273 if (pSG->cbTotal == 0)
274 {
275 LogRel(("VBoxNetFlt: Dropped empty packet coming from internal network.\n"));
276 return NULL;
277 }
278
279 /*
280 * Allocate a packet and copy over the data.
281 *
282 */
283 pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
284 pPkt = dev_alloc_skb(pSG->cbTotal + NET_IP_ALIGN);
285 if (pPkt)
286 {
287 pPkt->dev = pDev;
288 /* Align IP header on 16-byte boundary: 2 + 14 (ethernet hdr size). */
289 skb_reserve(pPkt, NET_IP_ALIGN);
290 skb_put(pPkt, pSG->cbTotal);
291 memcpy(pPkt->data, pSG->aSegs[0].pv, pSG->cbTotal);
292 /* Set protocol and packet_type fields. */
293 pPkt->protocol = eth_type_trans(pPkt, pDev);
294 pPkt->ip_summed = CHECKSUM_NONE;
295 if (fDstWire)
296 {
297 VBOX_SKB_NETWORK_HDR(pPkt) = pPkt->data;
298 /* Restore ethernet header back. */
299 skb_push(pPkt, ETH_HLEN);
300 }
301 VBOXNETFLT_SKB_CB(pPkt) = VBOXNETFLT_CB_TAG;
302
303 return pPkt;
304 }
305 else
306 Log(("vboxNetFltLinuxSkBufFromSG: Failed to allocate sk_buff(%u).\n", pSG->cbTotal));
307 pSG->pvUserData = NULL;
308
309 return NULL;
310}
311
312
313/**
314 * Initializes a SG list from an sk_buff.
315 *
316 * @returns Number of segments.
317 * @param pThis The instance.
318 * @param pBuf The sk_buff.
319 * @param pSG The SG.
320 * @param pvFrame The frame pointer, optional.
321 * @param cSegs The number of segments allocated for the SG.
322 * This should match the number in the mbuf exactly!
323 * @param fSrc The source of the frame.
324 */
325DECLINLINE(void) vboxNetFltLinuxSkBufToSG(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, PINTNETSG pSG, unsigned cSegs, uint32_t fSrc)
326{
327 int i;
328 NOREF(pThis);
329
330 Assert(!skb_shinfo(pBuf)->frag_list);
331 pSG->pvOwnerData = NULL;
332 pSG->pvUserData = NULL;
333 pSG->pvUserData2 = NULL;
334 pSG->cUsers = 1;
335 pSG->fFlags = INTNETSG_FLAGS_TEMP;
336 pSG->cSegsAlloc = cSegs;
337
338 if (fSrc & INTNETTRUNKDIR_WIRE)
339 {
340 /*
341 * The packet came from wire, ethernet header was removed by device driver.
342 * Restore it.
343 */
344 skb_push(pBuf, ETH_HLEN);
345 }
346 pSG->cbTotal = pBuf->len;
347#ifdef VBOXNETFLT_SG_SUPPORT
348 pSG->aSegs[0].cb = skb_headlen(pBuf);
349 pSG->aSegs[0].pv = pBuf->data;
350 pSG->aSegs[0].Phys = NIL_RTHCPHYS;
351
352 for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
353 {
354 skb_frag_t *pFrag = &skb_shinfo(pBuf)->frags[i];
355 pSG->aSegs[i+1].cb = pFrag->size;
356 pSG->aSegs[i+1].pv = kmap(pFrag->page);
357 printk("%p = kmap()\n", pSG->aSegs[i+1].pv);
358 pSG->aSegs[i+1].Phys = NIL_RTHCPHYS;
359 }
360 pSG->cSegsUsed = ++i;
361#else
362 pSG->aSegs[0].cb = pBuf->len;
363 pSG->aSegs[0].pv = pBuf->data;
364 pSG->aSegs[0].Phys = NIL_RTHCPHYS;
365 pSG->cSegsUsed = i = 1;
366#endif
367
368
369#ifdef PADD_RUNT_FRAMES_FROM_HOST
370 /*
371 * Add a trailer if the frame is too small.
372 *
373 * Since we're getting to the packet before it is framed, it has not
374 * yet been padded. The current solution is to add a segment pointing
375 * to a buffer containing all zeros and pray that works for all frames...
376 */
377 if (pSG->cbTotal < 60 && (fSrc & INTNETTRUNKDIR_HOST))
378 {
379 static uint8_t const s_abZero[128] = {0};
380
381 AssertReturnVoid(i < cSegs);
382
383 pSG->aSegs[i].Phys = NIL_RTHCPHYS;
384 pSG->aSegs[i].pv = (void *)&s_abZero[0];
385 pSG->aSegs[i].cb = 60 - pSG->cbTotal;
386 pSG->cbTotal = 60;
387 pSG->cSegsUsed++;
388 }
389#endif
390 Log2(("vboxNetFltLinuxSkBufToSG: allocated=%d, segments=%d frags=%d next=%p frag_list=%p pkt_type=%x fSrc=%x\n",
391 pSG->cSegsAlloc, pSG->cSegsUsed, skb_shinfo(pBuf)->nr_frags, pBuf->next, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, fSrc));
392 for (i = 0; i < pSG->cSegsUsed; i++)
393 Log2(("vboxNetFltLinuxSkBufToSG: #%d: cb=%d pv=%p\n",
394 i, pSG->aSegs[i].cb, pSG->aSegs[i].pv));
395}
396
397/**
398 * Packet handler,
399 *
400 * @returns 0 or EJUSTRETURN.
401 * @param pThis The instance.
402 * @param pMBuf The mbuf.
403 * @param pvFrame The start of the frame, optional.
404 * @param fSrc Where the packet (allegedly) comes from, one INTNETTRUNKDIR_* value.
405 * @param eProtocol The protocol.
406 */
407static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
408 struct net_device *pSkbDev,
409 struct packet_type *pPacketType,
410 struct net_device *pOrigDev)
411{
412 PVBOXNETFLTINS pThis;
413 struct net_device *pDev;
414 /*
415 * Drop it immediately?
416 */
417 Log2(("vboxNetFltLinuxPacketHandler: pBuf=%p pSkbDev=%p pPacketType=%p pOrigDev=%p\n",
418 pBuf, pSkbDev, pPacketType, pOrigDev));
419 if (!pBuf)
420 return 0;
421 pThis = VBOX_FLT_PT_TO_INST(pPacketType);
422 pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
423 if (pThis->u.s.pDev != pSkbDev)
424 {
425 Log(("vboxNetFltLinuxPacketHandler: Devices do not match, pThis may be wrong! pThis=%p\n", pThis));
426 return 0;
427 }
428
429 if (vboxNetFltLinuxSkBufIsOur(pBuf))
430 {
431 dev_kfree_skb(pBuf);
432 return 0;
433 }
434
435 /* Add the packet to transmit queue and schedule the bottom half. */
436 skb_queue_tail(&pThis->u.s.XmitQueue, pBuf);
437 schedule_work(&pThis->u.s.XmitTask);
438 Log2(("vboxNetFltLinuxPacketHandler: scheduled work %p for sk_buff %p\n",
439 &pThis->u.s.XmitTask, pBuf));
440 /* It does not really matter what we return, it is ignored by the kernel. */
441 return 0;
442}
443
/**
 * Computes the number of INTNETSG segments needed to describe @a pBuf.
 *
 * Must agree with the layout produced by vboxNetFltLinuxSkBufToSG.
 *
 * @returns Segment count.
 * @param   pThis   The instance (unused).
 * @param   pBuf    The socket buffer.
 */
static unsigned vboxNetFltLinuxSGSegments(PVBOXNETFLTINS pThis, struct sk_buff *pBuf)
{
#ifdef VBOXNETFLT_SG_SUPPORT
    /* One segment for the linear part plus one per page fragment. */
    unsigned cSegs = 1 + skb_shinfo(pBuf)->nr_frags;
#else
    /* Without SG support everything ends up in one linear segment. */
    unsigned cSegs = 1;
#endif
#ifdef PADD_RUNT_FRAMES_FROM_HOST
    /*
     * Add a trailer if the frame is too small.
     */
    if (pBuf->len < 60)
        cSegs++;
#endif
    return cSegs;
}
460
/**
 * Frees an sk_buff and unmaps any page fragments that were kmap'ed for its SG.
 *
 * WARNING! This function should only be called after vboxNetFltLinuxSkBufToSG()!
 * (The pSG segment pointers are the kmap cookies that must be unmapped.)
 */
static void vboxNetFltLinuxFreeSkBuff(struct sk_buff *pBuf, PINTNETSG pSG)
{
#ifdef VBOXNETFLT_SG_SUPPORT
    int i;

    /* Undo the kmap() done per fragment in vboxNetFltLinuxSkBufToSG. */
    for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
    {
        /* NOTE(review): leftover debug printk — should probably be Log2 or removed. */
        printk("kunmap(%p)\n", pSG->aSegs[i+1].pv);
        kunmap(pSG->aSegs[i+1].pv);
    }
#endif

    dev_kfree_skb(pBuf);
}
476
477static int vboxNetFltLinuxForwardSegment(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, uint32_t fSrc)
478{
479 unsigned cSegs = vboxNetFltLinuxSGSegments(pThis, pBuf);
480 if (cSegs < MAX_SKB_FRAGS)
481 {
482 uint8_t *pTmp;
483 PINTNETSG pSG = (PINTNETSG)alloca(RT_OFFSETOF(INTNETSG, aSegs[cSegs]));
484 if (!pSG)
485 {
486 Log(("VBoxNetFlt: Failed to allocate SG buffer.\n"));
487 return VERR_NO_MEMORY;
488 }
489 vboxNetFltLinuxSkBufToSG(pThis, pBuf, pSG, cSegs, fSrc);
490
491 pTmp = pSG->aSegs[0].pv;
492 Log(("VBoxNetFlt: (int)%02x:%02x:%02x:%02x:%02x:%02x"
493 " <-- (%s)%02x:%02x:%02x:%02x:%02x:%02x (%u bytes)\n",
494 pTmp[0], pTmp[1], pTmp[2], pTmp[3], pTmp[4], pTmp[5],
495 (fSrc & INTNETTRUNKDIR_HOST) ? "host" : "wire",
496 pTmp[6], pTmp[7], pTmp[8], pTmp[9], pTmp[10], pTmp[11],
497 pSG->cbTotal));
498 pThis->pSwitchPort->pfnRecv(pThis->pSwitchPort, pSG, fSrc);
499 Log2(("VBoxNetFlt: Dropping the sk_buff.\n"));
500 vboxNetFltLinuxFreeSkBuff(pBuf, pSG);
501 }
502
503 return VINF_SUCCESS;
504}
505
/**
 * Forwards a captured sk_buff to the internal network, segmenting GSO
 * packets and resolving partial checksums first.
 *
 * Consumes @a pBuf on all paths.
 *
 * @param   pThis   The instance.
 * @param   pBuf    The captured buffer.
 */
static void vboxNetFltLinuxForwardToIntNet(PVBOXNETFLTINS pThis, struct sk_buff *pBuf)
{
    /* Direction is derived from the packet type set by the kernel. */
    uint32_t fSrc = pBuf->pkt_type == PACKET_OUTGOING ? INTNETTRUNKDIR_HOST : INTNETTRUNKDIR_WIRE;

#ifndef VBOXNETFLT_SG_SUPPORT
    /*
     * Get rid of fragmented packets, they cause too much trouble.
     */
    struct sk_buff *pCopy = skb_copy(pBuf, GFP_KERNEL);
    kfree_skb(pBuf);
    if (!pCopy)
    {
        LogRel(("VBoxNetFlt: Failed to allocate packet buffer, dropping the packet.\n"));
        return;
    }
    pBuf = pCopy;
#endif

    if (VBOX_SKB_IS_GSO(pBuf))
    {
        /* Need to segment the packet */
        struct sk_buff *pNext, *pSegment;
        //Log2(("vboxNetFltLinuxForwardToIntNet: cb=%u gso_size=%u gso_segs=%u gso_type=%u\n",
        //    pBuf->len, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type));

        /* NOTE(review): skb_gso_segment() can return an ERR_PTR on failure;
         * that case does not appear to be checked here — confirm. */
        for (pSegment = VBOX_SKB_GSO_SEGMENT(pBuf); pSegment; pSegment = pNext)
        {
            /* Detach each segment from the chain before forwarding it. */
            pNext = pSegment->next;
            pSegment->next = 0;
            vboxNetFltLinuxForwardSegment(pThis, pSegment, fSrc);
        }
        /* The original GSO buffer is no longer needed after segmentation. */
        dev_kfree_skb(pBuf);
    }
    else
    {
        if (pBuf->ip_summed == CHECKSUM_PARTIAL)
            if (VBOX_SKB_CHECKSUM_HELP(pBuf))
            {
                LogRel(("VBoxNetFlt: Failed to compute checksum, dropping the packet.\n"));
                dev_kfree_skb(pBuf);
                return;
            }
        vboxNetFltLinuxForwardSegment(pThis, pBuf, fSrc);
    }
    /*
     * Create a (scatter/)gather list for the sk_buff and feed it to the internal network.
     */
}
554
/**
 * Work-queue bottom half: drains the transmit queue filled by the packet
 * handler and forwards every buffer to the internal network.
 *
 * @param   pWork   Pointer to our XmitTask member; used to recover the
 *                  instance.
 */
static void vboxNetFltLinuxXmitTask(struct work_struct *pWork)
{
    struct sk_buff *pBuf;
    bool fActive;
    PVBOXNETFLTINS pThis;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    Log2(("vboxNetFltLinuxXmitTask: Got work %p.\n", pWork));
    pThis = VBOX_FLT_XT_TO_INST(pWork);
    /*
     * Active? Retain the instance and increment the busy counter.
     */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    fActive = ASMAtomicUoReadBool(&pThis->fActive);
    if (fActive)
        vboxNetFltRetain(pThis, true /* fBusy */);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
    if (!fActive)
        return;

    /* Drain the queue; packets may keep arriving while we do. */
    while ((pBuf = skb_dequeue(&pThis->u.s.XmitQueue)) != 0)
        vboxNetFltLinuxForwardToIntNet(pThis, pBuf);

    /* Drop the busy reference taken above. */
    vboxNetFltRelease(pThis, true /* fBusy */);
}
580
581/**
582 * Internal worker for vboxNetFltOsInitInstance and vboxNetFltOsMaybeRediscovered.
583 *
584 * @returns VBox status code.
585 * @param pThis The instance.
586 * @param fRediscovery If set we're doing a rediscovery attempt, so, don't
587 * flood the release log.
588 */
589static int vboxNetFltLinuxAttachToInterface(PVBOXNETFLTINS pThis, struct net_device *pDev)
590{
591 struct packet_type *pt;
592 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
593
594 LogFlow(("vboxNetFltLinuxAttachToInterface: pThis=%p (%s)\n", pThis, pThis->szName));
595
596 if (!pDev)
597 {
598 Log(("VBoxNetFlt: failed to find device '%s'\n", pThis->szName));
599 return VERR_INTNET_FLT_IF_NOT_FOUND;
600 }
601
602 dev_hold(pDev);
603 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
604 ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, pDev);
605 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
606
607 Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
608 Log(("vboxNetFltLinuxAttachToInterface: Got pDev=%p pThis=%p pThis->u.s.pDev=%p\n", pDev, pThis, ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev)));
609 /*
610 * Get the mac address while we still have a valid ifnet reference.
611 */
612 memcpy(&pThis->u.s.Mac, pDev->dev_addr, sizeof(pThis->u.s.Mac));
613
614 pt = &pThis->u.s.PacketType;
615 pt->type = __constant_htons(ETH_P_ALL);
616 pt->dev = pDev;
617 pt->func = vboxNetFltLinuxPacketHandler;
618 dev_add_pack(pt);
619 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
620 pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
621 if (pDev)
622 {
623 ASMAtomicUoWriteBool(&pThis->fDisconnectedFromHost, false);
624 ASMAtomicUoWriteBool(&pThis->u.s.fRegistered, true);
625 pDev = NULL; /* don't dereference it */
626 }
627 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
628 Log(("vboxNetFltLinuxAttachToInterface: this=%p: Packet handler installed.\n", pThis));
629
630 /* Release the interface on failure. */
631 if (pDev)
632 {
633 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
634 ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, NULL);
635 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
636 dev_put(pDev);
637 Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
638 }
639
640 LogRel(("VBoxNetFlt: attached to '%s' / %.*Rhxs\n", pThis->szName, sizeof(pThis->u.s.Mac), &pThis->u.s.Mac));
641 return VINF_SUCCESS;
642}
643
644
/**
 * Handles a NETDEV_UNREGISTER event: detaches from the device, removes the
 * packet handler, purges queued buffers and drops the device reference.
 *
 * @returns NOTIFY_OK.
 * @param   pThis   The instance.
 * @param   pDev    The device being unregistered.
 */
static int vboxNetFltLinuxUnregisterDevice(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    Assert(!pThis->fDisconnectedFromHost);
    /* Mark ourselves detached under the lock before touching anything else. */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    ASMAtomicWriteBool(&pThis->u.s.fRegistered, false);
    ASMAtomicWriteBool(&pThis->fDisconnectedFromHost, true);
    ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, NULL);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);

    /* Now it is safe to unhook the packet tap and drop pending buffers. */
    dev_remove_pack(&pThis->u.s.PacketType);
    skb_queue_purge(&pThis->u.s.XmitQueue);
    Log(("vboxNetFltLinuxUnregisterDevice: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
    Log(("vboxNetFltLinuxUnregisterDevice: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    dev_put(pDev);

    return NOTIFY_OK;
}
664
665static int vboxNetFltLinuxDeviceIsUp(PVBOXNETFLTINS pThis, struct net_device *pDev)
666{
667 /* Check if we are not suspended and promiscuous mode has not been set. */
668 if (ASMAtomicUoReadBool(&pThis->fActive) && !ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
669 {
670 /* Note that there is no need for locking as the kernel got hold of the lock already. */
671 dev_set_promiscuity(pDev, 1);
672 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, true);
673 Log(("vboxNetFltLinuxDeviceIsUp: enabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
674 }
675 else
676 Log(("vboxNetFltLinuxDeviceIsUp: no need to enable promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
677 return NOTIFY_OK;
678}
679
680static int vboxNetFltLinuxDeviceGoingDown(PVBOXNETFLTINS pThis, struct net_device *pDev)
681{
682 /* Undo promiscuous mode if we has set it. */
683 if (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
684 {
685 /* Note that there is no need for locking as the kernel got hold of the lock already. */
686 dev_set_promiscuity(pDev, -1);
687 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, false);
688 Log(("vboxNetFltLinuxDeviceGoingDown: disabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
689 }
690 else
691 Log(("vboxNetFltLinuxDeviceGoingDown: no need to disable promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
692 return NOTIFY_OK;
693}
694
695static int vboxNetFltLinuxNotifierCallback(struct notifier_block *self, unsigned long ulEventType, void *ptr)
696
697{
698 int rc;
699#ifdef DEBUG
700 char *pszEvent = "<unknown>";
701#endif
702 struct net_device *pDev = (struct net_device *)ptr;
703 PVBOXNETFLTINS pThis = VBOX_FLT_NB_TO_INST(self);
704
705#ifdef DEBUG
706 switch (ulEventType)
707 {
708 case NETDEV_REGISTER: pszEvent = "NETDEV_REGISTER"; break;
709 case NETDEV_UNREGISTER: pszEvent = "NETDEV_UNREGISTER"; break;
710 case NETDEV_UP: pszEvent = "NETDEV_UP"; break;
711 case NETDEV_DOWN: pszEvent = "NETDEV_DOWN"; break;
712 case NETDEV_REBOOT: pszEvent = "NETDEV_REBOOT"; break;
713 case NETDEV_CHANGENAME: pszEvent = "NETDEV_CHANGENAME"; break;
714 case NETDEV_CHANGE: pszEvent = "NETDEV_CHANGE"; break;
715 case NETDEV_CHANGEMTU: pszEvent = "NETDEV_CHANGEMTU"; break;
716 case NETDEV_CHANGEADDR: pszEvent = "NETDEV_CHANGEADDR"; break;
717 case NETDEV_GOING_DOWN: pszEvent = "NETDEV_GOING_DOWN"; break;
718 }
719 Log(("VBoxNetFlt: got event %s(0x%lx) on %s, pDev=%p pThis=%p pThis->u.s.pDev=%p\n",
720 pszEvent, ulEventType, pDev->name, pDev, pThis, ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev)));
721#endif
722 if (ulEventType == NETDEV_REGISTER && !strcmp(pDev->name, pThis->szName))
723 {
724 vboxNetFltLinuxAttachToInterface(pThis, pDev);
725 }
726 else
727 {
728 pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
729 if (pDev != ptr)
730 return NOTIFY_OK;
731 rc = NOTIFY_OK;
732 switch (ulEventType)
733 {
734 case NETDEV_UNREGISTER:
735 rc = vboxNetFltLinuxUnregisterDevice(pThis, pDev);
736 break;
737 case NETDEV_UP:
738 rc = vboxNetFltLinuxDeviceIsUp(pThis, pDev);
739 break;
740 case NETDEV_GOING_DOWN:
741 rc = vboxNetFltLinuxDeviceGoingDown(pThis, pDev);
742 break;
743 case NETDEV_CHANGENAME:
744 break;
745 }
746 }
747
748 return rc;
749}
750
751bool vboxNetFltOsMaybeRediscovered(PVBOXNETFLTINS pThis)
752{
753 return !ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost);
754}
755
756
/**
 * Transmits a frame from the internal network to the wire and/or the host
 * network stack.
 *
 * @returns VBox status code; the last failure wins if both directions fail.
 * @param   pThis   The instance.
 * @param   pSG     The (single segment) frame to transmit.
 * @param   fDst    Destination mask: INTNETTRUNKDIR_WIRE and/or
 *                  INTNETTRUNKDIR_HOST.
 */
int vboxNetFltPortOsXmit(PVBOXNETFLTINS pThis, PINTNETSG pSG, uint32_t fDst)
{
    uint8_t *pTmp;
    struct net_device * pDev;
    int err;
    int rc = VINF_SUCCESS;

    LogFlow(("vboxNetFltPortOsXmit: pThis=%p (%s)\n", pThis, pThis->szName));

    /* Start of the ethernet frame (dst MAC at [0..5], src MAC at [6..11]). */
    pTmp = pSG->aSegs[0].pv;

    pDev = vboxNetFltLinuxRetainNetDev(pThis);
    if (pDev)
    {
        /*
         * Create a sk_buff for the gather list and push it onto the wire.
         */
        if (fDst & INTNETTRUNKDIR_WIRE)
        {
            struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, true);
            if (pBuf)
            {
                Log(("VBoxNetFlt: (int)%02x:%02x:%02x:%02x:%02x:%02x"
                     " --> (wire)%02x:%02x:%02x:%02x:%02x:%02x (%u bytes)\n",
                     pTmp[6], pTmp[7], pTmp[8], pTmp[9], pTmp[10], pTmp[11],
                     pTmp[0], pTmp[1], pTmp[2], pTmp[3], pTmp[4], pTmp[5],
                     pSG->cbTotal));
                err = dev_queue_xmit(pBuf);
                if (err)
                    rc = RTErrConvertFromErrno(err);
            }
            else
                rc = VERR_NO_MEMORY;
        }

        /*
         * Create a sk_buff for the gather list and push it onto the host stack.
         */
        if (fDst & INTNETTRUNKDIR_HOST)
        {
            struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, false);
            if (pBuf)
            {
                Log(("VBoxNetFlt: (int)%02x:%02x:%02x:%02x:%02x:%02x"
                     " --> (host)%02x:%02x:%02x:%02x:%02x:%02x (%u bytes)\n",
                     pTmp[6], pTmp[7], pTmp[8], pTmp[9], pTmp[10], pTmp[11],
                     pTmp[0], pTmp[1], pTmp[2], pTmp[3], pTmp[4], pTmp[5],
                     pSG->cbTotal));
                /* netif_rx_ni: inject into the host stack from process context. */
                err = netif_rx_ni(pBuf);
                if (err)
                    rc = RTErrConvertFromErrno(err);
            }
            else
                rc = VERR_NO_MEMORY;
        }

        vboxNetFltLinuxReleaseNetDev(pThis, pDev);
    }

    return rc;
}
818
819
820bool vboxNetFltPortOsIsPromiscuous(PVBOXNETFLTINS pThis)
821{
822 bool fRc = false;
823 struct net_device * pDev = vboxNetFltLinuxRetainNetDev(pThis);
824 if (pDev)
825 {
826 fRc = !!(pDev->promiscuity - (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet) & 1));
827 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
828 }
829 return fRc;
830}
831
832
/**
 * Returns the MAC address of the host interface (cached at attach time).
 *
 * @param   pThis   The instance.
 * @param   pMac    Where to store the address.
 */
void vboxNetFltPortOsGetMacAddress(PVBOXNETFLTINS pThis, PRTMAC pMac)
{
    *pMac = pThis->u.s.Mac;
}
837
838
839bool vboxNetFltPortOsIsHostMac(PVBOXNETFLTINS pThis, PCRTMAC pMac)
840{
841 /* ASSUMES that the MAC address never changes. */
842 return pThis->u.s.Mac.au16[0] == pMac->au16[0]
843 && pThis->u.s.Mac.au16[1] == pMac->au16[1]
844 && pThis->u.s.Mac.au16[2] == pMac->au16[2];
845}
846
847
/**
 * Activates or deactivates the filter on the host interface by toggling a
 * promiscuous-mode reference under the rtnl lock.
 *
 * The #if 0 blocks are a disabled, more elaborate variant that also tried to
 * bring the interface up and to verify/force the IFF_PROMISC flag.
 *
 * @param   pThis   The instance.
 * @param   fActive Set to activate (take a promiscuous ref), clear to
 *                  deactivate (drop it).
 */
void vboxNetFltPortOsSetActive(PVBOXNETFLTINS pThis, bool fActive)
{
    struct net_device * pDev;

    LogFlow(("vboxNetFltPortOsSetActive: pThis=%p (%s), fActive=%s\n",
             pThis, pThis->szName, fActive?"true":"false"));

    pDev = vboxNetFltLinuxRetainNetDev(pThis);
    if (pDev)
    {
        /*
         * This API is a bit weird, the best reference is the code.
         *
         * Also, there is a bit of a race condition wrt the maintenance of the
         * host interface promiscuity for vboxNetFltPortOsIsPromiscuous.
         */
        u_int16_t fIf;
        unsigned const cPromiscBefore = VBOX_GET_PCOUNT(pDev);
        if (fActive)
        {
            int err = 0;
            Assert(!pThis->u.s.fPromiscuousSet);

#if 0
            /*
             * Try bring the interface up and running if it's down.
             */
            fIf = dev_get_flags(pDev);
            if ((fIf & (IFF_UP | IFF_RUNNING)) != (IFF_UP | IFF_RUNNING))
            {
                rtnl_lock();
                err = dev_change_flags(pDev, fIf | IFF_UP);
                rtnl_unlock();
                fIf = dev_get_flags(pDev);
            }

            /*
             * Is it already up? If it isn't, leave it to the link event or
             * we'll upset if_pcount (as stated above, ifnet_set_promiscuous is weird).
             */
            if ((fIf & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING)
                && !ASMAtomicReadBool(&pThis->u.s.fPromiscuousSet))
            {
#endif
                /* Take one promiscuous reference under the rtnl lock. */
                rtnl_lock();
                dev_set_promiscuity(pDev, 1);
                rtnl_unlock();
                pThis->u.s.fPromiscuousSet = true;
                Log(("vboxNetFltPortOsSetActive: enabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
#if 0
                /* check if it actually worked, this stuff is not always behaving well. */
                if (!(dev_get_flags(pDev) & IFF_PROMISC))
                {
                    err = dev_change_flags(pDev, fIf | IFF_PROMISC);
                    if (!err)
                        Log(("vboxNetFlt: fixed IFF_PROMISC on %s (%d->%d)\n", pThis->szName, cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
                    else
                        Log(("VBoxNetFlt: failed to fix IFF_PROMISC on %s, err=%d (%d->%d)\n",
                             pThis->szName, err, cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
                }
#endif
#if 0
            }
            else if (!err)
                Log(("VBoxNetFlt: Waiting for the link to come up... (%d->%d)\n", cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
            if (err)
                LogRel(("VBoxNetFlt: Failed to put '%s' into promiscuous mode, err=%d (%d->%d)\n", pThis->szName, err, cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
#endif
        }
        else
        {
            /* Deactivating: drop our promiscuous reference if we hold one. */
            if (pThis->u.s.fPromiscuousSet)
            {
                rtnl_lock();
                dev_set_promiscuity(pDev, -1);
                rtnl_unlock();
                Log(("vboxNetFltPortOsSetActive: disabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
            }
            pThis->u.s.fPromiscuousSet = false;

            fIf = dev_get_flags(pDev);
            Log(("VBoxNetFlt: fIf=%#x; %d->%d\n", fIf, cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
        }

        vboxNetFltLinuxReleaseNetDev(pThis, pDev);
    }
}
935
936
/**
 * Called when disconnecting from the internal network; no Linux-specific
 * work is required.
 *
 * @returns VINF_SUCCESS.
 * @param   pThis   The instance.
 */
int vboxNetFltOsDisconnectIt(PVBOXNETFLTINS pThis)
{
    /* Nothing to do here. */
    return VINF_SUCCESS;
}
942
943
/**
 * Called when connecting to the internal network; no Linux-specific work is
 * required.
 *
 * @returns VINF_SUCCESS.
 * @param   pThis   The instance.
 */
int vboxNetFltOsConnectIt(PVBOXNETFLTINS pThis)
{
    /* Nothing to do here. */
    return VINF_SUCCESS;
}
949
950
/**
 * Tears down the Linux-specific parts of an instance: removes the packet
 * handler (if still registered), purges the transmit queue, drops the device
 * reference and unregisters the netdevice notifier.
 *
 * @param   pThis   The instance.
 */
void vboxNetFltOsDeleteInstance(PVBOXNETFLTINS pThis)
{
    struct net_device *pDev;
    bool fRegistered;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    /* Snapshot the device pointer and registration state under the lock. */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    fRegistered = ASMAtomicUoReadBool(&pThis->u.s.fRegistered);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
    if (fRegistered)
    {
        dev_remove_pack(&pThis->u.s.PacketType);
        skb_queue_purge(&pThis->u.s.XmitQueue);
        Log(("vboxNetFltOsDeleteInstance: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
        Log(("vboxNetFltOsDeleteInstance: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
        dev_put(pDev);
    }
    Log(("vboxNetFltOsDeleteInstance: this=%p: Notifier removed.\n", pThis));
    unregister_netdevice_notifier(&pThis->u.s.Notifier);
}
972
973
/**
 * Initializes the Linux-specific parts of an instance: registers the
 * netdevice notifier, which attaches to the interface named in pThis->szName.
 *
 * NOTE(review): this relies on the notifier callback having run (and having
 * set fRegistered) by the time register_netdevice_notifier() returns, i.e.
 * on the kernel replaying NETDEV_REGISTER events for existing devices —
 * confirm for the supported kernel range.
 *
 * @returns VBox status code.
 * @param   pThis   The instance.
 */
int vboxNetFltOsInitInstance(PVBOXNETFLTINS pThis)
{
    int err;
    pThis->u.s.Notifier.notifier_call = vboxNetFltLinuxNotifierCallback;
    err = register_netdevice_notifier(&pThis->u.s.Notifier);
    if (err)
        return VERR_INTNET_FLT_IF_FAILED;
    if (!pThis->u.s.fRegistered)
    {
        /* The named interface did not show up; undo the registration. */
        unregister_netdevice_notifier(&pThis->u.s.Notifier);
        LogRel(("VBoxNetFlt: failed to find %s.\n", pThis->szName));
        return VERR_INTNET_FLT_IF_NOT_FOUND;
    }
    Log(("vboxNetFltOsInitInstance: this=%p: Notifier installed.\n", pThis));
    return pThis->fDisconnectedFromHost ? VERR_INTNET_FLT_IF_FAILED : VINF_SUCCESS;
}
990
/**
 * Pre-initializes the Linux-specific instance members to a safe default
 * state before the real initialization runs.
 *
 * @returns VINF_SUCCESS.
 * @param   pThis   The instance.
 */
int vboxNetFltOsPreInitInstance(PVBOXNETFLTINS pThis)
{
    /*
     * Init the linux specific members.
     */
    pThis->u.s.pDev = NULL;
    pThis->u.s.fRegistered = false;
    pThis->u.s.fPromiscuousSet = false;
    memset(&pThis->u.s.PacketType, 0, sizeof(pThis->u.s.PacketType));
    skb_queue_head_init(&pThis->u.s.XmitQueue);
    /* INIT_WORK lost its data argument in 2.6.20. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
    INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask);
#else
    INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask, NULL);
#endif

    return VINF_SUCCESS;
}
1009
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette