VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/bsd/sys/mbuf.h@ 64572

Last change on this file since 64572 was 63215, checked in by vboxsync, 8 years ago

Devices: warnings (gcc)

  • Property svn:eol-style set to native
File size: 36.0 KB
 
1/*-
2 * Copyright (c) 1982, 1986, 1988, 1993
3 * The Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of the University nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * @(#)mbuf.h 8.5 (Berkeley) 2/19/95
31 * $FreeBSD: src/sys/sys/mbuf.h,v 1.217.2.3.4.1 2009/04/15 03:14:26 kensmith Exp $
32 */
33
34#ifndef _SYS_MBUF_H_
35#define _SYS_MBUF_H_
36
37#ifndef VBOX
38/* XXX: These includes suck. Sorry! */
39#include <sys/queue.h>
40#ifdef _KERNEL
41#include <sys/systm.h>
42#include <vm/uma.h>
43#ifdef WITNESS
44#include <sys/lock.h>
45#endif
46#endif
47#else /* VBOX */
48# include <iprt/param.h>
49# include "misc.h"
50# include "ext.h"
51
52typedef const char *c_caddr_t;
53
54DECLNORETURN(static void) panic (char *fmt, ...)
55{
56 va_list args;
57 va_start(args, fmt);
58 vbox_slirp_printV(fmt, args);
59 va_end(args);
60 AssertFatalFailed();
61}
62/* for non-gnu compilers */
63# define __func__ RT_GCC_EXTENSION __FUNCTION__
64# ifndef __inline
65# ifdef __GNUC__
66# define __inline __inline__
67# else
68# define __inline
69# endif
70# endif
71
72# define bzero(a1, len) memset((a1), 0, (len))
73
74/* (vvl) some definitions from sys/param.h */
75/*
76 * Constants related to network buffer management.
77 * MCLBYTES must be no larger than PAGE_SIZE.
78 */
79# ifndef MSIZE
80# define MSIZE 256 /* size of an mbuf */
81# endif /* MSIZE */
82
83# ifndef MCLSHIFT
84# define MCLSHIFT 11 /* convert bytes to mbuf clusters */
85# endif /* MCLSHIFT */
86
87# ifndef MCLBYTES
88# define MCLBYTES (1 << MCLSHIFT) /* size of an mbuf cluster */
89# endif /*MCLBYTES*/
90
91# define MJUMPAGESIZE PAGE_SIZE /* jumbo cluster 4k */
92# define MJUM9BYTES (9 * 1024) /* jumbo cluster 9k */
93# define MJUM16BYTES (16 * 1024) /* jumbo cluster 16k */
94#endif /* VBOX */
95
96/*
97 * Mbufs are of a single size, MSIZE (sys/param.h), which includes overhead.
98 * An mbuf may add a single "mbuf cluster" of size MCLBYTES (also in
99 * sys/param.h), which has no additional overhead and is used instead of the
100 * internal data area; this is done when at least MINCLSIZE of data must be
101 * stored. Additionally, it is possible to allocate a separate buffer
102 * externally and attach it to the mbuf in a way similar to that of mbuf
103 * clusters.
104 */
105#define MLEN (MSIZE - sizeof(struct m_hdr)) /* normal data len */
106#define MHLEN (MLEN - sizeof(struct pkthdr)) /* data len w/pkthdr */
107#define MINCLSIZE (MHLEN + 1) /* smallest amount to put in cluster */
108#define M_MAXCOMPRESS (MHLEN / 2) /* max amount to copy for compression */
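/*
 * Editor's note, illustration only (not part of the original header): the
 * constants above nest as MLEN = MSIZE - sizeof(struct m_hdr), MHLEN =
 * MLEN - sizeof(struct pkthdr), MINCLSIZE = MHLEN + 1, so a payload of
 * MINCLSIZE bytes or more no longer fits the inline area of a packet-header
 * mbuf and is stored in an external cluster instead.  A hypothetical helper
 * making that decision is sketched below (kept under #if 0 on purpose):
 */
#if 0
static __inline int
slirp_wants_cluster(int len)            /* hypothetical name */
{
    return (len >= MINCLSIZE);          /* i.e. too big for the inline area */
}
#endif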
109
110#if defined(_KERNEL) || defined(VBOX)
111/*-
112 * Macros for type conversion:
113 * mtod(m, t) -- Convert mbuf pointer to data pointer of correct type.
114 * dtom(x) -- Convert data pointer within mbuf to mbuf pointer (XXX).
115 */
116#define mtod(m, t) ((t)((m)->m_data))
117#define dtom(x) ((struct mbuf *)((intptr_t)(x) & ~(MSIZE-1)))
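/*
 * Editor's example (not part of the original header): mtod() is the usual
 * way to view the current data pointer as a protocol header.  A hedged
 * sketch; m and struct ip are assumed to be in scope at the call site:
 */
#if 0
    struct ip *ip = mtod(m, struct ip *);
    int hlen = ip->ip_hl << 2;          /* IP header length in bytes */
#endif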
118
119/*
120 * Argument structure passed to UMA routines during mbuf and packet
121 * allocations.
122 */
123struct mb_args {
124 int flags; /* Flags for mbuf being allocated */
125 short type; /* Type of mbuf being allocated */
126};
127#endif /* _KERNEL */
128
129#if defined(__LP64__)
130#define M_HDR_PAD 6
131#else
132#define M_HDR_PAD 2
133#endif
134
135/*
136 * Header present at the beginning of every mbuf.
137 */
138struct m_hdr {
139 struct mbuf *mh_next; /* next buffer in chain */
140 struct mbuf *mh_nextpkt; /* next chain in queue/record */
141 caddr_t mh_data; /* location of data */
142 int mh_len; /* amount of data in this mbuf */
143 int mh_flags; /* flags; see below */
144 short mh_type; /* type of data in this mbuf */
145#ifdef VBOX
146 struct socket *mh_so; /* socket associated with mbuf */
147 TAILQ_ENTRY(mbuf) mh_ifq;
148#endif
149 uint8_t pad[M_HDR_PAD];/* word align */
150};
151
152/*
153 * Packet tag structure (see below for details).
154 */
155struct m_tag {
156 SLIST_ENTRY(m_tag) m_tag_link; /* List of packet tags */
157 u_int16_t m_tag_id; /* Tag ID */
158 u_int16_t m_tag_len; /* Length of data */
159 u_int32_t m_tag_cookie; /* ABI/Module ID */
160 void (*m_tag_free)(struct m_tag *);
161};
162
163/*
164 * Record/packet header in first mbuf of chain; valid only if M_PKTHDR is set.
165 */
166struct pkthdr {
167 struct ifnet *rcvif; /* rcv interface */
168 /* variables for ip and tcp reassembly */
169 void *header; /* pointer to packet header */
170 int len; /* total packet length */
171 /* variables for hardware checksum */
172 int csum_flags; /* flags regarding checksum */
173 int csum_data; /* data field used by csum routines */
174 u_int16_t tso_segsz; /* TSO segment size */
175 u_int16_t ether_vtag; /* Ethernet 802.1p+q vlan tag */
176 SLIST_HEAD(packet_tags, m_tag) tags; /* list of packet tags */
177};
178
179/*
180 * Description of external storage mapped into mbuf; valid only if M_EXT is
181 * set.
182 */
183struct m_ext {
184 caddr_t ext_buf; /* start of buffer */
185 void (*ext_free) /* free routine if not the usual */
186 (void *, void *);
187 void *ext_args; /* optional argument pointer */
188 u_int ext_size; /* size of buffer, for ext_free */
189#ifdef VBOX
190 volatile uint32_t *ref_cnt; /* pointer to ref count info */
191#else
192 volatile u_int *ref_cnt; /* pointer to ref count info */
193#endif
194 int ext_type; /* type of external storage */
195};
196
197/*
198 * The core of the mbuf object along with some shortcut defines for practical
199 * purposes.
200 */
201struct mbuf {
202 struct m_hdr m_hdr;
203 union {
204 struct {
205 struct pkthdr MH_pkthdr; /* M_PKTHDR set */
206 union {
207 struct m_ext MH_ext; /* M_EXT set */
208 char MH_databuf[MHLEN];
209 } MH_dat;
210 } MH;
211 char M_databuf[MLEN]; /* !M_PKTHDR, !M_EXT */
212 } M_dat;
213};
214#define m_next m_hdr.mh_next
215#define m_len m_hdr.mh_len
216#define m_data m_hdr.mh_data
217#define m_type m_hdr.mh_type
218#define m_flags m_hdr.mh_flags
219#define m_nextpkt m_hdr.mh_nextpkt
220#define m_act m_nextpkt
221#define m_pkthdr M_dat.MH.MH_pkthdr
222#define m_ext M_dat.MH.MH_dat.MH_ext
223#define m_pktdat M_dat.MH.MH_dat.MH_databuf
224#define m_dat M_dat.M_databuf
225#ifdef VBOX
226# define m_so m_hdr.mh_so
227# define ifq_so m_hdr.mh_so
228# define m_ifq m_hdr.mh_ifq
229#endif
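/*
 * Editor's example (not part of the original header): code reaches the
 * sub-structures only through the shortcut defines above.  A hedged sketch
 * for a freshly built single-mbuf packet; m and so are assumed in scope:
 */
#if 0
    m->m_data = m->m_pktdat;            /* payload starts in the inline area */
    m->m_len  = 0;
    m->m_pkthdr.len = 0;
    m->m_so   = so;                     /* VBOX: remember the owning socket */
#endif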
230
231/*
232 * mbuf flags.
233 */
234#define M_EXT 0x00000001 /* has associated external storage */
235#define M_PKTHDR 0x00000002 /* start of record */
236#define M_EOR 0x00000004 /* end of record */
237#define M_RDONLY 0x00000008 /* associated data is marked read-only */
238#define M_PROTO1 0x00000010 /* protocol-specific */
239#define M_PROTO2 0x00000020 /* protocol-specific */
240#define M_PROTO3 0x00000040 /* protocol-specific */
241#define M_PROTO4 0x00000080 /* protocol-specific */
242#define M_PROTO5 0x00000100 /* protocol-specific */
243#define M_BCAST 0x00000200 /* send/received as link-level broadcast */
244#define M_MCAST 0x00000400 /* send/received as link-level multicast */
245#define M_FRAG 0x00000800 /* packet is a fragment of a larger packet */
246#define M_FIRSTFRAG 0x00001000 /* packet is first fragment */
247#define M_LASTFRAG 0x00002000 /* packet is last fragment */
248#define M_SKIP_FIREWALL 0x00004000 /* skip firewall processing */
249#define M_FREELIST 0x00008000 /* mbuf is on the free list */
250#define M_VLANTAG 0x00010000 /* ether_vtag is valid */
251#define M_PROMISC 0x00020000 /* packet was not for us */
252#define M_NOFREE 0x00040000 /* do not free mbuf, embedded in cluster */
253#define M_PROTO6 0x00080000 /* protocol-specific */
254#define M_PROTO7 0x00100000 /* protocol-specific */
255#define M_PROTO8 0x00200000 /* protocol-specific */
256/*
257 * For RELENG_{6,7} steal these flags for limited multiple routing table
258 * support. In RELENG_8 and beyond, use just one flag and a tag.
259 */
260#define M_FIB 0xF0000000 /* steal some bits to store fib number. */
261
262#define M_NOTIFICATION M_PROTO5 /* SCTP notification */
263
264/*
265 * Flags to purge when crossing layers.
266 */
267#define M_PROTOFLAGS \
268 (M_PROTO1|M_PROTO2|M_PROTO3|M_PROTO4|M_PROTO5|M_PROTO6|M_PROTO7|M_PROTO8)
269
270/*
271 * Flags preserved when copying m_pkthdr.
272 */
273#define M_COPYFLAGS \
274 (M_PKTHDR|M_EOR|M_RDONLY|M_PROTOFLAGS|M_SKIP_FIREWALL|M_BCAST|M_MCAST|\
275 M_FRAG|M_FIRSTFRAG|M_LASTFRAG|M_VLANTAG|M_PROMISC|M_FIB)
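/*
 * Editor's example (not part of the original header): a hedged sketch of how
 * the two masks above are meant to be used; m, to and from are assumed to be
 * in scope:
 */
#if 0
    m->m_flags &= ~M_PROTOFLAGS;        /* crossing a layer: drop private flags */
    to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
                                        /* copying a pkthdr: keep M_COPYFLAGS */
#endif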
276
277/*
278 * External buffer types: identify ext_buf type.
279 */
280#define EXT_CLUSTER 1 /* mbuf cluster */
281#define EXT_SFBUF 2 /* sendfile(2)'s sf_bufs */
282#define EXT_JUMBOP 3 /* jumbo cluster 4096 bytes */
283#define EXT_JUMBO9 4 /* jumbo cluster 9216 bytes */
284#define EXT_JUMBO16 5 /* jumbo cluster 16384 bytes */
285#define EXT_PACKET 6 /* mbuf+cluster from packet zone */
286#define EXT_MBUF 7 /* external mbuf reference (M_IOVEC) */
287#define EXT_NET_DRV 100 /* custom ext_buf provided by net driver(s) */
288#define EXT_MOD_TYPE 200 /* custom module's ext_buf type */
289#define EXT_DISPOSABLE 300 /* can throw this buffer away w/page flipping */
290#define EXT_EXTREF 400 /* has externally maintained ref_cnt ptr */
291
292/*
293 * Flags indicating hw checksum support and sw checksum requirements. This
294 * field can be directly tested against if_data.ifi_hwassist.
295 */
296#define CSUM_IP 0x0001 /* will csum IP */
297#define CSUM_TCP 0x0002 /* will csum TCP */
298#define CSUM_UDP 0x0004 /* will csum UDP */
299#define CSUM_IP_FRAGS 0x0008 /* will csum IP fragments */
300#define CSUM_FRAGMENT 0x0010 /* will do IP fragmentation */
301#define CSUM_TSO 0x0020 /* will do TSO */
302
303#define CSUM_IP_CHECKED 0x0100 /* did csum IP */
304#define CSUM_IP_VALID 0x0200 /* ... the csum is valid */
305#define CSUM_DATA_VALID 0x0400 /* csum_data field is valid */
306#define CSUM_PSEUDO_HDR 0x0800 /* csum_data has pseudo hdr */
307
308#define CSUM_DELAY_DATA (CSUM_TCP | CSUM_UDP)
309#define CSUM_DELAY_IP (CSUM_IP) /* XXX add ipv6 here too? */
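/*
 * Editor's example (not part of the original header): a hedged sketch of the
 * usual FreeBSD receive-path convention for reporting a fully verified
 * IP/TCP/UDP checksum; m is assumed to be in scope:
 */
#if 0
    m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID |
                              CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
    m->m_pkthdr.csum_data = 0xffff;
#endif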
310
311/*
312 * mbuf types.
313 */
314#define MT_NOTMBUF 0 /* USED INTERNALLY ONLY! Object is not mbuf */
315#define MT_DATA 1 /* dynamic (data) allocation */
316#define MT_HEADER MT_DATA /* packet header, use M_PKTHDR instead */
317#define MT_SONAME 8 /* socket name */
318#define MT_CONTROL 14 /* extra-data protocol message */
319#define MT_OOBDATA 15 /* expedited data */
320#define MT_NTYPES 16 /* number of mbuf types for mbtypes[] */
321
322#define MT_NOINIT 255 /* Not a type but a flag to allocate
323 a non-initialized mbuf */
324
325#define MB_NOTAGS 0x1UL /* no tags attached to mbuf */
326
327/*
328 * General mbuf allocator statistics structure.
329 *
330 * Many of these statistics are no longer used; we instead track many
331 * allocator statistics through UMA's built in statistics mechanism.
332 */
333struct mbstat {
334 u_long m_mbufs; /* XXX */
335 u_long m_mclusts; /* XXX */
336
337 u_long m_drain; /* times drained protocols for space */
338 u_long m_mcfail; /* XXX: times m_copym failed */
339 u_long m_mpfail; /* XXX: times m_pullup failed */
340 u_long m_msize; /* length of an mbuf */
341 u_long m_mclbytes; /* length of an mbuf cluster */
342 u_long m_minclsize; /* min length of data to allocate a cluster */
343 u_long m_mlen; /* length of data in an mbuf */
344 u_long m_mhlen; /* length of data in a header mbuf */
345
346 /* Number of mbtypes (gives # elems in mbtypes[] array) */
347 short m_numtypes;
348
349 /* XXX: Sendfile stats should eventually move to their own struct */
350 u_long sf_iocnt; /* times sendfile had to do disk I/O */
351 u_long sf_allocfail; /* times sfbuf allocation failed */
352 u_long sf_allocwait; /* times sfbuf allocation had to wait */
353};
354
355/*
356 * Flags specifying how an allocation should be made.
357 *
358 * The flag to use is as follows:
359 * - M_DONTWAIT or M_NOWAIT from an interrupt handler to not block allocation.
360 * - M_WAIT or M_WAITOK or M_TRYWAIT from wherever it is safe to block.
361 *
362 * M_DONTWAIT/M_NOWAIT means that we will not block the thread explicitly and
363 * if we cannot allocate immediately we may return NULL, whereas
364 * M_WAIT/M_WAITOK/M_TRYWAIT means that if we cannot allocate resources we
365 * will block until they are available, and thus never return NULL.
366 *
367 * XXX Eventually just phase this out to use M_WAITOK/M_NOWAIT.
368 */
369#define MBTOM(how) (how)
370#ifndef VBOX
371#define M_DONTWAIT M_NOWAIT
372#define M_TRYWAIT M_WAITOK
373#define M_WAIT M_WAITOK
374#else
375/* @todo (r=vvl) not sure we can do it in NAT */
376# define M_WAITOK 0
377# define M_NOWAIT 0
378# define M_DONTWAIT 0
379# define M_TRYWAIT 0
380# define M_WAIT 0
381#endif
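/*
 * Editor's example (not part of the original header): with M_DONTWAIT /
 * M_NOWAIT the allocator may return NULL, so every caller has to check.  A
 * hedged sketch; VBOX builds pass the PNATState pointer, here pData:
 */
#if 0
    struct mbuf *m = m_gethdr(pData, M_DONTWAIT, MT_DATA);
    if (m == NULL)
        return;                         /* allocation failed, drop the packet */
#endif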
382
383/*
384 * String names of mbuf-related UMA(9) and malloc(9) types. Exposed to
385 * !_KERNEL so that monitoring tools can look up the zones with
386 * libmemstat(3).
387 */
388#define MBUF_MEM_NAME "mbuf"
389#define MBUF_CLUSTER_MEM_NAME "mbuf_cluster"
390#define MBUF_PACKET_MEM_NAME "mbuf_packet"
391#define MBUF_JUMBOP_MEM_NAME "mbuf_jumbo_pagesize"
392#define MBUF_JUMBO9_MEM_NAME "mbuf_jumbo_9k"
393#define MBUF_JUMBO16_MEM_NAME "mbuf_jumbo_16k"
394#define MBUF_TAG_MEM_NAME "mbuf_tag"
395#define MBUF_EXTREFCNT_MEM_NAME "mbuf_ext_refcnt"
396
397#if defined(_KERNEL) || defined(VBOX)
398
399#ifdef WITNESS
400#define MBUF_CHECKSLEEP(how) do { \
401 if (how == M_WAITOK) \
402 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, \
403 "Sleeping in \"%s\"", __func__); \
404} while (0)
405#else
406#define MBUF_CHECKSLEEP(how)
407#endif
408
409/*
410 * Network buffer allocation API
411 *
412 * The rest of it is defined in kern/kern_mbuf.c
413 */
414
415#ifndef VBOX
416extern uma_zone_t zone_mbuf;
417extern uma_zone_t zone_clust;
418extern uma_zone_t zone_pack;
419extern uma_zone_t zone_jumbop;
420extern uma_zone_t zone_jumbo9;
421extern uma_zone_t zone_jumbo16;
422extern uma_zone_t zone_ext_refcnt;
423#endif
424
425#ifndef VBOX
426static __inline struct mbuf *m_getcl(int how, short type, int flags);
427static __inline struct mbuf *m_get(int how, short type);
428static __inline struct mbuf *m_gethdr(int how, short type);
429static __inline struct mbuf *m_getjcl(int how, short type, int flags,
430 int size);
431static __inline struct mbuf *m_getclr(int how, short type); /* XXX */
432static __inline struct mbuf *m_free(struct mbuf *m);
433static __inline void m_clget(struct mbuf *m, int how);
434static __inline void *m_cljget(struct mbuf *m, int how, int size);
435void mb_free_ext(struct mbuf *);
436#else
437static __inline struct mbuf *m_getcl(PNATState pData, int how, short type, int flags);
438static __inline struct mbuf *m_get(PNATState pData, int how, short type);
439static __inline struct mbuf *m_gethdr(PNATState pData, int how, short type);
440static __inline struct mbuf *m_getjcl(PNATState pData, int how,
441 short type, int flags, int size);
442static __inline struct mbuf *m_getclr(PNATState pData, int how, short type); /* XXX */
443static __inline struct mbuf *m_free(PNATState pData, struct mbuf *m);
444static __inline void m_clget(PNATState pData, struct mbuf *m, int how);
445static __inline void *m_cljget(PNATState pData, struct mbuf *m, int how, int size);
446void mb_free_ext(PNATState, struct mbuf *);
447#endif
448static __inline void m_chtype(struct mbuf *m, short new_type);
449static __inline struct mbuf *m_last(struct mbuf *m);
450
451static __inline int
452m_gettype(int size)
453{
454 int type;
455
456 switch (size) {
457 case MSIZE:
458 type = EXT_MBUF;
459 break;
460 case MCLBYTES:
461 type = EXT_CLUSTER;
462 break;
463#if MJUMPAGESIZE != MCLBYTES
464 case MJUMPAGESIZE:
465 type = EXT_JUMBOP;
466 break;
467#endif
468 case MJUM9BYTES:
469 type = EXT_JUMBO9;
470 break;
471 case MJUM16BYTES:
472 type = EXT_JUMBO16;
473 break;
474 default:
475 panic("%s: m_getjcl: invalid cluster size", __func__);
476 }
477
478 return (type);
479}
480
481static __inline uma_zone_t
482#ifndef VBOX
483m_getzone(int size)
484#else
485m_getzone(PNATState pData, int size)
486#endif
487{
488 uma_zone_t zone;
489
490 switch (size) {
491 case MSIZE:
492 zone = zone_mbuf;
493 break;
494 case MCLBYTES:
495 zone = zone_clust;
496 break;
497#if MJUMPAGESIZE != MCLBYTES
498 case MJUMPAGESIZE:
499 zone = zone_jumbop;
500 break;
501#endif
502 case MJUM9BYTES:
503 zone = zone_jumbo9;
504 break;
505 case MJUM16BYTES:
506 zone = zone_jumbo16;
507 break;
508 default:
509 panic("%s: m_getjcl: invalid cluster type", __func__);
510 }
511
512 return (zone);
513}
514
515static __inline struct mbuf *
516#ifndef VBOX
517m_get(int how, short type)
518#else
519m_get(PNATState pData, int how, short type)
520#endif
521{
522 struct mb_args args;
523
524 args.flags = 0;
525 args.type = type;
526 return ((struct mbuf *)(uma_zalloc_arg(zone_mbuf, &args, how)));
527}
528
529/*
530 * XXX This should be deprecated, very little use.
531 */
532static __inline struct mbuf *
533#ifndef VBOX
534m_getclr(int how, short type)
535#else
536m_getclr(PNATState pData, int how, short type)
537#endif
538{
539 struct mbuf *m;
540 struct mb_args args;
541
542 args.flags = 0;
543 args.type = type;
544 m = uma_zalloc_arg(zone_mbuf, &args, how);
545 if (m != NULL)
546 bzero(m->m_data, MLEN);
547 return (m);
548}
549
550static __inline struct mbuf *
551#ifndef VBOX
552m_gethdr(int how, short type)
553#else
554m_gethdr(PNATState pData, int how, short type)
555#endif
556{
557 struct mb_args args;
558
559 args.flags = M_PKTHDR;
560 args.type = type;
561 return ((struct mbuf *)(uma_zalloc_arg(zone_mbuf, &args, how)));
562}
563
564static __inline struct mbuf *
565#ifndef VBOX
566m_getcl(int how, short type, int flags)
567#else
568m_getcl(PNATState pData, int how, short type, int flags)
569#endif
570{
571 struct mb_args args;
572
573 args.flags = flags;
574 args.type = type;
575 return ((struct mbuf *)(uma_zalloc_arg(zone_pack, &args, how)));
576}
577
578/*
579 * m_getjcl() returns an mbuf with a cluster of the specified size attached.
580 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
581 *
582 * XXX: This is rather large, should be real function maybe.
583 */
584static __inline struct mbuf *
585#ifndef VBOX
586m_getjcl(int how, short type, int flags, int size)
587#else
588m_getjcl(PNATState pData, int how, short type, int flags, int size)
589#endif
590{
591 struct mb_args args;
592 struct mbuf *m, *n;
593 uma_zone_t zone;
594
595 args.flags = flags;
596 args.type = type;
597
598 m = uma_zalloc_arg(zone_mbuf, &args, how);
599 if (m == NULL)
600 return (NULL);
601
602#ifndef VBOX
603 zone = m_getzone(size);
604#else
605 zone = m_getzone(pData, size);
606#endif
607 n = uma_zalloc_arg(zone, m, how);
608 if (n == NULL) {
609 uma_zfree(zone_mbuf, m);
610 return (NULL);
611 }
612 return (m);
613}
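/*
 * Editor's example (not part of the original header): a hedged sketch of
 * asking m_getjcl() for a packet-header mbuf backed by a 9k jumbo cluster:
 */
#if 0
    struct mbuf *m = m_getjcl(pData, M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
    if (m == NULL)
        return;
#endif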
614
615#ifndef VBOX
616static __inline void
617m_free_fast(struct mbuf *m)
618{
619 KASSERT(SLIST_EMPTY(&m->m_pkthdr.tags), ("doing fast free of mbuf with tags"));
620
621 uma_zfree_arg(zone_mbuf, m, (void *)MB_NOTAGS);
622}
623#else
624static __inline void
625m_free_fast(PNATState pData, struct mbuf *m)
626{
627 AssertMsg(SLIST_EMPTY(&m->m_pkthdr.tags), ("doing fast free of mbuf with tags"));
628
629 uma_zfree_arg(zone_mbuf, m, (void *)(uintptr_t)MB_NOTAGS);
630}
631#endif
632
633static __inline struct mbuf *
634#ifndef VBOX
635m_free(struct mbuf *m)
636#else
637m_free(PNATState pData, struct mbuf *m)
638#endif
639{
640 struct mbuf *n = m->m_next;
641
642 if (m->m_flags & M_EXT)
643#ifndef VBOX
644 mb_free_ext(m);
645#else
646 mb_free_ext(pData, m);
647#endif
648 else if ((m->m_flags & M_NOFREE) == 0)
649 uma_zfree(zone_mbuf, m);
650 return (n);
651}
652
653static __inline void
654#ifndef VBOX
655m_clget(struct mbuf *m, int how)
656#else
657m_clget(PNATState pData, struct mbuf *m, int how)
658#endif
659{
660
661 if (m->m_flags & M_EXT)
662 printf("%s: %p mbuf already has cluster\n", __func__, m);
663 m->m_ext.ext_buf = (char *)NULL;
664 uma_zalloc_arg(zone_clust, m, how);
665 /*
666 * On a cluster allocation failure, drain the packet zone and retry,
667 * we might be able to loosen a few clusters up on the drain.
668 */
669 if ((how & M_NOWAIT) && (m->m_ext.ext_buf == NULL)) {
670 zone_drain(zone_pack);
671 uma_zalloc_arg(zone_clust, m, how);
672 }
673}
674
675/*
676 * m_cljget() is different from m_clget() as it can allocate clusters without
677 * attaching them to an mbuf. In that case the return value is the pointer
678 * to the cluster of the requested size. If an mbuf was specified, it gets
679 * the cluster attached to it and the return value can be safely ignored.
680 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
681 */
682static __inline void *
683#ifndef VBOX
684m_cljget(struct mbuf *m, int how, int size)
685#else
686m_cljget(PNATState pData, struct mbuf *m, int how, int size)
687#endif
688{
689 uma_zone_t zone;
690
691 if (m && m->m_flags & M_EXT)
692 printf("%s: %p mbuf already has cluster\n", __func__, m);
693 if (m != NULL)
694 m->m_ext.ext_buf = NULL;
695
696#ifndef VBOX
697 zone = m_getzone(size);
698#else
699 zone = m_getzone(pData, size);
700#endif
701 return (uma_zalloc_arg(zone, m, how));
702}
703
704static __inline void
705#ifndef VBOX
706m_cljset(struct mbuf *m, void *cl, int type)
707#else
708m_cljset(PNATState pData, struct mbuf *m, void *cl, int type)
709#endif
710{
711 uma_zone_t zone;
712 int size;
713
714 switch (type) {
715 case EXT_CLUSTER:
716 size = MCLBYTES;
717 zone = zone_clust;
718 break;
719#if MJUMPAGESIZE != MCLBYTES
720 case EXT_JUMBOP:
721 size = MJUMPAGESIZE;
722 zone = zone_jumbop;
723 break;
724#endif
725 case EXT_JUMBO9:
726 size = MJUM9BYTES;
727 zone = zone_jumbo9;
728 break;
729 case EXT_JUMBO16:
730 size = MJUM16BYTES;
731 zone = zone_jumbo16;
732 break;
733 default:
734 panic("unknown cluster type");
735 break;
736 }
737
738 m->m_data = m->m_ext.ext_buf = cl;
739#ifdef VBOX
740 m->m_ext.ext_free = (void (*)(void *, void *))0;
741 m->m_ext.ext_args = NULL;
742#else
743 m->m_ext.ext_free = m->m_ext.ext_args = NULL;
744#endif
745 m->m_ext.ext_size = size;
746 m->m_ext.ext_type = type;
747 m->m_ext.ref_cnt = uma_find_refcnt(zone, cl);
748 m->m_flags |= M_EXT;
749
750}
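/*
 * Editor's example (not part of the original header): a hedged sketch of
 * allocating a bare cluster with m_cljget() and attaching it to an existing
 * mbuf with m_cljset(); m is assumed to be in scope:
 */
#if 0
    void *cl = m_cljget(pData, NULL, M_DONTWAIT, MCLBYTES);
    if (cl != NULL)
        m_cljset(pData, m, cl, EXT_CLUSTER);
#endif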
751
752static __inline void
753m_chtype(struct mbuf *m, short new_type)
754{
755
756 m->m_type = new_type;
757}
758
759static __inline struct mbuf *
760m_last(struct mbuf *m)
761{
762
763 while (m->m_next)
764 m = m->m_next;
765 return (m);
766}
767
768/*
769 * mbuf, cluster, and external object allocation macros (for compatibility
770 * purposes).
771 */
772#define M_MOVE_PKTHDR(to, from) m_move_pkthdr((to), (from))
773#ifndef VBOX
774#define MGET(m, how, type) ((m) = m_get((how), (type)))
775#define MGETHDR(m, how, type) ((m) = m_gethdr((how), (type)))
776#define MCLGET(m, how) m_clget((m), (how))
777#define MEXTADD(m, buf, size, free, args, flags, type) \
778 m_extadd((m), (caddr_t)(buf), (size), (free), (args), (flags), (type))
779#define m_getm(m, len, how, type) \
780 m_getm2((m), (len), (how), (type), M_PKTHDR)
781#else /*!VBOX*/
782#define MGET(m, how, type) ((m) = m_get(pData, (how), (type)))
783#define MGETHDR(m, how, type) ((m) = m_gethdr(pData, (how), (type)))
784#define MCLGET(m, how) m_clget(pData, (m), (how))
785#define MEXTADD(m, buf, size, free, args, flags, type) \
786 m_extadd(pData, (m), (caddr_t)(buf), (size), (free), (args), (flags), (type))
787#define m_getm(m, len, how, type) \
788 m_getm2(pData, (m), (len), (how), (type), M_PKTHDR)
789#endif
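/*
 * Editor's example (not part of the original header): the classic BSD
 * allocation idiom built from the macros above.  Note that in VBOX builds
 * they expand a PNATState variable named pData, which must be in scope.  A
 * hedged sketch:
 */
#if 0
    struct mbuf *m;

    MGETHDR(m, M_DONTWAIT, MT_DATA);
    if (m != NULL) {
        MCLGET(m, M_DONTWAIT);
        if ((m->m_flags & M_EXT) == 0) {    /* cluster attach failed */
            m_free(pData, m);
            m = NULL;
        }
    }
#endif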
790
791/*
792 * Evaluate TRUE if it's safe to write to the mbuf m's data region (this can
793 * be both the local data payload, or an external buffer area, depending on
794 * whether M_EXT is set).
795 */
796#define M_WRITABLE(m) (!((m)->m_flags & M_RDONLY) && \
797 (!(((m)->m_flags & M_EXT)) || \
798 (*((m)->m_ext.ref_cnt) == 1)) ) \
799
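/*
 * Editor's example (not part of the original header): a hedged sketch of
 * guarding an in-place modification with M_WRITABLE(), taking a private copy
 * (here via m_dup()) when the data area is shared or read-only:
 */
#if 0
    if (!M_WRITABLE(m))
        m = m_dup(pData, m, M_DONTWAIT);    /* the original chain would still
                                             * have to be freed by the caller */
#endif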
800/* Check if the supplied mbuf has a packet header, or else panic. */
801#define M_ASSERTPKTHDR(m) \
802 KASSERT(m != NULL && m->m_flags & M_PKTHDR, \
803 ("%s: no mbuf packet header!", __func__))
804
805/*
806 * Ensure that the supplied mbuf is a valid, non-free mbuf.
807 *
808 * XXX: Broken at the moment. Need some UMA magic to make it work again.
809 */
810#define M_ASSERTVALID(m) \
811 KASSERT((((struct mbuf *)m)->m_flags & 0) == 0, \
812 ("%s: attempted use of a free mbuf!", __func__))
813
814/*
815 * Set the m_data pointer of a newly-allocated mbuf (m_get/MGET) to place an
816 * object of the specified size at the end of the mbuf, longword aligned.
817 */
818#define M_ALIGN(m, len) do { \
819 KASSERT(!((m)->m_flags & (M_PKTHDR|M_EXT)), \
820 ("%s: M_ALIGN not normal mbuf", __func__)); \
821 KASSERT((m)->m_data == (m)->m_dat, \
822 ("%s: M_ALIGN not a virgin mbuf", __func__)); \
823 (m)->m_data += (MLEN - (len)) & ~(sizeof(long) - 1); \
824} while (0)
825
826/*
827 * As above, for mbufs allocated with m_gethdr/MGETHDR or initialized by
828 * M_DUP/MOVE_PKTHDR.
829 */
830#define MH_ALIGN(m, len) do { \
831 KASSERT((m)->m_flags & M_PKTHDR && !((m)->m_flags & M_EXT), \
832 ("%s: MH_ALIGN not PKTHDR mbuf", __func__)); \
833 KASSERT((m)->m_data == (m)->m_pktdat, \
834 ("%s: MH_ALIGN not a virgin mbuf", __func__)); \
835 (m)->m_data += (MHLEN - (len)) & ~(sizeof(long) - 1); \
836} while (0)
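/*
 * Editor's example (not part of the original header): MH_ALIGN() is
 * typically used right after MGETHDR() to place a small, known-size header
 * at the end of the inline area, leaving leading space for lower-layer
 * headers to be prepended cheaply.  A hedged sketch for an IP+TCP reply:
 */
#if 0
    MH_ALIGN(m, sizeof(struct ip) + sizeof(struct tcphdr));
    m->m_len = sizeof(struct ip) + sizeof(struct tcphdr);
#endif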
837
838/*
839 * Compute the amount of space available before the current start of data in
840 * an mbuf.
841 *
842 * The M_WRITABLE() is a temporary, conservative safety measure: the burden
843 * of checking writability of the mbuf data area rests solely with the caller.
844 */
845#define M_LEADINGSPACE(m) \
846 ((m)->m_flags & M_EXT ? \
847 (M_WRITABLE(m) ? (m)->m_data - (m)->m_ext.ext_buf : 0): \
848 (m)->m_flags & M_PKTHDR ? (m)->m_data - (m)->m_pktdat : \
849 (m)->m_data - (m)->m_dat)
850
851/*
852 * Compute the amount of space available after the end of data in an mbuf.
853 *
854 * The M_WRITABLE() is a temporary, conservative safety measure: the burden
855 * of checking writability of the mbuf data area rests solely with the caller.
856 */
857#define M_TRAILINGSPACE(m) \
858 ((m)->m_flags & M_EXT ? \
859 (M_WRITABLE(m) ? (m)->m_ext.ext_buf + (m)->m_ext.ext_size \
860 - ((m)->m_data + (m)->m_len) : 0) : \
861 &(m)->m_dat[MLEN] - ((m)->m_data + (m)->m_len))
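/*
 * Editor's example (not part of the original header): a hedged sketch of
 * appending a small value into the trailing space of a writable mbuf; m and
 * val are assumed to be in scope, and the writability check is done by the
 * caller as the comments above require:
 */
#if 0
    if (M_WRITABLE(m) && M_TRAILINGSPACE(m) >= (int)sizeof(val)) {
        memcpy(mtod(m, char *) + m->m_len, &val, sizeof(val));
        m->m_len += sizeof(val);
        m->m_pkthdr.len += sizeof(val); /* if m carries the packet header */
    }
#endif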
862
863/*
864 * Arrange to prepend space of size plen to mbuf m. If a new mbuf must be
865 * allocated, how specifies whether to wait. If the allocation fails, the
866 * original mbuf chain is freed and m is set to NULL.
867 */
868#define M_PREPEND(m, plen, how) do { \
869 struct mbuf **_mmp = &(m); \
870 struct mbuf *_mm = *_mmp; \
871 int _mplen = (plen); \
872 int __mhow = (how); \
873 \
874 MBUF_CHECKSLEEP(how); \
875 if (M_LEADINGSPACE(_mm) >= _mplen) { \
876 _mm->m_data -= _mplen; \
877 _mm->m_len += _mplen; \
878 } else \
879 _mm = m_prepend(_mm, _mplen, __mhow); \
880 if (_mm != NULL && _mm->m_flags & M_PKTHDR) \
881 _mm->m_pkthdr.len += _mplen; \
882 *_mmp = _mm; \
883} while (0)
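/*
 * Editor's example (not part of the original header): a hedged sketch of a
 * protocol pushing its header in front of the payload; as described above, m
 * comes back NULL (and the old chain is already freed) if the allocation
 * fails:
 */
#if 0
    M_PREPEND(m, sizeof(struct udphdr), M_DONTWAIT);
    if (m == NULL)
        return;
    struct udphdr *uh = mtod(m, struct udphdr *);   /* fill in the header */
#endif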
884
885/*
886 * Change mbuf to new type. This is a relatively expensive operation and
887 * should be avoided.
888 */
889#define MCHTYPE(m, t) m_chtype((m), (t))
890
891/* Length to m_copy to copy all. */
892#define M_COPYALL 1000000000
893
894/* Compatibility with 4.3. */
895#define m_copy(m, o, l) m_copym((m), (o), (l), M_DONTWAIT)
896
897extern int max_datalen; /* MHLEN - max_hdr */
898extern int max_hdr; /* Largest link + protocol header */
899extern int max_linkhdr; /* Largest link-level header */
900extern int max_protohdr; /* Largest protocol header */
901extern struct mbstat mbstat; /* General mbuf stats/infos */
902extern int nmbclusters; /* Maximum number of clusters */
903
904struct uio;
905
906void m_align(struct mbuf *, int);
907int m_apply(struct mbuf *, int, int,
908 int (*)(void *, void *, u_int), void *);
909#ifndef VBOX
910void m_adj(struct mbuf *, int);
911int m_append(struct mbuf *, int, c_caddr_t);
912struct mbuf *m_defrag(struct mbuf *, int);
913struct mbuf *m_dup(struct mbuf *, int);
914void m_cat(struct mbuf *, struct mbuf *);
915struct mbuf *m_collapse(struct mbuf *, int, int);
916void m_copyback(struct mbuf *, int, int, c_caddr_t);
917struct mbuf *m_copym(struct mbuf *, int, int, int);
918struct mbuf *m_copymdata(struct mbuf *, struct mbuf *,
919 int, int, int, int);
920struct mbuf *m_copypacket(struct mbuf *, int);
921struct mbuf *m_copyup(struct mbuf *n, int len, int dstoff);
922void m_extadd(struct mbuf *, caddr_t, u_int,
923 void (*)(void *, void *), void *, int, int);
924#else
925void m_adj(PNATState, struct mbuf *, int);
926int m_append(PNATState pData, struct mbuf *, int, c_caddr_t);
927struct mbuf *m_defrag(PNATState, struct mbuf *, int);
928struct mbuf *m_dup(PNATState, struct mbuf *, int);
929void m_cat(PNATState, struct mbuf *, struct mbuf *);
930struct mbuf *m_collapse(PNATState, struct mbuf *, int, int);
931void m_copyback(PNATState, struct mbuf *, int, int, c_caddr_t);
932struct mbuf *m_copym(PNATState, struct mbuf *, int, int, int);
933struct mbuf *m_copymdata(PNATState, struct mbuf *, struct mbuf *,
934 int, int, int, int);
935struct mbuf *m_copypacket(PNATState, struct mbuf *, int);
936struct mbuf *m_copyup(PNATState, struct mbuf *n, int len, int dstoff);
937void m_extadd(PNATState pData, struct mbuf *, caddr_t, u_int,
938 void (*)(void *, void *), void *, int, int);
939#endif
940void m_copydata(const struct mbuf *, int, int, caddr_t);
941void m_copy_pkthdr(struct mbuf *, struct mbuf *);
942void m_demote(struct mbuf *, int);
943struct mbuf *m_devget(char *, int, int, struct ifnet *,
944 void (*)(char *, caddr_t, u_int));
945int m_dup_pkthdr(struct mbuf *, struct mbuf *, int);
946u_int m_fixhdr(struct mbuf *);
947struct mbuf *m_fragment(struct mbuf *, int, int);
948#ifndef VBOX
949void m_freem(struct mbuf *);
950struct mbuf *m_getm2(struct mbuf *, int, int, short, int);
951struct mbuf *m_prepend(struct mbuf *, int, int);
952struct mbuf *m_pulldown(struct mbuf *, int, int, int *);
953struct mbuf *m_pullup(struct mbuf *, int);
954int m_sanity(struct mbuf *, int);
955struct mbuf *m_split(struct mbuf *, int, int);
956struct mbuf *m_unshare(struct mbuf *, int how);
957#else
958void m_freem(PNATState pData, struct mbuf *);
959struct mbuf *m_getm2(PNATState pData, struct mbuf *, int, int, short, int);
960struct mbuf *m_prepend(PNATState, struct mbuf *, int, int);
961struct mbuf *m_pulldown(PNATState, struct mbuf *, int, int, int *);
962struct mbuf *m_pullup(PNATState, struct mbuf *, int);
963int m_sanity(PNATState, struct mbuf *, int);
964struct mbuf *m_split(PNATState, struct mbuf *, int, int);
965struct mbuf *m_unshare(PNATState, struct mbuf *, int how);
966#endif
967struct mbuf *m_getptr(struct mbuf *, int, int *);
968u_int m_length(struct mbuf *, struct mbuf **);
969void m_move_pkthdr(struct mbuf *, struct mbuf *);
970void m_print(const struct mbuf *, int);
971struct mbuf *m_uiotombuf(struct uio *, int, int, int, int);
972
973/*-
974 * Network packets may have annotations attached by affixing a list of
975 * "packet tags" to the pkthdr structure. Packet tags are dynamically
976 * allocated semi-opaque data structures that have a fixed header
977 * (struct m_tag) that specifies the size of the memory block and a
978 * <cookie,type> pair that identifies it. The cookie is a 32-bit unique
979 * unsigned value used to identify a module or ABI. By convention this value
980 * is chosen as the date+time that the module is created, expressed as the
981 * number of seconds since the epoch (e.g., using date -u +'%s'). The type
982 * value is an ABI/module-specific value that identifies a particular
983 * annotation and is private to the module. For compatibility with systems
984 * like OpenBSD that define packet tags w/o an ABI/module cookie, the value
985 * PACKET_ABI_COMPAT is used to implement m_tag_get and m_tag_find
986 * compatibility shim functions and several tag types are defined below.
987 * Users that do not require compatibility should use a private cookie value
988 * so that packet tag-related definitions can be maintained privately.
989 *
990 * Note that the packet tag returned by m_tag_alloc has the default memory
991 * alignment implemented by malloc. To reference private data one can use a
992 * construct like:
993 *
994 * struct m_tag *mtag = m_tag_alloc(...);
995 * struct foo *p = (struct foo *)(mtag+1);
996 *
997 * if the alignment of struct m_tag is sufficient for referencing members of
998 * struct foo. Otherwise it is necessary to embed struct m_tag within the
999 * private data structure to ensure proper alignment; e.g.,
1000 *
1001 * struct foo {
1002 * struct m_tag tag;
1003 * ...
1004 * };
1005 * struct foo *p = (struct foo *) m_tag_alloc(...);
1006 * struct m_tag *mtag = &p->tag;
1007 */
1008
1009/*
1010 * Persistent tags stay with an mbuf until the mbuf is reclaimed. Otherwise
1011 * tags are expected to ``vanish'' when they pass through a network
1012 * interface. For most interfaces this happens normally as the tags are
1013 * reclaimed when the mbuf is free'd. However in some special cases
1014 * reclaiming must be done manually. An example is packets that pass through
1015 * the loopback interface. Also, one must be careful to do this when
1016 * ``turning around'' packets (e.g., icmp_reflect).
1017 *
1018 * To mark a tag persistent bit-or this flag in when defining the tag id.
1019 * The tag will then be treated as described above.
1020 */
1021#define MTAG_PERSISTENT 0x800
1022
1023#define PACKET_TAG_NONE 0 /* Nadda */
1024
1025/* Packet tags for use with PACKET_ABI_COMPAT. */
1026#define PACKET_TAG_IPSEC_IN_DONE 1 /* IPsec applied, in */
1027#define PACKET_TAG_IPSEC_OUT_DONE 2 /* IPsec applied, out */
1028#define PACKET_TAG_IPSEC_IN_CRYPTO_DONE 3 /* NIC IPsec crypto done */
1029#define PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED 4 /* NIC IPsec crypto req'ed */
1030#define PACKET_TAG_IPSEC_IN_COULD_DO_CRYPTO 5 /* NIC notifies IPsec */
1031#define PACKET_TAG_IPSEC_PENDING_TDB 6 /* Reminder to do IPsec */
1032#define PACKET_TAG_BRIDGE 7 /* Bridge processing done */
1033#define PACKET_TAG_GIF 8 /* GIF processing done */
1034#define PACKET_TAG_GRE 9 /* GRE processing done */
1035#define PACKET_TAG_IN_PACKET_CHECKSUM 10 /* NIC checksumming done */
1036#define PACKET_TAG_ENCAP 11 /* Encap. processing */
1037#define PACKET_TAG_IPSEC_SOCKET 12 /* IPSEC socket ref */
1038#define PACKET_TAG_IPSEC_HISTORY 13 /* IPSEC history */
1039#define PACKET_TAG_IPV6_INPUT 14 /* IPV6 input processing */
1040#define PACKET_TAG_DUMMYNET 15 /* dummynet info */
1041#define PACKET_TAG_DIVERT 17 /* divert info */
1042#define PACKET_TAG_IPFORWARD 18 /* ipforward info */
1043#define PACKET_TAG_MACLABEL (19 | MTAG_PERSISTENT) /* MAC label */
1044#define PACKET_TAG_PF 21 /* PF + ALTQ information */
1045#define PACKET_TAG_RTSOCKFAM 25 /* rtsock sa family */
1046#define PACKET_TAG_IPOPTIONS 27 /* Saved IP options */
1047#define PACKET_TAG_CARP 28 /* CARP info */
1048#ifdef VBOX
1049# define PACKET_TAG_ALIAS 0xab01
1050# define PACKET_TAG_ETHER 0xab02
1051# define PACKET_SERVICE 0xab03
1052#endif
1053
1054/* Specific cookies and tags. */
1055
1056/* Packet tag routines. */
1057struct m_tag *m_tag_alloc(u_int32_t, int, int, int);
1058void m_tag_delete(struct mbuf *, struct m_tag *);
1059void m_tag_delete_chain(struct mbuf *, struct m_tag *);
1060void m_tag_free_default(struct m_tag *);
1061struct m_tag *m_tag_locate(struct mbuf *, u_int32_t, int, struct m_tag *);
1062struct m_tag *m_tag_copy(struct m_tag *, int);
1063int m_tag_copy_chain(struct mbuf *, struct mbuf *, int);
1064void m_tag_delete_nonpersistent(struct mbuf *);
1065
1066/*
1067 * Initialize the list of tags associated with an mbuf.
1068 */
1069static __inline void
1070m_tag_init(struct mbuf *m)
1071{
1072
1073 SLIST_INIT(&m->m_pkthdr.tags);
1074}
1075
1076/*
1077 * Set up the contents of a tag. Note that this does not fill in the free
1078 * method; the caller is expected to do that.
1079 *
1080 * XXX probably should be called m_tag_init, but that was already taken.
1081 */
1082static __inline void
1083m_tag_setup(struct m_tag *t, u_int32_t cookie, int type, int len)
1084{
1085
1086 t->m_tag_id = type;
1087 t->m_tag_len = len;
1088 t->m_tag_cookie = cookie;
1089}
1090
1091/*
1092 * Reclaim resources associated with a tag.
1093 */
1094static __inline void
1095m_tag_free(struct m_tag *t)
1096{
1097
1098 (*t->m_tag_free)(t);
1099}
1100
1101/*
1102 * Return the first tag associated with an mbuf.
1103 */
1104static __inline struct m_tag *
1105m_tag_first(struct mbuf *m)
1106{
1107
1108 return (SLIST_FIRST(&m->m_pkthdr.tags));
1109}
1110
1111/*
1112 * Return the next tag in the list of tags associated with an mbuf.
1113 */
1114static __inline struct m_tag *
1115m_tag_next(struct mbuf *m, struct m_tag *t)
1116{
1117 NOREF(m);
1118 return (SLIST_NEXT(t, m_tag_link));
1119}
1120
1121/*
1122 * Prepend a tag to the list of tags associated with an mbuf.
1123 */
1124static __inline void
1125m_tag_prepend(struct mbuf *m, struct m_tag *t)
1126{
1127
1128 SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link);
1129}
1130
1131/*
1132 * Unlink a tag from the list of tags associated with an mbuf.
1133 */
1134static __inline void
1135m_tag_unlink(struct mbuf *m, struct m_tag *t)
1136{
1137
1138 SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);
1139}
1140
1141/* These are for OpenBSD compatibility. */
1142#define MTAG_ABI_COMPAT 0 /* compatibility ABI */
1143
1144static __inline struct m_tag *
1145m_tag_get(int type, int length, int fWait)
1146{
1147 return (m_tag_alloc(MTAG_ABI_COMPAT, type, length, fWait));
1148}
1149
1150static __inline struct m_tag *
1151m_tag_find(struct mbuf *m, int type, struct m_tag *start)
1152{
1153 return (SLIST_EMPTY(&m->m_pkthdr.tags) ? (struct m_tag *)NULL :
1154 m_tag_locate(m, MTAG_ABI_COMPAT, type, start));
1155}
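/*
 * Editor's example (not part of the original header): a hedged sketch of
 * attaching and later looking up an OpenBSD-compatibility tag, using the
 * VBOX-private PACKET_TAG_ALIAS id defined above; m and payload are assumed
 * to be in scope:
 */
#if 0
    struct m_tag *t = m_tag_get(PACKET_TAG_ALIAS, sizeof(void *), M_DONTWAIT);
    if (t != NULL) {
        *(void **)(t + 1) = payload;    /* tag data follows the header */
        m_tag_prepend(m, t);
    }
    /* ... later ... */
    t = m_tag_find(m, PACKET_TAG_ALIAS, NULL);
#endif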
1156
1157/* XXX temporary FIB methods probably eventually use tags.*/
1158#define M_FIBSHIFT 28
1159#define M_FIBMASK 0x0F
1160
1161/* get the fib from an mbuf and if it is not set, return the default */
1162#define M_GETFIB(_m) \
1163 ((((_m)->m_flags & M_FIB) >> M_FIBSHIFT) & M_FIBMASK)
1164
1165#define M_SETFIB(_m, _fib) do { \
1166 _m->m_flags &= ~M_FIB; \
1167 _m->m_flags |= (((_fib) << M_FIBSHIFT) & M_FIB); \
1168} while (0)
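/*
 * Editor's example (not part of the original header): the routing table
 * (FIB) number lives in the top four m_flags bits, so it is only read and
 * written through the macros above.  A hedged sketch; fib is a hypothetical
 * int:
 */
#if 0
    M_SETFIB(m, 0);                     /* put the packet in the default FIB */
    fib = M_GETFIB(m);
#endif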
1169
1170#endif /* _KERNEL */
1171
1172#endif /* !_SYS_MBUF_H_ */