VirtualBox

source: vbox/trunk/src/VBox/Additions/x11/x11include/xorg-server-1.6.0/compiler.h @ 46019

Last change on this file since 46019 was 17471, checked in by vboxsync, 16 years ago

export to OSE

  • Property svn:eol-style set to native
File size: 46.9 KB
 
1/*
2 * Copyright 1990,91 by Thomas Roell, Dinkelscherben, Germany.
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that
7 * copyright notice and this permission notice appear in supporting
8 * documentation, and that the name of Thomas Roell not be used in
9 * advertising or publicity pertaining to distribution of the software without
10 * specific, written prior permission. Thomas Roell makes no representations
11 * about the suitability of this software for any purpose. It is provided
12 * "as is" without express or implied warranty.
13 *
14 * THOMAS ROELL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THOMAS ROELL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
20 * PERFORMANCE OF THIS SOFTWARE.
21 *
22 */
23/*
24 * Copyright (c) 1994-2003 by The XFree86 Project, Inc.
25 *
26 * Permission is hereby granted, free of charge, to any person obtaining a
27 * copy of this software and associated documentation files (the "Software"),
28 * to deal in the Software without restriction, including without limitation
29 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
30 * and/or sell copies of the Software, and to permit persons to whom the
31 * Software is furnished to do so, subject to the following conditions:
32 *
33 * The above copyright notice and this permission notice shall be included in
34 * all copies or substantial portions of the Software.
35 *
36 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
37 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
38 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
39 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
40 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
41 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
42 * OTHER DEALINGS IN THE SOFTWARE.
43 *
44 * Except as contained in this notice, the name of the copyright holder(s)
45 * and author(s) shall not be used in advertising or otherwise to promote
46 * the sale, use or other dealings in this Software without prior written
47 * authorization from the copyright holder(s) and author(s).
48 */
49
50#ifndef _COMPILER_H
51
52# define _COMPILER_H
53
54#if defined(__SUNPRO_C)
55# define DO_PROTOTYPES
56#endif
57
58/* Allow drivers to use the GCC-supported __inline__ and/or __inline. */
59# ifndef __inline__
60# if defined(__GNUC__)
61 /* gcc has __inline__ */
62# elif defined(__HIGHC__)
63# define __inline__ _Inline
64# else
65# define __inline__ /**/
66# endif
67# endif /* __inline__ */
68# ifndef __inline
69# if defined(__GNUC__)
70 /* gcc has __inline */
71# elif defined(__HIGHC__)
72# define __inline _Inline
73# else
74# define __inline /**/
75# endif
76# endif /* __inline */
77
78/* Support gcc's __FUNCTION__ for people using other compilers */
79#if !defined(__GNUC__) && !defined(__FUNCTION__)
80# define __FUNCTION__ __func__ /* C99 */
81#endif
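/*
 * Editor's sketch (not part of the original header): with the fallback
 * above, driver code can log the current function name uniformly whether
 * the compiler provides __FUNCTION__ (gcc) or only the C99 __func__.
 * ErrorF() and val are assumed/hypothetical here.
 */
#if 0
    ErrorF("%s: unexpected register value 0x%x\n", __FUNCTION__, val);
#endif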
82
83# if defined(NO_INLINE) || defined(DO_PROTOTYPES)
84
85# if !defined(__arm__)
86# if !defined(__sparc__) && !defined(__sparc) && !defined(__arm32__) \
87 && !(defined(__alpha__) && defined(linux)) \
88 && !(defined(__ia64__) && defined(linux)) \
89
90extern void outb(unsigned short, unsigned char);
91extern void outw(unsigned short, unsigned short);
92extern void outl(unsigned short, unsigned int);
93extern unsigned int inb(unsigned short);
94extern unsigned int inw(unsigned short);
95extern unsigned int inl(unsigned short);
96
97# else /* __sparc__, __arm32__, __alpha__*/
98
99extern void outb(unsigned long, unsigned char);
100extern void outw(unsigned long, unsigned short);
101extern void outl(unsigned long, unsigned int);
102extern unsigned int inb(unsigned long);
103extern unsigned int inw(unsigned long);
104extern unsigned int inl(unsigned long);
105
106# endif /* __sparc__, __arm32__, __alpha__ */
107# endif /* __arm__ */
108
109extern unsigned long ldq_u(unsigned long *);
110extern unsigned long ldl_u(unsigned int *);
111extern unsigned long ldw_u(unsigned short *);
112extern void stq_u(unsigned long, unsigned long *);
113extern void stl_u(unsigned long, unsigned int *);
114extern void stw_u(unsigned long, unsigned short *);
115extern void mem_barrier(void);
116extern void write_mem_barrier(void);
117extern void stl_brx(unsigned long, volatile unsigned char *, int);
118extern void stw_brx(unsigned short, volatile unsigned char *, int);
119extern unsigned long ldl_brx(volatile unsigned char *, int);
120extern unsigned short ldw_brx(volatile unsigned char *, int);
121
122# endif
123
124# ifndef NO_INLINE
125# ifdef __GNUC__
126# if (defined(linux) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) && (defined(__alpha__))
127
128# ifdef linux
129/* for Linux on Alpha, we use the LIBC _inx/_outx routines */
130/* note that the appropriate setup via "ioperm" needs to be done */
131/* *before* any inx/outx is done. */
132
133extern void (*_alpha_outb)(char val, unsigned long port);
134static __inline__ void
135outb(unsigned long port, unsigned char val)
136{
137 _alpha_outb(val, port);
138}
139
140extern void (*_alpha_outw)(short val, unsigned long port);
141static __inline__ void
142outw(unsigned long port, unsigned short val)
143{
144 _alpha_outw(val, port);
145}
146
147extern void (*_alpha_outl)(int val, unsigned long port);
148static __inline__ void
149outl(unsigned long port, unsigned int val)
150{
151 _alpha_outl(val, port);
152}
153
154extern unsigned int (*_alpha_inb)(unsigned long port);
155static __inline__ unsigned int
156inb(unsigned long port)
157{
158 return _alpha_inb(port);
159}
160
161extern unsigned int (*_alpha_inw)(unsigned long port);
162static __inline__ unsigned int
163inw(unsigned long port)
164{
165 return _alpha_inw(port);
166}
167
168extern unsigned int (*_alpha_inl)(unsigned long port);
169static __inline__ unsigned int
170inl(unsigned long port)
171{
172 return _alpha_inl(port);
173}
174
175# endif /* linux */
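/*
 * Usage sketch (editor's addition, hypothetical): on Linux/Alpha the libc
 * wrappers above only work once port access has been granted with
 * ioperm(), e.g. for the legacy VGA register range (error handling
 * omitted):
 */
#if 0
#include <sys/io.h>                      /* ioperm() */
    if (ioperm(0x3C0, 0x20, 1) == 0)     /* grant access to 0x3C0..0x3DF */
        outb(0x3C2, 0x67);               /* (port, value) as defined above */
#endif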
176
177# if (defined(__FreeBSD__) || defined(__OpenBSD__)) \
178 && !defined(DO_PROTOTYPES)
179
180/* for FreeBSD and OpenBSD on Alpha, we use the libio (resp. libalpha) */
181/* inx/outx routines */
182/* note that the appropriate setup via "ioperm" needs to be done */
183/* *before* any inx/outx is done. */
184
185extern void outb(unsigned int port, unsigned char val);
186extern void outw(unsigned int port, unsigned short val);
187extern void outl(unsigned int port, unsigned int val);
188extern unsigned char inb(unsigned int port);
189extern unsigned short inw(unsigned int port);
190extern unsigned int inl(unsigned int port);
191
192# endif /* (__FreeBSD__ || __OpenBSD__ ) && !DO_PROTOTYPES */
193
194
195#if defined(__NetBSD__)
196#include <machine/pio.h>
197#endif /* __NetBSD__ */
198
199/*
200 * inline functions to do unaligned accesses
201 * from linux/include/asm-alpha/unaligned.h
202 */
203
204/*
205 * EGCS 1.1 knows about arbitrary unaligned loads. Define some
206 * packed structures to talk about such things with.
207 */
208
209struct __una_u64 { unsigned long x __attribute__((packed)); };
210struct __una_u32 { unsigned int x __attribute__((packed)); };
211struct __una_u16 { unsigned short x __attribute__((packed)); };
212
213/*
214 * Elemental unaligned loads
215 */
216/* let's try making these things static */
217
218static __inline__ unsigned long ldq_u(unsigned long * r11)
219{
220# if defined(__GNUC__)
221 const struct __una_u64 *ptr = (const struct __una_u64 *) r11;
222 return ptr->x;
223# else
224 unsigned long r1,r2;
225 __asm__("ldq_u %0,%3\n\t"
226 "ldq_u %1,%4\n\t"
227 "extql %0,%2,%0\n\t"
228 "extqh %1,%2,%1"
229 :"=&r" (r1), "=&r" (r2)
230 :"r" (r11),
231 "m" (*r11),
232 "m" (*(const unsigned long *)(7+(char *) r11)));
233 return r1 | r2;
234# endif
235}
236
237static __inline__ unsigned long ldl_u(unsigned int * r11)
238{
239# if defined(__GNUC__)
240 const struct __una_u32 *ptr = (const struct __una_u32 *) r11;
241 return ptr->x;
242# else
243 unsigned long r1,r2;
244 __asm__("ldq_u %0,%3\n\t"
245 "ldq_u %1,%4\n\t"
246 "extll %0,%2,%0\n\t"
247 "extlh %1,%2,%1"
248 :"=&r" (r1), "=&r" (r2)
249 :"r" (r11),
250 "m" (*r11),
251 "m" (*(const unsigned long *)(3+(char *) r11)));
252 return r1 | r2;
253# endif
254}
255
256static __inline__ unsigned long ldw_u(unsigned short * r11)
257{
258# if defined(__GNUC__)
259 const struct __una_u16 *ptr = (const struct __una_u16 *) r11;
260 return ptr->x;
261# else
262 unsigned long r1,r2;
263 __asm__("ldq_u %0,%3\n\t"
264 "ldq_u %1,%4\n\t"
265 "extwl %0,%2,%0\n\t"
266 "extwh %1,%2,%1"
267 :"=&r" (r1), "=&r" (r2)
268 :"r" (r11),
269 "m" (*r11),
270 "m" (*(const unsigned long *)(1+(char *) r11)));
271 return r1 | r2;
272# endif
273}
274
275/*
276 * Elemental unaligned stores
277 */
278
279static __inline__ void stq_u(unsigned long r5, unsigned long * r11)
280{
281# if defined(__GNUC__)
282 struct __una_u64 *ptr = (struct __una_u64 *) r11;
283 ptr->x = r5;
284# else
285 unsigned long r1,r2,r3,r4;
286
287 __asm__("ldq_u %3,%1\n\t"
288 "ldq_u %2,%0\n\t"
289 "insqh %6,%7,%5\n\t"
290 "insql %6,%7,%4\n\t"
291 "mskqh %3,%7,%3\n\t"
292 "mskql %2,%7,%2\n\t"
293 "bis %3,%5,%3\n\t"
294 "bis %2,%4,%2\n\t"
295 "stq_u %3,%1\n\t"
296 "stq_u %2,%0"
297 :"=m" (*r11),
298 "=m" (*(unsigned long *)(7+(char *) r11)),
299 "=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
300 :"r" (r5), "r" (r11));
301# endif
302}
303
304static __inline__ void stl_u(unsigned long r5, unsigned int * r11)
305{
306# if defined(__GNUC__)
307 struct __una_u32 *ptr = (struct __una_u32 *) r11;
308 ptr->x = r5;
309# else
310 unsigned long r1,r2,r3,r4;
311
312 __asm__("ldq_u %3,%1\n\t"
313 "ldq_u %2,%0\n\t"
314 "inslh %6,%7,%5\n\t"
315 "insll %6,%7,%4\n\t"
316 "msklh %3,%7,%3\n\t"
317 "mskll %2,%7,%2\n\t"
318 "bis %3,%5,%3\n\t"
319 "bis %2,%4,%2\n\t"
320 "stq_u %3,%1\n\t"
321 "stq_u %2,%0"
322 :"=m" (*r11),
323 "=m" (*(unsigned long *)(3+(char *) r11)),
324 "=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
325 :"r" (r5), "r" (r11));
326# endif
327}
328
329static __inline__ void stw_u(unsigned long r5, unsigned short * r11)
330{
331# if defined(__GNUC__)
332 struct __una_u16 *ptr = (struct __una_u16 *) r11;
333 ptr->x = r5;
334# else
335 unsigned long r1,r2,r3,r4;
336
337 __asm__("ldq_u %3,%1\n\t"
338 "ldq_u %2,%0\n\t"
339 "inswh %6,%7,%5\n\t"
340 "inswl %6,%7,%4\n\t"
341 "mskwh %3,%7,%3\n\t"
342 "mskwl %2,%7,%2\n\t"
343 "bis %3,%5,%3\n\t"
344 "bis %2,%4,%2\n\t"
345 "stq_u %3,%1\n\t"
346 "stq_u %2,%0"
347 :"=m" (*r11),
348 "=m" (*(unsigned long *)(1+(char *) r11)),
349 "=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
350 :"r" (r5), "r" (r11));
351# endif
352}
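/*
 * Usage sketch (editor's addition): the ld*_u/st*_u helpers above let
 * drivers move data through pointers that may be misaligned, e.g. while
 * walking a byte-packed command buffer (hypothetical helper):
 */
#if 0
static __inline__ void
copy_dword_unaligned(unsigned int *dst, unsigned int *src)
{
    stl_u(ldl_u(src), dst);   /* 32-bit load and store at any alignment */
}
#endif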
353
354/* to flush the I-cache before jumping to code which just got loaded */
355# define PAL_imb 134
356# define istream_mem_barrier() \
357 __asm__ __volatile__("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
358# define mem_barrier() __asm__ __volatile__("mb" : : : "memory")
359# ifdef __ELF__
360# define write_mem_barrier() __asm__ __volatile__("wmb" : : : "memory")
361# else /* ECOFF gas 2.6 doesn't know "wmb" :-( */
362# define write_mem_barrier() mem_barrier()
363# endif
364
365
366# elif defined(linux) && defined(__ia64__)
367
368# include <inttypes.h>
369
370# include <sys/io.h>
371
372struct __una_u64 { uint64_t x __attribute__((packed)); };
373struct __una_u32 { uint32_t x __attribute__((packed)); };
374struct __una_u16 { uint16_t x __attribute__((packed)); };
375
376static __inline__ unsigned long
377__uldq (const unsigned long * r11)
378{
379 const struct __una_u64 *ptr = (const struct __una_u64 *) r11;
380 return ptr->x;
381}
382
383static __inline__ unsigned long
384__uldl (const unsigned int * r11)
385{
386 const struct __una_u32 *ptr = (const struct __una_u32 *) r11;
387 return ptr->x;
388}
389
390static __inline__ unsigned long
391__uldw (const unsigned short * r11)
392{
393 const struct __una_u16 *ptr = (const struct __una_u16 *) r11;
394 return ptr->x;
395}
396
397static __inline__ void
398__ustq (unsigned long r5, unsigned long * r11)
399{
400 struct __una_u64 *ptr = (struct __una_u64 *) r11;
401 ptr->x = r5;
402}
403
404static __inline__ void
405__ustl (unsigned long r5, unsigned int * r11)
406{
407 struct __una_u32 *ptr = (struct __una_u32 *) r11;
408 ptr->x = r5;
409}
410
411static __inline__ void
412__ustw (unsigned long r5, unsigned short * r11)
413{
414 struct __una_u16 *ptr = (struct __una_u16 *) r11;
415 ptr->x = r5;
416}
417
418# define ldq_u(p) __uldq(p)
419# define ldl_u(p) __uldl(p)
420# define ldw_u(p) __uldw(p)
421# define stq_u(v,p) __ustq(v,p)
422# define stl_u(v,p) __ustl(v,p)
423# define stw_u(v,p) __ustw(v,p)
424
425# ifndef __INTEL_COMPILER
426# define mem_barrier() __asm__ __volatile__ ("mf" ::: "memory")
427# define write_mem_barrier() __asm__ __volatile__ ("mf" ::: "memory")
428# else
429# include "ia64intrin.h"
430# define mem_barrier() __mf()
431# define write_mem_barrier() __mf()
432# endif
433
434/*
435 * This is overkill, but for different reasons depending on where it is used.
436 * This is thus general enough to be used everywhere cache flushes are needed.
437 * It doesn't handle memory access serialisation by other processors, though.
438 */
439# ifndef __INTEL_COMPILER
440# define ia64_flush_cache(Addr) \
441 __asm__ __volatile__ ( \
442 "fc.i %0;;;" \
443 "sync.i;;;" \
444 "mf;;;" \
445 "srlz.i;;;" \
446 :: "r"(Addr) : "memory")
447# else
448# define ia64_flush_cache(Addr) { \
449 __fc(Addr);\
450 __synci();\
451 __mf();\
452 __isrlz();\
453 }
454# endif
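/*
 * Usage sketch (editor's addition, hypothetical): after writing freshly
 * generated code, each touched cache line must be flushed before jumping
 * to it; a 32-byte stride is assumed here and may need adjusting for a
 * given ia64 implementation.
 */
#if 0
static __inline__ void
flush_code_range(char *addr, unsigned long len)
{
    unsigned long p;
    for (p = (unsigned long) addr; p < (unsigned long) addr + len; p += 32)
        ia64_flush_cache(p);
}
#endif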
455# undef outb
456# undef outw
457# undef outl
458# undef inb
459# undef inw
460# undef inl
461extern void outb(unsigned long port, unsigned char val);
462extern void outw(unsigned long port, unsigned short val);
463extern void outl(unsigned long port, unsigned int val);
464extern unsigned int inb(unsigned long port);
465extern unsigned int inw(unsigned long port);
466extern unsigned int inl(unsigned long port);
467
468# elif defined(linux) && defined(__amd64__)
469
470# include <inttypes.h>
471
472# define ldq_u(p) (*((unsigned long *)(p)))
473# define ldl_u(p) (*((unsigned int *)(p)))
474# define ldw_u(p) (*((unsigned short *)(p)))
475# define stq_u(v,p) (*(unsigned long *)(p)) = (v)
476# define stl_u(v,p) (*(unsigned int *)(p)) = (v)
477# define stw_u(v,p) (*(unsigned short *)(p)) = (v)
478
479# define mem_barrier() \
480 __asm__ __volatile__ ("lock; addl $0,0(%%rsp)": : :"memory")
481# define write_mem_barrier() \
482 __asm__ __volatile__ ("": : :"memory")
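/*
 * Usage sketch (editor's addition, hypothetical names): the barriers above
 * order CPU stores against subsequent I/O, e.g. making sure a command
 * written into a shared buffer is visible before the device is kicked:
 */
#if 0
    *(volatile unsigned int *)(ring_base + head) = command_word;
    write_mem_barrier();          /* publish the store ...                 */
    outb(doorbell_port, 1);       /* ... before the doorbell port write    */
#endif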
483
484
485static __inline__ void
486outb(unsigned short port, unsigned char val)
487{
488 __asm__ __volatile__("outb %0,%1" : :"a" (val), "d" (port));
489}
490
491
492static __inline__ void
493outw(unsigned short port, unsigned short val)
494{
495 __asm__ __volatile__("outw %0,%1" : :"a" (val), "d" (port));
496}
497
498static __inline__ void
499outl(unsigned short port, unsigned int val)
500{
501 __asm__ __volatile__("outl %0,%1" : :"a" (val), "d" (port));
502}
503
504static __inline__ unsigned int
505inb(unsigned short port)
506{
507 unsigned char ret;
508 __asm__ __volatile__("inb %1,%0" :
509 "=a" (ret) :
510 "d" (port));
511 return ret;
512}
513
514static __inline__ unsigned int
515inw(unsigned short port)
516{
517 unsigned short ret;
518 __asm__ __volatile__("inw %1,%0" :
519 "=a" (ret) :
520 "d" (port));
521 return ret;
522}
523
524static __inline__ unsigned int
525inl(unsigned short port)
526{
527 unsigned int ret;
528 __asm__ __volatile__("inl %1,%0" :
529 "=a" (ret) :
530 "d" (port));
531 return ret;
532}
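/*
 * Usage sketch (editor's addition): with the inlines above, legacy VGA port
 * I/O can be done directly, assuming the process already holds I/O
 * privileges (e.g. via ioperm()/iopl()); 0x3CC/0x3C2 are the Miscellaneous
 * Output read/write ports:
 */
#if 0
    unsigned char misc = inb(0x3CC);
    outb(0x3C2, misc | 0x01);     /* set the I/O address select bit */
#endif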
533
534# elif (defined(linux) || defined(sun) || defined(__OpenBSD__) || defined(__FreeBSD__)) && defined(__sparc__)
535
536# ifndef ASI_PL
537# define ASI_PL 0x88
538# endif
539
540# define barrier() __asm__ __volatile__(".word 0x8143e00a": : :"memory")
541
542static __inline__ void
543outb(unsigned long port, unsigned char val)
544{
545 __asm__ __volatile__("stba %0, [%1] %2"
546 : /* No outputs */
547 : "r" (val), "r" (port), "i" (ASI_PL));
548 barrier();
549}
550
551static __inline__ void
552outw(unsigned long port, unsigned short val)
553{
554 __asm__ __volatile__("stha %0, [%1] %2"
555 : /* No outputs */
556 : "r" (val), "r" (port), "i" (ASI_PL));
557 barrier();
558}
559
560static __inline__ void
561outl(unsigned long port, unsigned int val)
562{
563 __asm__ __volatile__("sta %0, [%1] %2"
564 : /* No outputs */
565 : "r" (val), "r" (port), "i" (ASI_PL));
566 barrier();
567}
568
569static __inline__ unsigned int
570inb(unsigned long port)
571{
572 unsigned int ret;
573 __asm__ __volatile__("lduba [%1] %2, %0"
574 : "=r" (ret)
575 : "r" (port), "i" (ASI_PL));
576 return ret;
577}
578
579static __inline__ unsigned int
580inw(unsigned long port)
581{
582 unsigned int ret;
583 __asm__ __volatile__("lduha [%1] %2, %0"
584 : "=r" (ret)
585 : "r" (port), "i" (ASI_PL));
586 return ret;
587}
588
589static __inline__ unsigned int
590inl(unsigned long port)
591{
592 unsigned int ret;
593 __asm__ __volatile__("lda [%1] %2, %0"
594 : "=r" (ret)
595 : "r" (port), "i" (ASI_PL));
596 return ret;
597}
598
599static __inline__ unsigned char
600xf86ReadMmio8(__volatile__ void *base, const unsigned long offset)
601{
602 unsigned long addr = ((unsigned long)base) + offset;
603 unsigned char ret;
604
605 __asm__ __volatile__("lduba [%1] %2, %0"
606 : "=r" (ret)
607 : "r" (addr), "i" (ASI_PL));
608 return ret;
609}
610
611static __inline__ unsigned short
612xf86ReadMmio16Be(__volatile__ void *base, const unsigned long offset)
613{
614 unsigned long addr = ((unsigned long)base) + offset;
615 unsigned short ret;
616
617 __asm__ __volatile__("lduh [%1], %0"
618 : "=r" (ret)
619 : "r" (addr));
620 return ret;
621}
622
623static __inline__ unsigned short
624xf86ReadMmio16Le(__volatile__ void *base, const unsigned long offset)
625{
626 unsigned long addr = ((unsigned long)base) + offset;
627 unsigned short ret;
628
629 __asm__ __volatile__("lduha [%1] %2, %0"
630 : "=r" (ret)
631 : "r" (addr), "i" (ASI_PL));
632 return ret;
633}
634
635static __inline__ unsigned int
636xf86ReadMmio32Be(__volatile__ void *base, const unsigned long offset)
637{
638 unsigned long addr = ((unsigned long)base) + offset;
639 unsigned int ret;
640
641 __asm__ __volatile__("ld [%1], %0"
642 : "=r" (ret)
643 : "r" (addr));
644 return ret;
645}
646
647static __inline__ unsigned int
648xf86ReadMmio32Le(__volatile__ void *base, const unsigned long offset)
649{
650 unsigned long addr = ((unsigned long)base) + offset;
651 unsigned int ret;
652
653 __asm__ __volatile__("lda [%1] %2, %0"
654 : "=r" (ret)
655 : "r" (addr), "i" (ASI_PL));
656 return ret;
657}
658
659static __inline__ void
660xf86WriteMmio8(__volatile__ void *base, const unsigned long offset,
661 const unsigned int val)
662{
663 unsigned long addr = ((unsigned long)base) + offset;
664
665 __asm__ __volatile__("stba %0, [%1] %2"
666 : /* No outputs */
667 : "r" (val), "r" (addr), "i" (ASI_PL));
668 barrier();
669}
670
671static __inline__ void
672xf86WriteMmio16Be(__volatile__ void *base, const unsigned long offset,
673 const unsigned int val)
674{
675 unsigned long addr = ((unsigned long)base) + offset;
676
677 __asm__ __volatile__("sth %0, [%1]"
678 : /* No outputs */
679 : "r" (val), "r" (addr));
680 barrier();
681}
682
683static __inline__ void
684xf86WriteMmio16Le(__volatile__ void *base, const unsigned long offset,
685 const unsigned int val)
686{
687 unsigned long addr = ((unsigned long)base) + offset;
688
689 __asm__ __volatile__("stha %0, [%1] %2"
690 : /* No outputs */
691 : "r" (val), "r" (addr), "i" (ASI_PL));
692 barrier();
693}
694
695static __inline__ void
696xf86WriteMmio32Be(__volatile__ void *base, const unsigned long offset,
697 const unsigned int val)
698{
699 unsigned long addr = ((unsigned long)base) + offset;
700
701 __asm__ __volatile__("st %0, [%1]"
702 : /* No outputs */
703 : "r" (val), "r" (addr));
704 barrier();
705}
706
707static __inline__ void
708xf86WriteMmio32Le(__volatile__ void *base, const unsigned long offset,
709 const unsigned int val)
710{
711 unsigned long addr = ((unsigned long)base) + offset;
712
713 __asm__ __volatile__("sta %0, [%1] %2"
714 : /* No outputs */
715 : "r" (val), "r" (addr), "i" (ASI_PL));
716 barrier();
717}
718
719static __inline__ void
720xf86WriteMmio8NB(__volatile__ void *base, const unsigned long offset,
721 const unsigned int val)
722{
723 unsigned long addr = ((unsigned long)base) + offset;
724
725 __asm__ __volatile__("stba %0, [%1] %2"
726 : /* No outputs */
727 : "r" (val), "r" (addr), "i" (ASI_PL));
728}
729
730static __inline__ void
731xf86WriteMmio16BeNB(__volatile__ void *base, const unsigned long offset,
732 const unsigned int val)
733{
734 unsigned long addr = ((unsigned long)base) + offset;
735
736 __asm__ __volatile__("sth %0, [%1]"
737 : /* No outputs */
738 : "r" (val), "r" (addr));
739}
740
741static __inline__ void
742xf86WriteMmio16LeNB(__volatile__ void *base, const unsigned long offset,
743 const unsigned int val)
744{
745 unsigned long addr = ((unsigned long)base) + offset;
746
747 __asm__ __volatile__("stha %0, [%1] %2"
748 : /* No outputs */
749 : "r" (val), "r" (addr), "i" (ASI_PL));
750}
751
752static __inline__ void
753xf86WriteMmio32BeNB(__volatile__ void *base, const unsigned long offset,
754 const unsigned int val)
755{
756 unsigned long addr = ((unsigned long)base) + offset;
757
758 __asm__ __volatile__("st %0, [%1]"
759 : /* No outputs */
760 : "r" (val), "r" (addr));
761}
762
763static __inline__ void
764xf86WriteMmio32LeNB(__volatile__ void *base, const unsigned long offset,
765 const unsigned int val)
766{
767 unsigned long addr = ((unsigned long)base) + offset;
768
769 __asm__ __volatile__("sta %0, [%1] %2"
770 : /* No outputs */
771 : "r" (val), "r" (addr), "i" (ASI_PL));
772}
773
774
775/*
776 * EGCS 1.1 knows about arbitrary unaligned loads. Define some
777 * packed structures to talk about such things with.
778 */
779
780# if defined(__arch64__) || defined(__sparcv9)
781struct __una_u64 { unsigned long x __attribute__((packed)); };
782# endif
783struct __una_u32 { unsigned int x __attribute__((packed)); };
784struct __una_u16 { unsigned short x __attribute__((packed)); };
785
786static __inline__ unsigned long ldq_u(unsigned long *p)
787{
788# if defined(__GNUC__)
789# if defined(__arch64__) || defined(__sparcv9)
790 const struct __una_u64 *ptr = (const struct __una_u64 *) p;
791# else
792 const struct __una_u32 *ptr = (const struct __una_u32 *) p;
793# endif
794 return ptr->x;
795# else
796 unsigned long ret;
797 memmove(&ret, p, sizeof(*p));
798 return ret;
799# endif
800}
801
802static __inline__ unsigned long ldl_u(unsigned int *p)
803{
804# if defined(__GNUC__)
805 const struct __una_u32 *ptr = (const struct __una_u32 *) p;
806 return ptr->x;
807# else
808 unsigned int ret;
809 memmove(&ret, p, sizeof(*p));
810 return ret;
811# endif
812}
813
814static __inline__ unsigned long ldw_u(unsigned short *p)
815{
816# if defined(__GNUC__)
817 const struct __una_u16 *ptr = (const struct __una_u16 *) p;
818 return ptr->x;
819# else
820 unsigned short ret;
821 memmove(&ret, p, sizeof(*p));
822 return ret;
823# endif
824}
825
826static __inline__ void stq_u(unsigned long val, unsigned long *p)
827{
828# if defined(__GNUC__)
829# if defined(__arch64__) || defined(__sparcv9)
830 struct __una_u64 *ptr = (struct __una_u64 *) p;
831# else
832 struct __una_u32 *ptr = (struct __una_u32 *) p;
833# endif
834 ptr->x = val;
835# else
836 unsigned long tmp = val;
837 memmove(p, &tmp, sizeof(*p));
838# endif
839}
840
841static __inline__ void stl_u(unsigned long val, unsigned int *p)
842{
843# if defined(__GNUC__)
844 struct __una_u32 *ptr = (struct __una_u32 *) p;
845 ptr->x = val;
846# else
847 unsigned int tmp = val;
848 memmove(p, &tmp, sizeof(*p));
849# endif
850}
851
852static __inline__ void stw_u(unsigned long val, unsigned short *p)
853{
854# if defined(__GNUC__)
855 struct __una_u16 *ptr = (struct __una_u16 *) p;
856 ptr->x = val;
857# else
858 unsigned short tmp = val;
859 memmove(p, &tmp, sizeof(*p));
860# endif
861}
862
863# define mem_barrier() /* XXX: nop for now */
864# define write_mem_barrier() /* XXX: nop for now */
865
866# elif defined(__mips__) || (defined(__arm32__) && !defined(__linux__))
867# ifdef __arm32__
868# define PORT_SIZE long
869# else
870# define PORT_SIZE short
871# endif
872
873unsigned int IOPortBase; /* Memory mapped I/O port area */
874
875static __inline__ void
876outb(unsigned PORT_SIZE port, unsigned char val)
877{
878 *(volatile unsigned char*)(((unsigned PORT_SIZE)(port))+IOPortBase) = val;
879}
880
881static __inline__ void
882outw(unsigned PORT_SIZE port, unsigned short val)
883{
884 *(volatile unsigned short*)(((unsigned PORT_SIZE)(port))+IOPortBase) = val;
885}
886
887static __inline__ void
888outl(unsigned PORT_SIZE port, unsigned int val)
889{
890 *(volatile unsigned int*)(((unsigned PORT_SIZE)(port))+IOPortBase) = val;
891}
892
893static __inline__ unsigned int
894inb(unsigned PORT_SIZE port)
895{
896 return *(volatile unsigned char*)(((unsigned PORT_SIZE)(port))+IOPortBase);
897}
898
899static __inline__ unsigned int
900inw(unsigned PORT_SIZE port)
901{
902 return *(volatile unsigned short*)(((unsigned PORT_SIZE)(port))+IOPortBase);
903}
904
905static __inline__ unsigned int
906inl(unsigned PORT_SIZE port)
907{
908 return *(volatile unsigned int*)(((unsigned PORT_SIZE)(port))+IOPortBase);
909}
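/*
 * Usage sketch (editor's addition, hypothetical): on these platforms a
 * "port" is just an offset from IOPortBase, so the bus support code must
 * map the I/O aperture (e.g. with mmap() from <sys/mman.h>) and set
 * IOPortBase before any inX/outX is used:
 */
#if 0
    void *io_va = mmap(NULL, 0x10000, PROT_READ | PROT_WRITE, MAP_SHARED,
                       devmem_fd, io_phys_base);   /* hypothetical mapping */
    IOPortBase = (unsigned int) (unsigned long) io_va;
    outb(0x3C2, 0x67);            /* becomes a store to IOPortBase + 0x3C2 */
#endif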
910
911
912# if defined(__mips__)
913static __inline__ unsigned long ldq_u(unsigned long * r11)
914{
915 unsigned long r1;
916 __asm__("lwr %0,%2\n\t"
917 "lwl %0,%3\n\t"
918 :"=&r" (r1)
919 :"r" (r11),
920 "m" (*r11),
921 "m" (*(unsigned long *)(3+(char *) r11)));
922 return r1;
923}
924
925static __inline__ unsigned long ldl_u(unsigned int * r11)
926{
927 unsigned long r1;
928 __asm__("lwr %0,%2\n\t"
929 "lwl %0,%3\n\t"
930 :"=&r" (r1)
931 :"r" (r11),
932 "m" (*r11),
933 "m" (*(unsigned long *)(3+(char *) r11)));
934 return r1;
935}
936
937static __inline__ unsigned long ldw_u(unsigned short * r11)
938{
939 unsigned long r1;
940 __asm__("lwr %0,%2\n\t"
941 "lwl %0,%3\n\t"
942 :"=&r" (r1)
943 :"r" (r11),
944 "m" (*r11),
945 "m" (*(unsigned long *)(1+(char *) r11)));
946 return r1;
947}
948
949# ifdef linux /* don't mess with other OSs */
950
951/*
952 * EGCS 1.1 knows about arbitrary unaligned loads (and we don't support older
953 * versions anyway). Define some packed structures to talk about such things
954 * with.
955 */
956
957struct __una_u32 { unsigned int x __attribute__((packed)); };
958struct __una_u16 { unsigned short x __attribute__((packed)); };
959
960static __inline__ void stw_u(unsigned long val, unsigned short *p)
961{
962 struct __una_u16 *ptr = (struct __una_u16 *) p;
963 ptr->x = val;
964}
965
966static __inline__ void stl_u(unsigned long val, unsigned int *p)
967{
968 struct __una_u32 *ptr = (struct __una_u32 *) p;
969 ptr->x = val;
970}
971
972# if X_BYTE_ORDER == X_BIG_ENDIAN
973static __inline__ unsigned int
974xf86ReadMmio32Be(__volatile__ void *base, const unsigned long offset)
975{
976 unsigned long addr = ((unsigned long)base) + offset;
977 unsigned int ret;
978
979 __asm__ __volatile__("lw %0, 0(%1)"
980 : "=r" (ret)
981 : "r" (addr));
982 return ret;
983}
984
985static __inline__ void
986xf86WriteMmio32Be(__volatile__ void *base, const unsigned long offset,
987 const unsigned int val)
988{
989 unsigned long addr = ((unsigned long)base) + offset;
990
991 __asm__ __volatile__("sw %0, 0(%1)"
992 : /* No outputs */
993 : "r" (val), "r" (addr));
994}
995# endif
996
997# define mem_barrier() \
998 __asm__ __volatile__( \
999 "# prevent instructions being moved around\n\t" \
1000 ".set\tnoreorder\n\t" \
1001 "# 8 nops to fool the R4400 pipeline\n\t" \
1002 "nop;nop;nop;nop;nop;nop;nop;nop\n\t" \
1003 ".set\treorder" \
1004 : /* no output */ \
1005 : /* no input */ \
1006 : "memory")
1007# define write_mem_barrier() mem_barrier()
1008
1009# else /* !linux */
1010
1011# define stq_u(v,p) stl_u(v,p)
1012# define stl_u(v,p) (*(unsigned char *)(p)) = (v); \
1013 (*(unsigned char *)(p)+1) = ((v) >> 8); \
1014 (*(unsigned char *)(p)+2) = ((v) >> 16); \
1015 (*(unsigned char *)(p)+3) = ((v) >> 24)
1016
1017# define stw_u(v,p) (*(unsigned char *)(p)) = (v); \
1018 (*(unsigned char *)(p)+1) = ((v) >> 8)
1019
1020# define mem_barrier() /* NOP */
1021# endif /* !linux */
1022# endif /* __mips__ */
1023
1024# if defined(__arm32__)
1025# define ldq_u(p) (*((unsigned long *)(p)))
1026# define ldl_u(p) (*((unsigned int *)(p)))
1027# define ldw_u(p) (*((unsigned short *)(p)))
1028# define stq_u(v,p) (*(unsigned long *)(p)) = (v)
1029# define stl_u(v,p) (*(unsigned int *)(p)) = (v)
1030# define stw_u(v,p) (*(unsigned short *)(p)) = (v)
1031# define mem_barrier() /* NOP */
1032# define write_mem_barrier() /* NOP */
1033# endif /* __arm32__ */
1034
1035# elif (defined(linux) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__FreeBSD__)) && defined(__powerpc__)
1036
1037# ifndef MAP_FAILED
1038# define MAP_FAILED ((void *)-1)
1039# endif
1040
1041extern volatile unsigned char *ioBase;
1042
1043#if defined(linux) && defined(__powerpc64__)
1044# include <linux/version.h>
1045# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
1046# include <asm/memory.h>
1047# endif
1048#endif /* defined(linux) && defined(__powerpc64__) */
1049#ifndef eieio /* We deal with arch-specific eieio() routines above... */
1050# define eieio() __asm__ __volatile__ ("eieio" ::: "memory")
1051#endif /* eieio */
1052
1053static __inline__ unsigned char
1054xf86ReadMmio8(__volatile__ void *base, const unsigned long offset)
1055{
1056 register unsigned char val;
1057 __asm__ __volatile__(
1058 "lbzx %0,%1,%2\n\t"
1059 "eieio"
1060 : "=r" (val)
1061 : "b" (base), "r" (offset),
1062 "m" (*((volatile unsigned char *)base+offset)));
1063 return val;
1064}
1065
1066static __inline__ unsigned short
1067xf86ReadMmio16Be(__volatile__ void *base, const unsigned long offset)
1068{
1069 register unsigned short val;
1070 __asm__ __volatile__(
1071 "lhzx %0,%1,%2\n\t"
1072 "eieio"
1073 : "=r" (val)
1074 : "b" (base), "r" (offset),
1075 "m" (*((volatile unsigned char *)base+offset)));
1076 return val;
1077}
1078
1079static __inline__ unsigned short
1080xf86ReadMmio16Le(__volatile__ void *base, const unsigned long offset)
1081{
1082 register unsigned short val;
1083 __asm__ __volatile__(
1084 "lhbrx %0,%1,%2\n\t"
1085 "eieio"
1086 : "=r" (val)
1087 : "b" (base), "r" (offset),
1088 "m" (*((volatile unsigned char *)base+offset)));
1089 return val;
1090}
1091
1092static __inline__ unsigned int
1093xf86ReadMmio32Be(__volatile__ void *base, const unsigned long offset)
1094{
1095 register unsigned int val;
1096 __asm__ __volatile__(
1097 "lwzx %0,%1,%2\n\t"
1098 "eieio"
1099 : "=r" (val)
1100 : "b" (base), "r" (offset),
1101 "m" (*((volatile unsigned char *)base+offset)));
1102 return val;
1103}
1104
1105static __inline__ unsigned int
1106xf86ReadMmio32Le(__volatile__ void *base, const unsigned long offset)
1107{
1108 register unsigned int val;
1109 __asm__ __volatile__(
1110 "lwbrx %0,%1,%2\n\t"
1111 "eieio"
1112 : "=r" (val)
1113 : "b" (base), "r" (offset),
1114 "m" (*((volatile unsigned char *)base+offset)));
1115 return val;
1116}
1117
1118static __inline__ void
1119xf86WriteMmioNB8(__volatile__ void *base, const unsigned long offset,
1120 const unsigned char val)
1121{
1122 __asm__ __volatile__(
1123 "stbx %1,%2,%3\n\t"
1124 : "=m" (*((volatile unsigned char *)base+offset))
1125 : "r" (val), "b" (base), "r" (offset));
1126}
1127
1128static __inline__ void
1129xf86WriteMmioNB16Le(__volatile__ void *base, const unsigned long offset,
1130 const unsigned short val)
1131{
1132 __asm__ __volatile__(
1133 "sthbrx %1,%2,%3\n\t"
1134 : "=m" (*((volatile unsigned char *)base+offset))
1135 : "r" (val), "b" (base), "r" (offset));
1136}
1137
1138static __inline__ void
1139xf86WriteMmioNB16Be(__volatile__ void *base, const unsigned long offset,
1140 const unsigned short val)
1141{
1142 __asm__ __volatile__(
1143 "sthx %1,%2,%3\n\t"
1144 : "=m" (*((volatile unsigned char *)base+offset))
1145 : "r" (val), "b" (base), "r" (offset));
1146}
1147
1148static __inline__ void
1149xf86WriteMmioNB32Le(__volatile__ void *base, const unsigned long offset,
1150 const unsigned int val)
1151{
1152 __asm__ __volatile__(
1153 "stwbrx %1,%2,%3\n\t"
1154 : "=m" (*((volatile unsigned char *)base+offset))
1155 : "r" (val), "b" (base), "r" (offset));
1156}
1157
1158static __inline__ void
1159xf86WriteMmioNB32Be(__volatile__ void *base, const unsigned long offset,
1160 const unsigned int val)
1161{
1162 __asm__ __volatile__(
1163 "stwx %1,%2,%3\n\t"
1164 : "=m" (*((volatile unsigned char *)base+offset))
1165 : "r" (val), "b" (base), "r" (offset));
1166}
1167
1168static __inline__ void
1169xf86WriteMmio8(__volatile__ void *base, const unsigned long offset,
1170 const unsigned char val)
1171{
1172 xf86WriteMmioNB8(base, offset, val);
1173 eieio();
1174}
1175
1176static __inline__ void
1177xf86WriteMmio16Le(__volatile__ void *base, const unsigned long offset,
1178 const unsigned short val)
1179{
1180 xf86WriteMmioNB16Le(base, offset, val);
1181 eieio();
1182}
1183
1184static __inline__ void
1185xf86WriteMmio16Be(__volatile__ void *base, const unsigned long offset,
1186 const unsigned short val)
1187{
1188 xf86WriteMmioNB16Be(base, offset, val);
1189 eieio();
1190}
1191
1192static __inline__ void
1193xf86WriteMmio32Le(__volatile__ void *base, const unsigned long offset,
1194 const unsigned int val)
1195{
1196 xf86WriteMmioNB32Le(base, offset, val);
1197 eieio();
1198}
1199
1200static __inline__ void
1201xf86WriteMmio32Be(__volatile__ void *base, const unsigned long offset,
1202 const unsigned int val)
1203{
1204 xf86WriteMmioNB32Be(base, offset, val);
1205 eieio();
1206}
1207
1208
1209static __inline__ void
1210outb(unsigned short port, unsigned char value)
1211{
1212 if(ioBase == MAP_FAILED) return;
1213 xf86WriteMmio8((void *)ioBase, port, value);
1214}
1215
1216static __inline__ void
1217outw(unsigned short port, unsigned short value)
1218{
1219 if(ioBase == MAP_FAILED) return;
1220 xf86WriteMmio16Le((void *)ioBase, port, value);
1221}
1222
1223static __inline__ void
1224outl(unsigned short port, unsigned int value)
1225{
1226 if(ioBase == MAP_FAILED) return;
1227 xf86WriteMmio32Le((void *)ioBase, port, value);
1228}
1229
1230static __inline__ unsigned int
1231inb(unsigned short port)
1232{
1233 if(ioBase == MAP_FAILED) return 0;
1234 return xf86ReadMmio8((void *)ioBase, port);
1235}
1236
1237static __inline__ unsigned int
1238inw(unsigned short port)
1239{
1240 if(ioBase == MAP_FAILED) return 0;
1241 return xf86ReadMmio16Le((void *)ioBase, port);
1242}
1243
1244static __inline__ unsigned int
1245inl(unsigned short port)
1246{
1247 if(ioBase == MAP_FAILED) return 0;
1248 return xf86ReadMmio32Le((void *)ioBase, port);
1249}
1250
1251# define ldq_u(p) ldl_u(p)
1252# define ldl_u(p) ((*(unsigned char *)(p)) | \
1253 (*((unsigned char *)(p)+1)<<8) | \
1254 (*((unsigned char *)(p)+2)<<16) | \
1255 (*((unsigned char *)(p)+3)<<24))
1256# define ldw_u(p) ((*(unsigned char *)(p)) | \
1257 (*((unsigned char *)(p)+1)<<8))
1258
1259# define stq_u(v,p) stl_u(v,p)
1260# define stl_u(v,p) (*(unsigned char *)(p)) = (v); \
1261 (*((unsigned char *)(p)+1)) = ((v) >> 8); \
1262 (*((unsigned char *)(p)+2)) = ((v) >> 16); \
1263 (*((unsigned char *)(p)+3)) = ((v) >> 24)
1264# define stw_u(v,p) (*(unsigned char *)(p)) = (v); \
1265 (*((unsigned char *)(p)+1)) = ((v) >> 8)
1266
1267# define mem_barrier() eieio()
1268# define write_mem_barrier() eieio()
1269
1270#elif defined(__arm__) && defined(__linux__)
1271
1272#define ldq_u(p) (*((unsigned long *)(p)))
1273#define ldl_u(p) (*((unsigned int *)(p)))
1274#define ldw_u(p) (*((unsigned short *)(p)))
1275#define stq_u(v,p) (*(unsigned long *)(p)) = (v)
1276#define stl_u(v,p) (*(unsigned int *)(p)) = (v)
1277#define stw_u(v,p) (*(unsigned short *)(p)) = (v)
1278#define mem_barrier() /* NOP */
1279#define write_mem_barrier() /* NOP */
1280
1281/* for Linux on ARM, we use the LIBC inx/outx routines */
1282/* note that the appropriate setup via "ioperm" needs to be done */
1283/* *before* any inx/outx is done. */
1284
1285#include <sys/io.h>
1286
1287static __inline__ void
1288xf_outb(unsigned short port, unsigned char val)
1289{
1290 outb(val, port);
1291}
1292
1293static __inline__ void
1294xf_outw(unsigned short port, unsigned short val)
1295{
1296 outw(val, port);
1297}
1298
1299static __inline__ void
1300xf_outl(unsigned short port, unsigned int val)
1301{
1302 outl(val, port);
1303}
1304
1305#define outb xf_outb
1306#define outw xf_outw
1307#define outl xf_outl
1308
1309#define arm_flush_cache(addr) \
1310do { \
1311 register unsigned long _beg __asm ("a1") = (unsigned long) (addr); \
1312 register unsigned long _end __asm ("a2") = (unsigned long) (addr) + 4;\
1313 register unsigned long _flg __asm ("a3") = 0; \
1314 __asm __volatile ("swi 0x9f0002 @ sys_cacheflush" \
1315 : "=r" (_beg) \
1316 : "0" (_beg), "r" (_end), "r" (_flg)); \
1317} while (0)
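/*
 * Usage sketch (editor's addition, hypothetical): arm_flush_cache() flushes
 * a single word through the sys_cacheflush syscall, so code that patches
 * instructions calls it once per modified word before executing them:
 */
#if 0
    ((unsigned int *) code)[0] = new_instruction;
    arm_flush_cache(code);        /* make the store visible to the I-cache */
#endif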
1318
1319# else /* ix86 */
1320
1321# define ldq_u(p) (*((unsigned long *)(p)))
1322# define ldl_u(p) (*((unsigned int *)(p)))
1323# define ldw_u(p) (*((unsigned short *)(p)))
1324# define stq_u(v,p) (*(unsigned long *)(p)) = (v)
1325# define stl_u(v,p) (*(unsigned int *)(p)) = (v)
1326# define stw_u(v,p) (*(unsigned short *)(p)) = (v)
1327# define mem_barrier() /* NOP */
1328# define write_mem_barrier() /* NOP */
1329
1330# if !defined(__SUNPRO_C)
1331# if !defined(FAKEIT) && !defined(__mc68000__) && !defined(__arm__) && !defined(__sh__) && !defined(__hppa__) && !defined(__s390__) && !defined(__m32r__)
1332# ifdef GCCUSESGAS
1333
1334/*
1335 * If gcc uses gas rather than the native assembler, the syntax of these
1336 * inlines has to be different. DHD
1337 */
1338
1339static __inline__ void
1340outb(unsigned short port, unsigned char val)
1341{
1342 __asm__ __volatile__("outb %0,%1" : :"a" (val), "d" (port));
1343}
1344
1345
1346static __inline__ void
1347outw(unsigned short port, unsigned short val)
1348{
1349 __asm__ __volatile__("outw %0,%1" : :"a" (val), "d" (port));
1350}
1351
1352static __inline__ void
1353outl(unsigned short port, unsigned int val)
1354{
1355 __asm__ __volatile__("outl %0,%1" : :"a" (val), "d" (port));
1356}
1357
1358static __inline__ unsigned int
1359inb(unsigned short port)
1360{
1361 unsigned char ret;
1362 __asm__ __volatile__("inb %1,%0" :
1363 "=a" (ret) :
1364 "d" (port));
1365 return ret;
1366}
1367
1368static __inline__ unsigned int
1369inw(unsigned short port)
1370{
1371 unsigned short ret;
1372 __asm__ __volatile__("inw %1,%0" :
1373 "=a" (ret) :
1374 "d" (port));
1375 return ret;
1376}
1377
1378static __inline__ unsigned int
1379inl(unsigned short port)
1380{
1381 unsigned int ret;
1382 __asm__ __volatile__("inl %1,%0" :
1383 "=a" (ret) :
1384 "d" (port));
1385 return ret;
1386}
1387
1388# else /* GCCUSESGAS */
1389
1390static __inline__ void
1391outb(unsigned short port, unsigned char val)
1392{
1393 __asm__ __volatile__("out%B0 (%1)" : :"a" (val), "d" (port));
1394}
1395
1396static __inline__ void
1397outw(unsigned short port, unsigned short val)
1398{
1399 __asm__ __volatile__("out%W0 (%1)" : :"a" (val), "d" (port));
1400}
1401
1402static __inline__ void
1403outl(unsigned short port, unsigned int val)
1404{
1405 __asm__ __volatile__("out%L0 (%1)" : :"a" (val), "d" (port));
1406}
1407
1408static __inline__ unsigned int
1409inb(unsigned short port)
1410{
1411 unsigned char ret;
1412 __asm__ __volatile__("in%B0 (%1)" :
1413 "=a" (ret) :
1414 "d" (port));
1415 return ret;
1416}
1417
1418static __inline__ unsigned int
1419inw(unsigned short port)
1420{
1421 unsigned short ret;
1422 __asm__ __volatile__("in%W0 (%1)" :
1423 "=a" (ret) :
1424 "d" (port));
1425 return ret;
1426}
1427
1428static __inline__ unsigned int
1429inl(unsigned short port)
1430{
1431 unsigned int ret;
1432 __asm__ __volatile__("in%L0 (%1)" :
1433 "=a" (ret) :
1434 "d" (port));
1435 return ret;
1436}
1437
1438# endif /* GCCUSESGAS */
1439
1440# else /* !defined(FAKEIT) && !defined(__mc68000__) && !defined(__arm__) && !defined(__sh__) && !defined(__hppa__) && !defined(__m32r__) */
1441
1442static __inline__ void
1443outb(unsigned short port, unsigned char val)
1444{
1445}
1446
1447static __inline__ void
1448outw(unsigned short port, unsigned short val)
1449{
1450}
1451
1452static __inline__ void
1453outl(unsigned short port, unsigned int val)
1454{
1455}
1456
1457static __inline__ unsigned int
1458inb(unsigned short port)
1459{
1460 return 0;
1461}
1462
1463static __inline__ unsigned int
1464inw(unsigned short port)
1465{
1466 return 0;
1467}
1468
1469static __inline__ unsigned int
1470inl(unsigned short port)
1471{
1472 return 0;
1473}
1474
1475# endif /* FAKEIT */
1476# endif /* __SUNPRO_C */
1477
1478# endif /* ix86 */
1479
1480# else /* !GNUC */
1481# if defined(__STDC__) && (__STDC__ == 1)
1482# ifndef asm
1483# define asm __asm
1484# endif
1485# endif
1486# ifndef SCO325
1487# if defined(__UNIXWARE__)
1488# /* avoid including <sys/types.h> for <sys/inline.h> on UnixWare */
1489# define ushort unsigned short
1490# define ushort_t unsigned short
1491# define ulong unsigned long
1492# define ulong_t unsigned long
1493# define uint_t unsigned int
1494# define uchar_t unsigned char
1495# endif /* __UNIXWARE__ */
1496# if !defined(__SUNPRO_C)
1497# include <sys/inline.h>
1498# endif
1499# else
1500# include "scoasm.h"
1501# endif
1502# if !defined(__HIGHC__) && !defined(__SUNPRO_C) || \
1503 defined(__USLC__)
1504# pragma asm partial_optimization outl
1505# pragma asm partial_optimization outw
1506# pragma asm partial_optimization outb
1507# pragma asm partial_optimization inl
1508# pragma asm partial_optimization inw
1509# pragma asm partial_optimization inb
1510# endif
1511# define ldq_u(p) (*((unsigned long *)(p)))
1512# define ldl_u(p) (*((unsigned int *)(p)))
1513# define ldw_u(p) (*((unsigned short *)(p)))
1514# define stq_u(v,p) (*(unsigned long *)(p)) = (v)
1515# define stl_u(v,p) (*(unsigned int *)(p)) = (v)
1516# define stw_u(v,p) (*(unsigned short *)(p)) = (v)
1517# define mem_barrier() /* NOP */
1518# define write_mem_barrier() /* NOP */
1519# endif /* __GNUC__ */
1520
1521# endif /* NO_INLINE */
1522
1523# ifdef __alpha__
1524/* entry points for Mmio memory access routines */
1525extern int (*xf86ReadMmio8)(void *, unsigned long);
1526extern int (*xf86ReadMmio16)(void *, unsigned long);
1527# ifndef STANDALONE_MMIO
1528extern int (*xf86ReadMmio32)(void *, unsigned long);
1529# else
1530/* Some DRI 3D drivers need MMIO_IN32. */
1531static __inline__ int
1532xf86ReadMmio32(void *Base, unsigned long Offset)
1533{
1534 __asm__ __volatile__("mb" : : : "memory");
1535 return *(volatile unsigned int*)((unsigned long)Base+(Offset));
1536}
1537# endif
1538extern void (*xf86WriteMmio8)(int, void *, unsigned long);
1539extern void (*xf86WriteMmio16)(int, void *, unsigned long);
1540extern void (*xf86WriteMmio32)(int, void *, unsigned long);
1541extern void (*xf86WriteMmioNB8)(int, void *, unsigned long);
1542extern void (*xf86WriteMmioNB16)(int, void *, unsigned long);
1543extern void (*xf86WriteMmioNB32)(int, void *, unsigned long);
1544extern void xf86SlowBCopyFromBus(unsigned char *, unsigned char *, int);
1545extern void xf86SlowBCopyToBus(unsigned char *, unsigned char *, int);
1546
1547/* Some macros to hide the system dependencies for MMIO accesses */
1548/* Changed to kill noise generated by gcc's -Wcast-align */
1549# define MMIO_IN8(base, offset) (*xf86ReadMmio8)(base, offset)
1550# define MMIO_IN16(base, offset) (*xf86ReadMmio16)(base, offset)
1551# ifndef STANDALONE_MMIO
1552# define MMIO_IN32(base, offset) (*xf86ReadMmio32)(base, offset)
1553# else
1554# define MMIO_IN32(base, offset) xf86ReadMmio32(base, offset)
1555# endif
1556
1557# define MMIO_OUT32(base, offset, val) \
1558 do { \
1559 write_mem_barrier(); \
1560 *(volatile CARD32 *)(void *)(((CARD8*)(base)) + (offset)) = (val); \
1561 } while (0)
1562# define MMIO_ONB32(base, offset, val) \
1563 *(volatile CARD32 *)(void *)(((CARD8*)(base)) + (offset)) = (val)
1564
1565# define MMIO_OUT8(base, offset, val) \
1566 (*xf86WriteMmio8)((CARD8)(val), base, offset)
1567# define MMIO_OUT16(base, offset, val) \
1568 (*xf86WriteMmio16)((CARD16)(val), base, offset)
1569# define MMIO_ONB8(base, offset, val) \
1570 (*xf86WriteMmioNB8)((CARD8)(val), base, offset)
1571# define MMIO_ONB16(base, offset, val) \
1572 (*xf86WriteMmioNB16)((CARD16)(val), base, offset)
1573# define MMIO_MOVE32(base, offset, val) \
1574 MMIO_OUT32(base, offset, val)
1575
1576# elif defined(__powerpc__)
1577 /*
1578 * we provide byteswapping and no byteswapping functions here
1579 * with byteswapping as default,
1580 * drivers that don't need byteswapping should define PPC_MMIO_IS_BE
1581 */
1582# define MMIO_IN8(base, offset) xf86ReadMmio8(base, offset)
1583# define MMIO_OUT8(base, offset, val) \
1584 xf86WriteMmio8(base, offset, (CARD8)(val))
1585# define MMIO_ONB8(base, offset, val) \
1586 xf86WriteMmioNB8(base, offset, (CARD8)(val))
1587
1588# if defined(PPC_MMIO_IS_BE) /* No byteswapping */
1589# define MMIO_IN16(base, offset) xf86ReadMmio16Be(base, offset)
1590# define MMIO_IN32(base, offset) xf86ReadMmio32Be(base, offset)
1591# define MMIO_OUT16(base, offset, val) \
1592 xf86WriteMmio16Be(base, offset, (CARD16)(val))
1593# define MMIO_OUT32(base, offset, val) \
1594 xf86WriteMmio32Be(base, offset, (CARD32)(val))
1595# define MMIO_ONB16(base, offset, val) \
1596 xf86WriteMmioNB16Be(base, offset, (CARD16)(val))
1597# define MMIO_ONB32(base, offset, val) \
1598 xf86WriteMmioNB32Be(base, offset, (CARD32)(val))
1599# else /* byteswapping is the default */
1600# define MMIO_IN16(base, offset) xf86ReadMmio16Le(base, offset)
1601# define MMIO_IN32(base, offset) xf86ReadMmio32Le(base, offset)
1602# define MMIO_OUT16(base, offset, val) \
1603 xf86WriteMmio16Le(base, offset, (CARD16)(val))
1604# define MMIO_OUT32(base, offset, val) \
1605 xf86WriteMmio32Le(base, offset, (CARD32)(val))
1606# define MMIO_ONB16(base, offset, val) \
1607 xf86WriteMmioNB16Le(base, offset, (CARD16)(val))
1608# define MMIO_ONB32(base, offset, val) \
1609 xf86WriteMmioNB32Le(base, offset, (CARD32)(val))
1610# endif
1611
1612# define MMIO_MOVE32(base, offset, val) \
1613 xf86WriteMmio32Be(base, offset, (CARD32)(val))
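/*
 * Usage sketch (editor's addition, hypothetical): a driver whose registers
 * are big-endian defines PPC_MMIO_IS_BE before this header is included, so
 * the MMIO_* macros above resolve to the non-swapping variants (the sparc
 * branch below offers the analogous SPARC_MMIO_IS_BE):
 */
#if 0
#define PPC_MMIO_IS_BE                   /* device registers are big-endian */
#include "compiler.h"
    CARD32 status = MMIO_IN32(mmio_base, HYPOTHETICAL_STATUS_REG);
#endif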
1614
1615static __inline__ void ppc_flush_icache(char *addr)
1616{
1617 __asm__ volatile (
1618 "dcbf 0,%0;"
1619 "sync;"
1620 "icbi 0,%0;"
1621 "sync;"
1622 "isync;"
1623 : : "r"(addr) : "memory");
1624}
1625
1626# elif defined(__sparc__) || defined(sparc) || defined(__sparc)
1627 /*
1628 * Like powerpc, we provide byteswapping and no byteswapping functions
1629 * here with byteswapping as default, drivers that don't need byteswapping
1630 * should define SPARC_MMIO_IS_BE (perhaps create a generic macro so that we
1631 * do not need to use PPC_MMIO_IS_BE and the sparc one in all the same places
1632 * of drivers?).
1633 */
1634# define MMIO_IN8(base, offset) xf86ReadMmio8(base, offset)
1635# define MMIO_OUT8(base, offset, val) \
1636 xf86WriteMmio8(base, offset, (CARD8)(val))
1637# define MMIO_ONB8(base, offset, val) \
1638 xf86WriteMmio8NB(base, offset, (CARD8)(val))
1639
1640# if defined(SPARC_MMIO_IS_BE) /* No byteswapping */
1641# define MMIO_IN16(base, offset) xf86ReadMmio16Be(base, offset)
1642# define MMIO_IN32(base, offset) xf86ReadMmio32Be(base, offset)
1643# define MMIO_OUT16(base, offset, val) \
1644 xf86WriteMmio16Be(base, offset, (CARD16)(val))
1645# define MMIO_OUT32(base, offset, val) \
1646 xf86WriteMmio32Be(base, offset, (CARD32)(val))
1647# define MMIO_ONB16(base, offset, val) \
1648 xf86WriteMmio16BeNB(base, offset, (CARD16)(val))
1649# define MMIO_ONB32(base, offset, val) \
1650 xf86WriteMmio32BeNB(base, offset, (CARD32)(val))
1651# else /* byteswapping is the default */
1652# define MMIO_IN16(base, offset) xf86ReadMmio16Le(base, offset)
1653# define MMIO_IN32(base, offset) xf86ReadMmio32Le(base, offset)
1654# define MMIO_OUT16(base, offset, val) \
1655 xf86WriteMmio16Le(base, offset, (CARD16)(val))
1656# define MMIO_OUT32(base, offset, val) \
1657 xf86WriteMmio32Le(base, offset, (CARD32)(val))
1658# define MMIO_ONB16(base, offset, val) \
1659 xf86WriteMmio16LeNB(base, offset, (CARD16)(val))
1660# define MMIO_ONB32(base, offset, val) \
1661 xf86WriteMmio32LeNB(base, offset, (CARD32)(val))
1662# endif
1663
1664# define MMIO_MOVE32(base, offset, val) \
1665 xf86WriteMmio32Be(base, offset, (CARD32)(val))
1666
1667# else /* !__alpha__ && !__powerpc__ && !__sparc__ */
1668
1669# define MMIO_IN8(base, offset) \
1670 *(volatile CARD8 *)(((CARD8*)(base)) + (offset))
1671# define MMIO_IN16(base, offset) \
1672 *(volatile CARD16 *)(void *)(((CARD8*)(base)) + (offset))
1673# define MMIO_IN32(base, offset) \
1674 *(volatile CARD32 *)(void *)(((CARD8*)(base)) + (offset))
1675# define MMIO_OUT8(base, offset, val) \
1676 *(volatile CARD8 *)(((CARD8*)(base)) + (offset)) = (val)
1677# define MMIO_OUT16(base, offset, val) \
1678 *(volatile CARD16 *)(void *)(((CARD8*)(base)) + (offset)) = (val)
1679# define MMIO_OUT32(base, offset, val) \
1680 *(volatile CARD32 *)(void *)(((CARD8*)(base)) + (offset)) = (val)
1681# define MMIO_ONB8(base, offset, val) MMIO_OUT8(base, offset, val)
1682# define MMIO_ONB16(base, offset, val) MMIO_OUT16(base, offset, val)
1683# define MMIO_ONB32(base, offset, val) MMIO_OUT32(base, offset, val)
1684
1685# define MMIO_MOVE32(base, offset, val) MMIO_OUT32(base, offset, val)
1686
1687# endif /* __alpha__ */
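/*
 * Usage sketch (editor's addition, hypothetical offsets): drivers use the
 * MMIO_* macros against a pointer to their mapped register aperture, so the
 * same source compiles on every architecture handled above:
 */
#if 0
    CARD32 fifo_free = MMIO_IN32(mmio_base, 0x0010);
    if (fifo_free != 0)
        MMIO_OUT32(mmio_base, 0x0014, command_word);
#endif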
1688
1689/*
1690 * With Intel, the version in os-support/misc/SlowBcopy.s is used.
1691 * This avoids port I/O during the copy (which causes problems with
1692 * some hardware).
1693 */
1694# ifdef __alpha__
1695# define slowbcopy_tobus(src,dst,count) xf86SlowBCopyToBus(src,dst,count)
1696# define slowbcopy_frombus(src,dst,count) xf86SlowBCopyFromBus(src,dst,count)
1697# else /* __alpha__ */
1698# define slowbcopy_tobus(src,dst,count) xf86SlowBcopy(src,dst,count)
1699# define slowbcopy_frombus(src,dst,count) xf86SlowBcopy(src,dst,count)
1700# endif /* __alpha__ */
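/*
 * Usage sketch (editor's addition, hypothetical): the slow copies are for
 * apertures that cannot take back-to-back accesses, e.g. pulling a scanline
 * out of a framebuffer BAR; note the (src, dst, count) argument order:
 */
#if 0
    slowbcopy_frombus(fb_base + line_offset, shadow_line, line_bytes);
#endif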
1701
1702#endif /* _COMPILER_H */