Fawkes API  Fawkes Development Version
mmx.h

/***************************************************************************
 *  mmx.h - MMX CPU extension support
 *
 *  Copyright 1997-2001 H. Dietz and R. Fisher (copied from FFmpeg)
 *
 ****************************************************************************/

/*  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU Library General Public License for more details.
 *
 *  Read the full text in the LICENSE.GPL file in the doc directory.
 */

#ifndef _FIREVISION_FVUTILS_CPU_MMX_H_
#define _FIREVISION_FVUTILS_CPU_MMX_H_

namespace firevision {

/// @cond MMX

/*
 * The type of a value that fits in an MMX register (note that long
 * long constant values MUST be suffixed by LL and unsigned long long
 * values by ULL, lest they be truncated by the compiler)
 */

typedef union {
	long long          q;     /* Quadword (64-bit) value            */
	unsigned long long uq;    /* Unsigned Quadword                  */
	int                d[2];  /* 2 Doubleword (32-bit) values       */
	unsigned int       ud[2]; /* 2 Unsigned Doubleword              */
	short              w[4];  /* 4 Word (16-bit) values             */
	unsigned short     uw[4]; /* 4 Unsigned Word                    */
	char               b[8];  /* 8 Byte (8-bit) values              */
	unsigned char      ub[8]; /* 8 Unsigned Byte                    */
	float              s[2];  /* 2 Single-precision (32-bit) values */
} mmx_t; /* On an 8-byte (64-bit) boundary */
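
/*
 * Example (added for illustration, not part of the original header):
 * loading an mmx_t with a 64-bit constant.  Note the ULL suffix the
 * comment above requires; the mask value and register are arbitrary.
 *
 *   mmx_t mask;
 *   mask.uq = 0x00ff00ff00ff00ffULL;  // select every other byte
 *   pand_m2r(mask, mm0);              // use it as a memory operand (macro defined below)
 */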

#define mmx_i2r(op, imm, reg) \
	__asm__ __volatile__(#op " %0, %%" #reg \
	                     : /* nothing */    \
	                     : "i"(imm))

#define mmx_m2r(op, mem, reg) \
	__asm__ __volatile__(#op " %0, %%" #reg \
	                     : /* nothing */    \
	                     : "m"(mem))

#define mmx_r2m(op, reg, mem) \
	__asm__ __volatile__(#op " %%" #reg ", %0" : "=m"(mem) : /* nothing */)

#define mmx_r2r(op, regs, regd) __asm__ __volatile__(#op " %" #regs ", %" #regd)

#define emms() __asm__ __volatile__("emms")

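/*
 * Clarifying note (not part of the original header): the wrappers above
 * paste the mnemonic and register name into an inline-asm statement, e.g.
 *
 *   mmx_m2r(paddd, v, mm0)  expands to  __asm__ __volatile__("paddd %0, %%mm0" : : "m"(v));
 *   mmx_r2m(movq, mm0, v)   expands to  __asm__ __volatile__("movq %%mm0, %0" : "=m"(v) : );
 *
 * The instruction-specific macros below are thin aliases built on them.
 */
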
#define movd_m2r(var, reg) mmx_m2r(movd, var, reg)
#define movd_r2m(reg, var) mmx_r2m(movd, reg, var)
#define movd_r2r(regs, regd) mmx_r2r(movd, regs, regd)

#define movq_m2r(var, reg) mmx_m2r(movq, var, reg)
#define movq_r2m(reg, var) mmx_r2m(movq, reg, var)
#define movq_r2r(regs, regd) mmx_r2r(movq, regs, regd)

#define packssdw_m2r(var, reg) mmx_m2r(packssdw, var, reg)
#define packssdw_r2r(regs, regd) mmx_r2r(packssdw, regs, regd)
#define packsswb_m2r(var, reg) mmx_m2r(packsswb, var, reg)
#define packsswb_r2r(regs, regd) mmx_r2r(packsswb, regs, regd)

#define packuswb_m2r(var, reg) mmx_m2r(packuswb, var, reg)
#define packuswb_r2r(regs, regd) mmx_r2r(packuswb, regs, regd)

#define paddb_m2r(var, reg) mmx_m2r(paddb, var, reg)
#define paddb_r2r(regs, regd) mmx_r2r(paddb, regs, regd)
#define paddd_m2r(var, reg) mmx_m2r(paddd, var, reg)
#define paddd_r2r(regs, regd) mmx_r2r(paddd, regs, regd)
#define paddw_m2r(var, reg) mmx_m2r(paddw, var, reg)
#define paddw_r2r(regs, regd) mmx_r2r(paddw, regs, regd)

#define paddsb_m2r(var, reg) mmx_m2r(paddsb, var, reg)
#define paddsb_r2r(regs, regd) mmx_r2r(paddsb, regs, regd)
#define paddsw_m2r(var, reg) mmx_m2r(paddsw, var, reg)
#define paddsw_r2r(regs, regd) mmx_r2r(paddsw, regs, regd)

#define paddusb_m2r(var, reg) mmx_m2r(paddusb, var, reg)
#define paddusb_r2r(regs, regd) mmx_r2r(paddusb, regs, regd)
#define paddusw_m2r(var, reg) mmx_m2r(paddusw, var, reg)
#define paddusw_r2r(regs, regd) mmx_r2r(paddusw, regs, regd)

#define pand_m2r(var, reg) mmx_m2r(pand, var, reg)
#define pand_r2r(regs, regd) mmx_r2r(pand, regs, regd)

#define pandn_m2r(var, reg) mmx_m2r(pandn, var, reg)
#define pandn_r2r(regs, regd) mmx_r2r(pandn, regs, regd)

#define pcmpeqb_m2r(var, reg) mmx_m2r(pcmpeqb, var, reg)
#define pcmpeqb_r2r(regs, regd) mmx_r2r(pcmpeqb, regs, regd)
#define pcmpeqd_m2r(var, reg) mmx_m2r(pcmpeqd, var, reg)
#define pcmpeqd_r2r(regs, regd) mmx_r2r(pcmpeqd, regs, regd)
#define pcmpeqw_m2r(var, reg) mmx_m2r(pcmpeqw, var, reg)
#define pcmpeqw_r2r(regs, regd) mmx_r2r(pcmpeqw, regs, regd)

#define pcmpgtb_m2r(var, reg) mmx_m2r(pcmpgtb, var, reg)
#define pcmpgtb_r2r(regs, regd) mmx_r2r(pcmpgtb, regs, regd)
#define pcmpgtd_m2r(var, reg) mmx_m2r(pcmpgtd, var, reg)
#define pcmpgtd_r2r(regs, regd) mmx_r2r(pcmpgtd, regs, regd)
#define pcmpgtw_m2r(var, reg) mmx_m2r(pcmpgtw, var, reg)
#define pcmpgtw_r2r(regs, regd) mmx_r2r(pcmpgtw, regs, regd)

#define pmaddwd_m2r(var, reg) mmx_m2r(pmaddwd, var, reg)
#define pmaddwd_r2r(regs, regd) mmx_r2r(pmaddwd, regs, regd)

#define pmulhw_m2r(var, reg) mmx_m2r(pmulhw, var, reg)
#define pmulhw_r2r(regs, regd) mmx_r2r(pmulhw, regs, regd)

#define pmullw_m2r(var, reg) mmx_m2r(pmullw, var, reg)
#define pmullw_r2r(regs, regd) mmx_r2r(pmullw, regs, regd)

#define por_m2r(var, reg) mmx_m2r(por, var, reg)
#define por_r2r(regs, regd) mmx_r2r(por, regs, regd)

#define pslld_i2r(imm, reg) mmx_i2r(pslld, imm, reg)
#define pslld_m2r(var, reg) mmx_m2r(pslld, var, reg)
#define pslld_r2r(regs, regd) mmx_r2r(pslld, regs, regd)
#define psllq_i2r(imm, reg) mmx_i2r(psllq, imm, reg)
#define psllq_m2r(var, reg) mmx_m2r(psllq, var, reg)
#define psllq_r2r(regs, regd) mmx_r2r(psllq, regs, regd)
#define psllw_i2r(imm, reg) mmx_i2r(psllw, imm, reg)
#define psllw_m2r(var, reg) mmx_m2r(psllw, var, reg)
#define psllw_r2r(regs, regd) mmx_r2r(psllw, regs, regd)

#define psrad_i2r(imm, reg) mmx_i2r(psrad, imm, reg)
#define psrad_m2r(var, reg) mmx_m2r(psrad, var, reg)
#define psrad_r2r(regs, regd) mmx_r2r(psrad, regs, regd)
#define psraw_i2r(imm, reg) mmx_i2r(psraw, imm, reg)
#define psraw_m2r(var, reg) mmx_m2r(psraw, var, reg)
#define psraw_r2r(regs, regd) mmx_r2r(psraw, regs, regd)

#define psrld_i2r(imm, reg) mmx_i2r(psrld, imm, reg)
#define psrld_m2r(var, reg) mmx_m2r(psrld, var, reg)
#define psrld_r2r(regs, regd) mmx_r2r(psrld, regs, regd)
#define psrlq_i2r(imm, reg) mmx_i2r(psrlq, imm, reg)
#define psrlq_m2r(var, reg) mmx_m2r(psrlq, var, reg)
#define psrlq_r2r(regs, regd) mmx_r2r(psrlq, regs, regd)
#define psrlw_i2r(imm, reg) mmx_i2r(psrlw, imm, reg)
#define psrlw_m2r(var, reg) mmx_m2r(psrlw, var, reg)
#define psrlw_r2r(regs, regd) mmx_r2r(psrlw, regs, regd)

#define psubb_m2r(var, reg) mmx_m2r(psubb, var, reg)
#define psubb_r2r(regs, regd) mmx_r2r(psubb, regs, regd)
#define psubd_m2r(var, reg) mmx_m2r(psubd, var, reg)
#define psubd_r2r(regs, regd) mmx_r2r(psubd, regs, regd)
#define psubw_m2r(var, reg) mmx_m2r(psubw, var, reg)
#define psubw_r2r(regs, regd) mmx_r2r(psubw, regs, regd)

#define psubsb_m2r(var, reg) mmx_m2r(psubsb, var, reg)
#define psubsb_r2r(regs, regd) mmx_r2r(psubsb, regs, regd)
#define psubsw_m2r(var, reg) mmx_m2r(psubsw, var, reg)
#define psubsw_r2r(regs, regd) mmx_r2r(psubsw, regs, regd)

#define psubusb_m2r(var, reg) mmx_m2r(psubusb, var, reg)
#define psubusb_r2r(regs, regd) mmx_r2r(psubusb, regs, regd)
#define psubusw_m2r(var, reg) mmx_m2r(psubusw, var, reg)
#define psubusw_r2r(regs, regd) mmx_r2r(psubusw, regs, regd)

#define punpckhbw_m2r(var, reg) mmx_m2r(punpckhbw, var, reg)
#define punpckhbw_r2r(regs, regd) mmx_r2r(punpckhbw, regs, regd)
#define punpckhdq_m2r(var, reg) mmx_m2r(punpckhdq, var, reg)
#define punpckhdq_r2r(regs, regd) mmx_r2r(punpckhdq, regs, regd)
#define punpckhwd_m2r(var, reg) mmx_m2r(punpckhwd, var, reg)
#define punpckhwd_r2r(regs, regd) mmx_r2r(punpckhwd, regs, regd)

#define punpcklbw_m2r(var, reg) mmx_m2r(punpcklbw, var, reg)
#define punpcklbw_r2r(regs, regd) mmx_r2r(punpcklbw, regs, regd)
#define punpckldq_m2r(var, reg) mmx_m2r(punpckldq, var, reg)
#define punpckldq_r2r(regs, regd) mmx_r2r(punpckldq, regs, regd)
#define punpcklwd_m2r(var, reg) mmx_m2r(punpcklwd, var, reg)
#define punpcklwd_r2r(regs, regd) mmx_r2r(punpcklwd, regs, regd)

#define pxor_m2r(var, reg) mmx_m2r(pxor, var, reg)
#define pxor_r2r(regs, regd) mmx_r2r(pxor, regs, regd)

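/*
 * Usage sketch (added for illustration, not part of the original header):
 * saturated unsigned addition of two 8-byte blocks using the macros above.
 * Variable names are hypothetical; emms() must be issued before any
 * subsequent floating-point code.
 *
 *   mmx_t a, b;
 *   a.uq = 0x1020304050607080ULL;
 *   b.uq = 0xf0f0f0f0f0f0f0f0ULL;
 *   movq_m2r(a, mm0);     // load a into %mm0
 *   paddusb_m2r(b, mm0);  // add 8 unsigned bytes with saturation
 *   movq_r2m(mm0, a);     // store the result back into a
 *   emms();               // reset the FPU tag word
 */
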
/* 3DNOW extensions */

#define pavgusb_m2r(var, reg) mmx_m2r(pavgusb, var, reg)
#define pavgusb_r2r(regs, regd) mmx_r2r(pavgusb, regs, regd)

/* AMD MMX extensions - also available in Intel SSE */

#define mmx_m2ri(op, mem, reg, imm) \
	__asm__ __volatile__(#op " %1, %0, %%" #reg \
	                     : /* nothing */        \
	                     : "m"(mem), "i"(imm))
#define mmx_r2ri(op, regs, regd, imm) \
	__asm__ __volatile__(#op " %0, %%" #regs ", %%" #regd \
	                     : /* nothing */                  \
	                     : "i"(imm))

#define mmx_fetch(mem, hint) \
	__asm__ __volatile__("prefetch" #hint " %0" \
	                     : /* nothing */        \
	                     : "m"(mem))

/* maskmovq takes no immediate operand, so the plain r2r wrapper is used */
#define maskmovq(regs, maskreg) mmx_r2r(maskmovq, regs, maskreg)

#define movntq_r2m(mmreg, var) mmx_r2m(movntq, mmreg, var)

#define pavgb_m2r(var, reg) mmx_m2r(pavgb, var, reg)
#define pavgb_r2r(regs, regd) mmx_r2r(pavgb, regs, regd)
#define pavgw_m2r(var, reg) mmx_m2r(pavgw, var, reg)
#define pavgw_r2r(regs, regd) mmx_r2r(pavgw, regs, regd)

#define pextrw_r2r(mmreg, reg, imm) mmx_r2ri(pextrw, mmreg, reg, imm)

#define pinsrw_r2r(reg, mmreg, imm) mmx_r2ri(pinsrw, reg, mmreg, imm)

#define pmaxsw_m2r(var, reg) mmx_m2r(pmaxsw, var, reg)
#define pmaxsw_r2r(regs, regd) mmx_r2r(pmaxsw, regs, regd)

#define pmaxub_m2r(var, reg) mmx_m2r(pmaxub, var, reg)
#define pmaxub_r2r(regs, regd) mmx_r2r(pmaxub, regs, regd)

#define pminsw_m2r(var, reg) mmx_m2r(pminsw, var, reg)
#define pminsw_r2r(regs, regd) mmx_r2r(pminsw, regs, regd)

#define pminub_m2r(var, reg) mmx_m2r(pminub, var, reg)
#define pminub_r2r(regs, regd) mmx_r2r(pminub, regs, regd)

#define pmovmskb(mmreg, reg) __asm__ __volatile__("pmovmskb %" #mmreg ", %" #reg)

#define pmulhuw_m2r(var, reg) mmx_m2r(pmulhuw, var, reg)
#define pmulhuw_r2r(regs, regd) mmx_r2r(pmulhuw, regs, regd)

#define prefetcht0(mem) mmx_fetch(mem, t0)
#define prefetcht1(mem) mmx_fetch(mem, t1)
#define prefetcht2(mem) mmx_fetch(mem, t2)
#define prefetchnta(mem) mmx_fetch(mem, nta)

#define psadbw_m2r(var, reg) mmx_m2r(psadbw, var, reg)
#define psadbw_r2r(regs, regd) mmx_r2r(psadbw, regs, regd)

#define pshufw_m2r(var, reg, imm) mmx_m2ri(pshufw, var, reg, imm)
#define pshufw_r2r(regs, regd, imm) mmx_r2ri(pshufw, regs, regd, imm)

#define sfence() __asm__ __volatile__("sfence\n\t")
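
/*
 * Usage sketch (added for illustration, not part of the original header):
 * a cache-bypassing 64-bit copy loop built from the macros above.  The
 * buffers and count are hypothetical; src and dst are mmx_t arrays of n
 * elements.
 *
 *   for (unsigned int i = 0; i < n; ++i) {
 *     prefetchnta(src[i + 8]);    // prefetch ahead; prefetches never fault
 *     movq_m2r(src[i], mm0);      // 64-bit load into %mm0
 *     movntq_r2m(mm0, dst[i]);    // non-temporal 64-bit store
 *   }
 *   sfence();                     // order the non-temporal stores
 *   emms();                       // leave MMX state
 */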

/* SSE2 */
#define pshufhw_m2r(var, reg, imm) mmx_m2ri(pshufhw, var, reg, imm)
#define pshufhw_r2r(regs, regd, imm) mmx_r2ri(pshufhw, regs, regd, imm)
#define pshuflw_m2r(var, reg, imm) mmx_m2ri(pshuflw, var, reg, imm)
#define pshuflw_r2r(regs, regd, imm) mmx_r2ri(pshuflw, regs, regd, imm)

#define pshufd_r2r(regs, regd, imm) mmx_r2ri(pshufd, regs, regd, imm)

#define movdqa_m2r(var, reg) mmx_m2r(movdqa, var, reg)
#define movdqa_r2m(reg, var) mmx_r2m(movdqa, reg, var)
#define movdqa_r2r(regs, regd) mmx_r2r(movdqa, regs, regd)
#define movdqu_m2r(var, reg) mmx_m2r(movdqu, var, reg)
#define movdqu_r2m(reg, var) mmx_r2m(movdqu, reg, var)
#define movdqu_r2r(regs, regd) mmx_r2r(movdqu, regs, regd)

#define pmullw_r2m(reg, var) mmx_r2m(pmullw, reg, var)

#define pslldq_i2r(imm, reg) mmx_i2r(pslldq, imm, reg)
#define psrldq_i2r(imm, reg) mmx_i2r(psrldq, imm, reg)

#define punpcklqdq_r2r(regs, regd) mmx_r2r(punpcklqdq, regs, regd)
#define punpckhqdq_r2r(regs, regd) mmx_r2r(punpckhqdq, regs, regd)

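/*
 * Clarifying note (not part of the original header): the SSE2 macros above
 * reuse the same wrappers but target the 128-bit XMM registers, e.g.
 *
 *   movdqu_m2r(buf, xmm0);           // unaligned 128-bit load into %xmm0
 *   pshufd_r2r(xmm0, xmm1, 0x1b);    // reverse the four doublewords
 *
 * where buf is a hypothetical 16-byte source.
 */
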
/// @endcond

} // end namespace firevision

#endif