ViewVC Help
View File | Revision Log | Show Annotations | Revision Graph | Root Listing
root/cebix/BasiliskII/src/uae_cpu/compiler/codegen_x86.h
Revision: 1.18
Committed: 2005-04-24T23:00:08Z (19 years, 5 months ago) by gbeauche
Content type: text/plain
Branch: MAIN
CVS Tags: nigel-build-17
Changes since 1.17: +4 -4 lines
Log Message:
Fix build with gcc4 on x86-64: ignore errors when casting pointers to int
from regs & fpu members + code cache. This is possible because data is
allocated in 32-bit space and we force allocation of translation cache to
those bounds too.

File Contents

# User Rev Content
1 gbeauche 1.2 /******************** -*- mode: C; tab-width: 8 -*- ********************
2 gbeauche 1.1 *
3 gbeauche 1.11 * Run-time assembler for IA-32 and AMD64
4 gbeauche 1.1 *
5     ***********************************************************************/
6    
7    
8     /***********************************************************************
9     *
10 gbeauche 1.11 * This file is derived from CCG.
11 gbeauche 1.1 *
12     * Copyright 1999, 2000, 2001, 2002, 2003 Ian Piumarta
13     *
14 gbeauche 1.11 * Adaptations and enhancements for AMD64 support, Copyright 2003
15 gbeauche 1.1 * Gwenole Beauchesne
16     *
17 gbeauche 1.17 * Basilisk II (C) 1997-2005 Christian Bauer
18 gbeauche 1.1 *
19     * This program is free software; you can redistribute it and/or modify
20     * it under the terms of the GNU General Public License as published by
21     * the Free Software Foundation; either version 2 of the License, or
22     * (at your option) any later version.
23     *
24     * This program is distributed in the hope that it will be useful,
25     * but WITHOUT ANY WARRANTY; without even the implied warranty of
26     * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27     * GNU General Public License for more details.
28     *
29     * You should have received a copy of the GNU General Public License
30     * along with this program; if not, write to the Free Software
31     * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
32     *
33     ***********************************************************************/
34    
35     #ifndef X86_RTASM_H
36     #define X86_RTASM_H
37    
38     /* NOTES
39     *
40     * o Best viewed on a 1024x768 screen with fixed-6x10 font ;-)
41     *
42     * TODO
43     *
44     * o Fix FIXMEs
45     * o i387 FPU instructions
46     * o SSE instructions
47     * o Optimize for cases where register numbers are not integral constants
48     */
49    
50     /* --- Configuration ------------------------------------------------------- */
51    
52     /* Define to settle a "flat" register set, i.e. different regno for
53     each size variant. */
54     #ifndef X86_FLAT_REGISTERS
55     #define X86_FLAT_REGISTERS 1
56     #endif
57    
58     /* Define to generate x86-64 code. */
59     #ifndef X86_TARGET_64BIT
60     #define X86_TARGET_64BIT 0
61     #endif
62    
63     /* Define to optimize ALU instructions. */
64     #ifndef X86_OPTIMIZE_ALU
65     #define X86_OPTIMIZE_ALU 1
66     #endif
67    
68     /* Define to optimize rotate/shift instructions. */
69     #ifndef X86_OPTIMIZE_ROTSHI
70     #define X86_OPTIMIZE_ROTSHI 1
71     #endif
72    
73 gbeauche 1.15 /* Define to optimize absolute addresses for RIP relative addressing. */
74     #ifndef X86_RIP_RELATIVE_ADDR
75 gbeauche 1.16 #define X86_RIP_RELATIVE_ADDR 1
76 gbeauche 1.15 #endif
77    
/* --- Macros -------------------------------------------------------------- */

/* Code-emission primitives that MUST be supplied by the includer:
 *
 *   x86_emit_byte(B)    emit one byte
 *   x86_emit_word(W)    emit one 16-bit word
 *   x86_emit_long(L)    emit one 32-bit word
 *
 *   x86_get_target()    return pointer to the current code position
 *
 *   x86_emit_failure(MSG)   abort assembly with a fatal error
 */

/* Expression-context wrapper around x86_emit_failure(): reports the
   failure and yields 0, so it can sit in the arms of ?: chains below. */
#define x86_emit_failure0(MSG) (x86_emit_failure(MSG),0)
/* --- Register set -------------------------------------------------------- */

/* Base offsets of each register class.  With X86_FLAT_REGISTERS the
   class lives in the high nibble (extracted by _rC) and the register
   number in the low nibble (extracted by _rR); without it, all classes
   share plain 0-based numbering. */
enum {
	X86_RIP         = -2,
#if X86_FLAT_REGISTERS
	X86_NOREG       = 0,
	X86_Reg8L_Base  = 0x10,
	X86_Reg8H_Base  = 0x20,
	X86_Reg16_Base  = 0x30,
	X86_Reg32_Base  = 0x40,
	X86_Reg64_Base  = 0x50,
	X86_RegMMX_Base = 0x60,
	X86_RegXMM_Base = 0x70,
#else
	X86_NOREG       = -1,
	X86_Reg8L_Base  = 0,
	X86_Reg8H_Base  = 16,
	X86_Reg16_Base  = 0,
	X86_Reg32_Base  = 0,
	X86_Reg64_Base  = 0,
	X86_RegMMX_Base = 0,
	X86_RegXMM_Base = 0,
#endif
};
126 gbeauche 1.2 enum {
127 gbeauche 1.1 X86_AL = X86_Reg8L_Base,
128     X86_CL, X86_DL, X86_BL,
129 gbeauche 1.14 X86_SPL, X86_BPL, X86_SIL, X86_DIL,
130 gbeauche 1.1 X86_R8B, X86_R9B, X86_R10B, X86_R11B,
131     X86_R12B, X86_R13B, X86_R14B, X86_R15B,
132 gbeauche 1.14 X86_AH = X86_Reg8H_Base + 4,
133     X86_CH, X86_DH, X86_BH
134 gbeauche 1.2 };
135 gbeauche 1.1
136 gbeauche 1.2 enum {
137 gbeauche 1.1 X86_AX = X86_Reg16_Base,
138     X86_CX, X86_DX, X86_BX,
139     X86_SP, X86_BP, X86_SI, X86_DI,
140     X86_R8W, X86_R9W, X86_R10W, X86_R11W,
141     X86_R12W, X86_R13W, X86_R14W, X86_R15W
142 gbeauche 1.2 };
143 gbeauche 1.1
144 gbeauche 1.2 enum {
145 gbeauche 1.1 X86_EAX = X86_Reg32_Base,
146     X86_ECX, X86_EDX, X86_EBX,
147     X86_ESP, X86_EBP, X86_ESI, X86_EDI,
148     X86_R8D, X86_R9D, X86_R10D, X86_R11D,
149     X86_R12D, X86_R13D, X86_R14D, X86_R15D
150 gbeauche 1.2 };
151 gbeauche 1.1
152 gbeauche 1.2 enum {
153 gbeauche 1.1 X86_RAX = X86_Reg64_Base,
154     X86_RCX, X86_RDX, X86_RBX,
155     X86_RSP, X86_RBP, X86_RSI, X86_RDI,
156     X86_R8, X86_R9, X86_R10, X86_R11,
157     X86_R12, X86_R13, X86_R14, X86_R15
158 gbeauche 1.2 };
159 gbeauche 1.1
160 gbeauche 1.3 enum {
161     X86_MM0 = X86_RegMMX_Base,
162     X86_MM1, X86_MM2, X86_MM3,
163     X86_MM4, X86_MM5, X86_MM6, X86_MM7,
164     };
165    
166     enum {
167     X86_XMM0 = X86_RegXMM_Base,
168     X86_XMM1, X86_XMM2, X86_XMM3,
169     X86_XMM4, X86_XMM5, X86_XMM6, X86_XMM7,
170     X86_XMM8, X86_XMM9, X86_XMM10, X86_XMM11,
171     X86_XMM12, X86_XMM13, X86_XMM14, X86_XMM15
172     };
173    
/* Register control and access
 *
 * _r0P(R)	Null register?
 * _rIP(R)	RIP register?
 * _rXP(R)	Extended (R8..R15 / XMM8..XMM15) register?
 *
 * _rC(R)	Class of register (only valid if X86_FLAT_REGISTERS)
 * _rR(R)	Full register number
 * _rN(R)	Short register number for encoding (low 3 bits)
 *
 * _r1(R)	8-bit register ID
 * _r2(R)	16-bit register ID
 * _r4(R)	32-bit register ID
 * _r8(R)	64-bit register ID
 * _rM(R)	MMX register ID
 * _rX(R)	XMM register ID
 * _rA(R)	Address register ID used for EA calculation
 */

#define _r0P(R)		((int)(R) == (int)X86_NOREG)
#define _rIP(R)		((int)(R) == (int)X86_RIP)

#if X86_FLAT_REGISTERS
#define _rC(R)		((R) & 0xf0)
#define _rR(R)		((R) & 0x0f)
#define _rN(R)		((R) & 0x07)
#define _rXP(R)		((R) > 0 && _rR(R) > 7)
#else
#define _rN(R)		((R) & 0x07)
#define _rR(R)		(int(R))
#define _rXP(R)		(int(R) > 7)
#endif

/* Without _ASM_SAFETY (or without flat registers) the size-checked
   accessors degrade to the raw 3-bit encoding; with it, a register of
   the wrong class triggers an assembly-time failure. */
#if !defined(_ASM_SAFETY) || ! X86_FLAT_REGISTERS
#define _r1(R)		_rN(R)
#define _r2(R)		_rN(R)
#define _r4(R)		_rN(R)
#define _r8(R)		_rN(R)
#define _rA(R)		_rN(R)
#define _rM(R)		_rN(R)
#define _rX(R)		_rN(R)
#else
#define _r1(R)		( ((_rC(R) & (X86_Reg8L_Base | X86_Reg8H_Base)) != 0) ? _rN(R) : x86_emit_failure0( "8-bit register required"))
#define _r2(R)		( (_rC(R) == X86_Reg16_Base) ? _rN(R) : x86_emit_failure0("16-bit register required"))
#define _r4(R)		( (_rC(R) == X86_Reg32_Base) ? _rN(R) : x86_emit_failure0("32-bit register required"))
#define _r8(R)		( (_rC(R) == X86_Reg64_Base) ? _rN(R) : x86_emit_failure0("64-bit register required"))
#define _rA(R)		( X86_TARGET_64BIT ? \
			( (_rC(R) == X86_Reg64_Base) ? _rN(R) : x86_emit_failure0("not a valid 64-bit base/index expression")) : \
			( (_rC(R) == X86_Reg32_Base) ? _rN(R) : x86_emit_failure0("not a valid 32-bit base/index expression")) )
#define _rM(R)		( (_rC(R) == X86_RegMMX_Base) ? _rN(R) : x86_emit_failure0("MMX register required"))
#define _rX(R)		( (_rC(R) == X86_RegXMM_Base) ? _rN(R) : x86_emit_failure0("SSE register required"))
#endif

/* Stack pointer of the current target mode. */
#define _rSP()		(X86_TARGET_64BIT ? (int)X86_RSP : (int)X86_ESP)
/* True for the REX-only low-byte registers SPL/BPL/SIL/DIL. */
#define _r1e8lP(R)	(int(R) >= X86_SPL && int(R) <= X86_DIL)
/* Encoding-relevant predicates: rBP/r13 and rSP/r12 need special
   ModR/M / SIB treatment (no mod=00 disp-less form; SIB escape). */
#define _rbpP(R)	(_rR(R) == _rR(X86_RBP))
#define _rspP(R)	(_rR(R) == _rR(X86_RSP))
#define _rbp13P(R)	(_rN(R) == _rN(X86_RBP))
#define _rsp12P(R)	(_rN(R) == _rN(X86_RSP))
/* ========================================================================= */
/* --- UTILITY ------------------------------------------------------------- */
/* ========================================================================= */

typedef signed char	_sc;
typedef unsigned char	_uc;
typedef signed short	_ss;
typedef unsigned short	_us;
typedef signed int	_sl;
typedef unsigned int	_ul;

/* Integer casts; the intermediate (unsigned long) silences gcc4
   pointer-to-int truncation errors on x86-64 (data is allocated in
   32-bit space, so the truncation is intentional and safe). */
#define _UC(X)		((_uc )(unsigned long)(X))
#define _US(X)		((_us )(unsigned long)(X))
#define _SL(X)		((_sl )(unsigned long)(X))
#define _UL(X)		((_ul )(unsigned long)(X))

/* Pointer casts. */
# define _PUC(X)	((_uc *)(X))
# define _PUS(X)	((_us *)(X))
# define _PSL(X)	((_sl *)(X))
# define _PUL(X)	((_ul *)(X))

/* Emission shorthands: byte, word, long, quad. */
#define _B(B)		x86_emit_byte((B))
#define _W(W)		x86_emit_word((W))
#define _L(L)		x86_emit_long((L))
#define _Q(Q)		x86_emit_quad((Q))

/* _MASK(N): low N bits set.  _siP/_uiP: does I fit in an N-bit
   signed/unsigned field?  _suiP: either. */
#define _MASK(N)	((unsigned)((1<<(N)))-1)
#define _siP(N,I)	(!((((unsigned)(I))^(((unsigned)(I))<<1))&~_MASK(N)))
#define _uiP(N,I)	(!(((unsigned)(I))&~_MASK(N)))
#define _suiP(N,I)	(_siP(N,I) | _uiP(N,I))

/* Range-checked field extraction; with _ASM_SAFETY an out-of-range
   immediate aborts assembly instead of being silently truncated. */
#ifndef _ASM_SAFETY
#define _ck_s(W,I)	(_UL(I) & _MASK(W))
#define _ck_u(W,I)	(_UL(I) & _MASK(W))
#define _ck_su(W,I)	(_UL(I) & _MASK(W))
#define _ck_d(W,I)	(_UL(I) & _MASK(W))
#else
#define _ck_s(W,I)	(_siP(W,I) ? (_UL(I) & _MASK(W)) : x86_emit_failure0(  "signed integer `"#I"' too large for "#W"-bit field"))
#define _ck_u(W,I)	(_uiP(W,I) ? (_UL(I) & _MASK(W)) : x86_emit_failure0("unsigned integer `"#I"' too large for "#W"-bit field"))
#define _ck_su(W,I)	(_suiP(W,I) ? (_UL(I) & _MASK(W)) : x86_emit_failure0(        "integer `"#I"' too large for "#W"-bit field"))
#define _ck_d(W,I)	(_siP(W,I) ? (_UL(I) & _MASK(W)) : x86_emit_failure0(    "displacement `"#I"' too large for "#W"-bit field"))
#endif
/* Fit predicates for common operand widths. */
#define _s0P(I)		((I)==0)
#define _s8P(I)		_siP(8,I)
#define _s16P(I)	_siP(16,I)
#define _u8P(I)		_uiP(8,I)
#define _u16P(I)	_uiP(16,I)

/* Checked 8/16-bit immediates (signed or unsigned). */
#define _su8(I)		_ck_su(8,I)
#define _su16(I)	_ck_su(16,I)

/* Checked signed fields, 1..31 bits wide. */
#define _s1(I)          _ck_s( 1,I)
#define _s2(I)          _ck_s( 2,I)
#define _s3(I)          _ck_s( 3,I)
#define _s4(I)          _ck_s( 4,I)
#define _s5(I)          _ck_s( 5,I)
#define _s6(I)          _ck_s( 6,I)
#define _s7(I)          _ck_s( 7,I)
#define _s8(I)          _ck_s( 8,I)
#define _s9(I)          _ck_s( 9,I)
#define _s10(I)         _ck_s(10,I)
#define _s11(I)         _ck_s(11,I)
#define _s12(I)         _ck_s(12,I)
#define _s13(I)         _ck_s(13,I)
#define _s14(I)         _ck_s(14,I)
#define _s15(I)         _ck_s(15,I)
#define _s16(I)         _ck_s(16,I)
#define _s17(I)         _ck_s(17,I)
#define _s18(I)         _ck_s(18,I)
#define _s19(I)         _ck_s(19,I)
#define _s20(I)         _ck_s(20,I)
#define _s21(I)         _ck_s(21,I)
#define _s22(I)         _ck_s(22,I)
#define _s23(I)         _ck_s(23,I)
#define _s24(I)         _ck_s(24,I)
#define _s25(I)         _ck_s(25,I)
#define _s26(I)         _ck_s(26,I)
#define _s27(I)         _ck_s(27,I)
#define _s28(I)         _ck_s(28,I)
#define _s29(I)         _ck_s(29,I)
#define _s30(I)         _ck_s(30,I)
#define _s31(I)         _ck_s(31,I)
/* Checked unsigned fields, 1..31 bits wide. */
#define _u1(I)          _ck_u( 1,I)
#define _u2(I)          _ck_u( 2,I)
#define _u3(I)          _ck_u( 3,I)
#define _u4(I)          _ck_u( 4,I)
#define _u5(I)          _ck_u( 5,I)
#define _u6(I)          _ck_u( 6,I)
#define _u7(I)          _ck_u( 7,I)
#define _u8(I)          _ck_u( 8,I)
#define _u9(I)          _ck_u( 9,I)
#define _u10(I)         _ck_u(10,I)
#define _u11(I)         _ck_u(11,I)
#define _u12(I)         _ck_u(12,I)
#define _u13(I)         _ck_u(13,I)
#define _u14(I)         _ck_u(14,I)
#define _u15(I)         _ck_u(15,I)
#define _u16(I)         _ck_u(16,I)
#define _u17(I)         _ck_u(17,I)
#define _u18(I)         _ck_u(18,I)
#define _u19(I)         _ck_u(19,I)
#define _u20(I)         _ck_u(20,I)
#define _u21(I)         _ck_u(21,I)
#define _u22(I)         _ck_u(22,I)
#define _u23(I)         _ck_u(23,I)
#define _u24(I)         _ck_u(24,I)
#define _u25(I)         _ck_u(25,I)
#define _u26(I)         _ck_u(26,I)
#define _u27(I)         _ck_u(27,I)
#define _u28(I)         _ck_u(28,I)
#define _u29(I)         _ck_u(29,I)
#define _u30(I)         _ck_u(30,I)
#define _u31(I)         _ck_u(31,I)
/* ========================================================================= */
/* --- ASSEMBLER ----------------------------------------------------------- */
/* ========================================================================= */

/* Binary literals for 2-bit fields (mod, scale). */
#define _b00		0
#define _b01		1
#define _b10		2
#define _b11		3

/* Binary literals for 3-bit fields (reg, r/m, index, base). */
#define _b000		0
#define _b001		1
#define _b010		2
#define _b011		3
#define _b100		4
#define _b101		5
#define _b110		6
#define _b111		7

/* Branch displacement from the current code position to D. */
#define _OFF4(D)	(_UL(D) - _UL(x86_get_target()))
#define _CKD8(D)	_ck_d(8, ((_uc) _OFF4(D)) )

/* Emit a placeholder, then back-patch it with the displacement
   computed from the position *after* the placeholder. */
#define _D8(D)		(_B(0), ((*(_PUC(x86_get_target())-1))= _CKD8(D)))
#define _D32(D)		(_L(0), ((*(_PUL(x86_get_target())-1))= _OFF4(D)))

/* Field validators for ModR/M and SIB components. */
#ifndef _ASM_SAFETY
# define _M(M)		(M)
# define _r(R)		(R)
# define _m(M)		(M)
# define _s(S)		(S)
# define _i(I)		(I)
# define _b(B)		(B)
#else
# define _M(M)		(((M)>3) ? x86_emit_failure0("internal error: mod = " #M) : (M))
# define _r(R)		(((R)>7) ? x86_emit_failure0("internal error: reg = " #R) : (R))
# define _m(M)		(((M)>7) ? x86_emit_failure0("internal error: r/m = " #M) : (M))
# define _s(S)		(((S)>3) ? x86_emit_failure0("internal error: memory scale = " #S) : (S))
# define _i(I)		(((I)>7) ? x86_emit_failure0("internal error: memory index = " #I) : (I))
# define _b(B)		(((B)>7) ? x86_emit_failure0("internal error: memory base = " #B) : (B))
#endif

/* ModR/M byte: mod(2) | reg(3) | r/m(3).  SIB byte: scale(2) | index(3) | base(3). */
#define _Mrm(Md,R,M)	_B((_M(Md)<<6)|(_r(R)<<3)|_m(M))
#define _SIB(Sc,I, B)	_B((_s(Sc)<<6)|(_i(I)<<3)|_b(B))

/* Map a byte scale factor (1/2/4/8) to the 2-bit SIB scale field. */
#define _SCL(S)		((((S)==1) ? _b00 : \
			 (((S)==2) ? _b01 : \
			 (((S)==4) ? _b10 : \
			 (((S)==8) ? _b11 : x86_emit_failure0("illegal scale: " #S))))))
/* --- Memory subformats - urgh! ------------------------------------------- */

/* _r_D() is the RIP addressing mode if X86_TARGET_64BIT; use _r_DSIB()
   for a true 32-bit absolute in 64-bit mode. */
#define _r_D(	R, D	  )	(_Mrm(_b00,_rN(R),_b101 )		             ,_L((long)(D)))
#define _r_DSIB(R, D	  )	(_Mrm(_b00,_rN(R),_b100 ),_SIB(_SCL(1),_b100 ,_b101 ),_L((long)(D)))
#define _r_0B(	R,   B	  )	(_Mrm(_b00,_rN(R),_rA(B))			                   )
#define _r_0BIS(R,   B,I,S)	(_Mrm(_b00,_rN(R),_b100 ),_SIB(_SCL(S),_rA(I),_rA(B))              )
#define _r_1B(	R, D,B	  )	(_Mrm(_b01,_rN(R),_rA(B))		             ,_B((long)(D)))
#define _r_1BIS(R, D,B,I,S)	(_Mrm(_b01,_rN(R),_b100 ),_SIB(_SCL(S),_rA(I),_rA(B)),_B((long)(D)))
#define _r_4B(	R, D,B	  )	(_Mrm(_b10,_rN(R),_rA(B))		             ,_L((long)(D)))
#define _r_4IS(	R, D,I,S)	(_Mrm(_b00,_rN(R),_b100 ),_SIB(_SCL(S),_rA(I),_b101 ),_L((long)(D)))
#define _r_4BIS(R, D,B,I,S)	(_Mrm(_b10,_rN(R),_b100 ),_SIB(_SCL(S),_rA(I),_rA(B)),_L((long)(D)))

/* Pick the shortest displacement encoding: none (unless base is
   rBP/r13, which has no disp-less mod=00 form), 8-bit, or 32-bit. */
#define _r_DB(  R, D,B	  )	((_s0P(D) && (!_rbp13P(B)) ? _r_0B  (R,  B    ) : (_s8P(D) ? _r_1B(  R,D,B    ) : _r_4B(  R,D,B    ))))
#define _r_DBIS(R, D,B,I,S)	((_s0P(D) && (!_rbp13P(B)) ? _r_0BIS(R,  B,I,S) : (_s8P(D) ? _r_1BIS(R,D,B,I,S) : _r_4BIS(R,D,B,I,S))))

/* Use RIP-addressing in 64-bit mode, if possible.  O is the number of
   immediate bytes that still follow the displacement. */
#define _x86_RIP_addressing_possible(D,O)	(X86_RIP_RELATIVE_ADDR && \
						((uintptr)x86_get_target() + 4 + (O) - (D) <= 0xffffffff))

/* General effective-address dispatcher: reg R, disp D, base B,
   index I, scale S, trailing-immediate size O. */
#define _r_X(   R, D,B,I,S,O)	(_r0P(I)	 ? (_r0P(B)    ? (!X86_TARGET_64BIT ? _r_D(R,D) : \
					                         (_x86_RIP_addressing_possible(D, O) ? \
				                                  _r_D(R, (D) - ((uintptr)x86_get_target() + 4 + (O))) : \
				                                  _r_DSIB(R,D))) : \
				                 (_rIP(B)    ? _r_D   (R,D                ) : \
				                 (_rsp12P(B) ? _r_DBIS(R,D,_rSP(),_rSP(),1) : \
						               _r_DB  (R,D,     B       )))) : \
				 (_r0P(B)	       ? _r_4IS (R,D,            I,S) : \
				 (!_rspP(I)            ? _r_DBIS(R,D,     B,     I,S) : \
						         x86_emit_failure("illegal index register: %esp"))))
/* --- Instruction formats ------------------------------------------------- */

/* Mode gates: reject (or no-op) depending on the target word size. */
#define _m32only(X)		(! X86_TARGET_64BIT ? X : x86_emit_failure("invalid instruction in 64-bit mode"))
#define _m64only(X)		(  X86_TARGET_64BIT ? X : x86_emit_failure("invalid instruction in 32-bit mode"))
#define _m64(X)			(  X86_TARGET_64BIT ? X : ((void)0) )

/*	_format						Opcd		ModR/M dN(rB,rI,Sc)	imm... */

#define	 _d16()					(		  _B(0x66	)				  )
#define	  _O(	     OP				)	(	  _B(  OP	)				  )
#define	  _Or(	     OP,R			)	(	  _B( (OP)|_r(R))				  )
#define	 _OO(	     OP				)	( _B((OP)>>8), _B( (OP)	)				  )
#define	 _OOr(	     OP,R			)	( _B((OP)>>8), _B( (OP)|_r(R))				  )
/* _Os: choose the sign-extended-imm8 opcode form when the operand fits. */
#define	  _Os(	     OP,B			)	(    _s8P(B) ? _B(((OP)|_b10)) : _B(OP)			  )
#define	    _sW(			     W	)	(		       _s8P(W) ? _B(W):_W(W)		  )
#define	    _sL(			     L	)	(		       _s8P(L) ? _B(L):_L(L)		  )
/* _sWO/_sLO: byte length the matching _sW/_sL emission will occupy. */
#define	    _sWO(			     W	)	(		       _s8P(W) ?    1 :   2		  )
#define	    _sLO(			     L	)	(		       _s8P(L) ?    1 :   4		  )
#define	  _O_B(	     OP			    ,B	)	(	_O	(  OP  )			  ,_B(B)  )
#define	  _O_W(	     OP			    ,W	)	(	_O	(  OP  )			  ,_W(W)  )
#define	  _O_L(	     OP			    ,L	)	(	_O	(  OP  )			  ,_L(L)  )
#define	  _O_D8(     OP			    ,D	)	(	_O	(  OP  )			 ,_D8(D)  )
#define	  _O_D32(    OP			    ,D	)	(	_O	(  OP  )			,_D32(D)  )
#define	 _OO_D32(    OP			    ,D	)	(      _OO	(  OP  )			,_D32(D)  )
#define	  _Os_sW(    OP			    ,W	)	(	_Os	(  OP,W)			,_sW(W)   )
#define	  _Os_sL(    OP			    ,L	)	(	_Os	(  OP,L)			,_sL(L)   )
#define	  _O_W_B(    OP			    ,W,B)	(	_O	(  OP  )		 ,_W(W)	  ,_B(B)  )
#define	  _Or_B(     OP,R		    ,B	)	(	_Or	(  OP,R)			  ,_B(B)  )
#define	  _Or_W(     OP,R		    ,W	)	(	_Or	(  OP,R)			  ,_W(W)  )
#define	  _Or_L(     OP,R		    ,L	)	(	_Or	(  OP,R)			  ,_L(L)  )
#define	  _Or_Q(     OP,R		    ,Q	)	(	_Or	(  OP,R)			  ,_Q(Q)  )
#define	  _O_Mrm(    OP	 ,MO,R,M		)	(	_O	(  OP  ),_Mrm(MO,R,M		)	  )
#define	 _OO_Mrm(    OP	 ,MO,R,M		)	(      _OO	(  OP  ),_Mrm(MO,R,M		)	  )
#define	  _O_Mrm_B(  OP	 ,MO,R,M	    ,B	)	(	_O	(  OP  ),_Mrm(MO,R,M		) ,_B(B)  )
#define	  _O_Mrm_W(  OP	 ,MO,R,M	    ,W	)	(	_O	(  OP  ),_Mrm(MO,R,M		) ,_W(W)  )
#define	  _O_Mrm_L(  OP	 ,MO,R,M	    ,L	)	(	_O	(  OP  ),_Mrm(MO,R,M		) ,_L(L)  )
#define	 _OO_Mrm_B(  OP	 ,MO,R,M	    ,B	)	(      _OO	(  OP  ),_Mrm(MO,R,M		) ,_B(B)  )
#define	  _Os_Mrm_sW(OP	 ,MO,R,M	    ,W	)	(	_Os	(  OP,W),_Mrm(MO,R,M		),_sW(W)  )
#define	  _Os_Mrm_sL(OP	 ,MO,R,M	    ,L	)	(	_Os	(  OP,L),_Mrm(MO,R,M		),_sL(L)  )
/* The final numeric argument of _r_X is the trailing-immediate size,
   needed for correct RIP-relative displacement computation. */
#define	  _O_r_X(    OP	     ,R	 ,MD,MB,MI,MS	)	(	_O	(  OP  ),_r_X(	 R  ,MD,MB,MI,MS,0)	  )
#define	 _OO_r_X(    OP	     ,R	 ,MD,MB,MI,MS	)	(      _OO	(  OP  ),_r_X(	 R  ,MD,MB,MI,MS,0)	  )
#define	  _O_r_X_B(  OP	     ,R	 ,MD,MB,MI,MS,B	)	(	_O	(  OP  ),_r_X(	 R  ,MD,MB,MI,MS,1) ,_B(B))
#define	  _O_r_X_W(  OP	     ,R	 ,MD,MB,MI,MS,W	)	(	_O	(  OP  ),_r_X(	 R  ,MD,MB,MI,MS,2) ,_W(W))
#define	  _O_r_X_L(  OP	     ,R	 ,MD,MB,MI,MS,L	)	(	_O	(  OP  ),_r_X(	 R  ,MD,MB,MI,MS,4) ,_L(L))
#define	 _OO_r_X_B(  OP	     ,R	 ,MD,MB,MI,MS,B	)	(      _OO	(  OP  ),_r_X(	 R  ,MD,MB,MI,MS,1) ,_B(B))
#define	  _Os_r_X_sW(OP	     ,R	 ,MD,MB,MI,MS,W	)	(	_Os	(  OP,W),_r_X(	 R  ,MD,MB,MI,MS,_sWO(W)),_sW(W))
#define	  _Os_r_X_sL(OP	     ,R	 ,MD,MB,MI,MS,L	)	(	_Os	(  OP,L),_r_X(	 R  ,MD,MB,MI,MS,_sLO(L)),_sL(L))
#define	  _O_X_B(    OP		 ,MD,MB,MI,MS,B	)	(	_O_r_X_B(  OP	    ,0	,MD,MB,MI,MS	 ,B)	  )
#define	  _O_X_W(    OP		 ,MD,MB,MI,MS,W	)	(	_O_r_X_W(  OP	    ,0	,MD,MB,MI,MS	 ,W)	  )
#define	  _O_X_L(    OP		 ,MD,MB,MI,MS,L	)	(	_O_r_X_L(  OP	    ,0	,MD,MB,MI,MS	 ,L)	  )
482    
483     /* --- REX prefixes -------------------------------------------------------- */
484    
485     #define _VOID() ((void)0)
486     #define _BIT(X) (!!(X))
487     #define _d64(W,R,X,B) (_B(0x40|(W)<<3|(R)<<2|(X)<<1|(B)))
488    
489     #define __REXwrxb(L,W,R,X,B) ((W|R|X|B) || (L) ? _d64(W,R,X,B) : _VOID())
490 gbeauche 1.4 #define __REXwrx_(L,W,R,X,MR) (__REXwrxb(L,W,R,X,_BIT(_rIP(MR)?0:_rXP(MR))))
491 gbeauche 1.1 #define __REXw_x_(L,W,R,X,MR) (__REXwrx_(L,W,_BIT(_rXP(R)),X,MR))
492 gbeauche 1.13 #define __REX_reg(RR) (__REXwrxb(0,0,0,00,_BIT(_rXP(RR))))
493 gbeauche 1.16 #define __REX_mem(MB,MI) (__REXwrxb(0,0,0,_BIT(_rXP(MI)),_BIT(_rXP(MB))))
494 gbeauche 1.1
495     // FIXME: can't mix new (SPL,BPL,SIL,DIL) with (AH,BH,CH,DH)
496 gbeauche 1.14 #define _REXBrr(RR,MR) _m64(__REXw_x_(_r1e8lP(RR)||_r1e8lP(MR),0,RR,0,MR))
497     #define _REXBmr(MB,MI,RD) _m64(__REXw_x_(_r1e8lP(RD)||_r1e8lP(MB),0,RD,_BIT(_rXP(MI)),MB))
498 gbeauche 1.1 #define _REXBrm(RS,MB,MI) _REXBmr(MB,MI,RS)
499    
500 gbeauche 1.14 #define _REXBLrr(RR,MR) _m64(__REXw_x_(_r1e8lP(MR),0,RR,0,MR))
501 gbeauche 1.1 #define _REXLrr(RR,MR) _m64(__REXw_x_(0,0,RR,0,MR))
502     #define _REXLmr(MB,MI,RD) _m64(__REXw_x_(0,0,RD,_BIT(_rXP(MI)),MB))
503     #define _REXLrm(RS,MB,MI) _REXLmr(MB,MI,RS)
504 gbeauche 1.13 #define _REXLr(RR) _m64(__REX_reg(RR))
505     #define _REXLm(MB,MI) _m64(__REX_mem(MB,MI))
506 gbeauche 1.1
507     #define _REXQrr(RR,MR) _m64only(__REXw_x_(0,1,RR,0,MR))
508     #define _REXQmr(MB,MI,RD) _m64only(__REXw_x_(0,1,RD,_BIT(_rXP(MI)),MB))
509     #define _REXQrm(RS,MB,MI) _REXQmr(MB,MI,RS)
510 gbeauche 1.13 #define _REXQr(RR) _m64only(__REX_reg(RR))
511     #define _REXQm(MB,MI) _m64only(__REX_mem(MB,MI))
512 gbeauche 1.1
513    
/* ========================================================================= */
/* --- Fully-qualified intrinsic instructions ------------------------------ */
/* ========================================================================= */

/* Mnemonic suffix convention:
 *   i  = immediate operand
 *   r  = register operand
 *   m  = memory operand (disp,base,index,scale)
 *   sr/sm = a star preceding a register or memory
 *   0  = top of stack register (for FPU instructions)
 *
 * NOTE in x86-64 mode: a memory operand with only a valid displacement
 * value will lead to the expected absolute mode.  If RIP addressing is
 * necessary, X86_RIP shall be used as the base register argument.
 */

/* --- ALU instructions ---------------------------------------------------- */

/* The three-bit /r extension selecting the ALU operation; it also forms
   bits 5:3 of the register-form primary opcode ((OP) << 3). */
enum {
	X86_ADD = 0,
	X86_OR  = 1,
	X86_ADC = 2,
	X86_SBB = 3,
	X86_AND = 4,
	X86_SUB = 5,
	X86_XOR = 6,
	X86_CMP = 7,
};
/*	_format		Opcd		,Mod ,r	     ,m		,mem=dsp+sib	,imm... */

/* Generic ALU encoders: B/W/L/Q = 8/16/32/64-bit operand size;
 * rr = reg,reg; mr = mem->reg; rm = reg->mem; ir = imm->reg; im = imm->mem.
 * The accumulator short forms ((OP)<<3)+4 / +5 are used when
 * X86_OPTIMIZE_ALU is set and the destination is AL/AX/EAX/RAX. */

#define _ALUBrr(OP,RS, RD)		(_REXBrr(RS, RD), _O_Mrm	(((OP) << 3)	,_b11,_r1(RS),_r1(RD)				))
#define _ALUBmr(OP, MD, MB, MI, MS, RD)	(_REXBmr(MB, MI, RD), _O_r_X	(((OP) << 3) + 2,_r1(RD)		,MD,MB,MI,MS		))
/* FIX: removed a stray comma after ((OP) << 3) that passed an empty
   extra argument to the six-parameter _O_r_X macro. */
#define _ALUBrm(OP, RS, MD, MB, MI, MS)	(_REXBrm(RS, MB, MI), _O_r_X	(((OP) << 3)	,_r1(RS)		,MD,MB,MI,MS		))
#define _ALUBir(OP, IM, RD)		(X86_OPTIMIZE_ALU && ((RD) == X86_AL) ? \
					(_REXBrr(0, RD), _O_B	(((OP) << 3) + 4			,_su8(IM))) : \
					(_REXBrr(0, RD), _O_Mrm_B	(0x80		,_b11,OP	,_r1(RD)	,_su8(IM))) )
#define _ALUBim(OP, IM, MD, MB, MI, MS)	(_REXBrm(0, MB, MI), _O_r_X_B	(0x80		,OP		,MD,MB,MI,MS	,_su8(IM)))

#define _ALUWrr(OP, RS, RD)		(_d16(), _REXLrr(RS, RD), _O_Mrm	(((OP) << 3) + 1,_b11,_r2(RS),_r2(RD)			))
#define _ALUWmr(OP, MD, MB, MI, MS, RD)	(_d16(), _REXLmr(MB, MI, RD), _O_r_X	(((OP) << 3) + 3	,_r2(RD)	,MD,MB,MI,MS	))
#define _ALUWrm(OP, RS, MD, MB, MI, MS)	(_d16(), _REXLrm(RS, MB, MI), _O_r_X	(((OP) << 3) + 1	,_r2(RS)	,MD,MB,MI,MS	))
#define _ALUWir(OP, IM, RD)		(X86_OPTIMIZE_ALU && ((RD) == X86_AX) ? \
					(_d16(), _REXLrr(0, RD), _O_W	(((OP) << 3) + 5		,_su16(IM))) : \
					(_d16(), _REXLrr(0, RD), _Os_Mrm_sW	(0x81		,_b11,OP	,_r2(RD)	,_su16(IM))) )
#define _ALUWim(OP, IM, MD, MB, MI, MS)	(_d16(), _REXLrm(0, MB, MI), _Os_r_X_sW	(0x81		,OP		,MD,MB,MI,MS	,_su16(IM)))

#define _ALULrr(OP, RS, RD)		(_REXLrr(RS, RD), _O_Mrm	(((OP) << 3) + 1,_b11,_r4(RS),_r4(RD)				))
#define _ALULmr(OP, MD, MB, MI, MS, RD)	(_REXLmr(MB, MI, RD), _O_r_X	(((OP) << 3) + 3	,_r4(RD)	,MD,MB,MI,MS	))
#define _ALULrm(OP, RS, MD, MB, MI, MS)	(_REXLrm(RS, MB, MI), _O_r_X	(((OP) << 3) + 1	,_r4(RS)	,MD,MB,MI,MS	))
#define _ALULir(OP, IM, RD)		(X86_OPTIMIZE_ALU && ((RD) == X86_EAX) ? \
					(_REXLrr(0, RD), _O_L	(((OP) << 3) + 5		,IM	)) : \
					(_REXLrr(0, RD), _Os_Mrm_sL	(0x81		,_b11,OP	,_r4(RD)	,IM	)) )
#define _ALULim(OP, IM, MD, MB, MI, MS)	(_REXLrm(0, MB, MI), _Os_r_X_sL	(0x81		,OP		,MD,MB,MI,MS	,IM	))

#define _ALUQrr(OP, RS, RD)		(_REXQrr(RS, RD), _O_Mrm	(((OP) << 3) + 1,_b11,_r8(RS),_r8(RD)				))
#define _ALUQmr(OP, MD, MB, MI, MS, RD)	(_REXQmr(MB, MI, RD), _O_r_X	(((OP) << 3) + 3	,_r8(RD)	,MD,MB,MI,MS	))
#define _ALUQrm(OP, RS, MD, MB, MI, MS)	(_REXQrm(RS, MB, MI), _O_r_X	(((OP) << 3) + 1	,_r8(RS)	,MD,MB,MI,MS	))
#define _ALUQir(OP, IM, RD)		(X86_OPTIMIZE_ALU && ((RD) == X86_RAX) ? \
					(_REXQrr(0, RD), _O_L	(((OP) << 3) + 5		,IM	)) : \
					(_REXQrr(0, RD), _Os_Mrm_sL	(0x81		,_b11,OP	,_r8(RD)	,IM	)) )
#define _ALUQim(OP, IM, MD, MB, MI, MS)	(_REXQrm(0, MB, MI), _Os_r_X_sL	(0x81		,OP		,MD,MB,MI,MS	,IM	))
/* Concrete ALU mnemonics, instantiated from the generic _ALU* encoders
   with the matching X86_* opcode-extension constant. */

#define ADCBrr(RS, RD)			_ALUBrr(X86_ADC, RS, RD)
#define ADCBmr(MD, MB, MI, MS, RD)	_ALUBmr(X86_ADC, MD, MB, MI, MS, RD)
#define ADCBrm(RS, MD, MB, MI, MS)	_ALUBrm(X86_ADC, RS, MD, MB, MI, MS)
#define ADCBir(IM, RD)			_ALUBir(X86_ADC, IM, RD)
#define ADCBim(IM, MD, MB, MI, MS)	_ALUBim(X86_ADC, IM, MD, MB, MI, MS)

#define ADCWrr(RS, RD)			_ALUWrr(X86_ADC, RS, RD)
#define ADCWmr(MD, MB, MI, MS, RD)	_ALUWmr(X86_ADC, MD, MB, MI, MS, RD)
#define ADCWrm(RS, MD, MB, MI, MS)	_ALUWrm(X86_ADC, RS, MD, MB, MI, MS)
#define ADCWir(IM, RD)			_ALUWir(X86_ADC, IM, RD)
#define ADCWim(IM, MD, MB, MI, MS)	_ALUWim(X86_ADC, IM, MD, MB, MI, MS)

#define ADCLrr(RS, RD)			_ALULrr(X86_ADC, RS, RD)
#define ADCLmr(MD, MB, MI, MS, RD)	_ALULmr(X86_ADC, MD, MB, MI, MS, RD)
#define ADCLrm(RS, MD, MB, MI, MS)	_ALULrm(X86_ADC, RS, MD, MB, MI, MS)
#define ADCLir(IM, RD)			_ALULir(X86_ADC, IM, RD)
#define ADCLim(IM, MD, MB, MI, MS)	_ALULim(X86_ADC, IM, MD, MB, MI, MS)

#define ADCQrr(RS, RD)			_ALUQrr(X86_ADC, RS, RD)
#define ADCQmr(MD, MB, MI, MS, RD)	_ALUQmr(X86_ADC, MD, MB, MI, MS, RD)
#define ADCQrm(RS, MD, MB, MI, MS)	_ALUQrm(X86_ADC, RS, MD, MB, MI, MS)
#define ADCQir(IM, RD)			_ALUQir(X86_ADC, IM, RD)
#define ADCQim(IM, MD, MB, MI, MS)	_ALUQim(X86_ADC, IM, MD, MB, MI, MS)

#define ADDBrr(RS, RD)			_ALUBrr(X86_ADD, RS, RD)
#define ADDBmr(MD, MB, MI, MS, RD)	_ALUBmr(X86_ADD, MD, MB, MI, MS, RD)
#define ADDBrm(RS, MD, MB, MI, MS)	_ALUBrm(X86_ADD, RS, MD, MB, MI, MS)
#define ADDBir(IM, RD)			_ALUBir(X86_ADD, IM, RD)
#define ADDBim(IM, MD, MB, MI, MS)	_ALUBim(X86_ADD, IM, MD, MB, MI, MS)

#define ADDWrr(RS, RD)			_ALUWrr(X86_ADD, RS, RD)
#define ADDWmr(MD, MB, MI, MS, RD)	_ALUWmr(X86_ADD, MD, MB, MI, MS, RD)
#define ADDWrm(RS, MD, MB, MI, MS)	_ALUWrm(X86_ADD, RS, MD, MB, MI, MS)
#define ADDWir(IM, RD)			_ALUWir(X86_ADD, IM, RD)
#define ADDWim(IM, MD, MB, MI, MS)	_ALUWim(X86_ADD, IM, MD, MB, MI, MS)

#define ADDLrr(RS, RD)			_ALULrr(X86_ADD, RS, RD)
#define ADDLmr(MD, MB, MI, MS, RD)	_ALULmr(X86_ADD, MD, MB, MI, MS, RD)
#define ADDLrm(RS, MD, MB, MI, MS)	_ALULrm(X86_ADD, RS, MD, MB, MI, MS)
#define ADDLir(IM, RD)			_ALULir(X86_ADD, IM, RD)
#define ADDLim(IM, MD, MB, MI, MS)	_ALULim(X86_ADD, IM, MD, MB, MI, MS)

#define ADDQrr(RS, RD)			_ALUQrr(X86_ADD, RS, RD)
#define ADDQmr(MD, MB, MI, MS, RD)	_ALUQmr(X86_ADD, MD, MB, MI, MS, RD)
#define ADDQrm(RS, MD, MB, MI, MS)	_ALUQrm(X86_ADD, RS, MD, MB, MI, MS)
#define ADDQir(IM, RD)			_ALUQir(X86_ADD, IM, RD)
#define ADDQim(IM, MD, MB, MI, MS)	_ALUQim(X86_ADD, IM, MD, MB, MI, MS)

#define ANDBrr(RS, RD)			_ALUBrr(X86_AND, RS, RD)
#define ANDBmr(MD, MB, MI, MS, RD)	_ALUBmr(X86_AND, MD, MB, MI, MS, RD)
#define ANDBrm(RS, MD, MB, MI, MS)	_ALUBrm(X86_AND, RS, MD, MB, MI, MS)
#define ANDBir(IM, RD)			_ALUBir(X86_AND, IM, RD)
#define ANDBim(IM, MD, MB, MI, MS)	_ALUBim(X86_AND, IM, MD, MB, MI, MS)

#define ANDWrr(RS, RD)			_ALUWrr(X86_AND, RS, RD)
#define ANDWmr(MD, MB, MI, MS, RD)	_ALUWmr(X86_AND, MD, MB, MI, MS, RD)
#define ANDWrm(RS, MD, MB, MI, MS)	_ALUWrm(X86_AND, RS, MD, MB, MI, MS)
#define ANDWir(IM, RD)			_ALUWir(X86_AND, IM, RD)
#define ANDWim(IM, MD, MB, MI, MS)	_ALUWim(X86_AND, IM, MD, MB, MI, MS)

#define ANDLrr(RS, RD)			_ALULrr(X86_AND, RS, RD)
#define ANDLmr(MD, MB, MI, MS, RD)	_ALULmr(X86_AND, MD, MB, MI, MS, RD)
#define ANDLrm(RS, MD, MB, MI, MS)	_ALULrm(X86_AND, RS, MD, MB, MI, MS)
#define ANDLir(IM, RD)			_ALULir(X86_AND, IM, RD)
#define ANDLim(IM, MD, MB, MI, MS)	_ALULim(X86_AND, IM, MD, MB, MI, MS)

#define ANDQrr(RS, RD)			_ALUQrr(X86_AND, RS, RD)
#define ANDQmr(MD, MB, MI, MS, RD)	_ALUQmr(X86_AND, MD, MB, MI, MS, RD)
#define ANDQrm(RS, MD, MB, MI, MS)	_ALUQrm(X86_AND, RS, MD, MB, MI, MS)
#define ANDQir(IM, RD)			_ALUQir(X86_AND, IM, RD)
#define ANDQim(IM, MD, MB, MI, MS)	_ALUQim(X86_AND, IM, MD, MB, MI, MS)

#define CMPBrr(RS, RD)			_ALUBrr(X86_CMP, RS, RD)
#define CMPBmr(MD, MB, MI, MS, RD)	_ALUBmr(X86_CMP, MD, MB, MI, MS, RD)
#define CMPBrm(RS, MD, MB, MI, MS)	_ALUBrm(X86_CMP, RS, MD, MB, MI, MS)
#define CMPBir(IM, RD)			_ALUBir(X86_CMP, IM, RD)
#define CMPBim(IM, MD, MB, MI, MS)	_ALUBim(X86_CMP, IM, MD, MB, MI, MS)

#define CMPWrr(RS, RD)			_ALUWrr(X86_CMP, RS, RD)
#define CMPWmr(MD, MB, MI, MS, RD)	_ALUWmr(X86_CMP, MD, MB, MI, MS, RD)
#define CMPWrm(RS, MD, MB, MI, MS)	_ALUWrm(X86_CMP, RS, MD, MB, MI, MS)
#define CMPWir(IM, RD)			_ALUWir(X86_CMP, IM, RD)
#define CMPWim(IM, MD, MB, MI, MS)	_ALUWim(X86_CMP, IM, MD, MB, MI, MS)

#define CMPLrr(RS, RD)			_ALULrr(X86_CMP, RS, RD)
#define CMPLmr(MD, MB, MI, MS, RD)	_ALULmr(X86_CMP, MD, MB, MI, MS, RD)
#define CMPLrm(RS, MD, MB, MI, MS)	_ALULrm(X86_CMP, RS, MD, MB, MI, MS)
#define CMPLir(IM, RD)			_ALULir(X86_CMP, IM, RD)
#define CMPLim(IM, MD, MB, MI, MS)	_ALULim(X86_CMP, IM, MD, MB, MI, MS)

#define CMPQrr(RS, RD)			_ALUQrr(X86_CMP, RS, RD)
#define CMPQmr(MD, MB, MI, MS, RD)	_ALUQmr(X86_CMP, MD, MB, MI, MS, RD)
#define CMPQrm(RS, MD, MB, MI, MS)	_ALUQrm(X86_CMP, RS, MD, MB, MI, MS)
#define CMPQir(IM, RD)			_ALUQir(X86_CMP, IM, RD)
#define CMPQim(IM, MD, MB, MI, MS)	_ALUQim(X86_CMP, IM, MD, MB, MI, MS)

#define ORBrr(RS, RD)			_ALUBrr(X86_OR, RS, RD)
#define ORBmr(MD, MB, MI, MS, RD)	_ALUBmr(X86_OR, MD, MB, MI, MS, RD)
#define ORBrm(RS, MD, MB, MI, MS)	_ALUBrm(X86_OR, RS, MD, MB, MI, MS)
#define ORBir(IM, RD)			_ALUBir(X86_OR, IM, RD)
#define ORBim(IM, MD, MB, MI, MS)	_ALUBim(X86_OR, IM, MD, MB, MI, MS)

#define ORWrr(RS, RD)			_ALUWrr(X86_OR, RS, RD)
#define ORWmr(MD, MB, MI, MS, RD)	_ALUWmr(X86_OR, MD, MB, MI, MS, RD)
#define ORWrm(RS, MD, MB, MI, MS)	_ALUWrm(X86_OR, RS, MD, MB, MI, MS)
#define ORWir(IM, RD)			_ALUWir(X86_OR, IM, RD)
#define ORWim(IM, MD, MB, MI, MS)	_ALUWim(X86_OR, IM, MD, MB, MI, MS)

#define ORLrr(RS, RD)			_ALULrr(X86_OR, RS, RD)
#define ORLmr(MD, MB, MI, MS, RD)	_ALULmr(X86_OR, MD, MB, MI, MS, RD)
#define ORLrm(RS, MD, MB, MI, MS)	_ALULrm(X86_OR, RS, MD, MB, MI, MS)
#define ORLir(IM, RD)			_ALULir(X86_OR, IM, RD)
#define ORLim(IM, MD, MB, MI, MS)	_ALULim(X86_OR, IM, MD, MB, MI, MS)

#define ORQrr(RS, RD)			_ALUQrr(X86_OR, RS, RD)
#define ORQmr(MD, MB, MI, MS, RD)	_ALUQmr(X86_OR, MD, MB, MI, MS, RD)
693     #define ORQrm(RS, MD, MB, MI, MS) _ALUQrm(X86_OR, RS, MD, MB, MI, MS)
694     #define ORQir(IM, RD) _ALUQir(X86_OR, IM, RD)
695     #define ORQim(IM, MD, MB, MI, MS) _ALUQim(X86_OR, IM, MD, MB, MI, MS)
696    
697     #define SBBBrr(RS, RD) _ALUBrr(X86_SBB, RS, RD)
698     #define SBBBmr(MD, MB, MI, MS, RD) _ALUBmr(X86_SBB, MD, MB, MI, MS, RD)
699     #define SBBBrm(RS, MD, MB, MI, MS) _ALUBrm(X86_SBB, RS, MD, MB, MI, MS)
700     #define SBBBir(IM, RD) _ALUBir(X86_SBB, IM, RD)
701     #define SBBBim(IM, MD, MB, MI, MS) _ALUBim(X86_SBB, IM, MD, MB, MI, MS)
702    
703     #define SBBWrr(RS, RD) _ALUWrr(X86_SBB, RS, RD)
704     #define SBBWmr(MD, MB, MI, MS, RD) _ALUWmr(X86_SBB, MD, MB, MI, MS, RD)
705     #define SBBWrm(RS, MD, MB, MI, MS) _ALUWrm(X86_SBB, RS, MD, MB, MI, MS)
706     #define SBBWir(IM, RD) _ALUWir(X86_SBB, IM, RD)
707     #define SBBWim(IM, MD, MB, MI, MS) _ALUWim(X86_SBB, IM, MD, MB, MI, MS)
708    
709     #define SBBLrr(RS, RD) _ALULrr(X86_SBB, RS, RD)
710     #define SBBLmr(MD, MB, MI, MS, RD) _ALULmr(X86_SBB, MD, MB, MI, MS, RD)
711     #define SBBLrm(RS, MD, MB, MI, MS) _ALULrm(X86_SBB, RS, MD, MB, MI, MS)
712     #define SBBLir(IM, RD) _ALULir(X86_SBB, IM, RD)
713     #define SBBLim(IM, MD, MB, MI, MS) _ALULim(X86_SBB, IM, MD, MB, MI, MS)
714    
715     #define SBBQrr(RS, RD) _ALUQrr(X86_SBB, RS, RD)
716     #define SBBQmr(MD, MB, MI, MS, RD) _ALUQmr(X86_SBB, MD, MB, MI, MS, RD)
717     #define SBBQrm(RS, MD, MB, MI, MS) _ALUQrm(X86_SBB, RS, MD, MB, MI, MS)
718     #define SBBQir(IM, RD) _ALUQir(X86_SBB, IM, RD)
719     #define SBBQim(IM, MD, MB, MI, MS) _ALUQim(X86_SBB, IM, MD, MB, MI, MS)
720    
721     #define SUBBrr(RS, RD) _ALUBrr(X86_SUB, RS, RD)
722     #define SUBBmr(MD, MB, MI, MS, RD) _ALUBmr(X86_SUB, MD, MB, MI, MS, RD)
723     #define SUBBrm(RS, MD, MB, MI, MS) _ALUBrm(X86_SUB, RS, MD, MB, MI, MS)
724     #define SUBBir(IM, RD) _ALUBir(X86_SUB, IM, RD)
725     #define SUBBim(IM, MD, MB, MI, MS) _ALUBim(X86_SUB, IM, MD, MB, MI, MS)
726    
727     #define SUBWrr(RS, RD) _ALUWrr(X86_SUB, RS, RD)
728     #define SUBWmr(MD, MB, MI, MS, RD) _ALUWmr(X86_SUB, MD, MB, MI, MS, RD)
729     #define SUBWrm(RS, MD, MB, MI, MS) _ALUWrm(X86_SUB, RS, MD, MB, MI, MS)
730     #define SUBWir(IM, RD) _ALUWir(X86_SUB, IM, RD)
731     #define SUBWim(IM, MD, MB, MI, MS) _ALUWim(X86_SUB, IM, MD, MB, MI, MS)
732    
733     #define SUBLrr(RS, RD) _ALULrr(X86_SUB, RS, RD)
734     #define SUBLmr(MD, MB, MI, MS, RD) _ALULmr(X86_SUB, MD, MB, MI, MS, RD)
735     #define SUBLrm(RS, MD, MB, MI, MS) _ALULrm(X86_SUB, RS, MD, MB, MI, MS)
736     #define SUBLir(IM, RD) _ALULir(X86_SUB, IM, RD)
737     #define SUBLim(IM, MD, MB, MI, MS) _ALULim(X86_SUB, IM, MD, MB, MI, MS)
738    
739     #define SUBQrr(RS, RD) _ALUQrr(X86_SUB, RS, RD)
740     #define SUBQmr(MD, MB, MI, MS, RD) _ALUQmr(X86_SUB, MD, MB, MI, MS, RD)
741     #define SUBQrm(RS, MD, MB, MI, MS) _ALUQrm(X86_SUB, RS, MD, MB, MI, MS)
742     #define SUBQir(IM, RD) _ALUQir(X86_SUB, IM, RD)
743     #define SUBQim(IM, MD, MB, MI, MS) _ALUQim(X86_SUB, IM, MD, MB, MI, MS)
744    
745     #define XORBrr(RS, RD) _ALUBrr(X86_XOR, RS, RD)
746     #define XORBmr(MD, MB, MI, MS, RD) _ALUBmr(X86_XOR, MD, MB, MI, MS, RD)
747     #define XORBrm(RS, MD, MB, MI, MS) _ALUBrm(X86_XOR, RS, MD, MB, MI, MS)
748     #define XORBir(IM, RD) _ALUBir(X86_XOR, IM, RD)
749     #define XORBim(IM, MD, MB, MI, MS) _ALUBim(X86_XOR, IM, MD, MB, MI, MS)
750    
751     #define XORWrr(RS, RD) _ALUWrr(X86_XOR, RS, RD)
752     #define XORWmr(MD, MB, MI, MS, RD) _ALUWmr(X86_XOR, MD, MB, MI, MS, RD)
753     #define XORWrm(RS, MD, MB, MI, MS) _ALUWrm(X86_XOR, RS, MD, MB, MI, MS)
754     #define XORWir(IM, RD) _ALUWir(X86_XOR, IM, RD)
755     #define XORWim(IM, MD, MB, MI, MS) _ALUWim(X86_XOR, IM, MD, MB, MI, MS)
756    
757     #define XORLrr(RS, RD) _ALULrr(X86_XOR, RS, RD)
758     #define XORLmr(MD, MB, MI, MS, RD) _ALULmr(X86_XOR, MD, MB, MI, MS, RD)
759     #define XORLrm(RS, MD, MB, MI, MS) _ALULrm(X86_XOR, RS, MD, MB, MI, MS)
760     #define XORLir(IM, RD) _ALULir(X86_XOR, IM, RD)
761     #define XORLim(IM, MD, MB, MI, MS) _ALULim(X86_XOR, IM, MD, MB, MI, MS)
762    
763     #define XORQrr(RS, RD) _ALUQrr(X86_XOR, RS, RD)
764     #define XORQmr(MD, MB, MI, MS, RD) _ALUQmr(X86_XOR, MD, MB, MI, MS, RD)
765     #define XORQrm(RS, MD, MB, MI, MS) _ALUQrm(X86_XOR, RS, MD, MB, MI, MS)
766     #define XORQir(IM, RD) _ALUQir(X86_XOR, IM, RD)
767     #define XORQim(IM, MD, MB, MI, MS) _ALUQim(X86_XOR, IM, MD, MB, MI, MS)
768    
769    
770     /* --- Shift/Rotate instructions ------------------------------------------- */
771    
772 gbeauche 1.2 enum {
773 gbeauche 1.1 X86_ROL = 0,
774     X86_ROR = 1,
775     X86_RCL = 2,
776     X86_RCR = 3,
777     X86_SHL = 4,
778     X86_SHR = 5,
779     X86_SAR = 7,
780 gbeauche 1.2 };
781 gbeauche 1.1
782     /* _format Opcd ,Mod ,r ,m ,mem=dsp+sib ,imm... */
783    
784     #define _ROTSHIBir(OP,IM,RD) (X86_OPTIMIZE_ROTSHI && ((IM) == 1) ? \
785     (_REXBrr(0, RD), _O_Mrm (0xd0 ,_b11,OP,_r1(RD) )) : \
786     (_REXBrr(0, RD), _O_Mrm_B (0xc0 ,_b11,OP,_r1(RD) ,_u8(IM))) )
787     #define _ROTSHIBim(OP,IM,MD,MB,MI,MS) (X86_OPTIMIZE_ROTSHI && ((IM) == 1) ? \
788     (_REXBrm(0, MB, MI), _O_r_X (0xd0 ,OP ,MD,MB,MI,MS )) : \
789     (_REXBrm(0, MB, MI), _O_r_X_B (0xc0 ,OP ,MD,MB,MI,MS ,_u8(IM))) )
790     #define _ROTSHIBrr(OP,RS,RD) (((RS) == X86_CL) ? \
791     (_REXBrr(RS, RD), _O_Mrm (0xd2 ,_b11,OP,_r1(RD) )) : \
792     x86_emit_failure("source register must be CL" ) )
793     #define _ROTSHIBrm(OP,RS,MD,MB,MI,MS) (((RS) == X86_CL) ? \
794     (_REXBrm(RS, MB, MI), _O_r_X (0xd2 ,OP ,MD,MB,MI,MS )) : \
795     x86_emit_failure("source register must be CL" ) )
796    
797     #define _ROTSHIWir(OP,IM,RD) (X86_OPTIMIZE_ROTSHI && ((IM) == 1) ? \
798     (_d16(), _REXLrr(0, RD), _O_Mrm (0xd1 ,_b11,OP,_r2(RD) )) : \
799     (_d16(), _REXLrr(0, RD), _O_Mrm_B (0xc1 ,_b11,OP,_r2(RD) ,_u8(IM))) )
800     #define _ROTSHIWim(OP,IM,MD,MB,MI,MS) (X86_OPTIMIZE_ROTSHI && ((IM) == 1) ? \
801     (_d16(), _REXLrm(0, MB, MI), _O_r_X (0xd1 ,OP ,MD,MB,MI,MS )) : \
802     (_d16(), _REXLrm(0, MB, MI), _O_r_X_B (0xc1 ,OP ,MD,MB,MI,MS ,_u8(IM))) )
803     #define _ROTSHIWrr(OP,RS,RD) (((RS) == X86_CL) ? \
804     (_d16(), _REXLrr(RS, RD), _O_Mrm (0xd3 ,_b11,OP,_r2(RD) )) : \
805     x86_emit_failure("source register must be CL" ) )
806     #define _ROTSHIWrm(OP,RS,MD,MB,MI,MS) (((RS) == X86_CL) ? \
807     (_d16(), _REXLrm(RS, MB, MI), _O_r_X (0xd3 ,OP ,MD,MB,MI,MS )) : \
808     x86_emit_failure("source register must be CL" ) )
809    
810     #define _ROTSHILir(OP,IM,RD) (X86_OPTIMIZE_ROTSHI && ((IM) == 1) ? \
811     (_REXLrr(0, RD), _O_Mrm (0xd1 ,_b11,OP,_r4(RD) )) : \
812     (_REXLrr(0, RD), _O_Mrm_B (0xc1 ,_b11,OP,_r4(RD) ,_u8(IM))) )
813     #define _ROTSHILim(OP,IM,MD,MB,MI,MS) (X86_OPTIMIZE_ROTSHI && ((IM) == 1) ? \
814     (_REXLrm(0, MB, MI), _O_r_X (0xd1 ,OP ,MD,MB,MI,MS )) : \
815     (_REXLrm(0, MB, MI), _O_r_X_B (0xc1 ,OP ,MD,MB,MI,MS ,_u8(IM))) )
816     #define _ROTSHILrr(OP,RS,RD) (((RS) == X86_CL) ? \
817     (_REXLrr(RS, RD), _O_Mrm (0xd3 ,_b11,OP,_r4(RD) )) : \
818     x86_emit_failure("source register must be CL" ) )
819     #define _ROTSHILrm(OP,RS,MD,MB,MI,MS) (((RS) == X86_CL) ? \
820     (_REXLrm(RS, MB, MI), _O_r_X (0xd3 ,OP ,MD,MB,MI,MS )) : \
821     x86_emit_failure("source register must be CL" ) )
822    
823     #define _ROTSHIQir(OP,IM,RD) (X86_OPTIMIZE_ROTSHI && ((IM) == 1) ? \
824     (_REXQrr(0, RD), _O_Mrm (0xd1 ,_b11,OP,_r8(RD) )) : \
825     (_REXQrr(0, RD), _O_Mrm_B (0xc1 ,_b11,OP,_r8(RD) ,_u8(IM))) )
826     #define _ROTSHIQim(OP,IM,MD,MB,MI,MS) (X86_OPTIMIZE_ROTSHI && ((IM) == 1) ? \
827     (_REXQrm(0, MB, MI), _O_r_X (0xd1 ,OP ,MD,MB,MI,MS )) : \
828     (_REXQrm(0, MB, MI), _O_r_X_B (0xc1 ,OP ,MD,MB,MI,MS ,_u8(IM))) )
829     #define _ROTSHIQrr(OP,RS,RD) (((RS) == X86_CL) ? \
830     (_REXQrr(RS, RD), _O_Mrm (0xd3 ,_b11,OP,_r8(RD) )) : \
831     x86_emit_failure("source register must be CL" ) )
832     #define _ROTSHIQrm(OP,RS,MD,MB,MI,MS) (((RS) == X86_CL) ? \
833     (_REXQrm(RS, MB, MI), _O_r_X (0xd3 ,OP ,MD,MB,MI,MS )) : \
834     x86_emit_failure("source register must be CL" ) )
835    
836     #define ROLBir(IM, RD) _ROTSHIBir(X86_ROL, IM, RD)
837     #define ROLBim(IM, MD, MB, MI, MS) _ROTSHIBim(X86_ROL, IM, MD, MB, MI, MS)
838     #define ROLBrr(RS, RD) _ROTSHIBrr(X86_ROL, RS, RD)
839     #define ROLBrm(RS, MD, MB, MI, MS) _ROTSHIBrm(X86_ROL, RS, MD, MB, MI, MS)
840    
841     #define ROLWir(IM, RD) _ROTSHIWir(X86_ROL, IM, RD)
842     #define ROLWim(IM, MD, MB, MI, MS) _ROTSHIWim(X86_ROL, IM, MD, MB, MI, MS)
843     #define ROLWrr(RS, RD) _ROTSHIWrr(X86_ROL, RS, RD)
844     #define ROLWrm(RS, MD, MB, MI, MS) _ROTSHIWrm(X86_ROL, RS, MD, MB, MI, MS)
845    
846     #define ROLLir(IM, RD) _ROTSHILir(X86_ROL, IM, RD)
847     #define ROLLim(IM, MD, MB, MI, MS) _ROTSHILim(X86_ROL, IM, MD, MB, MI, MS)
848     #define ROLLrr(RS, RD) _ROTSHILrr(X86_ROL, RS, RD)
849     #define ROLLrm(RS, MD, MB, MI, MS) _ROTSHILrm(X86_ROL, RS, MD, MB, MI, MS)
850    
851     #define ROLQir(IM, RD) _ROTSHIQir(X86_ROL, IM, RD)
852     #define ROLQim(IM, MD, MB, MI, MS) _ROTSHIQim(X86_ROL, IM, MD, MB, MI, MS)
853     #define ROLQrr(RS, RD) _ROTSHIQrr(X86_ROL, RS, RD)
854     #define ROLQrm(RS, MD, MB, MI, MS) _ROTSHIQrm(X86_ROL, RS, MD, MB, MI, MS)
855    
856     #define RORBir(IM, RD) _ROTSHIBir(X86_ROR, IM, RD)
857     #define RORBim(IM, MD, MB, MI, MS) _ROTSHIBim(X86_ROR, IM, MD, MB, MI, MS)
858     #define RORBrr(RS, RD) _ROTSHIBrr(X86_ROR, RS, RD)
859     #define RORBrm(RS, MD, MB, MI, MS) _ROTSHIBrm(X86_ROR, RS, MD, MB, MI, MS)
860    
861     #define RORWir(IM, RD) _ROTSHIWir(X86_ROR, IM, RD)
862     #define RORWim(IM, MD, MB, MI, MS) _ROTSHIWim(X86_ROR, IM, MD, MB, MI, MS)
863     #define RORWrr(RS, RD) _ROTSHIWrr(X86_ROR, RS, RD)
864     #define RORWrm(RS, MD, MB, MI, MS) _ROTSHIWrm(X86_ROR, RS, MD, MB, MI, MS)
865    
866     #define RORLir(IM, RD) _ROTSHILir(X86_ROR, IM, RD)
867     #define RORLim(IM, MD, MB, MI, MS) _ROTSHILim(X86_ROR, IM, MD, MB, MI, MS)
868     #define RORLrr(RS, RD) _ROTSHILrr(X86_ROR, RS, RD)
869     #define RORLrm(RS, MD, MB, MI, MS) _ROTSHILrm(X86_ROR, RS, MD, MB, MI, MS)
870    
871     #define RORQir(IM, RD) _ROTSHIQir(X86_ROR, IM, RD)
872     #define RORQim(IM, MD, MB, MI, MS) _ROTSHIQim(X86_ROR, IM, MD, MB, MI, MS)
873     #define RORQrr(RS, RD) _ROTSHIQrr(X86_ROR, RS, RD)
874     #define RORQrm(RS, MD, MB, MI, MS) _ROTSHIQrm(X86_ROR, RS, MD, MB, MI, MS)
875    
876     #define RCLBir(IM, RD) _ROTSHIBir(X86_RCL, IM, RD)
877     #define RCLBim(IM, MD, MB, MI, MS) _ROTSHIBim(X86_RCL, IM, MD, MB, MI, MS)
878     #define RCLBrr(RS, RD) _ROTSHIBrr(X86_RCL, RS, RD)
879     #define RCLBrm(RS, MD, MB, MI, MS) _ROTSHIBrm(X86_RCL, RS, MD, MB, MI, MS)
880    
881     #define RCLWir(IM, RD) _ROTSHIWir(X86_RCL, IM, RD)
882     #define RCLWim(IM, MD, MB, MI, MS) _ROTSHIWim(X86_RCL, IM, MD, MB, MI, MS)
883     #define RCLWrr(RS, RD) _ROTSHIWrr(X86_RCL, RS, RD)
884     #define RCLWrm(RS, MD, MB, MI, MS) _ROTSHIWrm(X86_RCL, RS, MD, MB, MI, MS)
885    
886     #define RCLLir(IM, RD) _ROTSHILir(X86_RCL, IM, RD)
887     #define RCLLim(IM, MD, MB, MI, MS) _ROTSHILim(X86_RCL, IM, MD, MB, MI, MS)
888     #define RCLLrr(RS, RD) _ROTSHILrr(X86_RCL, RS, RD)
889     #define RCLLrm(RS, MD, MB, MI, MS) _ROTSHILrm(X86_RCL, RS, MD, MB, MI, MS)
890    
891     #define RCLQir(IM, RD) _ROTSHIQir(X86_RCL, IM, RD)
892     #define RCLQim(IM, MD, MB, MI, MS) _ROTSHIQim(X86_RCL, IM, MD, MB, MI, MS)
893     #define RCLQrr(RS, RD) _ROTSHIQrr(X86_RCL, RS, RD)
894     #define RCLQrm(RS, MD, MB, MI, MS) _ROTSHIQrm(X86_RCL, RS, MD, MB, MI, MS)
895    
896     #define RCRBir(IM, RD) _ROTSHIBir(X86_RCR, IM, RD)
897     #define RCRBim(IM, MD, MB, MI, MS) _ROTSHIBim(X86_RCR, IM, MD, MB, MI, MS)
898     #define RCRBrr(RS, RD) _ROTSHIBrr(X86_RCR, RS, RD)
899     #define RCRBrm(RS, MD, MB, MI, MS) _ROTSHIBrm(X86_RCR, RS, MD, MB, MI, MS)
900    
901     #define RCRWir(IM, RD) _ROTSHIWir(X86_RCR, IM, RD)
902     #define RCRWim(IM, MD, MB, MI, MS) _ROTSHIWim(X86_RCR, IM, MD, MB, MI, MS)
903     #define RCRWrr(RS, RD) _ROTSHIWrr(X86_RCR, RS, RD)
904     #define RCRWrm(RS, MD, MB, MI, MS) _ROTSHIWrm(X86_RCR, RS, MD, MB, MI, MS)
905    
906     #define RCRLir(IM, RD) _ROTSHILir(X86_RCR, IM, RD)
907     #define RCRLim(IM, MD, MB, MI, MS) _ROTSHILim(X86_RCR, IM, MD, MB, MI, MS)
908     #define RCRLrr(RS, RD) _ROTSHILrr(X86_RCR, RS, RD)
909     #define RCRLrm(RS, MD, MB, MI, MS) _ROTSHILrm(X86_RCR, RS, MD, MB, MI, MS)
910    
911     #define RCRQir(IM, RD) _ROTSHIQir(X86_RCR, IM, RD)
912     #define RCRQim(IM, MD, MB, MI, MS) _ROTSHIQim(X86_RCR, IM, MD, MB, MI, MS)
913     #define RCRQrr(RS, RD) _ROTSHIQrr(X86_RCR, RS, RD)
914     #define RCRQrm(RS, MD, MB, MI, MS) _ROTSHIQrm(X86_RCR, RS, MD, MB, MI, MS)
915    
916     #define SHLBir(IM, RD) _ROTSHIBir(X86_SHL, IM, RD)
917     #define SHLBim(IM, MD, MB, MI, MS) _ROTSHIBim(X86_SHL, IM, MD, MB, MI, MS)
918     #define SHLBrr(RS, RD) _ROTSHIBrr(X86_SHL, RS, RD)
919     #define SHLBrm(RS, MD, MB, MI, MS) _ROTSHIBrm(X86_SHL, RS, MD, MB, MI, MS)
920    
921     #define SHLWir(IM, RD) _ROTSHIWir(X86_SHL, IM, RD)
922     #define SHLWim(IM, MD, MB, MI, MS) _ROTSHIWim(X86_SHL, IM, MD, MB, MI, MS)
923     #define SHLWrr(RS, RD) _ROTSHIWrr(X86_SHL, RS, RD)
924     #define SHLWrm(RS, MD, MB, MI, MS) _ROTSHIWrm(X86_SHL, RS, MD, MB, MI, MS)
925    
926     #define SHLLir(IM, RD) _ROTSHILir(X86_SHL, IM, RD)
927     #define SHLLim(IM, MD, MB, MI, MS) _ROTSHILim(X86_SHL, IM, MD, MB, MI, MS)
928     #define SHLLrr(RS, RD) _ROTSHILrr(X86_SHL, RS, RD)
929     #define SHLLrm(RS, MD, MB, MI, MS) _ROTSHILrm(X86_SHL, RS, MD, MB, MI, MS)
930    
931     #define SHLQir(IM, RD) _ROTSHIQir(X86_SHL, IM, RD)
932     #define SHLQim(IM, MD, MB, MI, MS) _ROTSHIQim(X86_SHL, IM, MD, MB, MI, MS)
933     #define SHLQrr(RS, RD) _ROTSHIQrr(X86_SHL, RS, RD)
934     #define SHLQrm(RS, MD, MB, MI, MS) _ROTSHIQrm(X86_SHL, RS, MD, MB, MI, MS)
935    
936     #define SHRBir(IM, RD) _ROTSHIBir(X86_SHR, IM, RD)
937     #define SHRBim(IM, MD, MB, MI, MS) _ROTSHIBim(X86_SHR, IM, MD, MB, MI, MS)
938     #define SHRBrr(RS, RD) _ROTSHIBrr(X86_SHR, RS, RD)
939     #define SHRBrm(RS, MD, MB, MI, MS) _ROTSHIBrm(X86_SHR, RS, MD, MB, MI, MS)
940    
941     #define SHRWir(IM, RD) _ROTSHIWir(X86_SHR, IM, RD)
942     #define SHRWim(IM, MD, MB, MI, MS) _ROTSHIWim(X86_SHR, IM, MD, MB, MI, MS)
943     #define SHRWrr(RS, RD) _ROTSHIWrr(X86_SHR, RS, RD)
944     #define SHRWrm(RS, MD, MB, MI, MS) _ROTSHIWrm(X86_SHR, RS, MD, MB, MI, MS)
945    
946     #define SHRLir(IM, RD) _ROTSHILir(X86_SHR, IM, RD)
947     #define SHRLim(IM, MD, MB, MI, MS) _ROTSHILim(X86_SHR, IM, MD, MB, MI, MS)
948     #define SHRLrr(RS, RD) _ROTSHILrr(X86_SHR, RS, RD)
949     #define SHRLrm(RS, MD, MB, MI, MS) _ROTSHILrm(X86_SHR, RS, MD, MB, MI, MS)
950    
951     #define SHRQir(IM, RD) _ROTSHIQir(X86_SHR, IM, RD)
952     #define SHRQim(IM, MD, MB, MI, MS) _ROTSHIQim(X86_SHR, IM, MD, MB, MI, MS)
953     #define SHRQrr(RS, RD) _ROTSHIQrr(X86_SHR, RS, RD)
954     #define SHRQrm(RS, MD, MB, MI, MS) _ROTSHIQrm(X86_SHR, RS, MD, MB, MI, MS)
955    
956     #define SALBir SHLBir
957     #define SALBim SHLBim
958     #define SALBrr SHLBrr
959     #define SALBrm SHLBrm
960    
961     #define SALWir SHLWir
962     #define SALWim SHLWim
963     #define SALWrr SHLWrr
964     #define SALWrm SHLWrm
965    
966     #define SALLir SHLLir
967     #define SALLim SHLLim
968     #define SALLrr SHLLrr
969     #define SALLrm SHLLrm
970    
971     #define SALQir SHLQir
972     #define SALQim SHLQim
973     #define SALQrr SHLQrr
974     #define SALQrm SHLQrm
975    
976     #define SARBir(IM, RD) _ROTSHIBir(X86_SAR, IM, RD)
977     #define SARBim(IM, MD, MB, MI, MS) _ROTSHIBim(X86_SAR, IM, MD, MB, MI, MS)
978     #define SARBrr(RS, RD) _ROTSHIBrr(X86_SAR, RS, RD)
979     #define SARBrm(RS, MD, MB, MI, MS) _ROTSHIBrm(X86_SAR, RS, MD, MB, MI, MS)
980    
981     #define SARWir(IM, RD) _ROTSHIWir(X86_SAR, IM, RD)
982     #define SARWim(IM, MD, MB, MI, MS) _ROTSHIWim(X86_SAR, IM, MD, MB, MI, MS)
983     #define SARWrr(RS, RD) _ROTSHIWrr(X86_SAR, RS, RD)
984     #define SARWrm(RS, MD, MB, MI, MS) _ROTSHIWrm(X86_SAR, RS, MD, MB, MI, MS)
985    
986     #define SARLir(IM, RD) _ROTSHILir(X86_SAR, IM, RD)
987     #define SARLim(IM, MD, MB, MI, MS) _ROTSHILim(X86_SAR, IM, MD, MB, MI, MS)
988     #define SARLrr(RS, RD) _ROTSHILrr(X86_SAR, RS, RD)
989     #define SARLrm(RS, MD, MB, MI, MS) _ROTSHILrm(X86_SAR, RS, MD, MB, MI, MS)
990    
991     #define SARQir(IM, RD) _ROTSHIQir(X86_SAR, IM, RD)
992     #define SARQim(IM, MD, MB, MI, MS) _ROTSHIQim(X86_SAR, IM, MD, MB, MI, MS)
993     #define SARQrr(RS, RD) _ROTSHIQrr(X86_SAR, RS, RD)
994     #define SARQrm(RS, MD, MB, MI, MS) _ROTSHIQrm(X86_SAR, RS, MD, MB, MI, MS)
995    
996    
997     /* --- Bit test instructions ----------------------------------------------- */
998    
999 gbeauche 1.2 enum {
1000 gbeauche 1.1 X86_BT = 4,
1001     X86_BTS = 5,
1002     X86_BTR = 6,
1003     X86_BTC = 7,
1004 gbeauche 1.2 };
1005 gbeauche 1.1
1006     /* _format Opcd ,Mod ,r ,m ,mem=dsp+sib ,imm... */
1007    
1008     #define _BTWir(OP, IM, RD) (_d16(), _REXLrr(0, RD), _OO_Mrm_B (0x0fba ,_b11,OP ,_r2(RD) ,_u8(IM)))
1009     #define _BTWim(OP, IM, MD, MB, MI, MS) (_d16(), _REXLrm(0, MB, MI), _OO_r_X_B (0x0fba ,OP ,MD,MB,MI,MS ,_u8(IM)))
1010     #define _BTWrr(OP, RS, RD) (_d16(), _REXLrr(RS, RD), _OO_Mrm (0x0f83|((OP)<<3),_b11,_r2(RS),_r2(RD) ))
1011     #define _BTWrm(OP, RS, MD, MB, MI, MS) (_d16(), _REXLrm(RS, MB, MI), _OO_r_X (0x0f83|((OP)<<3) ,_r2(RS) ,MD,MB,MI,MS ))
1012    
1013     #define _BTLir(OP, IM, RD) (_REXLrr(0, RD), _OO_Mrm_B (0x0fba ,_b11,OP ,_r4(RD) ,_u8(IM)))
1014     #define _BTLim(OP, IM, MD, MB, MI, MS) (_REXLrm(0, MB, MI), _OO_r_X_B (0x0fba ,OP ,MD,MB,MI,MS ,_u8(IM)))
1015     #define _BTLrr(OP, RS, RD) (_REXLrr(RS, RD), _OO_Mrm (0x0f83|((OP)<<3),_b11,_r4(RS),_r4(RD) ))
1016     #define _BTLrm(OP, RS, MD, MB, MI, MS) (_REXLrm(RS, MB, MI), _OO_r_X (0x0f83|((OP)<<3) ,_r4(RS) ,MD,MB,MI,MS ))
1017    
1018     #define _BTQir(OP, IM, RD) (_REXQrr(0, RD), _OO_Mrm_B (0x0fba ,_b11,OP ,_r8(RD) ,_u8(IM)))
1019     #define _BTQim(OP, IM, MD, MB, MI, MS) (_REXQrm(0, MB, MI), _OO_r_X_B (0x0fba ,OP ,MD,MB,MI,MS ,_u8(IM)))
1020     #define _BTQrr(OP, RS, RD) (_REXQrr(RS, RD), _OO_Mrm (0x0f83|((OP)<<3),_b11,_r8(RS),_r8(RD) ))
1021     #define _BTQrm(OP, RS, MD, MB, MI, MS) (_REXQrm(RS, MB, MI), _OO_r_X (0x0f83|((OP)<<3) ,_r8(RS) ,MD,MB,MI,MS ))
1022    
1023     #define BTWir(IM, RD) _BTWir(X86_BT, IM, RD)
1024     #define BTWim(IM, MD, MB, MI, MS) _BTWim(X86_BT, IM, MD, MI, MS)
1025     #define BTWrr(RS, RD) _BTWrr(X86_BT, RS, RD)
1026     #define BTWrm(RS, MD, MB, MI, MS) _BTWrm(X86_BT, RS, MD, MB, MI, MS)
1027    
1028     #define BTLir(IM, RD) _BTLir(X86_BT, IM, RD)
1029     #define BTLim(IM, MD, MB, MI, MS) _BTLim(X86_BT, IM, MD, MB, MI, MS)
1030     #define BTLrr(RS, RD) _BTLrr(X86_BT, RS, RD)
1031     #define BTLrm(RS, MD, MB, MI, MS) _BTLrm(X86_BT, RS, MD, MB, MI, MS)
1032    
1033     #define BTQir(IM, RD) _BTQir(X86_BT, IM, RD)
1034     #define BTQim(IM, MD, MB, MI, MS) _BTQim(X86_BT, IM, MD, MB, MI, MS)
1035     #define BTQrr(RS, RD) _BTQrr(X86_BT, RS, RD)
1036     #define BTQrm(RS, MD, MB, MI, MS) _BTQrm(X86_BT, RS, MD, MB, MI, MS)
1037    
1038     #define BTCWir(IM, RD) _BTWir(X86_BTC, IM, RD)
1039     #define BTCWim(IM, MD, MB, MI, MS) _BTWim(X86_BTC, IM, MD, MI, MS)
1040     #define BTCWrr(RS, RD) _BTWrr(X86_BTC, RS, RD)
1041     #define BTCWrm(RS, MD, MB, MI, MS) _BTWrm(X86_BTC, RS, MD, MB, MI, MS)
1042    
1043     #define BTCLir(IM, RD) _BTLir(X86_BTC, IM, RD)
1044     #define BTCLim(IM, MD, MB, MI, MS) _BTLim(X86_BTC, IM, MD, MB, MI, MS)
1045     #define BTCLrr(RS, RD) _BTLrr(X86_BTC, RS, RD)
1046     #define BTCLrm(RS, MD, MB, MI, MS) _BTLrm(X86_BTC, RS, MD, MB, MI, MS)
1047    
1048     #define BTCQir(IM, RD) _BTQir(X86_BTC, IM, RD)
1049     #define BTCQim(IM, MD, MB, MI, MS) _BTQim(X86_BTC, IM, MD, MB, MI, MS)
1050     #define BTCQrr(RS, RD) _BTQrr(X86_BTC, RS, RD)
1051     #define BTCQrm(RS, MD, MB, MI, MS) _BTQrm(X86_BTC, RS, MD, MB, MI, MS)
1052    
1053     #define BTRWir(IM, RD) _BTWir(X86_BTR, IM, RD)
1054     #define BTRWim(IM, MD, MB, MI, MS) _BTWim(X86_BTR, IM, MD, MI, MS)
1055     #define BTRWrr(RS, RD) _BTWrr(X86_BTR, RS, RD)
1056     #define BTRWrm(RS, MD, MB, MI, MS) _BTWrm(X86_BTR, RS, MD, MB, MI, MS)
1057    
1058     #define BTRLir(IM, RD) _BTLir(X86_BTR, IM, RD)
1059     #define BTRLim(IM, MD, MB, MI, MS) _BTLim(X86_BTR, IM, MD, MB, MI, MS)
1060     #define BTRLrr(RS, RD) _BTLrr(X86_BTR, RS, RD)
1061     #define BTRLrm(RS, MD, MB, MI, MS) _BTLrm(X86_BTR, RS, MD, MB, MI, MS)
1062    
1063     #define BTRQir(IM, RD) _BTQir(X86_BTR, IM, RD)
1064     #define BTRQim(IM, MD, MB, MI, MS) _BTQim(X86_BTR, IM, MD, MB, MI, MS)
1065     #define BTRQrr(RS, RD) _BTQrr(X86_BTR, RS, RD)
1066     #define BTRQrm(RS, MD, MB, MI, MS) _BTQrm(X86_BTR, RS, MD, MB, MI, MS)
1067    
1068     #define BTSWir(IM, RD) _BTWir(X86_BTS, IM, RD)
1069     #define BTSWim(IM, MD, MB, MI, MS) _BTWim(X86_BTS, IM, MD, MI, MS)
1070     #define BTSWrr(RS, RD) _BTWrr(X86_BTS, RS, RD)
1071     #define BTSWrm(RS, MD, MB, MI, MS) _BTWrm(X86_BTS, RS, MD, MB, MI, MS)
1072    
1073     #define BTSLir(IM, RD) _BTLir(X86_BTS, IM, RD)
1074     #define BTSLim(IM, MD, MB, MI, MS) _BTLim(X86_BTS, IM, MD, MB, MI, MS)
1075     #define BTSLrr(RS, RD) _BTLrr(X86_BTS, RS, RD)
1076     #define BTSLrm(RS, MD, MB, MI, MS) _BTLrm(X86_BTS, RS, MD, MB, MI, MS)
1077    
1078     #define BTSQir(IM, RD) _BTQir(X86_BTS, IM, RD)
1079     #define BTSQim(IM, MD, MB, MI, MS) _BTQim(X86_BTS, IM, MD, MB, MI, MS)
1080     #define BTSQrr(RS, RD) _BTQrr(X86_BTS, RS, RD)
1081     #define BTSQrm(RS, MD, MB, MI, MS) _BTQrm(X86_BTS, RS, MD, MB, MI, MS)
1082    
1083    
1084     /* --- Move instructions --------------------------------------------------- */
1085    
1086     /* _format Opcd ,Mod ,r ,m ,mem=dsp+sib ,imm... */
1087    
1088 gbeauche 1.9 #define MOVBrr(RS, RD) (_REXBrr(RS, RD), _O_Mrm (0x88 ,_b11,_r1(RS),_r1(RD) ))
1089 gbeauche 1.1 #define MOVBmr(MD, MB, MI, MS, RD) (_REXBmr(MB, MI, RD), _O_r_X (0x8a ,_r1(RD) ,MD,MB,MI,MS ))
1090     #define MOVBrm(RS, MD, MB, MI, MS) (_REXBrm(RS, MB, MI), _O_r_X (0x88 ,_r1(RS) ,MD,MB,MI,MS ))
1091     #define MOVBir(IM, R) (_REXBrr(0, R), _Or_B (0xb0,_r1(R) ,_su8(IM)))
1092     #define MOVBim(IM, MD, MB, MI, MS) (_REXBrm(0, MB, MI), _O_X_B (0xc6 ,MD,MB,MI,MS ,_su8(IM)))
1093    
1094     #define MOVWrr(RS, RD) (_d16(), _REXLrr(RS, RD), _O_Mrm (0x89 ,_b11,_r2(RS),_r2(RD) ))
1095 gbeauche 1.11 #define MOVWmr(MD, MB, MI, MS, RD) (_d16(), _REXLmr(MB, MI, RD), _O_r_X (0x8b ,_r2(RD) ,MD,MB,MI,MS ))
1096     #define MOVWrm(RS, MD, MB, MI, MS) (_d16(), _REXLrm(RS, MB, MI), _O_r_X (0x89 ,_r2(RS) ,MD,MB,MI,MS ))
1097 gbeauche 1.1 #define MOVWir(IM, R) (_d16(), _REXLrr(0, R), _Or_W (0xb8,_r2(R) ,_su16(IM)))
1098 gbeauche 1.11 #define MOVWim(IM, MD, MB, MI, MS) (_d16(), _REXLrm(0, MB, MI), _O_X_W (0xc7 ,MD,MB,MI,MS ,_su16(IM)))
1099 gbeauche 1.1
1100     #define MOVLrr(RS, RD) (_REXLrr(RS, RD), _O_Mrm (0x89 ,_b11,_r4(RS),_r4(RD) ))
1101     #define MOVLmr(MD, MB, MI, MS, RD) (_REXLmr(MB, MI, RD), _O_r_X (0x8b ,_r4(RD) ,MD,MB,MI,MS ))
1102     #define MOVLrm(RS, MD, MB, MI, MS) (_REXLrm(RS, MB, MI), _O_r_X (0x89 ,_r4(RS) ,MD,MB,MI,MS ))
1103     #define MOVLir(IM, R) (_REXLrr(0, R), _Or_L (0xb8,_r4(R) ,IM ))
1104     #define MOVLim(IM, MD, MB, MI, MS) (_REXLrm(0, MB, MI), _O_X_L (0xc7 ,MD,MB,MI,MS ,IM ))
1105    
1106     #define MOVQrr(RS, RD) (_REXQrr(RS, RD), _O_Mrm (0x89 ,_b11,_r8(RS),_r8(RD) ))
1107     #define MOVQmr(MD, MB, MI, MS, RD) (_REXQmr(MB, MI, RD), _O_r_X (0x8b ,_r8(RD) ,MD,MB,MI,MS ))
1108     #define MOVQrm(RS, MD, MB, MI, MS) (_REXQrm(RS, MB, MI), _O_r_X (0x89 ,_r8(RS) ,MD,MB,MI,MS ))
1109 gbeauche 1.15 #define MOVQir(IM, R) (_REXQrr(0, R), _Or_Q (0xb8,_r8(R) ,IM ))
1110 gbeauche 1.1 #define MOVQim(IM, MD, MB, MI, MS) (_REXQrm(0, MB, MI), _O_X_L (0xc7 ,MD,MB,MI,MS ,IM ))
1111    
1112    
1113     /* --- Unary and Multiply/Divide instructions ------------------------------ */
1114    
1115 gbeauche 1.2 enum {
1116 gbeauche 1.1 X86_NOT = 2,
1117     X86_NEG = 3,
1118     X86_MUL = 4,
1119     X86_IMUL = 5,
1120     X86_DIV = 6,
1121     X86_IDIV = 7,
1122 gbeauche 1.2 };
1123 gbeauche 1.1
1124     /* _format Opcd ,Mod ,r ,m ,mem=dsp+sib ,imm... */
1125    
1126     #define _UNARYBr(OP, RS) (_REXBrr(0, RS), _O_Mrm (0xf6 ,_b11,OP ,_r1(RS) ))
1127     #define _UNARYBm(OP, MD, MB, MI, MS) (_REXBrm(0, MB, MI), _O_r_X (0xf6 ,OP ,MD,MB,MI,MS ))
1128     #define _UNARYWr(OP, RS) (_d16(), _REXLrr(0, RS), _O_Mrm (0xf7 ,_b11,OP ,_r2(RS) ))
1129     #define _UNARYWm(OP, MD, MB, MI, MS) (_d16(), _REXLmr(MB, MI, 0), _O_r_X (0xf7 ,OP ,MD,MB,MI,MS ))
1130     #define _UNARYLr(OP, RS) (_REXLrr(0, RS), _O_Mrm (0xf7 ,_b11,OP ,_r4(RS) ))
1131     #define _UNARYLm(OP, MD, MB, MI, MS) (_REXLmr(MB, MI, 0), _O_r_X (0xf7 ,OP ,MD,MB,MI,MS ))
1132     #define _UNARYQr(OP, RS) (_REXQrr(0, RS), _O_Mrm (0xf7 ,_b11,OP ,_r8(RS) ))
1133     #define _UNARYQm(OP, MD, MB, MI, MS) (_REXQmr(MB, MI, 0), _O_r_X (0xf7 ,OP ,MD,MB,MI,MS ))
1134    
1135     #define NOTBr(RS) _UNARYBr(X86_NOT, RS)
1136     #define NOTBm(MD, MB, MI, MS) _UNARYBm(X86_NOT, MD, MB, MI, MS)
1137     #define NOTWr(RS) _UNARYWr(X86_NOT, RS)
1138     #define NOTWm(MD, MB, MI, MS) _UNARYWm(X86_NOT, MD, MB, MI, MS)
1139     #define NOTLr(RS) _UNARYLr(X86_NOT, RS)
1140     #define NOTLm(MD, MB, MI, MS) _UNARYLm(X86_NOT, MD, MB, MI, MS)
1141     #define NOTQr(RS) _UNARYQr(X86_NOT, RS)
1142     #define NOTQm(MD, MB, MI, MS) _UNARYQm(X86_NOT, MD, MB, MI, MS)
1143    
1144     #define NEGBr(RS) _UNARYBr(X86_NEG, RS)
1145     #define NEGBm(MD, MB, MI, MS) _UNARYBm(X86_NEG, MD, MB, MI, MS)
1146     #define NEGWr(RS) _UNARYWr(X86_NEG, RS)
1147     #define NEGWm(MD, MB, MI, MS) _UNARYWm(X86_NEG, MD, MB, MI, MS)
1148     #define NEGLr(RS) _UNARYLr(X86_NEG, RS)
1149     #define NEGLm(MD, MB, MI, MS) _UNARYLm(X86_NEG, MD, MB, MI, MS)
1150     #define NEGQr(RS) _UNARYQr(X86_NEG, RS)
1151     #define NEGQm(MD, MB, MI, MS) _UNARYQm(X86_NEG, MD, MB, MI, MS)
1152    
1153     #define MULBr(RS) _UNARYBr(X86_MUL, RS)
1154     #define MULBm(MD, MB, MI, MS) _UNARYBm(X86_MUL, MD, MB, MI, MS)
1155     #define MULWr(RS) _UNARYWr(X86_MUL, RS)
1156     #define MULWm(MD, MB, MI, MS) _UNARYWm(X86_MUL, MD, MB, MI, MS)
1157     #define MULLr(RS) _UNARYLr(X86_MUL, RS)
1158     #define MULLm(MD, MB, MI, MS) _UNARYLm(X86_MUL, MD, MB, MI, MS)
1159     #define MULQr(RS) _UNARYQr(X86_MUL, RS)
1160     #define MULQm(MD, MB, MI, MS) _UNARYQm(X86_MUL, MD, MB, MI, MS)
1161    
1162     #define IMULBr(RS) _UNARYBr(X86_IMUL, RS)
1163     #define IMULBm(MD, MB, MI, MS) _UNARYBm(X86_IMUL, MD, MB, MI, MS)
1164     #define IMULWr(RS) _UNARYWr(X86_IMUL, RS)
1165     #define IMULWm(MD, MB, MI, MS) _UNARYWm(X86_IMUL, MD, MB, MI, MS)
1166     #define IMULLr(RS) _UNARYLr(X86_IMUL, RS)
1167     #define IMULLm(MD, MB, MI, MS) _UNARYLm(X86_IMUL, MD, MB, MI, MS)
1168     #define IMULQr(RS) _UNARYQr(X86_IMUL, RS)
1169     #define IMULQm(MD, MB, MI, MS) _UNARYQm(X86_IMUL, MD, MB, MI, MS)
1170    
1171     #define DIVBr(RS) _UNARYBr(X86_DIV, RS)
1172     #define DIVBm(MD, MB, MI, MS) _UNARYBm(X86_DIV, MD, MB, MI, MS)
1173     #define DIVWr(RS) _UNARYWr(X86_DIV, RS)
1174     #define DIVWm(MD, MB, MI, MS) _UNARYWm(X86_DIV, MD, MB, MI, MS)
1175     #define DIVLr(RS) _UNARYLr(X86_DIV, RS)
1176     #define DIVLm(MD, MB, MI, MS) _UNARYLm(X86_DIV, MD, MB, MI, MS)
1177     #define DIVQr(RS) _UNARYQr(X86_DIV, RS)
1178     #define DIVQm(MD, MB, MI, MS) _UNARYQm(X86_DIV, MD, MB, MI, MS)
1179    
1180     #define IDIVBr(RS) _UNARYBr(X86_IDIV, RS)
1181     #define IDIVBm(MD, MB, MI, MS) _UNARYBm(X86_IDIV, MD, MB, MI, MS)
1182     #define IDIVWr(RS) _UNARYWr(X86_IDIV, RS)
1183     #define IDIVWm(MD, MB, MI, MS) _UNARYWm(X86_IDIV, MD, MB, MI, MS)
1184     #define IDIVLr(RS) _UNARYLr(X86_IDIV, RS)
1185     #define IDIVLm(MD, MB, MI, MS) _UNARYLm(X86_IDIV, MD, MB, MI, MS)
1186     #define IDIVQr(RS) _UNARYQr(X86_IDIV, RS)
1187     #define IDIVQm(MD, MB, MI, MS) _UNARYQm(X86_IDIV, MD, MB, MI, MS)
1188    
1189     /* _format Opcd ,Mod ,r ,m ,mem=dsp+sib ,imm... */
1190    
1191 gbeauche 1.15 #define IMULWrr(RS, RD) (_d16(), _REXLrr(RD, RS), _OO_Mrm (0x0faf ,_b11,_r2(RD),_r2(RS) ))
1192 gbeauche 1.1 #define IMULWmr(MD, MB, MI, MS, RD) (_d16(), _REXLmr(MB, MI, RD), _OO_r_X (0x0faf ,_r2(RD) ,MD,MB,MI,MS ))
1193    
1194     #define IMULWirr(IM,RS,RD) (_d16(), _REXLrr(RS, RD), _Os_Mrm_sW (0x69 ,_b11,_r2(RS),_r2(RD) ,_su16(IM) ))
1195     #define IMULWimr(IM,MD,MB,MI,MS,RD) (_d16(), _REXLmr(MB, MI, RD), _Os_r_X_sW (0x69 ,_r2(RD) ,MD,MB,MI,MS ,_su16(IM) ))
1196    
1197     #define IMULLir(IM, RD) (_REXLrr(0, RD), _Os_Mrm_sL (0x69 ,_b11,_r4(RD),_r4(RD) ,IM ))
1198 gbeauche 1.15 #define IMULLrr(RS, RD) (_REXLrr(RD, RS), _OO_Mrm (0x0faf ,_b11,_r4(RD),_r4(RS) ))
1199 gbeauche 1.1 #define IMULLmr(MD, MB, MI, MS, RD) (_REXLmr(MB, MI, RD), _OO_r_X (0x0faf ,_r4(RD) ,MD,MB,MI,MS ))
1200    
1201     #define IMULQir(IM, RD) (_REXQrr(0, RD), _Os_Mrm_sL (0x69 ,_b11,_r8(RD),_r8(RD) ,IM ))
1202 gbeauche 1.15 #define IMULQrr(RS, RD) (_REXQrr(RD, RS), _OO_Mrm (0x0faf ,_b11,_r8(RD),_r8(RS) ))
1203 gbeauche 1.1 #define IMULQmr(MD, MB, MI, MS, RD) (_REXQmr(MB, MI, RD), _OO_r_X (0x0faf ,_r8(RD) ,MD,MB,MI,MS ))
1204    
1205     #define IMULLirr(IM,RS,RD) (_REXLrr(RS, RD), _Os_Mrm_sL (0x69 ,_b11,_r4(RS),_r4(RD) ,IM ))
1206     #define IMULLimr(IM,MD,MB,MI,MS,RD) (_REXLmr(MB, MI, RD), _Os_r_X_sL (0x69 ,_r4(RD) ,MD,MB,MI,MS ,IM ))
1207    
1208     #define IMULQirr(IM,RS,RD) (_REXQrr(RS, RD), _Os_Mrm_sL (0x69 ,_b11,_r8(RS),_r8(RD) ,IM ))
1209     #define IMULQimr(IM,MD,MB,MI,MS,RD) (_REXQmr(MB, MI, RD), _Os_r_X_sL (0x69 ,_r8(RD) ,MD,MB,MI,MS ,IM ))
1210    
1211    
1212     /* --- Control Flow related instructions ----------------------------------- */
1213    
/* x86 condition-code values: the 4-bit "tttn" field OR'ed into the Jcc
 * (0x70+cc / 0x0f80+cc), SETcc (0x0f90+cc) and CMOVcc (0x0f40+cc) opcodes.
 * Several mnemonics are aliases for the same encoding (e.g. B == C == NAE). */
enum {
	X86_CC_O   = 0x0,			/* overflow */
	X86_CC_NO  = 0x1,			/* no overflow */
	X86_CC_NAE = 0x2,			/* below / carry / not above-or-equal */
	X86_CC_B   = 0x2,
	X86_CC_C   = 0x2,
	X86_CC_AE  = 0x3,			/* above-or-equal / no carry */
	X86_CC_NB  = 0x3,
	X86_CC_NC  = 0x3,
	X86_CC_E   = 0x4,			/* equal / zero */
	X86_CC_Z   = 0x4,
	X86_CC_NE  = 0x5,			/* not equal / not zero */
	X86_CC_NZ  = 0x5,
	X86_CC_BE  = 0x6,			/* below-or-equal / not above */
	X86_CC_NA  = 0x6,
	X86_CC_A   = 0x7,			/* above */
	X86_CC_NBE = 0x7,
	X86_CC_S   = 0x8,			/* sign */
	X86_CC_NS  = 0x9,			/* no sign */
	X86_CC_P   = 0xa,			/* parity even */
	X86_CC_PE  = 0xa,
	X86_CC_NP  = 0xb,			/* parity odd */
	X86_CC_PO  = 0xb,
	X86_CC_L   = 0xc,			/* (signed) less */
	X86_CC_NGE = 0xc,
	X86_CC_GE  = 0xd,			/* (signed) greater-or-equal */
	X86_CC_NL  = 0xd,
	X86_CC_LE  = 0xe,			/* (signed) less-or-equal */
	X86_CC_NG  = 0xe,
	X86_CC_G   = 0xf,			/* (signed) greater */
	X86_CC_NLE = 0xf
};
1246    
1247 gbeauche 1.1 /* _format Opcd ,Mod ,r ,m ,mem=dsp+sib ,imm... */
1248    
1249     // FIXME: no prefix is available to encode a 32-bit operand size in 64-bit mode
1250     #define CALLm(M) _O_D32 (0xe8 ,(int)(M) )
1251 gbeauche 1.13 #define _CALLLsr(R) (_REXLrr(0, R), _O_Mrm (0xff ,_b11,_b010,_r4(R) ))
1252     #define _CALLQsr(R) (_REXQrr(0, R), _O_Mrm (0xff ,_b11,_b010,_r8(R) ))
1253     #define CALLsr(R) ( X86_TARGET_64BIT ? _CALLQsr(R) : _CALLLsr(R))
1254 gbeauche 1.1 #define CALLsm(D,B,I,S) (_REXLrm(0, B, I), _O_r_X (0xff ,_b010 ,(int)(D),B,I,S ))
1255    
1256     // FIXME: no prefix is available to encode a 32-bit operand size in 64-bit mode
1257 gbeauche 1.13 #define JMPSm(M) _O_D8 (0xeb ,(int)(M) )
1258     #define JMPm(M) _O_D32 (0xe9 ,(int)(M) )
1259     #define _JMPLsr(R) (_REXLrr(0, R), _O_Mrm (0xff ,_b11,_b100,_r4(R) ))
1260     #define _JMPQsr(R) (_REXQrr(0, R), _O_Mrm (0xff ,_b11,_b100,_r8(R) ))
1261     #define JMPsr(R) ( X86_TARGET_64BIT ? _JMPQsr(R) : _JMPLsr(R))
1262 gbeauche 1.1 #define JMPsm(D,B,I,S) (_REXLrm(0, B, I), _O_r_X (0xff ,_b100 ,(int)(D),B,I,S ))
1263    
1264     /* _format Opcd ,Mod ,r ,m ,mem=dsp+sib ,imm... */
1265 gbeauche 1.10 #define JCCSii(CC, D) _O_B (0x70|(CC) ,(_sc)(int)(D) )
1266 gbeauche 1.1 #define JCCSim(CC, D) _O_D8 (0x70|(CC) ,(int)(D) )
1267     #define JOSm(D) JCCSim(0x0, D)
1268     #define JNOSm(D) JCCSim(0x1, D)
1269     #define JBSm(D) JCCSim(0x2, D)
1270     #define JNAESm(D) JCCSim(0x2, D)
1271     #define JNBSm(D) JCCSim(0x3, D)
1272     #define JAESm(D) JCCSim(0x3, D)
1273     #define JESm(D) JCCSim(0x4, D)
1274     #define JZSm(D) JCCSim(0x4, D)
1275     #define JNESm(D) JCCSim(0x5, D)
1276     #define JNZSm(D) JCCSim(0x5, D)
1277     #define JBESm(D) JCCSim(0x6, D)
1278     #define JNASm(D) JCCSim(0x6, D)
1279     #define JNBESm(D) JCCSim(0x7, D)
1280     #define JASm(D) JCCSim(0x7, D)
1281     #define JSSm(D) JCCSim(0x8, D)
1282     #define JNSSm(D) JCCSim(0x9, D)
1283     #define JPSm(D) JCCSim(0xa, D)
1284     #define JPESm(D) JCCSim(0xa, D)
1285     #define JNPSm(D) JCCSim(0xb, D)
1286     #define JPOSm(D) JCCSim(0xb, D)
1287     #define JLSm(D) JCCSim(0xc, D)
1288     #define JNGESm(D) JCCSim(0xc, D)
1289     #define JNLSm(D) JCCSim(0xd, D)
1290     #define JGESm(D) JCCSim(0xd, D)
1291     #define JLESm(D) JCCSim(0xe, D)
1292     #define JNGSm(D) JCCSim(0xe, D)
1293     #define JNLESm(D) JCCSim(0xf, D)
1294     #define JGSm(D) JCCSim(0xf, D)
1295    
1296     /* _format Opcd ,Mod ,r ,m ,mem=dsp+sib ,imm... */
1297 gbeauche 1.10 #define JCCii(CC, D) _OO_L (0x0f80|(CC) ,(int)(D) )
1298 gbeauche 1.1 #define JCCim(CC, D) _OO_D32 (0x0f80|(CC) ,(int)(D) )
1299     #define JOm(D) JCCim(0x0, D)
1300     #define JNOm(D) JCCim(0x1, D)
1301     #define JBm(D) JCCim(0x2, D)
1302     #define JNAEm(D) JCCim(0x2, D)
1303     #define JNBm(D) JCCim(0x3, D)
1304     #define JAEm(D) JCCim(0x3, D)
1305     #define JEm(D) JCCim(0x4, D)
1306     #define JZm(D) JCCim(0x4, D)
1307     #define JNEm(D) JCCim(0x5, D)
1308     #define JNZm(D) JCCim(0x5, D)
1309     #define JBEm(D) JCCim(0x6, D)
1310     #define JNAm(D) JCCim(0x6, D)
1311     #define JNBEm(D) JCCim(0x7, D)
1312     #define JAm(D) JCCim(0x7, D)
1313     #define JSm(D) JCCim(0x8, D)
1314     #define JNSm(D) JCCim(0x9, D)
1315     #define JPm(D) JCCim(0xa, D)
1316     #define JPEm(D) JCCim(0xa, D)
1317     #define JNPm(D) JCCim(0xb, D)
1318     #define JPOm(D) JCCim(0xb, D)
1319     #define JLm(D) JCCim(0xc, D)
1320     #define JNGEm(D) JCCim(0xc, D)
1321     #define JNLm(D) JCCim(0xd, D)
1322     #define JGEm(D) JCCim(0xd, D)
1323     #define JLEm(D) JCCim(0xe, D)
1324     #define JNGm(D) JCCim(0xe, D)
1325     #define JNLEm(D) JCCim(0xf, D)
1326     #define JGm(D) JCCim(0xf, D)
1327    
1328     /* _format Opcd ,Mod ,r ,m ,mem=dsp+sib ,imm... */
1329     #define SETCCir(CC, RD) (_REXBrr(0, RD), _OO_Mrm (0x0f90|(CC) ,_b11,_b000,_r1(RD) ))
1330     #define SETOr(RD) SETCCir(0x0,RD)
1331     #define SETNOr(RD) SETCCir(0x1,RD)
1332     #define SETBr(RD) SETCCir(0x2,RD)
1333     #define SETNAEr(RD) SETCCir(0x2,RD)
1334     #define SETNBr(RD) SETCCir(0x3,RD)
1335     #define SETAEr(RD) SETCCir(0x3,RD)
1336     #define SETEr(RD) SETCCir(0x4,RD)
1337     #define SETZr(RD) SETCCir(0x4,RD)
1338     #define SETNEr(RD) SETCCir(0x5,RD)
1339     #define SETNZr(RD) SETCCir(0x5,RD)
1340     #define SETBEr(RD) SETCCir(0x6,RD)
1341     #define SETNAr(RD) SETCCir(0x6,RD)
1342     #define SETNBEr(RD) SETCCir(0x7,RD)
1343     #define SETAr(RD) SETCCir(0x7,RD)
1344     #define SETSr(RD) SETCCir(0x8,RD)
1345     #define SETNSr(RD) SETCCir(0x9,RD)
1346     #define SETPr(RD) SETCCir(0xa,RD)
1347     #define SETPEr(RD) SETCCir(0xa,RD)
1348     #define SETNPr(RD) SETCCir(0xb,RD)
1349     #define SETPOr(RD) SETCCir(0xb,RD)
1350     #define SETLr(RD) SETCCir(0xc,RD)
1351     #define SETNGEr(RD) SETCCir(0xc,RD)
1352     #define SETNLr(RD) SETCCir(0xd,RD)
1353     #define SETGEr(RD) SETCCir(0xd,RD)
1354     #define SETLEr(RD) SETCCir(0xe,RD)
1355     #define SETNGr(RD) SETCCir(0xe,RD)
1356     #define SETNLEr(RD) SETCCir(0xf,RD)
1357     #define SETGr(RD) SETCCir(0xf,RD)
1358    
1359     /* _format Opcd ,Mod ,r ,m ,mem=dsp+sib ,imm... */
1360     #define SETCCim(CC,MD,MB,MI,MS) (_REXBrm(0, MB, MI), _OO_r_X (0x0f90|(CC) ,_b000 ,MD,MB,MI,MS ))
1361     #define SETOm(D, B, I, S) SETCCim(0x0, D, B, I, S)
1362     #define SETNOm(D, B, I, S) SETCCim(0x1, D, B, I, S)
1363     #define SETBm(D, B, I, S) SETCCim(0x2, D, B, I, S)
1364     #define SETNAEm(D, B, I, S) SETCCim(0x2, D, B, I, S)
1365     #define SETNBm(D, B, I, S) SETCCim(0x3, D, B, I, S)
1366     #define SETAEm(D, B, I, S) SETCCim(0x3, D, B, I, S)
1367     #define SETEm(D, B, I, S) SETCCim(0x4, D, B, I, S)
1368     #define SETZm(D, B, I, S) SETCCim(0x4, D, B, I, S)
1369     #define SETNEm(D, B, I, S) SETCCim(0x5, D, B, I, S)
1370     #define SETNZm(D, B, I, S) SETCCim(0x5, D, B, I, S)
1371     #define SETBEm(D, B, I, S) SETCCim(0x6, D, B, I, S)
1372     #define SETNAm(D, B, I, S) SETCCim(0x6, D, B, I, S)
1373     #define SETNBEm(D, B, I, S) SETCCim(0x7, D, B, I, S)
1374     #define SETAm(D, B, I, S) SETCCim(0x7, D, B, I, S)
1375     #define SETSm(D, B, I, S) SETCCim(0x8, D, B, I, S)
1376     #define SETNSm(D, B, I, S) SETCCim(0x9, D, B, I, S)
1377     #define SETPm(D, B, I, S) SETCCim(0xa, D, B, I, S)
1378     #define SETPEm(D, B, I, S) SETCCim(0xa, D, B, I, S)
1379     #define SETNPm(D, B, I, S) SETCCim(0xb, D, B, I, S)
1380     #define SETPOm(D, B, I, S) SETCCim(0xb, D, B, I, S)
1381     #define SETLm(D, B, I, S) SETCCim(0xc, D, B, I, S)
1382     #define SETNGEm(D, B, I, S) SETCCim(0xc, D, B, I, S)
1383     #define SETNLm(D, B, I, S) SETCCim(0xd, D, B, I, S)
1384     #define SETGEm(D, B, I, S) SETCCim(0xd, D, B, I, S)
1385     #define SETLEm(D, B, I, S) SETCCim(0xe, D, B, I, S)
1386     #define SETNGm(D, B, I, S) SETCCim(0xe, D, B, I, S)
1387     #define SETNLEm(D, B, I, S) SETCCim(0xf, D, B, I, S)
1388     #define SETGm(D, B, I, S) SETCCim(0xf, D, B, I, S)
1389    
1390 gbeauche 1.5 /* _format Opcd ,Mod ,r ,m ,mem=dsp+sib ,imm... */
1391     #define CMOVWrr(CC,RS,RD) (_d16(), _REXLrr(RD, RS), _OO_Mrm (0x0f40|(CC) ,_b11,_r2(RD),_r2(RS) ))
1392     #define CMOVWmr(CC,MD,MB,MI,MS,RD) (_d16(), _REXLmr(MB, MI, RD), _OO_r_X (0x0f40|(CC) ,_r2(RD) ,MD,MB,MI,MS ))
1393     #define CMOVLrr(CC,RS,RD) (_REXLrr(RD, RS), _OO_Mrm (0x0f40|(CC) ,_b11,_r4(RD),_r4(RS) ))
1394     #define CMOVLmr(CC,MD,MB,MI,MS,RD) (_REXLmr(MB, MI, RD), _OO_r_X (0x0f40|(CC) ,_r4(RD) ,MD,MB,MI,MS ))
1395     #define CMOVQrr(CC,RS,RD) (_REXQrr(RD, RS), _OO_Mrm (0x0f40|(CC) ,_b11,_r8(RD),_r8(RS) ))
1396     #define CMOVQmr(CC,MD,MB,MI,MS,RD) (_REXQmr(MB, MI, RD), _OO_r_X (0x0f40|(CC) ,_r8(RD) ,MD,MB,MI,MS ))
1397    
1398 gbeauche 1.1
1399     /* --- Push/Pop instructions ----------------------------------------------- */
1400    
1401     /* _format Opcd ,Mod ,r ,m ,mem=dsp+sib ,imm... */
1402    
1403     #define POPWr(RD) _m32only((_d16(), _Or (0x58,_r2(RD) )))
1404     #define POPWm(MD, MB, MI, MS) _m32only((_d16(), _O_r_X (0x8f ,_b000 ,MD,MB,MI,MS )))
1405    
1406     #define POPLr(RD) _m32only( _Or (0x58,_r4(RD) ))
1407     #define POPLm(MD, MB, MI, MS) _m32only( _O_r_X (0x8f ,_b000 ,MD,MB,MI,MS ))
1408    
1409 gbeauche 1.13 #define POPQr(RD) _m64only((_REXQr(RD), _Or (0x58,_r8(RD) )))
1410     #define POPQm(MD, MB, MI, MS) _m64only((_REXQm(MB, MI), _O_r_X (0x8f ,_b000 ,MD,MB,MI,MS )))
1411 gbeauche 1.1
1412     #define PUSHWr(RS) _m32only((_d16(), _Or (0x50,_r2(RS) )))
/* PUSH r/m16 (FF /6), 32-bit mode only.  The original read "(0xff, ,_b110":
 * the stray comma after the opcode injected an empty macro argument, so any
 * use of PUSHWm expanded to an invalid _O_r_X() call.  One comma, as in
 * PUSHLm below. */
#define PUSHWm(MD, MB, MI, MS)		_m32only((_d16(),		_O_r_X		(0xff		,_b110		,MD,MB,MI,MS		)))
1414     #define PUSHWi(IM) _m32only((_d16(), _Os_sW (0x68 ,IM )))
1415    
1416     #define PUSHLr(RS) _m32only( _Or (0x50,_r4(RS) ))
1417     #define PUSHLm(MD, MB, MI, MS) _m32only( _O_r_X (0xff ,_b110 ,MD,MB,MI,MS ))
1418     #define PUSHLi(IM) _m32only( _Os_sL (0x68 ,IM ))
1419    
1420 gbeauche 1.13 #define PUSHQr(RS) _m64only((_REXQr(RS), _Or (0x50,_r8(RS) )))
1421     #define PUSHQm(MD, MB, MI, MS) _m64only((_REXQm(MB, MI), _O_r_X (0xff ,_b110 ,MD,MB,MI,MS )))
1422 gbeauche 1.1 #define PUSHQi(IM) _m64only( _Os_sL (0x68 ,IM ))
1423    
1424     #define POPA() (_d16(), _O (0x61 ))
1425     #define POPAD() _O (0x61 )
1426    
1427     #define PUSHA() (_d16(), _O (0x60 ))
1428     #define PUSHAD() _O (0x60 )
1429    
1430 gbeauche 1.11 #define POPF() _O (0x9d )
1431 gbeauche 1.1 #define PUSHF() _O (0x9c )
1432    
1433    
1434     /* --- Test instructions --------------------------------------------------- */
1435    
1436     /* _format Opcd ,Mod ,r ,m ,mem=dsp+sib ,imm... */
1437    
1438     #define TESTBrr(RS, RD) (_REXBrr(RS, RD), _O_Mrm (0x84 ,_b11,_r1(RS),_r1(RD) ))
1439     #define TESTBrm(RS, MD, MB, MI, MS) (_REXBrm(RS, MB, MI), _O_r_X (0x84 ,_r1(RS) ,MD,MB,MI,MS ))
1440 gbeauche 1.10 #define TESTBir(IM, RD) (X86_OPTIMIZE_ALU && ((RD) == X86_AL) ? \
1441     (_REXBrr(0, RD), _O_B (0xa8 ,_u8(IM))) : \
1442     (_REXBrr(0, RD), _O_Mrm_B (0xf6 ,_b11,_b000 ,_r1(RD) ,_u8(IM))) )
1443 gbeauche 1.1 #define TESTBim(IM, MD, MB, MI, MS) (_REXBrm(0, MB, MI), _O_r_X_B (0xf6 ,_b000 ,MD,MB,MI,MS ,_u8(IM)))
1444    
1445     #define TESTWrr(RS, RD) (_d16(), _REXLrr(RS, RD), _O_Mrm (0x85 ,_b11,_r2(RS),_r2(RD) ))
1446 gbeauche 1.11 #define TESTWrm(RS, MD, MB, MI, MS) (_d16(), _REXLrm(RS, MB, MI), _O_r_X (0x85 ,_r2(RS) ,MD,MB,MI,MS ))
1447 gbeauche 1.10 #define TESTWir(IM, RD) (X86_OPTIMIZE_ALU && ((RD) == X86_AX) ? \
1448     (_d16(), _REXLrr(0, RD), _O_W (0xa9 ,_u16(IM))) : \
1449     (_d16(), _REXLrr(0, RD), _O_Mrm_W (0xf7 ,_b11,_b000 ,_r2(RD) ,_u16(IM))) )
1450 gbeauche 1.11 #define TESTWim(IM, MD, MB, MI, MS) (_d16(), _REXLrm(0, MB, MI), _O_r_X_W (0xf7 ,_b000 ,MD,MB,MI,MS ,_u16(IM)))
1451 gbeauche 1.1
1452     #define TESTLrr(RS, RD) (_REXLrr(RS, RD), _O_Mrm (0x85 ,_b11,_r4(RS),_r4(RD) ))
1453     #define TESTLrm(RS, MD, MB, MI, MS) (_REXLrm(RS, MB, MI), _O_r_X (0x85 ,_r4(RS) ,MD,MB,MI,MS ))
1454 gbeauche 1.10 #define TESTLir(IM, RD) (X86_OPTIMIZE_ALU && ((RD) == X86_EAX) ? \
1455     (_REXLrr(0, RD), _O_L (0xa9 ,IM )) : \
1456     (_REXLrr(0, RD), _O_Mrm_L (0xf7 ,_b11,_b000 ,_r4(RD) ,IM )) )
1457 gbeauche 1.1 #define TESTLim(IM, MD, MB, MI, MS) (_REXLrm(0, MB, MI), _O_r_X_L (0xf7 ,_b000 ,MD,MB,MI,MS ,IM ))
1458    
1459     #define TESTQrr(RS, RD) (_REXQrr(RS, RD), _O_Mrm (0x85 ,_b11,_r8(RS),_r8(RD) ))
1460     #define TESTQrm(RS, MD, MB, MI, MS) (_REXQrm(RS, MB, MI), _O_r_X (0x85 ,_r8(RS) ,MD,MB,MI,MS ))
1461 gbeauche 1.10 #define TESTQir(IM, RD) (X86_OPTIMIZE_ALU && ((RD) == X86_RAX) ? \
1462     (_REXQrr(0, RD), _O_L (0xa9 ,IM )) : \
1463     (_REXQrr(0, RD), _O_Mrm_L (0xf7 ,_b11,_b000 ,_r8(RD) ,IM )) )
1464 gbeauche 1.1 #define TESTQim(IM, MD, MB, MI, MS) (_REXQrm(0, MB, MI), _O_r_X_L (0xf7 ,_b000 ,MD,MB,MI,MS ,IM ))
1465    
1466    
1467     /* --- Exchange instructions ----------------------------------------------- */
1468    
1469     /* _format Opcd ,Mod ,r ,m ,mem=dsp+sib ,imm... */
1470    
1471     #define CMPXCHGBrr(RS, RD) (_REXBrr(RS, RD), _OO_Mrm (0x0fb0 ,_b11,_r1(RS),_r1(RD) ))
1472     #define CMPXCHGBrm(RS, MD, MB, MI, MS) (_REXBrm(RS, MB, MI), _OO_r_X (0x0fb0 ,_r1(RS) ,MD,MB,MI,MS ))
1473    
1474     #define CMPXCHGWrr(RS, RD) (_d16(), _REXLrr(RS, RD), _OO_Mrm (0x0fb1 ,_b11,_r2(RS),_r2(RD) ))
1475     #define CMPXCHGWrm(RS, MD, MB, MI, MS) (_d16(), _REXLrm(RS, MB, MI), _OO_r_X (0x0fb1 ,_r2(RS) ,MD,MB,MI,MS ))
1476    
1477     #define CMPXCHGLrr(RS, RD) (_REXLrr(RS, RD), _OO_Mrm (0x0fb1 ,_b11,_r4(RS),_r4(RD) ))
1478     #define CMPXCHGLrm(RS, MD, MB, MI, MS) (_REXLrm(RS, MB, MI), _OO_r_X (0x0fb1 ,_r4(RS) ,MD,MB,MI,MS ))
1479    
1480     #define CMPXCHGQrr(RS, RD) (_REXQrr(RS, RD), _OO_Mrm (0x0fb1 ,_b11,_r8(RS),_r8(RD) ))
1481     #define CMPXCHGQrm(RS, MD, MB, MI, MS) (_REXQrm(RS, MB, MI), _OO_r_X (0x0fb1 ,_r8(RS) ,MD,MB,MI,MS ))
1482    
1483     #define XADDBrr(RS, RD) (_REXBrr(RS, RD), _OO_Mrm (0x0fc0 ,_b11,_r1(RS),_r1(RD) ))
1484     #define XADDBrm(RS, MD, MB, MI, MS) (_REXBrm(RS, MB, MI), _OO_r_X (0x0fc0 ,_r1(RS) ,MD,MB,MI,MS ))
1485    
1486     #define XADDWrr(RS, RD) (_d16(), _REXLrr(RS, RD), _OO_Mrm (0x0fc1 ,_b11,_r2(RS),_r2(RD) ))
1487     #define XADDWrm(RS, MD, MB, MI, MS) (_d16(), _REXLrm(RS, MB, MI), _OO_r_X (0x0fc1 ,_r2(RS) ,MD,MB,MI,MS ))
1488    
1489     #define XADDLrr(RS, RD) (_REXLrr(RS, RD), _OO_Mrm (0x0fc1 ,_b11,_r4(RS),_r4(RD) ))
1490     #define XADDLrm(RS, MD, MB, MI, MS) (_REXLrm(RS, MB, MI), _OO_r_X (0x0fc1 ,_r4(RS) ,MD,MB,MI,MS ))
1491    
1492     #define XADDQrr(RS, RD) (_REXQrr(RS, RD), _OO_Mrm (0x0fc1 ,_b11,_r8(RS),_r8(RD) ))
1493     #define XADDQrm(RS, MD, MB, MI, MS) (_REXQrm(RS, MB, MI), _OO_r_X (0x0fc1 ,_r8(RS) ,MD,MB,MI,MS ))
1494    
1495     #define XCHGBrr(RS, RD) (_REXBrr(RS, RD), _O_Mrm (0x86 ,_b11,_r1(RS),_r1(RD) ))
1496     #define XCHGBrm(RS, MD, MB, MI, MS) (_REXBrm(RS, MB, MI), _O_r_X (0x86 ,_r1(RS) ,MD,MB,MI,MS ))
1497    
1498     #define XCHGWrr(RS, RD) (_d16(), _REXLrr(RS, RD), _O_Mrm (0x87 ,_b11,_r2(RS),_r2(RD) ))
1499     #define XCHGWrm(RS, MD, MB, MI, MS) (_d16(), _REXLrm(RS, MB, MI), _O_r_X (0x87 ,_r2(RS) ,MD,MB,MI,MS ))
1500    
1501     #define XCHGLrr(RS, RD) (_REXLrr(RS, RD), _O_Mrm (0x87 ,_b11,_r4(RS),_r4(RD) ))
1502     #define XCHGLrm(RS, MD, MB, MI, MS) (_REXLrm(RS, MB, MI), _O_r_X (0x87 ,_r4(RS) ,MD,MB,MI,MS ))
1503    
1504     #define XCHGQrr(RS, RD) (_REXQrr(RS, RD), _O_Mrm (0x87 ,_b11,_r8(RS),_r8(RD) ))
1505     #define XCHGQrm(RS, MD, MB, MI, MS) (_REXQrm(RS, MB, MI), _O_r_X (0x87 ,_r8(RS) ,MD,MB,MI,MS ))
1506    
1507    
1508     /* --- Increment/Decrement instructions ------------------------------------ */
1509    
1510     /* _format Opcd ,Mod ,r ,m ,mem=dsp+sib ,imm... */
1511    
1512     #define DECBm(MD, MB, MI, MS) (_REXBrm(0, MB, MI), _O_r_X (0xfe ,_b001 ,MD,MB,MI,MS ))
1513     #define DECBr(RD) (_REXBrr(0, RD), _O_Mrm (0xfe ,_b11,_b001 ,_r1(RD) ))
1514    
1515     #define DECWm(MD, MB, MI, MS) (_d16(), _REXLrm(0, MB, MI), _O_r_X (0xff ,_b001 ,MD,MB,MI,MS ))
1516     #define DECWr(RD) (! X86_TARGET_64BIT ? (_d16(), _Or (0x48,_r2(RD) )) : \
1517     (_d16(), _REXLrr(0, RD), _O_Mrm (0xff ,_b11,_b001 ,_r2(RD) )))
1518    
1519     #define DECLm(MD, MB, MI, MS) (_REXLrm(0, MB, MI), _O_r_X (0xff ,_b001 ,MD,MB,MI,MS ))
1520     #define DECLr(RD) (! X86_TARGET_64BIT ? _Or (0x48,_r4(RD) ) : \
1521     (_REXLrr(0, RD), _O_Mrm (0xff ,_b11,_b001 ,_r4(RD) )))
1522    
1523     #define DECQm(MD, MB, MI, MS) (_REXQrm(0, MB, MI), _O_r_X (0xff ,_b001 ,MD,MB,MI,MS ))
1524     #define DECQr(RD) (_REXQrr(0, RD), _O_Mrm (0xff ,_b11,_b001 ,_r8(RD) ))
1525    
1526     #define INCBm(MD, MB, MI, MS) (_REXBrm(0, MB, MI), _O_r_X (0xfe ,_b000 ,MD,MB,MI,MS ))
1527     #define INCBr(RD) (_REXBrr(0, RD), _O_Mrm (0xfe ,_b11,_b000 ,_r1(RD) ))
1528    
1529     #define INCWm(MD, MB, MI, MS) (_d16(), _REXLrm(0, MB, MI), _O_r_X (0xff ,_b000 ,MD,MB,MI,MS ))
1530     #define INCWr(RD) (! X86_TARGET_64BIT ? (_d16(), _Or (0x40,_r2(RD) )) : \
1531     (_d16(), _REXLrr(0, RD), _O_Mrm (0xff ,_b11,_b000 ,_r2(RD) )) )
1532    
1533     #define INCLm(MD, MB, MI, MS) (_REXLrm(0, MB, MI), _O_r_X (0xff ,_b000 ,MD,MB,MI,MS ))
1534     #define INCLr(RD) (! X86_TARGET_64BIT ? _Or (0x40,_r4(RD) ) : \
1535     (_REXLrr(0, RD), _O_Mrm (0xff ,_b11,_b000 ,_r4(RD) )))
1536    
1537     #define INCQm(MD, MB, MI, MS) (_REXQrm(0, MB, MI), _O_r_X (0xff ,_b000 ,MD,MB,MI,MS ))
1538     #define INCQr(RD) (_REXQrr(0, RD), _O_Mrm (0xff ,_b11,_b000 ,_r8(RD) ))
1539    
1540    
1541 gbeauche 1.5 /* --- Misc instructions --------------------------------------------------- */
1542    
1543     /* _format Opcd ,Mod ,r ,m ,mem=dsp+sib ,imm... */
1544    
1545     #define BSFWrr(RS, RD) (_d16(), _REXLrr(RD, RS), _OO_Mrm (0x0fbc ,_b11,_r2(RD),_r2(RS) ))
1546     #define BSFWmr(MD, MB, MI, MS, RD) (_d16(), _REXLmr(MB, MI, RD), _OO_r_X (0x0fbc ,_r2(RD) ,MD,MB,MI,MS ))
1547     #define BSRWrr(RS, RD) (_d16(), _REXLrr(RD, RS), _OO_Mrm (0x0fbd ,_b11,_r2(RD),_r2(RS) ))
1548     #define BSRWmr(MD, MB, MI, MS, RD) (_d16(), _REXLmr(MB, MI, RD), _OO_r_X (0x0fbd ,_r2(RD) ,MD,MB,MI,MS ))
1549    
1550     #define BSFLrr(RS, RD) (_REXLrr(RD, RS), _OO_Mrm (0x0fbc ,_b11,_r4(RD),_r4(RS) ))
1551     #define BSFLmr(MD, MB, MI, MS, RD) (_REXLmr(MB, MI, RD), _OO_r_X (0x0fbc ,_r4(RD) ,MD,MB,MI,MS ))
1552     #define BSRLrr(RS, RD) (_REXLrr(RD, RS), _OO_Mrm (0x0fbd ,_b11,_r4(RD),_r4(RS) ))
1553     #define BSRLmr(MD, MB, MI, MS, RD) (_REXLmr(MB, MI, RD), _OO_r_X (0x0fbd ,_r4(RD) ,MD,MB,MI,MS ))
1554    
1555     #define BSFQrr(RS, RD) (_REXQrr(RD, RS), _OO_Mrm (0x0fbc ,_b11,_r8(RD),_r8(RS) ))
1556     #define BSFQmr(MD, MB, MI, MS, RD) (_REXQmr(MB, MI, RD), _OO_r_X (0x0fbc ,_r8(RD) ,MD,MB,MI,MS ))
1557     #define BSRQrr(RS, RD) (_REXQrr(RD, RS), _OO_Mrm (0x0fbd ,_b11,_r8(RD),_r8(RS) ))
1558     #define BSRQmr(MD, MB, MI, MS, RD) (_REXQmr(MB, MI, RD), _OO_r_X (0x0fbd ,_r8(RD) ,MD,MB,MI,MS ))
1559 gbeauche 1.1
1560     /* _format Opcd ,Mod ,r ,m ,mem=dsp+sib ,imm... */
1561    
1562 gbeauche 1.15 #define MOVSBWrr(RS, RD) (_d16(), _REXBLrr(RD, RS), _OO_Mrm (0x0fbe ,_b11,_r2(RD),_r1(RS) ))
1563 gbeauche 1.7 #define MOVSBWmr(MD, MB, MI, MS, RD) (_d16(), _REXLmr(MB, MI, RD), _OO_r_X (0x0fbe ,_r2(RD) ,MD,MB,MI,MS ))
1564 gbeauche 1.15 #define MOVZBWrr(RS, RD) (_d16(), _REXBLrr(RD, RS), _OO_Mrm (0x0fb6 ,_b11,_r2(RD),_r1(RS) ))
1565 gbeauche 1.7 #define MOVZBWmr(MD, MB, MI, MS, RD) (_d16(), _REXLmr(MB, MI, RD), _OO_r_X (0x0fb6 ,_r2(RD) ,MD,MB,MI,MS ))
1566    
1567 gbeauche 1.14 #define MOVSBLrr(RS, RD) (_REXBLrr(RD, RS), _OO_Mrm (0x0fbe ,_b11,_r4(RD),_r1(RS) ))
1568 gbeauche 1.7 #define MOVSBLmr(MD, MB, MI, MS, RD) (_REXLmr(MB, MI, RD), _OO_r_X (0x0fbe ,_r4(RD) ,MD,MB,MI,MS ))
1569 gbeauche 1.14 #define MOVZBLrr(RS, RD) (_REXBLrr(RD, RS), _OO_Mrm (0x0fb6 ,_b11,_r4(RD),_r1(RS) ))
1570 gbeauche 1.7 #define MOVZBLmr(MD, MB, MI, MS, RD) (_REXLmr(MB, MI, RD), _OO_r_X (0x0fb6 ,_r4(RD) ,MD,MB,MI,MS ))
1571    
1572     #define MOVSBQrr(RS, RD) (_REXQrr(RD, RS), _OO_Mrm (0x0fbe ,_b11,_r8(RD),_r1(RS) ))
1573     #define MOVSBQmr(MD, MB, MI, MS, RD) (_REXQmr(MB, MI, RD), _OO_r_X (0x0fbe ,_r8(RD) ,MD,MB,MI,MS ))
1574     #define MOVZBQrr(RS, RD) (_REXQrr(RD, RS), _OO_Mrm (0x0fb6 ,_b11,_r8(RD),_r1(RS) ))
1575     #define MOVZBQmr(MD, MB, MI, MS, RD) (_REXQmr(MB, MI, RD), _OO_r_X (0x0fb6 ,_r8(RD) ,MD,MB,MI,MS ))
1576    
1577     #define MOVSWLrr(RS, RD) (_REXLrr(RD, RS), _OO_Mrm (0x0fbf ,_b11,_r4(RD),_r2(RS) ))
1578     #define MOVSWLmr(MD, MB, MI, MS, RD) (_REXLmr(MB, MI, RD), _OO_r_X (0x0fbf ,_r4(RD) ,MD,MB,MI,MS ))
1579     #define MOVZWLrr(RS, RD) (_REXLrr(RD, RS), _OO_Mrm (0x0fb7 ,_b11,_r4(RD),_r2(RS) ))
1580     #define MOVZWLmr(MD, MB, MI, MS, RD) (_REXLmr(MB, MI, RD), _OO_r_X (0x0fb7 ,_r4(RD) ,MD,MB,MI,MS ))
1581    
1582     #define MOVSWQrr(RS, RD) (_REXQrr(RD, RS), _OO_Mrm (0x0fbf ,_b11,_r8(RD),_r2(RS) ))
1583     #define MOVSWQmr(MD, MB, MI, MS, RD) (_REXQmr(MB, MI, RD), _OO_r_X (0x0fbf ,_r8(RD) ,MD,MB,MI,MS ))
1584     #define MOVZWQrr(RS, RD) (_REXQrr(RD, RS), _OO_Mrm (0x0fb7 ,_b11,_r8(RD),_r2(RS) ))
1585     #define MOVZWQmr(MD, MB, MI, MS, RD) (_REXQmr(MB, MI, RD), _OO_r_X (0x0fb7 ,_r8(RD) ,MD,MB,MI,MS ))
1586    
1587     #define MOVSLQrr(RS, RD) _m64only((_REXQrr(RD, RS), _O_Mrm (0x63 ,_b11,_r8(RD),_r4(RS) )))
1588     #define MOVSLQmr(MD, MB, MI, MS, RD) _m64only((_REXQmr(MB, MI, RD), _O_r_X (0x63 ,_r8(RD) ,MD,MB,MI,MS )))
1589    
1590     /* _format Opcd ,Mod ,r ,m ,mem=dsp+sib ,imm... */
1591    
1592 gbeauche 1.1 #define LEALmr(MD, MB, MI, MS, RD) (_REXLmr(MB, MI, RD), _O_r_X (0x8d ,_r4(RD) ,MD,MB,MI,MS ))
1593    
1594     #define BSWAPLr(R) (_REXLrr(0, R), _OOr (0x0fc8,_r4(R) ))
1595     #define BSWAPQr(R) (_REXQrr(0, R), _OOr (0x0fc8,_r8(R) ))
1596    
1597     #define CLC() _O (0xf8 )
1598     #define STC() _O (0xf9 )
1599    
1600     #define CMC() _O (0xf5 )
1601     #define CLD() _O (0xfc )
1602     #define STD() _O (0xfd )
1603    
1604     #define CBTW() (_d16(), _O (0x98 ))
1605     #define CWTL() _O (0x98 )
/* CDQE (REX.W + 98): sign-extend EAX into RAX; 64-bit mode only.
 * The comma expression must be wrapped in its own parentheses so that
 * _m64only() receives a single argument — the original passed it two,
 * which breaks expansion (compare POPQr/PUSHQr, which use the extra
 * parentheses). */
#define CLTQ()				_m64only((_REXQrr(0, 0),	_O		(0x98								)))
1607    
1608     #define CBW CBTW
1609     #define CWDE CWTL
1610     #define CDQE CLTQ
1611    
1612     #define CWTD() (_d16(), _O (0x99 ))
1613     #define CLTD() _O (0x99 )
/* CQO (REX.W + 99): sign-extend RAX into RDX:RAX; 64-bit mode only.
 * Same fix as CLTQ: parenthesize the comma expression so _m64only()
 * gets exactly one argument. */
#define CQTO()				_m64only((_REXQrr(0, 0),	_O		(0x99								)))
1615    
1616     #define CWD CWTD
1617     #define CDQ CLTD
1618     #define CQO CQTO
1619    
1620     #define LAHF() _m32only( _O (0x9f ))
1621     #define SAHF() _m32only( _O (0x9e ))
1622    
1623 gbeauche 1.2 /* _format Opcd ,Mod ,r ,m ,mem=dsp+sib ,imm... */
1624    
1625 gbeauche 1.6 #define CPUID() _OO (0x0fa2 )
/* RDTSC is encoded 0F 31 (two-byte opcode, like CPUID's 0F A2 above).
 * The original 0xff31 emitted a wrong first byte (0xFF group-5 prefix
 * instead of the 0x0F escape), producing a bogus instruction. */
#define RDTSC()				_OO		(0x0f31							)
1627    
1628     #define ENTERii(W, B) _O_W_B (0xc8 ,_su16(W),_su8(B))
1629    
1630     #define LEAVE() _O (0xc9 )
1631     #define RET() _O (0xc3 )
1632     #define RETi(IM) _O_W (0xc2 ,_su16(IM))
1633    
1634     #define NOP() _O (0x90 )
1635 gbeauche 1.3
1636    
1637     /* --- Media 128-bit instructions ------------------------------------------ */
1638    
/* SSE/SSE2 scalar & packed opcode suffixes: each value is the low byte of
 * the 0x0F-prefixed opcode (the _SSE* emitters below build 0x0f00|(OP)).
 * Grouped by function: conversions/compares first, then arithmetic. */
enum {
	/* conversions and ordered/unordered compares */
	X86_SSE_CVTIS  = 0x2a,		/* cvtsi2ss / cvtsi2sd */
	X86_SSE_CVTSI  = 0x2d,		/* cvtss2si / cvtsd2si */
	X86_SSE_UCOMI  = 0x2e,		/* ucomiss / ucomisd */
	X86_SSE_COMI   = 0x2f,		/* comiss / comisd */
	/* arithmetic and logic */
	X86_SSE_SQRT   = 0x51,
	X86_SSE_RSQRT  = 0x52,
	X86_SSE_RCP    = 0x53,
	X86_SSE_AND    = 0x54,
	X86_SSE_ANDN   = 0x55,
	X86_SSE_OR     = 0x56,
	X86_SSE_XOR    = 0x57,
	X86_SSE_ADD    = 0x58,
	X86_SSE_MUL    = 0x59,
	X86_SSE_CVTSD  = 0x5a,		/* cvtss2sd / cvtsd2ss */
	X86_SSE_CVTDT  = 0x5b,		/* cvtdq2ps / cvtps2dq */
	X86_SSE_SUB    = 0x5c,
	X86_SSE_MIN    = 0x5d,
	X86_SSE_DIV    = 0x5e,
	X86_SSE_MAX    = 0x5f
};
1660    
1661     /* _format Opcd ,Mod ,r ,m ,mem=dsp+sib ,imm... */
1662    
1663     #define __SSELrr(OP,RS,RSA,RD,RDA) (_REXLrr(RD, RS), _OO_Mrm (0x0f00|(OP) ,_b11,RDA(RD),RSA(RS) ))
1664     #define __SSELmr(OP,MD,MB,MI,MS,RD,RDA) (_REXLmr(MB, MI, RD), _OO_r_X (0x0f00|(OP) ,RDA(RD) ,MD,MB,MI,MS ))
1665     #define __SSELrm(OP,RS,RSA,MD,MB,MI,MS) (_REXLrm(RS, MB, MI), _OO_r_X (0x0f00|(OP) ,RSA(RS) ,MD,MB,MI,MS ))
1666    
1667     #define __SSEQrr(OP,RS,RSA,RD,RDA) (_REXQrr(RD, RS), _OO_Mrm (0x0f00|(OP) ,_b11,RDA(RD),RSA(RS) ))
1668     #define __SSEQmr(OP,MD,MB,MI,MS,RD,RDA) (_REXQmr(MB, MI, RD), _OO_r_X (0x0f00|(OP) ,RDA(RD) ,MD,MB,MI,MS ))
1669     #define __SSEQrm(OP,RS,RSA,MD,MB,MI,MS) (_REXQrm(RS, MB, MI), _OO_r_X (0x0f00|(OP) ,RSA(RS) ,MD,MB,MI,MS ))
1670    
1671     #define _SSELrr(PX,OP,RS,RSA,RD,RDA) (_B(PX), __SSELrr(OP, RS, RSA, RD, RDA))
1672     #define _SSELmr(PX,OP,MD,MB,MI,MS,RD,RDA) (_B(PX), __SSELmr(OP, MD, MB, MI, MS, RD, RDA))
1673     #define _SSELrm(PX,OP,RS,RSA,MD,MB,MI,MS) (_B(PX), __SSELrm(OP, RS, RSA, MD, MB, MI, MS))
1674    
1675     #define _SSEQrr(PX,OP,RS,RSA,RD,RDA) (_B(PX), __SSEQrr(OP, RS, RSA, RD, RDA))
1676     #define _SSEQmr(PX,OP,MD,MB,MI,MS,RD,RDA) (_B(PX), __SSEQmr(OP, MD, MB, MI, MS, RD, RDA))
1677     #define _SSEQrm(PX,OP,RS,RSA,MD,MB,MI,MS) (_B(PX), __SSEQrm(OP, RS, RSA, MD, MB, MI, MS))
1678    
1679     #define _SSEPSrr(OP,RS,RD) __SSELrr( OP, RS,_rX, RD,_rX)
1680     #define _SSEPSmr(OP,MD,MB,MI,MS,RD) __SSELmr( OP, MD, MB, MI, MS, RD,_rX)
1681     #define _SSEPSrm(OP,RS,MD,MB,MI,MS) __SSELrm( OP, RS,_rX, MD, MB, MI, MS)
1682    
1683     #define _SSEPDrr(OP,RS,RD) _SSELrr(0x66, OP, RS,_rX, RD,_rX)
1684     #define _SSEPDmr(OP,MD,MB,MI,MS,RD) _SSELmr(0x66, OP, MD, MB, MI, MS, RD,_rX)
1685     #define _SSEPDrm(OP,RS,MD,MB,MI,MS) _SSELrm(0x66, OP, RS,_rX, MD, MB, MI, MS)
1686    
1687     #define _SSESSrr(OP,RS,RD) _SSELrr(0xf3, OP, RS,_rX, RD,_rX)
1688     #define _SSESSmr(OP,MD,MB,MI,MS,RD) _SSELmr(0xf3, OP, MD, MB, MI, MS, RD,_rX)
1689     #define _SSESSrm(OP,RS,MD,MB,MI,MS) _SSELrm(0xf3, OP, RS,_rX, MD, MB, MI, MS)
1690    
1691     #define _SSESDrr(OP,RS,RD) _SSELrr(0xf2, OP, RS,_rX, RD,_rX)
1692     #define _SSESDmr(OP,MD,MB,MI,MS,RD) _SSELmr(0xf2, OP, MD, MB, MI, MS, RD,_rX)
1693     #define _SSESDrm(OP,RS,MD,MB,MI,MS) _SSELrm(0xf2, OP, RS,_rX, MD, MB, MI, MS)
1694    
1695     #define ADDPSrr(RS, RD) _SSEPSrr(X86_SSE_ADD, RS, RD)
1696     #define ADDPSmr(MD, MB, MI, MS, RD) _SSEPSmr(X86_SSE_ADD, MD, MB, MI, MS, RD)
1697     #define ADDPDrr(RS, RD) _SSEPDrr(X86_SSE_ADD, RS, RD)
1698     #define ADDPDmr(MD, MB, MI, MS, RD) _SSEPDmr(X86_SSE_ADD, MD, MB, MI, MS, RD)
1699    
1700     #define ADDSSrr(RS, RD) _SSESSrr(X86_SSE_ADD, RS, RD)
1701     #define ADDSSmr(MD, MB, MI, MS, RD) _SSESSmr(X86_SSE_ADD, MD, MB, MI, MS, RD)
1702     #define ADDSDrr(RS, RD) _SSESDrr(X86_SSE_ADD, RS, RD)
1703     #define ADDSDmr(MD, MB, MI, MS, RD) _SSESDmr(X86_SSE_ADD, MD, MB, MI, MS, RD)
1704    
1705     #define ANDNPSrr(RS, RD) _SSEPSrr(X86_SSE_ANDN, RS, RD)
1706     #define ANDNPSmr(MD, MB, MI, MS, RD) _SSEPSmr(X86_SSE_ANDN, MD, MB, MI, MS, RD)
1707     #define ANDNPDrr(RS, RD) _SSEPDrr(X86_SSE_ANDN, RS, RD)
1708     #define ANDNPDmr(MD, MB, MI, MS, RD) _SSEPDmr(X86_SSE_ANDN, MD, MB, MI, MS, RD)
1709    
1710     #define ANDPSrr(RS, RD) _SSEPSrr(X86_SSE_AND, RS, RD)
1711     #define ANDPSmr(MD, MB, MI, MS, RD) _SSEPSmr(X86_SSE_AND, MD, MB, MI, MS, RD)
1712     #define ANDPDrr(RS, RD) _SSEPDrr(X86_SSE_AND, RS, RD)
1713     #define ANDPDmr(MD, MB, MI, MS, RD) _SSEPDmr(X86_SSE_AND, MD, MB, MI, MS, RD)
1714    
1715     #define DIVPSrr(RS, RD) _SSEPSrr(X86_SSE_DIV, RS, RD)
1716     #define DIVPSmr(MD, MB, MI, MS, RD) _SSEPSmr(X86_SSE_DIV, MD, MB, MI, MS, RD)
1717     #define DIVPDrr(RS, RD) _SSEPDrr(X86_SSE_DIV, RS, RD)
1718     #define DIVPDmr(MD, MB, MI, MS, RD) _SSEPDmr(X86_SSE_DIV, MD, MB, MI, MS, RD)
1719    
1720     #define DIVSSrr(RS, RD) _SSESSrr(X86_SSE_DIV, RS, RD)
1721     #define DIVSSmr(MD, MB, MI, MS, RD) _SSESSmr(X86_SSE_DIV, MD, MB, MI, MS, RD)
1722     #define DIVSDrr(RS, RD) _SSESDrr(X86_SSE_DIV, RS, RD)
1723     #define DIVSDmr(MD, MB, MI, MS, RD) _SSESDmr(X86_SSE_DIV, MD, MB, MI, MS, RD)
1724    
1725     #define MAXPSrr(RS, RD) _SSEPSrr(X86_SSE_MAX, RS, RD)
1726     #define MAXPSmr(MD, MB, MI, MS, RD) _SSEPSmr(X86_SSE_MAX, MD, MB, MI, MS, RD)
1727     #define MAXPDrr(RS, RD) _SSEPDrr(X86_SSE_MAX, RS, RD)
1728     #define MAXPDmr(MD, MB, MI, MS, RD) _SSEPDmr(X86_SSE_MAX, MD, MB, MI, MS, RD)
1729    
1730     #define MAXSSrr(RS, RD) _SSESSrr(X86_SSE_MAX, RS, RD)
1731     #define MAXSSmr(MD, MB, MI, MS, RD) _SSESSmr(X86_SSE_MAX, MD, MB, MI, MS, RD)
1732     #define MAXSDrr(RS, RD) _SSESDrr(X86_SSE_MAX, RS, RD)
1733     #define MAXSDmr(MD, MB, MI, MS, RD) _SSESDmr(X86_SSE_MAX, MD, MB, MI, MS, RD)
1734    
1735     #define MINPSrr(RS, RD) _SSEPSrr(X86_SSE_MIN, RS, RD)
1736     #define MINPSmr(MD, MB, MI, MS, RD) _SSEPSmr(X86_SSE_MIN, MD, MB, MI, MS, RD)
1737     #define MINPDrr(RS, RD) _SSEPDrr(X86_SSE_MIN, RS, RD)
1738     #define MINPDmr(MD, MB, MI, MS, RD) _SSEPDmr(X86_SSE_MIN, MD, MB, MI, MS, RD)
1739    
1740     #define MINSSrr(RS, RD) _SSESSrr(X86_SSE_MIN, RS, RD)
1741     #define MINSSmr(MD, MB, MI, MS, RD) _SSESSmr(X86_SSE_MIN, MD, MB, MI, MS, RD)
1742     #define MINSDrr(RS, RD) _SSESDrr(X86_SSE_MIN, RS, RD)
1743     #define MINSDmr(MD, MB, MI, MS, RD) _SSESDmr(X86_SSE_MIN, MD, MB, MI, MS, RD)
1744    
1745     #define MULPSrr(RS, RD) _SSEPSrr(X86_SSE_MUL, RS, RD)
1746     #define MULPSmr(MD, MB, MI, MS, RD) _SSEPSmr(X86_SSE_MUL, MD, MB, MI, MS, RD)
1747     #define MULPDrr(RS, RD) _SSEPDrr(X86_SSE_MUL, RS, RD)
1748     #define MULPDmr(MD, MB, MI, MS, RD) _SSEPDmr(X86_SSE_MUL, MD, MB, MI, MS, RD)
1749    
1750     #define MULSSrr(RS, RD) _SSESSrr(X86_SSE_MUL, RS, RD)
1751     #define MULSSmr(MD, MB, MI, MS, RD) _SSESSmr(X86_SSE_MUL, MD, MB, MI, MS, RD)
1752     #define MULSDrr(RS, RD) _SSESDrr(X86_SSE_MUL, RS, RD)
1753     #define MULSDmr(MD, MB, MI, MS, RD) _SSESDmr(X86_SSE_MUL, MD, MB, MI, MS, RD)
1754    
1755     #define ORPSrr(RS, RD) _SSEPSrr(X86_SSE_OR, RS, RD)
1756     #define ORPSmr(MD, MB, MI, MS, RD) _SSEPSmr(X86_SSE_OR, MD, MB, MI, MS, RD)
1757     #define ORPDrr(RS, RD) _SSEPDrr(X86_SSE_OR, RS, RD)
1758     #define ORPDmr(MD, MB, MI, MS, RD) _SSEPDmr(X86_SSE_OR, MD, MB, MI, MS, RD)
1759    
1760     #define RCPPSrr(RS, RD) _SSEPSrr(X86_SSE_RCP, RS, RD)
1761     #define RCPPSmr(MD, MB, MI, MS, RD) _SSEPSmr(X86_SSE_RCP, MD, MB, MI, MS, RD)
1762     #define RCPSSrr(RS, RD) _SSESSrr(X86_SSE_RCP, RS, RD)
1763     #define RCPSSmr(MD, MB, MI, MS, RD) _SSESSmr(X86_SSE_RCP, MD, MB, MI, MS, RD)
1764    
1765     #define RSQRTPSrr(RS, RD) _SSEPSrr(X86_SSE_RSQRT, RS, RD)
1766     #define RSQRTPSmr(MD, MB, MI, MS, RD) _SSEPSmr(X86_SSE_RSQRT, MD, MB, MI, MS, RD)
1767     #define RSQRTSSrr(RS, RD) _SSESSrr(X86_SSE_RSQRT, RS, RD)
1768     #define RSQRTSSmr(MD, MB, MI, MS, RD) _SSESSmr(X86_SSE_RSQRT, MD, MB, MI, MS, RD)
1769    
1770     #define SQRTPSrr(RS, RD) _SSEPSrr(X86_SSE_SQRT, RS, RD)
1771     #define SQRTPSmr(MD, MB, MI, MS, RD) _SSEPSmr(X86_SSE_SQRT, MD, MB, MI, MS, RD)
1772     #define SQRTPDrr(RS, RD) _SSEPDrr(X86_SSE_SQRT, RS, RD)
1773     #define SQRTPDmr(MD, MB, MI, MS, RD) _SSEPDmr(X86_SSE_SQRT, MD, MB, MI, MS, RD)
1774    
1775     #define SQRTSSrr(RS, RD) _SSESSrr(X86_SSE_SQRT, RS, RD)
1776     #define SQRTSSmr(MD, MB, MI, MS, RD) _SSESSmr(X86_SSE_SQRT, MD, MB, MI, MS, RD)
1777     #define SQRTSDrr(RS, RD) _SSESDrr(X86_SSE_SQRT, RS, RD)
1778     #define SQRTSDmr(MD, MB, MI, MS, RD) _SSESDmr(X86_SSE_SQRT, MD, MB, MI, MS, RD)
1779    
1780     #define SUBPSrr(RS, RD) _SSEPSrr(X86_SSE_SUB, RS, RD)
1781     #define SUBPSmr(MD, MB, MI, MS, RD) _SSEPSmr(X86_SSE_SUB, MD, MB, MI, MS, RD)
1782     #define SUBPDrr(RS, RD) _SSEPDrr(X86_SSE_SUB, RS, RD)
1783     #define SUBPDmr(MD, MB, MI, MS, RD) _SSEPDmr(X86_SSE_SUB, MD, MB, MI, MS, RD)
1784    
1785     #define SUBSSrr(RS, RD) _SSESSrr(X86_SSE_SUB, RS, RD)
1786     #define SUBSSmr(MD, MB, MI, MS, RD) _SSESSmr(X86_SSE_SUB, MD, MB, MI, MS, RD)
1787     #define SUBSDrr(RS, RD) _SSESDrr(X86_SSE_SUB, RS, RD)
1788     #define SUBSDmr(MD, MB, MI, MS, RD) _SSESDmr(X86_SSE_SUB, MD, MB, MI, MS, RD)
1789    
/* Bitwise XOR of packed floats, and (unordered) scalar compares that set
 * EFLAGS (COMISx / UCOMISx).  Same rr/mr and PS/PD/SS/SD conventions as the
 * arithmetic group above; opcode selectors defined earlier in this header. */
1790     #define XORPSrr(RS, RD) _SSEPSrr(X86_SSE_XOR, RS, RD)
1791     #define XORPSmr(MD, MB, MI, MS, RD) _SSEPSmr(X86_SSE_XOR, MD, MB, MI, MS, RD)
1792     #define XORPDrr(RS, RD) _SSEPDrr(X86_SSE_XOR, RS, RD)
1793     #define XORPDmr(MD, MB, MI, MS, RD) _SSEPDmr(X86_SSE_XOR, MD, MB, MI, MS, RD)
1794    
1795     #define COMISSrr(RS, RD) _SSESSrr(X86_SSE_COMI, RS, RD)
1796     #define COMISSmr(MD, MB, MI, MS, RD) _SSESSmr(X86_SSE_COMI, MD, MB, MI, MS, RD)
1797     #define COMISDrr(RS, RD) _SSESDrr(X86_SSE_COMI, RS, RD)
1798     #define COMISDmr(MD, MB, MI, MS, RD) _SSESDmr(X86_SSE_COMI, MD, MB, MI, MS, RD)
1799    
1800     #define UCOMISSrr(RS, RD) _SSESSrr(X86_SSE_UCOMI, RS, RD)
1801     #define UCOMISSmr(MD, MB, MI, MS, RD) _SSESSmr(X86_SSE_UCOMI, MD, MB, MI, MS, RD)
1802     #define UCOMISDrr(RS, RD) _SSESDrr(X86_SSE_UCOMI, RS, RD)
1803     #define UCOMISDmr(MD, MB, MI, MS, RD) _SSESDmr(X86_SSE_UCOMI, MD, MB, MI, MS, RD)
1804    
/* Aligned 128-bit moves.  Raw opcodes: 0x28 = MOVAPx load (mem/reg -> xmm),
 * 0x29 = MOVAPx store (xmm -> mem); the additional "rm" suffix here means
 * reg -> mem. */
1805     #define MOVAPSrr(RS, RD) _SSEPSrr(0x28, RS, RD)
1806     #define MOVAPSmr(MD, MB, MI, MS, RD) _SSEPSmr(0x28, MD, MB, MI, MS, RD)
1807     #define MOVAPSrm(RS, MD, MB, MI, MS) _SSEPSrm(0x29, RS, MD, MB, MI, MS)
1808    
1809     #define MOVAPDrr(RS, RD) _SSEPDrr(0x28, RS, RD)
1810     #define MOVAPDmr(MD, MB, MI, MS, RD) _SSEPDmr(0x28, MD, MB, MI, MS, RD)
1811     #define MOVAPDrm(RS, MD, MB, MI, MS) _SSEPDrm(0x29, RS, MD, MB, MI, MS)
1812    
/* SSE/SSE2 conversion instructions.  The _SSELrr/_SSELmr emitters take an
 * explicit mandatory-prefix byte (0x66 = PD, 0xf3 = SS, 0xf2 = SD); the
 * double-underscore __SSEL* variants emit no prefix (PS / MMX forms).
 * _SSEQ* variants emit the REX.W form for 64-bit integer operands.
 * Operand-class tags (defined earlier in this header): _rX = XMM register,
 * _rM = MMX register, _r4 = 32-bit GPR, _r8 = 64-bit GPR. */
1813     #define CVTPS2PIrr(RS, RD) __SSELrr( X86_SSE_CVTSI, RS,_rX, RD,_rM)
1814     #define CVTPS2PImr(MD, MB, MI, MS, RD) __SSELmr( X86_SSE_CVTSI, MD, MB, MI, MS, RD,_rM)
1815     #define CVTPD2PIrr(RS, RD) _SSELrr(0x66, X86_SSE_CVTSI, RS,_rX, RD,_rM)
1816     #define CVTPD2PImr(MD, MB, MI, MS, RD) _SSELmr(0x66, X86_SSE_CVTSI, MD, MB, MI, MS, RD,_rM)
1817    
1818     #define CVTPI2PSrr(RS, RD) __SSELrr( X86_SSE_CVTIS, RS,_rM, RD,_rX)
1819     #define CVTPI2PSmr(MD, MB, MI, MS, RD) __SSELmr( X86_SSE_CVTIS, MD, MB, MI, MS, RD,_rX)
1820     #define CVTPI2PDrr(RS, RD) _SSELrr(0x66, X86_SSE_CVTIS, RS,_rM, RD,_rX)
1821     #define CVTPI2PDmr(MD, MB, MI, MS, RD) _SSELmr(0x66, X86_SSE_CVTIS, MD, MB, MI, MS, RD,_rX)
1822    
1823     #define CVTPS2PDrr(RS, RD) __SSELrr( X86_SSE_CVTSD, RS,_rX, RD,_rX)
1824     #define CVTPS2PDmr(MD, MB, MI, MS, RD) __SSELmr( X86_SSE_CVTSD, MD, MB, MI, MS, RD,_rX)
1825     #define CVTPD2PSrr(RS, RD) _SSELrr(0x66, X86_SSE_CVTSD, RS,_rX, RD,_rX)
1826     #define CVTPD2PSmr(MD, MB, MI, MS, RD) _SSELmr(0x66, X86_SSE_CVTSD, MD, MB, MI, MS, RD,_rX)
1827    
1828     #define CVTSS2SDrr(RS, RD) _SSELrr(0xf3, X86_SSE_CVTSD, RS,_rX, RD,_rX)
1829     #define CVTSS2SDmr(MD, MB, MI, MS, RD) _SSELmr(0xf3, X86_SSE_CVTSD, MD, MB, MI, MS, RD,_rX)
1830     #define CVTSD2SSrr(RS, RD) _SSELrr(0xf2, X86_SSE_CVTSD, RS,_rX, RD,_rX)
1831     #define CVTSD2SSmr(MD, MB, MI, MS, RD) _SSELmr(0xf2, X86_SSE_CVTSD, MD, MB, MI, MS, RD,_rX)
1832    
/* Scalar float <-> 32-bit integer ("L" = long, 32-bit GPR operand). */
1833     #define CVTSS2SILrr(RS, RD) _SSELrr(0xf3, X86_SSE_CVTSI, RS,_rX, RD,_r4)
1834     #define CVTSS2SILmr(MD, MB, MI, MS, RD) _SSELmr(0xf3, X86_SSE_CVTSI, MD, MB, MI, MS, RD,_r4)
1835     #define CVTSD2SILrr(RS, RD) _SSELrr(0xf2, X86_SSE_CVTSI, RS,_rX, RD,_r4)
1836     #define CVTSD2SILmr(MD, MB, MI, MS, RD) _SSELmr(0xf2, X86_SSE_CVTSI, MD, MB, MI, MS, RD,_r4)
1837    
1838     #define CVTSI2SSLrr(RS, RD) _SSELrr(0xf3, X86_SSE_CVTIS, RS,_r4, RD,_rX)
1839     #define CVTSI2SSLmr(MD, MB, MI, MS, RD) _SSELmr(0xf3, X86_SSE_CVTIS, MD, MB, MI, MS, RD,_rX)
1840     #define CVTSI2SDLrr(RS, RD) _SSELrr(0xf2, X86_SSE_CVTIS, RS,_r4, RD,_rX)
1841     #define CVTSI2SDLmr(MD, MB, MI, MS, RD) _SSELmr(0xf2, X86_SSE_CVTIS, MD, MB, MI, MS, RD,_rX)
1842    
/* Scalar float <-> 64-bit integer ("Q" = quad; AMD64 REX.W forms). */
1843     #define CVTSS2SIQrr(RS, RD) _SSEQrr(0xf3, X86_SSE_CVTSI, RS,_rX, RD,_r8)
1844     #define CVTSS2SIQmr(MD, MB, MI, MS, RD) _SSEQmr(0xf3, X86_SSE_CVTSI, MD, MB, MI, MS, RD,_r8)
1845     #define CVTSD2SIQrr(RS, RD) _SSEQrr(0xf2, X86_SSE_CVTSI, RS,_rX, RD,_r8)
1846     #define CVTSD2SIQmr(MD, MB, MI, MS, RD) _SSEQmr(0xf2, X86_SSE_CVTSI, MD, MB, MI, MS, RD,_r8)
1847    
1848     #define CVTSI2SSQrr(RS, RD) _SSEQrr(0xf3, X86_SSE_CVTIS, RS,_r8, RD,_rX)
1849     #define CVTSI2SSQmr(MD, MB, MI, MS, RD) _SSEQmr(0xf3, X86_SSE_CVTIS, MD, MB, MI, MS, RD,_rX)
1850     #define CVTSI2SDQrr(RS, RD) _SSEQrr(0xf2, X86_SSE_CVTIS, RS,_r8, RD,_rX)
1851     #define CVTSI2SDQmr(MD, MB, MI, MS, RD) _SSEQmr(0xf2, X86_SSE_CVTIS, MD, MB, MI, MS, RD,_rX)
1852    
/* MOVD/MOVQ data transfers between GPRs, MMX and XMM registers.
 * Opcodes 0x6e (load into MMX/XMM) and 0x7e (store from MMX/XMM); the 0x66
 * prefix selects the XMM form, the prefix-less __SSE* form targets MMX.
 * Name scheme: MOVD <src-class><dst-class>, L = 32-bit GPR, Q = 64-bit GPR,
 * X = XMM, M = MMX. */
1853     #define MOVDLXrr(RS, RD) _SSELrr(0x66, 0x6e, RS,_r4, RD,_rX)
1854     #define MOVDLXmr(MD, MB, MI, MS, RD) _SSELmr(0x66, 0x6e, MD, MB, MI, MS, RD,_rX)
1855     #define MOVDQXrr(RS, RD) _SSEQrr(0x66, 0x6e, RS,_r8, RD,_rX)
1856     #define MOVDQXmr(MD, MB, MI, MS, RD) _SSEQmr(0x66, 0x6e, MD, MB, MI, MS, RD,_rX)
1857    
1858     #define MOVDXLrr(RS, RD) _SSELrr(0x66, 0x7e, RS,_rX, RD,_r4)
1859     #define MOVDXLrm(RS, MD, MB, MI, MS) _SSELrm(0x66, 0x7e, RS,_rX, MD, MB, MI, MS)
1860     #define MOVDXQrr(RS, RD) _SSEQrr(0x66, 0x7e, RS,_rX, RD,_r8)
1861     #define MOVDXQrm(RS, MD, MB, MI, MS) _SSEQrm(0x66, 0x7e, RS,_rX, MD, MB, MI, MS)
1862    
1863     #define MOVDLMrr(RS, RD) __SSELrr( 0x6e, RS,_r4, RD,_rM)
1864     #define MOVDLMmr(MD, MB, MI, MS, RD) __SSELmr( 0x6e, MD, MB, MI, MS, RD,_rM)
1865     #define MOVDQMrr(RS, RD) __SSEQrr( 0x6e, RS,_r8, RD,_rM)
1866     #define MOVDQMmr(MD, MB, MI, MS, RD) __SSEQmr( 0x6e, MD, MB, MI, MS, RD,_rM)
1867    
1868     #define MOVDMLrr(RS, RD) __SSELrr( 0x7e, RS,_rM, RD,_r4)
1869     #define MOVDMLrm(RS, MD, MB, MI, MS) __SSELrm( 0x7e, RS,_rM, MD, MB, MI, MS)
1870     #define MOVDMQrr(RS, RD) __SSEQrr( 0x7e, RS,_rM, RD,_r8)
1871     #define MOVDMQrm(RS, MD, MB, MI, MS) __SSEQrm( 0x7e, RS,_rM, MD, MB, MI, MS)
1872    
/* MOVDQ2Q (xmm low quadword -> mmx) and the high/low packed-single shuffles. */
1873     #define MOVDQ2Qrr(RS, RD) _SSELrr(0xf2, 0xd6, RS,_rX, RD,_rM)
1874     #define MOVHLPSrr(RS, RD) __SSELrr( 0x12, RS,_rX, RD,_rX)
1875     #define MOVLHPSrr(RS, RD) __SSELrr( 0x16, RS,_rX, RD,_rX)
1876    
/* 128-bit integer moves (MOVDQA aligned / MOVDQU unaligned; 0x6f = load,
 * 0x7f = store) and 64-bit high/low half loads+stores (MOVHPx 0x16/0x17,
 * MOVLPx 0x12/0x13; 0x66-prefixed = PD form, prefix-less = PS form). */
1877     #define MOVDQArr(RS, RD) _SSELrr(0x66, 0x6f, RS,_rX, RD,_rX)
1878     #define MOVDQAmr(MD, MB, MI, MS, RD) _SSELmr(0x66, 0x6f, MD, MB, MI, MS, RD,_rX)
1879     #define MOVDQArm(RS, MD, MB, MI, MS) _SSELrm(0x66, 0x7f, RS,_rX, MD, MB, MI, MS)
1880    
1881     #define MOVDQUrr(RS, RD) _SSELrr(0xf3, 0x6f, RS,_rX, RD,_rX)
1882     #define MOVDQUmr(MD, MB, MI, MS, RD) _SSELmr(0xf3, 0x6f, MD, MB, MI, MS, RD,_rX)
1883     #define MOVDQUrm(RS, MD, MB, MI, MS) _SSELrm(0xf3, 0x7f, RS,_rX, MD, MB, MI, MS)
1884    
1885     #define MOVHPDmr(MD, MB, MI, MS, RD) _SSELmr(0x66, 0x16, MD, MB, MI, MS, RD,_rX)
1886     #define MOVHPDrm(RS, MD, MB, MI, MS) _SSELrm(0x66, 0x17, RS,_rX, MD, MB, MI, MS)
1887     #define MOVHPSmr(MD, MB, MI, MS, RD) __SSELmr( 0x16, MD, MB, MI, MS, RD,_rX)
1888     #define MOVHPSrm(RS, MD, MB, MI, MS) __SSELrm( 0x17, RS,_rX, MD, MB, MI, MS)
1889    
1890     #define MOVLPDmr(MD, MB, MI, MS, RD) _SSELmr(0x66, 0x12, MD, MB, MI, MS, RD,_rX)
1891     #define MOVLPDrm(RS, MD, MB, MI, MS) _SSELrm(0x66, 0x13, RS,_rX, MD, MB, MI, MS)
1892     #define MOVLPSmr(MD, MB, MI, MS, RD) __SSELmr( 0x12, MD, MB, MI, MS, RD,_rX)
1893     #define MOVLPSrm(RS, MD, MB, MI, MS) __SSELrm( 0x13, RS,_rX, MD, MB, MI, MS)
1894 gbeauche 1.2
1895    
1896     /* --- Floating-Point instructions ----------------------------------------- */
1897    
/* Emit an x87 ESC instruction with a memory operand.  OP is an octal
 * selector: its low 3 bits pick the ESC opcode byte (0xd8..0xdf) and the
 * remaining bits (OP >> 3) give the ModRM /reg extension.  _REXLrm(0,B,I)
 * handles the REX prefix first — presumably needed when base/index are
 * AMD64 extended registers (helper defined earlier; confirm there). */
1898     #define _ESCmi(D,B,I,S,OP) (_REXLrm(0,B,I), _O_r_X(0xd8|(OP & 7), (OP >> 3), D,B,I,S))
1899    
/* x87 load/store.  Register forms: FLD ST(i) = D9 C0+i, FST ST(i) = DD D0+i,
 * FSTP ST(i) = DD D8+i.  Memory forms use octal _ESCmi selectors; size
 * suffix: S = single (m32), L = long/double (m64), T = extended (m80). */
1900     #define FLDr(R) _OOr(0xd9c0,_rN(R))
1901     #define FLDLm(D,B,I,S) _ESCmi(D,B,I,S,005)
1902     #define FLDSm(D,B,I,S) _ESCmi(D,B,I,S,001)
1903     #define FLDTm(D,B,I,S) _ESCmi(D,B,I,S,053)
1904    
1905     #define FSTr(R) _OOr(0xddd0,_rN(R))
1906     #define FSTSm(D,B,I,S) _ESCmi(D,B,I,S,021)
1907     #define FSTLm(D,B,I,S) _ESCmi(D,B,I,S,025)
1908    
1909     #define FSTPr(R) _OOr(0xddd8,_rN(R))
1910     #define FSTPSm(D,B,I,S) _ESCmi(D,B,I,S,031)
1911     #define FSTPLm(D,B,I,S) _ESCmi(D,B,I,S,035)
1912     #define FSTPTm(D,B,I,S) _ESCmi(D,B,I,S,073)
1913    
/* x87 arithmetic.  Suffix convention:
 *   r0  = op ST(0), ST(i)   (D8-prefixed forms)
 *   0r  = op ST(i), ST(0)   (DC-prefixed forms)
 *   P0r = op ST(i), ST(0) then pop (DE-prefixed forms)
 *   Sm/Lm = memory operand, single/double precision (via _ESCmi).
 * The R variants (FSUBR/FDIVR) are the reversed-operand instructions. */
1914     #define FADDr0(R) _OOr(0xd8c0,_rN(R))
1915     #define FADD0r(R) _OOr(0xdcc0,_rN(R))
1916     #define FADDP0r(R) _OOr(0xdec0,_rN(R))
1917     #define FADDSm(D,B,I,S) _ESCmi(D,B,I,S,000)
1918     #define FADDLm(D,B,I,S) _ESCmi(D,B,I,S,004)
1919    
1920     #define FSUBSm(D,B,I,S) _ESCmi(D,B,I,S,040)
1921     #define FSUBLm(D,B,I,S) _ESCmi(D,B,I,S,044)
1922     #define FSUBr0(R) _OOr(0xd8e0,_rN(R))
1923     #define FSUB0r(R) _OOr(0xdce8,_rN(R))
1924     #define FSUBP0r(R) _OOr(0xdee8,_rN(R))
1925    
1926     #define FSUBRr0(R) _OOr(0xd8e8,_rN(R))
1927     #define FSUBR0r(R) _OOr(0xdce0,_rN(R))
1928     #define FSUBRP0r(R) _OOr(0xdee0,_rN(R))
1929     #define FSUBRSm(D,B,I,S) _ESCmi(D,B,I,S,050)
1930     #define FSUBRLm(D,B,I,S) _ESCmi(D,B,I,S,054)
1931    
1932     #define FMULr0(R) _OOr(0xd8c8,_rN(R))
1933     #define FMUL0r(R) _OOr(0xdcc8,_rN(R))
1934     #define FMULP0r(R) _OOr(0xdec8,_rN(R))
1935     #define FMULSm(D,B,I,S) _ESCmi(D,B,I,S,010)
1936     #define FMULLm(D,B,I,S) _ESCmi(D,B,I,S,014)
1937    
/* Note the deliberate E0<->E8 / F0<->F8 swap between the r0 and 0r columns
 * of FSUB/FDIV: the DC-prefixed encodings reverse the opcode pairing
 * relative to D8 (matches the ISA; do not "fix"). */
1938     #define FDIVr0(R) _OOr(0xd8f0,_rN(R))
1939     #define FDIV0r(R) _OOr(0xdcf8,_rN(R))
1940     #define FDIVP0r(R) _OOr(0xdef8,_rN(R))
1941     #define FDIVSm(D,B,I,S) _ESCmi(D,B,I,S,060)
1942     #define FDIVLm(D,B,I,S) _ESCmi(D,B,I,S,064)
1943    
1944     #define FDIVRr0(R) _OOr(0xd8f8,_rN(R))
1945     #define FDIVR0r(R) _OOr(0xdcf0,_rN(R))
1946     #define FDIVRP0r(R) _OOr(0xdef0,_rN(R))
1947     #define FDIVRSm(D,B,I,S) _ESCmi(D,B,I,S,070)
1948     #define FDIVRLm(D,B,I,S) _ESCmi(D,B,I,S,074)
1949    
/* x87 conditional moves (FCMOVcc ST(0), ST(i); DA/DB C0..D8 encodings,
 * P6 and later) and compares.  FCOMI/FCOMIP/FUCOMI/FUCOMIP set EFLAGS
 * directly; FCOM/FCOMP/FUCOM/FUCOMP set the FPU status word. */
1950     #define FCMOVBr0(R) _OOr(0xdac0,_rN(R))
1951     #define FCMOVBEr0(R) _OOr(0xdad0,_rN(R))
1952     #define FCMOVEr0(R) _OOr(0xdac8,_rN(R))
1953     #define FCMOVNBr0(R) _OOr(0xdbc0,_rN(R))
1954     #define FCMOVNBEr0(R) _OOr(0xdbd0,_rN(R))
1955     #define FCMOVNEr0(R) _OOr(0xdbc8,_rN(R))
1956     #define FCMOVNUr0(R) _OOr(0xdbd8,_rN(R))
1957     #define FCMOVUr0(R) _OOr(0xdad8,_rN(R))
1958     #define FCOMIr0(R) _OOr(0xdbf0,_rN(R))
1959     #define FCOMIPr0(R) _OOr(0xdff0,_rN(R))
1960    
1961     #define FCOMr(R) _OOr(0xd8d0,_rN(R))
1962     #define FCOMSm(D,B,I,S) _ESCmi(D,B,I,S,020)
1963     #define FCOMLm(D,B,I,S) _ESCmi(D,B,I,S,024)
1964    
1965     #define FCOMPr(R) _OOr(0xd8d8,_rN(R))
1966     #define FCOMPSm(D,B,I,S) _ESCmi(D,B,I,S,030)
1967     #define FCOMPLm(D,B,I,S) _ESCmi(D,B,I,S,034)
1968    
1969     #define FUCOMIr0(R) _OOr(0xdbe8,_rN(R))
1970     #define FUCOMIPr0(R) _OOr(0xdfe8,_rN(R))
1971     #define FUCOMPr(R) _OOr(0xdde8,_rN(R))
1972     #define FUCOMr(R) _OOr(0xdde0,_rN(R))
1973    
/* x87 integer-operand instructions (FIADD/FICOM/FILD/FIST/... with a
 * memory operand).  L suffix = m32 integer; Q suffix (FILDQm/FISTPQm)
 * = m64 integer.  All route through the octal _ESCmi selector. */
1974     #define FIADDLm(D,B,I,S) _ESCmi(D,B,I,S,002)
1975     #define FICOMLm(D,B,I,S) _ESCmi(D,B,I,S,022)
1976     #define FICOMPLm(D,B,I,S) _ESCmi(D,B,I,S,032)
1977     #define FIDIVLm(D,B,I,S) _ESCmi(D,B,I,S,062)
1978     #define FIDIVRLm(D,B,I,S) _ESCmi(D,B,I,S,072)
1979     #define FILDLm(D,B,I,S) _ESCmi(D,B,I,S,003)
1980     #define FILDQm(D,B,I,S) _ESCmi(D,B,I,S,057)
1981     #define FIMULLm(D,B,I,S) _ESCmi(D,B,I,S,012)
1982     #define FISTLm(D,B,I,S) _ESCmi(D,B,I,S,023)
1983     #define FISTPLm(D,B,I,S) _ESCmi(D,B,I,S,033)
1984     #define FISTPQm(D,B,I,S) _ESCmi(D,B,I,S,077)
1985     #define FISUBLm(D,B,I,S) _ESCmi(D,B,I,S,042)
1986     #define FISUBRLm(D,B,I,S) _ESCmi(D,B,I,S,052)
1987    
/* FFREE ST(i) (DD C0+i) tags a stack register empty; FXCH ST(i) (D9 C8+i)
 * swaps ST(0) with ST(i). */
1988     #define FREEr(R) _OOr(0xddc0,_rN(R))
1989     #define FXCHr(R) _OOr(0xd9c8,_rN(R))
1990 gbeauche 1.1
1991     #endif /* X86_RTASM_H */