rtasm: add minimal x86-64 support and new instructions (v5)

Changes in v5:
- Add sse2_movdqa

Changes in v4:
- Use _WIN64 instead of WIN64

Changes in v3:
- Add target and target-caps query functions, so that in principle the
  target could differ from the current CPU, and callers don't need #ifs

Changes in v2:
- Win64 support (untested)
- Use u_cpu_detect.h constants instead of #ifs

This commit adds minimal x86-64 support: only movs between registers
are supported for r8-r15, and x64_rexw() must be used to ask for 64-bit
operations.

It also adds several new instructions for the new translate_sse code.

movdqa
This commit is contained in:
Luca Barbieri 2010-08-12 18:27:02 +02:00
parent 4a4e29a9ab
commit a3e6e50544
3 changed files with 551 additions and 40 deletions

View file

@ -30,7 +30,7 @@
#include "rtasm_cpu.h"
#if defined(PIPE_ARCH_X86)
#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
static boolean rtasm_sse_enabled(void)
{
static boolean firsttime = 1;
@ -49,7 +49,7 @@ static boolean rtasm_sse_enabled(void)
int rtasm_cpu_has_sse(void)
{
/* FIXME: actually detect this at run-time */
#if defined(PIPE_ARCH_X86)
#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
return rtasm_sse_enabled();
#else
return 0;
@ -59,7 +59,7 @@ int rtasm_cpu_has_sse(void)
int rtasm_cpu_has_sse2(void)
{
/* FIXME: actually detect this at run-time */
#if defined(PIPE_ARCH_X86)
#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
return rtasm_sse_enabled();
#else
return 0;

View file

@ -22,8 +22,9 @@
**************************************************************************/
#include "pipe/p_config.h"
#include "util/u_cpu_detect.h"
#if defined(PIPE_ARCH_X86)
#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
#include "pipe/p_compiler.h"
#include "util/u_debug.h"
@ -231,6 +232,10 @@ static void emit_modrm( struct x86_function *p,
assert(reg.mod == mod_REG);
/* TODO: support extended x86-64 registers */
assert(reg.idx < 8);
assert(regmem.idx < 8);
val |= regmem.mod << 6; /* mod field */
val |= reg.idx << 3; /* reg field */
val |= regmem.idx; /* r/m field */
@ -363,6 +368,12 @@ int x86_get_label( struct x86_function *p )
*/
/* Emit a REX.W prefix (0x48) requesting a 64-bit operand size for the
 * following instruction.  On 32-bit targets this is a no-op, so callers
 * may invoke it unconditionally.
 */
void x64_rexw(struct x86_function *p)
{
   if (x86_target(p) == X86_32)
      return;
   emit_1ub(p, 0x48);
}
void x86_jcc( struct x86_function *p,
enum x86_cc cc,
int label )
@ -449,6 +460,52 @@ void x86_mov_reg_imm( struct x86_function *p, struct x86_reg dst, int imm )
emit_1i(p, imm);
}
/* Load a 32-bit immediate into a register or memory destination.
 * Register destinations use the short B8+r form via x86_mov_reg_imm();
 * memory destinations use C7 /0 id.
 */
void x86_mov_imm( struct x86_function *p, struct x86_reg dst, int imm )
{
   DUMP_RI( dst, imm );
   if (dst.mod != mod_REG) {
      emit_1ub(p, 0xc7);           /* mov r/m32, imm32 */
      emit_modrm_noreg(p, 0, dst);
      emit_1i(p, imm);
      return;
   }
   x86_mov_reg_imm(p, dst, imm);   /* short encoding for registers */
}
/* Load a 16-bit immediate into a register or memory destination.
 * Always emits the 0x66 operand-size override; the immediate is
 * written little-endian.
 */
void x86_mov16_imm( struct x86_function *p, struct x86_reg dst, uint16_t imm )
{
   uint8_t lo = imm & 0xff;
   uint8_t hi = imm >> 8;
   DUMP_RI( dst, imm );
   emit_1ub(p, 0x66);               /* 16-bit operand-size prefix */
   if (dst.mod != mod_REG) {
      emit_1ub(p, 0xc7);            /* mov r/m16, imm16 */
      emit_modrm_noreg(p, 0, dst);
   } else {
      emit_1ub(p, 0xb8 + dst.idx);  /* mov r16, imm16 (short form) */
   }
   emit_2ub(p, lo, hi);
}
/* Load an 8-bit immediate into a register or memory destination. */
void x86_mov8_imm( struct x86_function *p, struct x86_reg dst, uint8_t imm )
{
   DUMP_RI( dst, imm );
   if (dst.mod != mod_REG) {
      emit_1ub(p, 0xc6);            /* mov r/m8, imm8 */
      emit_modrm_noreg(p, 0, dst);
   } else {
      emit_1ub(p, 0xb0 + dst.idx);  /* mov r8, imm8 (short form) */
   }
   emit_1ub(p, imm);
}
/**
* Immediate group 1 instructions.
*/
@ -520,7 +577,7 @@ void x86_push( struct x86_function *p,
}
p->stack_offset += 4;
p->stack_offset += sizeof(void*);
}
void x86_push_imm32( struct x86_function *p,
@ -530,7 +587,7 @@ void x86_push_imm32( struct x86_function *p,
emit_1ub(p, 0x68);
emit_1i(p, imm32);
p->stack_offset += 4;
p->stack_offset += sizeof(void*);
}
@ -540,23 +597,33 @@ void x86_pop( struct x86_function *p,
DUMP_R( reg );
assert(reg.mod == mod_REG);
emit_1ub(p, 0x58 + reg.idx);
p->stack_offset -= 4;
p->stack_offset -= sizeof(void*);
}
void x86_inc( struct x86_function *p,
struct x86_reg reg )
{
DUMP_R( reg );
assert(reg.mod == mod_REG);
emit_1ub(p, 0x40 + reg.idx);
if(x86_target(p) == X86_32 && reg.mod == mod_REG)
{
emit_1ub(p, 0x40 + reg.idx);
return;
}
emit_1ub(p, 0xff);
emit_modrm_noreg(p, 0, reg);
}
void x86_dec( struct x86_function *p,
struct x86_reg reg )
{
DUMP_R( reg );
assert(reg.mod == mod_REG);
emit_1ub(p, 0x48 + reg.idx);
if(x86_target(p) == X86_32 && reg.mod == mod_REG)
{
emit_1ub(p, 0x48 + reg.idx);
return;
}
emit_1ub(p, 0xff);
emit_modrm_noreg(p, 1, reg);
}
void x86_ret( struct x86_function *p )
@ -583,6 +650,65 @@ void x86_mov( struct x86_function *p,
struct x86_reg src )
{
DUMP_RR( dst, src );
/* special hack for reading arguments until we support x86-64 registers everywhere */
if(src.mod == mod_REG && dst.mod == mod_REG && (src.idx >= 8 || dst.idx >= 8))
{
uint8_t rex = 0x40;
if(dst.idx >= 8)
{
rex |= 4;
dst.idx -= 8;
}
if(src.idx >= 8)
{
rex |= 1;
src.idx -= 8;
}
emit_1ub(p, rex);
}
emit_op_modrm( p, 0x8b, 0x89, dst, src );
}
/* 16-bit move: 0x66 operand-size override followed by the usual
 * 8B (load) / 89 (store) pair, direction chosen by operand modes. */
void x86_mov16( struct x86_function *p,
                struct x86_reg dst,
                struct x86_reg src )
{
   DUMP_RR( dst, src );
   emit_1ub(p, 0x66);   /* operand-size override prefix */
   emit_op_modrm( p, 0x8b, 0x89, dst, src );
}
/* 8-bit move: 8A (load) / 88 (store), direction chosen by operand modes. */
void x86_mov8( struct x86_function *p,
               struct x86_reg dst,
               struct x86_reg src )
{
   DUMP_RR( dst, src );
   emit_op_modrm( p, 0x8a, 0x88, dst, src );
}
/* 64-bit move: REX.W + 8B/89.  Only valid on 64-bit targets.
 * Extended registers (r8-r15) are handled for reg-to-reg moves only,
 * mirroring the limitation in x86_mov().
 */
void x64_mov64( struct x86_function *p,
                struct x86_reg dst,
                struct x86_reg src )
{
   uint8_t rex = 0x48;   /* REX.W: request 64-bit operand size */
   DUMP_RR( dst, src );
   assert(x86_target(p) != X86_32);
   /* special hack for reading arguments until we support x86-64 registers everywhere */
   if (dst.mod == mod_REG && src.mod == mod_REG
       && (dst.idx >= 8 || src.idx >= 8)) {
      if (dst.idx >= 8) {
         rex |= 4;       /* REX.R: extended destination register */
         dst.idx -= 8;
      }
      if (src.idx >= 8) {
         rex |= 1;       /* REX.B: extended source register */
         src.idx -= 8;
      }
   }
   emit_1ub(p, rex);
   emit_op_modrm( p, 0x8b, 0x89, dst, src );
}
@ -694,6 +820,61 @@ void x86_div( struct x86_function *p,
emit_op_modrm(p, 0xf7, 0, x86_make_reg(file_REG32, 6), src);
}
/* Byte-swap a 32-bit register in place (0F C8+r).
 * NOTE(review): no REX handling here, so only the low eight
 * registers can be swapped. */
void x86_bswap( struct x86_function *p, struct x86_reg reg )
{
   DUMP_R(reg);
   assert(reg.file == file_REG32);
   assert(reg.mod == mod_REG);
   emit_2ub(p, 0x0f, 0xc8 + reg.idx);
}
/* Logical right shift by an immediate (D1 /5 for shift-by-1,
 * otherwise C1 /5 ib). */
void x86_shr_imm( struct x86_function *p, struct x86_reg reg, unsigned imm )
{
   DUMP_RI(reg, imm);
   if (imm != 1) {
      emit_1ub(p, 0xc1);           /* shr r/m32, imm8 */
      emit_modrm_noreg(p, 5, reg);
      emit_1ub(p, imm);
   } else {
      emit_1ub(p, 0xd1);           /* shr r/m32, 1 (short form) */
      emit_modrm_noreg(p, 5, reg);
   }
}
/* Arithmetic right shift by an immediate (D1 /7 for shift-by-1,
 * otherwise C1 /7 ib). */
void x86_sar_imm( struct x86_function *p, struct x86_reg reg, unsigned imm )
{
   DUMP_RI(reg, imm);
   if (imm != 1) {
      emit_1ub(p, 0xc1);           /* sar r/m32, imm8 */
      emit_modrm_noreg(p, 7, reg);
      emit_1ub(p, imm);
   } else {
      emit_1ub(p, 0xd1);           /* sar r/m32, 1 (short form) */
      emit_modrm_noreg(p, 7, reg);
   }
}
/* Left shift by an immediate (D1 /4 for shift-by-1,
 * otherwise C1 /4 ib). */
void x86_shl_imm( struct x86_function *p, struct x86_reg reg, unsigned imm )
{
   DUMP_RI(reg, imm);
   if (imm != 1) {
      emit_1ub(p, 0xc1);           /* shl r/m32, imm8 */
      emit_modrm_noreg(p, 4, reg);
      emit_1ub(p, imm);
   } else {
      emit_1ub(p, 0xd1);           /* shl r/m32, 1 (short form) */
      emit_modrm_noreg(p, 4, reg);
   }
}
/***********************************************************************
@ -1027,6 +1208,77 @@ void sse_movmskps( struct x86_function *p,
* SSE2 instructions
*/
/* movd: move 32 bits between an XMM register and a GP register/memory.
 * 66 0F 6E is the load form (xmm <- r/m32), 66 0F 7E the store form. */
void sse2_movd( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
{
   DUMP_RR(dst, src);
   emit_2ub(p, 0x66, 0x0f);
   if(dst.mod == mod_REG && dst.file == file_REG32)
   {
      /* GP-register destination must use the store form, with the
       * XMM source in the modrm reg field (operands swapped). */
      emit_1ub(p, 0x7e);
      emit_modrm(p, src, dst);
   }
   else
   {
      emit_op_modrm(p, 0x6e, 0x7e, dst, src);
   }
}
/* movq: move 64 bits to/from an XMM register.
 * Register destination: F3 0F 7E (xmm <- xmm/m64).
 * Memory destination:   66 0F D6 (m64 <- xmm). */
void sse2_movq( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
{
   DUMP_RR(dst, src);
   switch (dst.mod) {
   case mod_REG:
      emit_3ub(p, 0xf3, 0x0f, 0x7e);
      emit_modrm(p, dst, src);
      break;
   case mod_INDIRECT:
   case mod_DISP32:
   case mod_DISP8:
      /* memory destination: the source must be a register */
      assert(src.mod == mod_REG);
      emit_3ub(p, 0x66, 0x0f, 0xd6);
      emit_modrm(p, src, dst);
      break;
   default:
      assert(0);
      break;
   }
}
/* movdqu: unaligned 128-bit integer move (F3 0F 6F/7F). */
void sse2_movdqu( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
{
   DUMP_RR(dst, src);
   emit_2ub(p, 0xf3, 0x0f);
   emit_op_modrm(p, 0x6f, 0x7f, dst, src);
}
/* movdqa: aligned 128-bit integer move (66 0F 6F/7F).
 * Memory operands must be 16-byte aligned. */
void sse2_movdqa( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
{
   DUMP_RR(dst, src);
   emit_2ub(p, 0x66, 0x0f);
   emit_op_modrm(p, 0x6f, 0x7f, dst, src);
}
/* movsd: scalar double-precision move (F2 0F 10/11). */
void sse2_movsd( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
{
   DUMP_RR(dst, src);
   emit_2ub(p, 0xf2, 0x0f);
   emit_op_modrm(p, 0x10, 0x11, dst, src);
}
/* movupd: unaligned packed double-precision move (66 0F 10/11). */
void sse2_movupd( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
{
   DUMP_RR(dst, src);
   emit_2ub(p, 0x66, 0x0f);
   emit_op_modrm(p, 0x10, 0x11, dst, src);
}
/* movapd: aligned packed double-precision move (66 0F 28/29).
 * Memory operands must be 16-byte aligned. */
void sse2_movapd( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
{
   DUMP_RR(dst, src);
   emit_2ub(p, 0x66, 0x0f);
   emit_op_modrm(p, 0x28, 0x29, dst, src);
}
/**
* Perform a reduced swizzle:
*/
@ -1041,6 +1293,28 @@ void sse2_pshufd( struct x86_function *p,
emit_1ub(p, shuf);
}
/* pshuflw: shuffle the four low 16-bit words of src into dst
 * (F2 0F 70 /r ib); the high quadword passes through unchanged. */
void sse2_pshuflw( struct x86_function *p,
                   struct x86_reg dst,
                   struct x86_reg src,
                   unsigned char shuf)
{
   DUMP_RRI( dst, src, shuf );
   emit_3ub(p, 0xf2, X86_TWOB, 0x70);
   emit_modrm(p, dst, src);
   emit_1ub(p, shuf);
}
/* pshufhw: shuffle the four high 16-bit words of src into dst
 * (F3 0F 70 /r ib); the low quadword passes through unchanged. */
void sse2_pshufhw( struct x86_function *p,
                   struct x86_reg dst,
                   struct x86_reg src,
                   unsigned char shuf)
{
   DUMP_RRI( dst, src, shuf );
   emit_3ub(p, 0xf3, X86_TWOB, 0x70);
   emit_modrm(p, dst, src);
   emit_1ub(p, shuf);
}
void sse2_cvttps2dq( struct x86_function *p,
struct x86_reg dst,
struct x86_reg src )
@ -1059,6 +1333,24 @@ void sse2_cvtps2dq( struct x86_function *p,
emit_modrm( p, dst, src );
}
/* cvtsd2ss: convert scalar double to scalar single (F2 0F 5A). */
void sse2_cvtsd2ss( struct x86_function *p,
                    struct x86_reg dst,
                    struct x86_reg src )
{
   DUMP_RR( dst, src );
   emit_3ub(p, 0xf2, 0x0f, 0x5a);
   emit_modrm( p, dst, src );
}
/* cvtpd2ps: convert two packed doubles to packed singles (66 0F 5A). */
void sse2_cvtpd2ps( struct x86_function *p,
                    struct x86_reg dst,
                    struct x86_reg src )
{
   DUMP_RR( dst, src );
   emit_3ub(p, 0x66, 0x0f, 0x5a);
   emit_modrm( p, dst, src );
}
void sse2_packssdw( struct x86_function *p,
struct x86_reg dst,
struct x86_reg src )
@ -1095,6 +1387,97 @@ void sse2_punpcklbw( struct x86_function *p,
emit_modrm( p, dst, src );
}
/* punpcklwd: interleave low 16-bit words of dst and src (66 0F 61). */
void sse2_punpcklwd( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
{
   DUMP_RR( dst, src );
   emit_3ub(p, 0x66, 0x0f, 0x61);
   emit_modrm( p, dst, src );
}
/* punpckldq: interleave low 32-bit dwords of dst and src (66 0F 62). */
void sse2_punpckldq( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
{
   DUMP_RR( dst, src );
   emit_3ub(p, 0x66, 0x0f, 0x62);
   emit_modrm( p, dst, src );
}
/* punpcklqdq: interleave low 64-bit qwords of dst and src (66 0F 6C). */
void sse2_punpcklqdq( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
{
   DUMP_RR( dst, src );
   emit_3ub(p, 0x66, 0x0f, 0x6c);
   emit_modrm( p, dst, src );
}
/* psllw: shift packed words left by immediate (66 0F 71 /6 ib). */
void sse2_psllw_imm( struct x86_function *p, struct x86_reg dst, unsigned imm )
{
   DUMP_RI(dst, imm);
   emit_3ub(p, 0x66, 0x0f, 0x71);
   emit_modrm_noreg(p, 6, dst);
   emit_1ub(p, imm);
}
/* pslld: shift packed dwords left by immediate (66 0F 72 /6 ib). */
void sse2_pslld_imm( struct x86_function *p, struct x86_reg dst, unsigned imm )
{
   DUMP_RI(dst, imm);
   emit_3ub(p, 0x66, 0x0f, 0x72);
   emit_modrm_noreg(p, 6, dst);
   emit_1ub(p, imm);
}
/* psllq: shift packed qwords left by immediate (66 0F 73 /6 ib). */
void sse2_psllq_imm( struct x86_function *p, struct x86_reg dst, unsigned imm )
{
   DUMP_RI(dst, imm);
   emit_3ub(p, 0x66, 0x0f, 0x73);
   emit_modrm_noreg(p, 6, dst);
   emit_1ub(p, imm);
}
/* psrlw: logical shift packed words right by immediate (66 0F 71 /2 ib). */
void sse2_psrlw_imm( struct x86_function *p, struct x86_reg dst, unsigned imm )
{
   DUMP_RI(dst, imm);
   emit_3ub(p, 0x66, 0x0f, 0x71);
   emit_modrm_noreg(p, 2, dst);
   emit_1ub(p, imm);
}
/* psrld: logical shift packed dwords right by immediate (66 0F 72 /2 ib). */
void sse2_psrld_imm( struct x86_function *p, struct x86_reg dst, unsigned imm )
{
   DUMP_RI(dst, imm);
   emit_3ub(p, 0x66, 0x0f, 0x72);
   emit_modrm_noreg(p, 2, dst);
   emit_1ub(p, imm);
}
/* psrlq: logical shift packed qwords right by immediate (66 0F 73 /2 ib). */
void sse2_psrlq_imm( struct x86_function *p, struct x86_reg dst, unsigned imm )
{
   DUMP_RI(dst, imm);
   emit_3ub(p, 0x66, 0x0f, 0x73);
   emit_modrm_noreg(p, 2, dst);
   emit_1ub(p, imm);
}
/* psraw: arithmetic shift packed words right by immediate (66 0F 71 /4 ib). */
void sse2_psraw_imm( struct x86_function *p, struct x86_reg dst, unsigned imm )
{
   DUMP_RI(dst, imm);
   emit_3ub(p, 0x66, 0x0f, 0x71);
   emit_modrm_noreg(p, 4, dst);
   emit_1ub(p, imm);
}
/* psrad: arithmetic shift packed dwords right by immediate (66 0F 72 /4 ib). */
void sse2_psrad_imm( struct x86_function *p, struct x86_reg dst, unsigned imm )
{
   DUMP_RI(dst, imm);
   emit_3ub(p, 0x66, 0x0f, 0x72);
   emit_modrm_noreg(p, 4, dst);
   emit_1ub(p, imm);
}
/* por: bitwise OR of 128-bit operands (66 0F EB). */
void sse2_por( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
{
   DUMP_RR(dst, src);
   emit_3ub(p, 0x66, 0x0f, 0xeb);
   emit_modrm(p, dst, src);
}
void sse2_rcpps( struct x86_function *p,
struct x86_reg dst,
@ -1114,18 +1497,6 @@ void sse2_rcpss( struct x86_function *p,
emit_modrm( p, dst, src );
}
void sse2_movd( struct x86_function *p,
struct x86_reg dst,
struct x86_reg src )
{
DUMP_RR( dst, src );
emit_2ub(p, 0x66, X86_TWOB);
emit_op_modrm( p, 0x6e, 0x7e, dst, src );
}
/***********************************************************************
* x87 instructions
*/
@ -1716,23 +2087,79 @@ void x86_cdecl_caller_pop_regs( struct x86_function *p )
}
/* Retrieve a reference to one of the function arguments, taking into
* account any push/pop activity:
*/
struct x86_reg x86_fn_arg( struct x86_function *p,
unsigned arg )
unsigned arg )
{
return x86_make_disp(x86_make_reg(file_REG32, reg_SP),
switch(x86_target(p))
{
case X86_64_WIN64_ABI:
/* Microsoft uses a different calling convention than the rest of the world */
switch(arg)
{
case 1:
return x86_make_reg(file_REG32, reg_CX);
case 2:
return x86_make_reg(file_REG32, reg_DX);
case 3:
return x86_make_reg(file_REG32, reg_R8);
case 4:
return x86_make_reg(file_REG32, reg_R9);
default:
return x86_make_disp(x86_make_reg(file_REG32, reg_SP),
p->stack_offset + (arg - 4) * 8); /* ??? */
}
case X86_64_STD_ABI:
switch(arg)
{
case 1:
return x86_make_reg(file_REG32, reg_DI);
case 2:
return x86_make_reg(file_REG32, reg_SI);
case 3:
return x86_make_reg(file_REG32, reg_DX);
case 4:
return x86_make_reg(file_REG32, reg_CX);
case 5:
return x86_make_reg(file_REG32, reg_R8);
case 6:
return x86_make_reg(file_REG32, reg_R9);
default:
return x86_make_disp(x86_make_reg(file_REG32, reg_SP),
p->stack_offset + (arg - 6) * 8); /* ??? */
}
case X86_32:
return x86_make_disp(x86_make_reg(file_REG32, reg_SP),
p->stack_offset + arg * 4); /* ??? */
default:
abort();
}
}
/* Shared initialization: detect host CPU features once and translate
 * them into this function's target capability flags, then reset the
 * code-store write pointer. */
static void x86_init_func_common( struct x86_function *p )
{
   util_cpu_detect();
   p->caps = 0;
   if(util_cpu_caps.has_mmx)
      p->caps |= X86_MMX;
   if(util_cpu_caps.has_mmx2)
      p->caps |= X86_MMX2;
   if(util_cpu_caps.has_sse)
      p->caps |= X86_SSE;
   if(util_cpu_caps.has_sse2)
      p->caps |= X86_SSE2;
   if(util_cpu_caps.has_sse3)
      p->caps |= X86_SSE3;
   if(util_cpu_caps.has_sse4_1)
      p->caps |= X86_SSE4_1;
   p->csr = p->store;   /* start emitting at the beginning of the store */
   DUMP_START();
}
void x86_init_func( struct x86_function *p )
{
p->size = 0;
p->store = NULL;
p->csr = p->store;
DUMP_START();
x86_init_func_common(p);
}
void x86_init_func_size( struct x86_function *p, unsigned code_size )
@ -1742,8 +2169,7 @@ void x86_init_func_size( struct x86_function *p, unsigned code_size )
if (p->store == NULL) {
p->store = p->error_overflow;
}
p->csr = p->store;
DUMP_START();
x86_init_func_common(p);
}
void x86_release_func( struct x86_function *p )

View file

@ -26,20 +26,28 @@
#include "pipe/p_config.h"
#if defined(PIPE_ARCH_X86)
#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
/* It is up to the caller to ensure that instructions issued are
* suitable for the host cpu. There are no checks made in this module
* for mmx/sse/sse2 support on the cpu.
*/
struct x86_reg {
unsigned file:3;
unsigned idx:3;
unsigned file:2;
unsigned idx:4;
unsigned mod:2; /* mod_REG if this is just a register */
int disp:24; /* only +/- 23bits of offset - should be enough... */
};
#define X86_MMX 1
#define X86_MMX2 2
#define X86_SSE 4
#define X86_SSE2 8
#define X86_SSE3 0x10
#define X86_SSE4_1 0x20
struct x86_function {
unsigned caps;
unsigned size;
unsigned char *store;
unsigned char *csr;
@ -75,7 +83,15 @@ enum x86_reg_name {
reg_SP,
reg_BP,
reg_SI,
reg_DI
reg_DI,
reg_R8,
reg_R9,
reg_R10,
reg_R11,
reg_R12,
reg_R13,
reg_R14,
reg_R15
};
@ -110,6 +126,29 @@ typedef void (*x86_func)(void);
/* Begin/end/retrieve function creation:
*/
enum x86_target
{
X86_32,
X86_64_STD_ABI,
X86_64_WIN64_ABI
};
/* make this read a member of x86_function if target != host is desired */
static INLINE enum x86_target x86_target( struct x86_function* p )
{
#ifdef PIPE_ARCH_X86
return X86_32;
#elif defined(_WIN64)
return X86_64_WIN64_ABI;
#elif defined(PIPE_ARCH_X86_64)
return X86_64_STD_ABI;
#endif
}
/* Return the target's capability bitmask (X86_MMX .. X86_SSE4_1),
 * as filled in at function-init time. */
static INLINE unsigned x86_target_caps( struct x86_function* p )
{
   return p->caps;
}
void x86_init_func( struct x86_function *p );
void x86_init_func_size( struct x86_function *p, unsigned code_size );
@ -138,6 +177,8 @@ struct x86_reg x86_get_base_reg( struct x86_reg reg );
*/
int x86_get_label( struct x86_function *p );
void x64_rexw(struct x86_function *p);
void x86_jcc( struct x86_function *p,
enum x86_cc cc,
int label );
@ -178,18 +219,54 @@ void mmx_movq( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void mmx_packssdw( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void mmx_packuswb( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_movd( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_movq( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_movdqu( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_movdqa( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_movsd( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_movupd( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_movapd( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_cvtps2dq( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_cvttps2dq( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_cvtdq2ps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_cvtsd2ss( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_cvtpd2ps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_movd( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_packssdw( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_packsswb( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_packuswb( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_pshufd( struct x86_function *p, struct x86_reg dest, struct x86_reg arg0,
unsigned char shuf );
void sse2_pshuflw( struct x86_function *p, struct x86_reg dest, struct x86_reg arg0,
unsigned char shuf );
void sse2_pshufhw( struct x86_function *p, struct x86_reg dest, struct x86_reg arg0,
unsigned char shuf );
void sse2_rcpps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_rcpss( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_punpcklbw( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_punpcklwd( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_punpckldq( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_punpcklqdq( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_psllw_imm( struct x86_function *p, struct x86_reg dst, unsigned imm );
void sse2_pslld_imm( struct x86_function *p, struct x86_reg dst, unsigned imm );
void sse2_psllq_imm( struct x86_function *p, struct x86_reg dst, unsigned imm );
void sse2_psrlw_imm( struct x86_function *p, struct x86_reg dst, unsigned imm );
void sse2_psrld_imm( struct x86_function *p, struct x86_reg dst, unsigned imm );
void sse2_psrlq_imm( struct x86_function *p, struct x86_reg dst, unsigned imm );
void sse2_psraw_imm( struct x86_function *p, struct x86_reg dst, unsigned imm );
void sse2_psrad_imm( struct x86_function *p, struct x86_reg dst, unsigned imm );
void sse2_por( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse2_pshuflw( struct x86_function *p, struct x86_reg dst, struct x86_reg src, uint8_t imm );
void sse2_pshufhw( struct x86_function *p, struct x86_reg dst, struct x86_reg src, uint8_t imm );
void sse2_pshufd( struct x86_function *p, struct x86_reg dst, struct x86_reg src, uint8_t imm );
void sse_prefetchnta( struct x86_function *p, struct x86_reg ptr);
void sse_prefetch0( struct x86_function *p, struct x86_reg ptr);
@ -227,7 +304,6 @@ void sse_shufps( struct x86_function *p, struct x86_reg dest, struct x86_reg arg
void sse_unpckhps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_unpcklps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_pmovmskb( struct x86_function *p, struct x86_reg dest, struct x86_reg src );
void sse2_punpcklbw( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void sse_movmskps( struct x86_function *p, struct x86_reg dst, struct x86_reg src);
void x86_add( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
@ -237,8 +313,14 @@ void x86_dec( struct x86_function *p, struct x86_reg reg );
void x86_inc( struct x86_function *p, struct x86_reg reg );
void x86_lea( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_mov( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_movzx8( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_movzx16( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x64_mov64( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_mov8( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_mov16( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_movzx8(struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_movzx16(struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_mov_imm(struct x86_function *p, struct x86_reg dst, int imm );
void x86_mov8_imm(struct x86_function *p, struct x86_reg dst, uint8_t imm );
void x86_mov16_imm(struct x86_function *p, struct x86_reg dst, uint16_t imm );
void x86_mul( struct x86_function *p, struct x86_reg src );
void x86_imul( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_or( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
@ -252,7 +334,10 @@ void x86_test( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_xor( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
void x86_sahf( struct x86_function *p );
void x86_div( struct x86_function *p, struct x86_reg src );
void x86_bswap( struct x86_function *p, struct x86_reg src );
void x86_shr_imm( struct x86_function *p, struct x86_reg reg, unsigned imm );
void x86_sar_imm( struct x86_function *p, struct x86_reg reg, unsigned imm );
void x86_shl_imm( struct x86_function *p, struct x86_reg reg, unsigned imm );
void x86_cdecl_caller_push_regs( struct x86_function *p );
void x86_cdecl_caller_pop_regs( struct x86_function *p );