Diffstat (limited to 'sux.h')
-rw-r--r--  sux.h  804
1 file changed, 745 insertions, 59 deletions
diff --git a/sux.h b/sux.h
index f719b03..9d7bcb3 100644
--- a/sux.h
+++ b/sux.h
@@ -44,7 +44,7 @@ extern pthread_cond_t main_cond;
#if debug
extern void print_regs(struct sux *cpu, uint8_t lines, uint8_t thread);
-extern void disasm(struct sux *cpu, uint8_t lines, uint8_t opcode, uint8_t prefix, uint8_t thread);
+extern void disasm(struct sux *cpu, uint8_t lines, uint8_t opcode, uint8_t prefix, uint8_t ext_prefix, uint8_t prefix2, uint8_t thread);
#endif
/*#define KEYBUF_SIZE 0x40
@@ -59,6 +59,11 @@ static inline uint8_t get_addrsize(uint8_t prefix, uint8_t addrmode) {
uint8_t id = (prefix & 0x0C) >> 2;
switch (addrmode) {
case ZM:
+ case ZMX:
+ case ZMY:
+ case IND:
+ case INDX:
+ case INDY:
switch (id) {
case 2: return 5;
case 3: return 3;
@@ -67,6 +72,11 @@ static inline uint8_t get_addrsize(uint8_t prefix, uint8_t addrmode) {
}
break;
case ABS:
+ case ABSX:
+ case ABSY:
+ case AIND:
+ case AINDX:
+ case AINDY:
switch (id) {
case 3: return 7;
case 2: return 6;
@@ -75,7 +85,7 @@ static inline uint8_t get_addrsize(uint8_t prefix, uint8_t addrmode) {
}
break;
}
- return 0;
+ return 0xFF;
}
static inline uint8_t isrw(uint8_t opcode) {
@@ -269,37 +279,26 @@ static inline uint64_t read_addr(struct sux *cpu, uint8_t prefix, uint8_t inc_cl
return address;
}
-static inline uint64_t zmx_addr(struct sux *cpu, uint8_t prefix, uint8_t inc_clk, uint8_t inc_pc) {
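+/* Indexed addressing: read a base operand of the given type (ZM or ABS) and add the supplied index register. Replaces the separate zmx_addr()/zmy_addr() helpers. */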
+static inline uint64_t idx_addr(struct sux *cpu, uint8_t prefix, uint8_t inc_clk, uint8_t type, uint8_t inc_pc, uint64_t idx_reg) {
#if getclk
cpu->clk += inc_clk;
#endif
- return read_addr(cpu, prefix, inc_clk, ZM, inc_pc) + cpu->x;
+ return read_addr(cpu, prefix, inc_clk, type, inc_pc) + idx_reg;
}
-static inline uint64_t zmy_addr(struct sux *cpu, uint8_t prefix, uint8_t inc_clk, uint8_t inc_pc) {
- #if getclk
- cpu->clk += inc_clk;
- #endif
- return read_addr(cpu, prefix, inc_clk, ZM, inc_pc) + cpu->y;
-}
-
-
-static inline uint64_t ind_addr(struct sux *cpu, uint8_t prefix, uint8_t inc_clk, uint8_t inc_pc) {
- return read_value(cpu, 0, read_addr(cpu, prefix, inc_clk, ZM, inc_pc), 7, inc_clk, 0);
+static inline uint64_t ind_addr(struct sux *cpu, uint8_t prefix, uint8_t inc_clk, uint8_t type, uint8_t inc_pc) {
+ return read_value(cpu, 0, read_addr(cpu, prefix, inc_clk, type, inc_pc), 7, inc_clk, 0);
}
-static inline uint64_t indx_addr(struct sux *cpu, uint8_t prefix, uint8_t inc_clk, uint8_t inc_pc) {
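+/* Indexed indirect addressing: with pre_idx set, the index is added to the pointer address before the dereference (INDX/AINDX); otherwise it is added to the fetched pointer (INDY/AINDY). */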
+static inline uint64_t ind_idx_addr(struct sux *cpu, uint8_t prefix, uint8_t inc_clk, uint8_t type, uint8_t inc_pc, uint64_t idx_reg, uint8_t pre_idx) {
#if getclk
cpu->clk += inc_clk;
#endif
- return read_value(cpu, 0, read_addr(cpu, prefix, inc_clk, ZM, inc_pc)+cpu->x, 7, inc_clk, 0);
-}
-
-static inline uint64_t indy_addr(struct sux *cpu, uint8_t prefix, uint8_t inc_clk, uint8_t inc_pc) {
- #if getclk
- cpu->clk += inc_clk;
- #endif
- return ind_addr(cpu, prefix, inc_clk, inc_pc) + cpu->y;
+ if (pre_idx) {
+ return read_value(cpu, 0, read_addr(cpu, prefix, inc_clk, type, inc_pc)+idx_reg, 7, inc_clk, 0);
+ } else {
+ return ind_addr(cpu, prefix, inc_clk, type, inc_pc) + idx_reg;
+ }
}
static inline uint64_t rel_addr(struct sux *cpu, uint8_t prefix, uint8_t inc_clk, uint8_t inc_pc) {
@@ -321,12 +320,21 @@ static inline uint64_t rel_addr(struct sux *cpu, uint8_t prefix, uint8_t inc_clk
}
}
-static inline uint64_t get_addr(struct sux *cpu, uint8_t opcode, uint8_t prefix, uint8_t inc_pc, uint8_t inc_clk, uint8_t thread) {
+static inline uint64_t get_addr(struct sux *cpu, uint8_t opcode, uint8_t prefix, uint8_t ext_prefix, uint8_t inc_pc, uint8_t inc_clk, uint8_t thread) {
uint64_t address = 0;
- switch (optype[opcode]) {
+ uint8_t type = 0;
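+ /* When the extended-prefix marker bits are set, the addressing type comes from the extended opcode table instead of the base one. */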
+ if ((ext_prefix & 0xD) == 0xD) {
+ switch (ext_prefix >> 4) {
+ case 0x0: type = ext_optype[opcode]; break;
+ }
+ } else {
+ type = optype[opcode];
+ }
+ switch (type) {
case BREG:
case IMPL:
break;
+ case EIND: address = cpu->e; break;
case IMM:
address = imm_addr(cpu);
switch (opcode) {
@@ -335,34 +343,41 @@ static inline uint64_t get_addr(struct sux *cpu, uint8_t opcode, uint8_t prefix,
case ROL_IMM:
case ROR_IMM:
case ASR_IMM:
- if (inc_pc) {
- ++cpu->pc;
+ if ((ext_prefix & 0xD) != 0xD) {
+ if (inc_pc) {
+ ++cpu->pc;
+ }
+ break;
}
- break;
+ /* Falls Through. */
default:
if (inc_pc) {
cpu->pc+=(1 << ((prefix >> 4) & 3));
}
- /* Falls Through. */
- case TXS_IMM: break;
+ break;
}
break;
- case ZM : return read_addr(cpu, prefix, inc_clk, ZM, inc_pc);
- case ABS : return read_addr(cpu, prefix, inc_clk, ABS, inc_pc);
- case ZMX : return zmx_addr(cpu, prefix, inc_clk, /**/ inc_pc);
- case ZMY : return zmy_addr(cpu, prefix, inc_clk, /**/ inc_pc);
- case IND : return ind_addr(cpu, prefix, inc_clk, /**/ inc_pc);
- case INDX: return indx_addr(cpu, prefix, inc_clk, /**/ inc_pc);
- case INDY: return indy_addr(cpu, prefix, inc_clk, /**/ inc_pc);
- case REL : return rel_addr(cpu, prefix, inc_clk, /**/ inc_pc);
+ case ZM : return read_addr(cpu, prefix, inc_clk, ZM, inc_pc);
+ case ZMX : return idx_addr(cpu, prefix, inc_clk, ZM, inc_pc, cpu->x);
+ case ZMY : return idx_addr(cpu, prefix, inc_clk, ZM, inc_pc, cpu->y);
+ case IND : return ind_addr(cpu, prefix, inc_clk, ZM, inc_pc);
+ case INDX : return ind_idx_addr(cpu, prefix, inc_clk, ZM, inc_pc, cpu->x, 1);
+ case INDY : return ind_idx_addr(cpu, prefix, inc_clk, ZM, inc_pc, cpu->y, 0);
+ case ABS : return read_addr(cpu, prefix, inc_clk, ABS, inc_pc);
+ case ABSX : return idx_addr(cpu, prefix, inc_clk, ABS, inc_pc, cpu->x);
+ case ABSY : return idx_addr(cpu, prefix, inc_clk, ABS, inc_pc, cpu->y);
+ case AIND : return ind_addr(cpu, prefix, inc_clk, ABS, inc_pc);
+ case AINDX: return ind_idx_addr(cpu, prefix, inc_clk, ABS, inc_pc, cpu->x, 1);
+ case AINDY: return ind_idx_addr(cpu, prefix, inc_clk, ABS, inc_pc, cpu->y, 0);
+ case REL : return rel_addr(cpu, prefix, inc_clk, /**/ inc_pc);
}
return address;
}
-static inline uint64_t adc(struct sux *cpu, uint64_t reg, uint64_t value, uint8_t thread) {
- uint64_t sum = reg+value+getflag(C);
+static inline uint64_t adc(struct sux *cpu, uint64_t reg, uint64_t value, uint8_t carry, uint8_t thread) {
+ uint64_t sum = reg+value+carry;
setflag(sum == 0, Z);
setflag((sum >> 63), N);
setflag(((reg^value) >> 63) && ((reg^sum) >> 63), V);
@@ -406,55 +421,65 @@ static inline uint64_t xor(struct sux *cpu, uint64_t reg, uint64_t value, uint8_
return reg;
}
-static inline uint64_t lsl(struct sux *cpu, uint64_t reg, uint64_t value, uint8_t thread) {
- uint64_t sum = (value < 64) ? reg << value : 0;
+static inline uint64_t lsl(struct sux *cpu, uint64_t reg, uint64_t value, uint8_t size, uint8_t thread) {
+ size = (size > 8) ? 8 : size;
+ uint8_t msb = size*8;
+ uint64_t sum = (value < msb) ? reg << value : 0;
setflag(sum == 0, Z);
- setflag(sum >> 63, N);
- setflag(reg >> (64-value), C);
+ setflag(sum >> (msb-1), N);
+ setflag(reg >> (msb-value), C);
return sum;
}
-static inline uint64_t lsr(struct sux *cpu, uint64_t reg, uint64_t value, uint8_t thread) {
- uint64_t sum = (value < 64) ? reg >> value : 0;
+static inline uint64_t lsr(struct sux *cpu, uint64_t reg, uint64_t value, uint8_t size, uint8_t thread) {
+ size = (size > 8) ? 8 : size;
+ uint8_t msb = size*8;
+ uint64_t sum = (value < msb) ? reg >> value : 0;
setflag(sum == 0, Z);
- setflag(sum >> 63, N);
+ setflag(sum >> (msb-1), N);
setflag(reg & 1, C);
return sum;
}
-static inline uint64_t asr(struct sux *cpu, uint64_t reg, uint64_t value, uint8_t thread) {
- uint8_t sign = reg >> 63;
- uint64_t sum = (value < 64) ? (reg >> value) | ((uint64_t)sign << 63) : 0;
+static inline uint64_t asr(struct sux *cpu, uint64_t reg, uint64_t value, uint8_t size, uint8_t thread) {
+ size = (size > 8) ? 8 : size;
+ uint8_t msb = size*8;
+ uint8_t sign = reg >> (msb-1);
+ uint64_t sum = (value < msb) ? (reg >> value) | ((uint64_t)sign << (msb-1)) : 0;
setflag(sum == 0, Z);
- setflag(sum >> 63, N);
+ setflag(sum >> (msb-1), N);
setflag(reg & 1, C);
return sum;
}
-static inline uint64_t rol(struct sux *cpu, uint64_t reg, uint64_t value, uint8_t thread) {
+static inline uint64_t rol(struct sux *cpu, uint64_t reg, uint64_t value, uint8_t size, uint8_t thread) {
+ size = (size > 8) ? 8 : size;
+ uint8_t msb = size*8;
uint64_t sum;
uint64_t c = getflag(C);
switch (value & 0x3F) {
case 0 : return reg;
case 1 : sum = (reg << 1) | (c & 1); break;
- default: sum = (reg << value) | (c << (value-1)) | (reg >> (65-value)); break;
+ default: sum = (reg << value) | (c << (value-1)) | (reg >> ((msb+1)-value)); break;
}
setflag(sum == 0, Z);
- setflag(sum >> 63, N);
- setflag((reg >> (64-value)) & 1, C);
+ setflag(sum >> (msb-1), N);
+ setflag((reg >> (msb-value)) & 1, C);
return sum;
}
-static inline uint64_t ror(struct sux *cpu, uint64_t reg, uint64_t value, uint8_t thread) {
+static inline uint64_t ror(struct sux *cpu, uint64_t reg, uint64_t value, uint8_t size, uint8_t thread) {
+ size = (size > 8) ? 8 : size;
+ uint8_t msb = size*8;
uint64_t sum;
uint64_t c = getflag(C);
switch (value & 0x3F) {
case 0 : return reg;
- case 1 : sum = (reg >> 1) | (c << 63); break;
- default: sum = (reg >> value) | (c << (64-value)) | (reg << (65-value)); break;
+ case 1 : sum = (reg >> 1) | (c << (msb-1)); break;
+ default: sum = (reg >> value) | (c << (msb-value)) | (reg << ((msb+1)-value)); break;
}
setflag(sum == 0, Z);
- setflag(sum >> 63, N);
+ setflag(sum >> (msb-1), N);
setflag((reg >> (value-1)) & 1, C);
return sum;
}
@@ -474,6 +499,7 @@ static inline uint64_t divd(struct sux *cpu, uint64_t reg, uint64_t value, uint6
setflag((sum >> 63), N);
return sum;
}
+
static inline void cmp(struct sux *cpu, uint64_t value, uint64_t reg, uint8_t thread) {
uint64_t sum = reg-value;
setflag(sum >> 63, N);
@@ -490,6 +516,40 @@ static inline uint64_t idr(struct sux *cpu, uint64_t reg, uint8_t inc, uint8_t t
return reg;
}
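+/* Leading-bit count: returns how many consecutive high-order bits of a size-byte operand equal the bit argument (leading zeros for bit = 0, leading ones for bit = 1). */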
+static inline uint64_t lbcnt(struct sux *cpu, uint64_t value, uint8_t bit, uint8_t size, uint8_t thread) {
+ size = (size > 8) ? 8 : size;
+ uint8_t msb = size*8;
+ if ((!bit && !value) || (bit && value == -1)) {
+ return msb;
+ }
+ uint64_t j = 0;
+ for (int i = msb-1; i >= 0 && ((value >> i) & 1) == bit; i--, j++);
+ setflag(j == 0, Z);
+ return j;
+}
+
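+/* BIT: copy bits 7 and 6 of the operand into N and V, and set Z if the operand ANDed with the register is zero. */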
+static inline void bit_test(struct sux *cpu, uint64_t reg, uint64_t value, uint8_t thread) {
+ setflag((value >> 7) & 1, N);
+ setflag((value >> 6) & 1, V);
+ setflag((value & reg) == 0, Z);
+}
+
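+/* SWP: swap the lower and upper halves of the operand; size selects the operand width. */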
+static inline uint64_t swap(struct sux *cpu, uint64_t reg, uint8_t size, uint8_t thread) {
+ size = (size > 7) ? 7 : size;
+ uint8_t half = ((size-1)*8) >> 1;
+ uint64_t mask = (-(uint64_t)1 >> ((7 - size) * 8));
+ uint64_t lo_mask = mask >> half;
+ uint64_t hi_mask = (mask << half) & mask;
+ return (((reg >> half) & lo_mask) | ((reg << half) & hi_mask));
+
+}
+
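+/* Population count: clears the lowest set bit each iteration (Kernighan's method). */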
+static inline uint64_t popcnt(struct sux *cpu, uint64_t value, uint8_t thread) {
+ uint64_t count = 0;
+ for (; value; count++, value &= value - 1);
+ return count;
+}
+
/* Increment, or Decrement memory. */
static inline void idm(struct sux *cpu, uint64_t address, uint8_t prefix, uint8_t inc, uint8_t thread) {
uint8_t size = (1 << ((prefix >> 4) & 3))-1;
@@ -508,6 +568,47 @@ static inline void idm(struct sux *cpu, uint64_t address, uint8_t prefix, uint8_
write_value(cpu, value, address, size, 1, 1);
}
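+/* Read-modify-write bit shift on memory: shft_type selects LSL (0), LSR (1), ROL (2), ROR (3) or ASR (4). */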
+static inline void bitshft_mem(struct sux *cpu, uint8_t shft_type, uint64_t shft_cnt, uint64_t address, uint8_t prefix, uint8_t thread) {
+ uint8_t size = (1 << ((prefix >> 4) & 3))-1;
+ uint64_t value = read_value(cpu, 0, address, size, 1, 0);
+ switch (shft_type) {
+ case 0: value = lsl(cpu, value, shft_cnt, size+1, thread); break;
+ case 1: value = lsr(cpu, value, shft_cnt, size+1, thread); break;
+ case 2: value = rol(cpu, value, shft_cnt, size+1, thread); break;
+ case 3: value = ror(cpu, value, shft_cnt, size+1, thread); break;
+ case 4: value = asr(cpu, value, shft_cnt, size+1, thread); break;
+ }
+ write_value(cpu, value, address, size, 1, 1);
+}
+
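+/* Read-modify-write bitwise NOT of a memory operand. */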
+static inline void not_mem(struct sux *cpu, uint64_t address, uint8_t prefix, uint8_t thread) {
+ uint8_t size = (1 << ((prefix >> 4) & 3))-1;
+ write_value(cpu, ~read_value(cpu, 0, address, size, 1, 0), address, size, 1, 1);
+}
+
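+/* lbcnt() on a memory operand, writing the count back to the same address. */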
+static inline void lbcnt_mem(struct sux *cpu, uint64_t address, uint8_t bit, uint8_t size, uint8_t thread) {
+ uint64_t value = read_value(cpu, 0, address, size, 1, 0);
+ write_value(cpu, lbcnt(cpu, value, bit, size, thread), address, size, 1, 1);
+}
+
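+/* swap() on a memory operand (read-modify-write). */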
+static inline void swap_mem(struct sux *cpu, uint64_t address, uint8_t size, uint8_t thread) {
+ uint64_t value = read_value(cpu, 0, address, size, 1, 0);
+ write_value(cpu, swap(cpu, value, size, thread), address, size, 1, 1);
+}
+
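+/* Memory move (MMV): without rep, copies one (size+1)-byte element from src to dst and returns the count minus the element size; with rep, copies the whole block in a single memcopy(). */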
+static inline uint64_t mem_move(struct sux *cpu, uint64_t n, uint64_t dst, uint64_t src, uint8_t rep, uint8_t size, uint8_t thread) {
+ if (!rep) {
+ uint64_t value = read_value(cpu, 0, src, size, 1, 1);
+ write_value(cpu, value, dst, size, 1, 1);
+ return n-(size+1);
+ } else {
+ if (src < mem_size && dst < mem_size) {
+ memcopy(addr+dst, addr+src, n*(size+1));
+ }
+ return 0;
+ }
+}
+
static inline uint64_t load(struct sux *cpu, uint64_t reg, uint64_t address, uint8_t size, uint8_t thread) {
uint64_t value = read_value(cpu, reg, address, size, 1, 1);
setflag(value == 0, Z);
@@ -519,3 +620,588 @@ static inline void store(struct sux *cpu, uint64_t address, uint64_t reg, uint8_
uint8_t size = (1 << ((prefix >> 4) & 3))-1;
write_value(cpu, reg, address, size, 1, 1);
}
+
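+/* Execute an instruction from the extended (prefixed) opcode set. */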
+static inline void exec_ext_inst(struct sux *cpu, uint8_t opcode, uint8_t prefix, union reg value, union reg address, uint8_t size, uint8_t thread) {
+ uint8_t addr_size = get_addrsize(prefix, ext_optype[opcode]);
+ uint8_t tmp = 0;
+ switch (opcode) {
+ case LEA_AB : /* LEA Absolute. */
+ case LEA_AX : /* LEA Absolute, Indexed with X. */
+ case LEA_AY : /* LEA Absolute, Indexed with Y. */
+ case LEA_AI : /* LEA Absolute Indirect. */
+ case LEA_AIX: /* LEA Absolute Indexed Indirect. */
+ case LEA_AIY: /* LEA Absolute Indirect Indexed. */
+ case LEA_Z : /* LEA Zero Matrix. */
+ case LEA_ZX : /* LEA Zero Matrix, Indexed with X. */
+ case LEA_ZY : /* LEA Zero Matrix, Indexed with Y. */
+ case LEA_IN : /* LEA Indirect. */
+ case LEA_IX : /* LEA Indexed Indirect. */
+ case LEA_IY : /* LEA Indirect Indexed. */
+ cpu->e = address.u64;
+ break;
+ case PEA_AB : /* PEA Absolute. */
+ case PEA_AX : /* PEA Absolute, Indexed with X. */
+ case PEA_AY : /* PEA Absolute, Indexed with Y. */
+ case PEA_AI : /* PEA Absolute Indirect. */
+ case PEA_AIX: /* PEA Absolute Indexed Indirect. */
+ case PEA_AIY: /* PEA Absolute Indirect Indexed. */
+ case PEA_Z : /* PEA Zero Matrix. */
+ case PEA_ZX : /* PEA Zero Matrix, Indexed with X. */
+ case PEA_ZY : /* PEA Zero Matrix, Indexed with Y. */
+ case PEA_IN : /* PEA Indirect. */
+ case PEA_IX : /* PEA Indexed Indirect. */
+ case PEA_IY : /* PEA Indirect Indexed. */
+ push(cpu, address.u64, 7, thread);
+ break;
+ case ADD_IMM: /* ADD Immediate. */
+ case ADD_AB : /* ADD Absolute. */
+ case ADD_Z : /* ADD Zero Matrix. */
+ case ADD_E : /* ADD E Indirect. */
+ cpu->a = adc(cpu, cpu->a, value.u64, 0, thread);
+ break;
+ case SUB_IMM: /* SUB Immediate. */
+ case SUB_AB : /* SUB Absolute. */
+ case SUB_Z : /* SUB Zero Matrix. */
+ case SUB_E : /* SUB E Indirect. */
+ cpu->a = adc(cpu, cpu->a, ~value.u64, 1, thread);
+ break;
+ case ADE_IMM: /* ADE Immediate. */
+ case ADE_AB : /* ADE Absolute. */
+ case ADE_Z : /* ADE Zero Matrix. */
+ cpu->e = adc(cpu, cpu->e, value.u64, 0, thread);
+ break;
+ case SBE_IMM: /* SBE Immediate. */
+ case SBE_AB : /* SBE Absolute. */
+ case SBE_Z : /* SBE Zero Matrix. */
+ cpu->e = adc(cpu, cpu->e, ~value.u64, 1, thread);
+ break;
+ case ADS_IMM: /* ADS Immediate. */
+ case ADS_AB : /* ADS Absolute. */
+ case ADS_Z : /* ADS Zero Matrix. */
+ case ADS_E : /* ADS E Indirect. */
+ cpu->sp = adc(cpu, cpu->sp, value.u64, 0, thread);
+ break;
+ case SBS_IMM: /* SBS Immediate. */
+ case SBS_AB : /* SBS Absolute. */
+ case SBS_Z : /* SBS Zero Matrix. */
+ case SBS_E : /* SBS E Indirect. */
+ cpu->sp = adc(cpu, cpu->sp, ~value.u64, 1, thread);
+ break;
+ case NOT_A : /* NOT Accumulator. */
+ cpu->a = ~cpu->a;
+ break;
+ case NOT_AB: /* NOT Absolute. */
+ case NOT_Z : /* NOT Zero Matrix. */
+ case NOT_E : /* NOT E Indirect. */
+ not_mem(cpu, address.u64, prefix, thread);
+ break;
+ case LLM_AB: /* LLM Absolute. */
+ case LLM_Z : /* LLM Zero Matrix. */
+ case LLM_E : /* LLM E Indirect. */
+ case LRM_AB: /* LRM Absolute. */
+ case LRM_Z : /* LRM Zero Matrix. */
+ case LRM_E : /* LRM E Indirect. */
+ case RLM_AB: /* RLM Absolute. */
+ case RLM_Z : /* RLM Zero Matrix. */
+ case RLM_E : /* RLM E Indirect. */
+ case RRM_AB: /* RRM Absolute. */
+ case RRM_Z : /* RRM Zero Matrix. */
+ case RRM_E : /* RRM E Indirect. */
+ case ARM_AB: /* ARM Absolute. */
+ case ARM_Z : /* ARM Zero Matrix. */
+ case ARM_E : /* ARM E Indirect. */
+ switch (opcode) {
+ case LLM_AB:
+ case LLM_Z :
+ case LLM_E : tmp = 0; break;
+ case LRM_AB:
+ case LRM_Z :
+ case LRM_E : tmp = 1; break;
+ case RLM_AB:
+ case RLM_Z :
+ case RLM_E : tmp = 2; break;
+ case RRM_AB:
+ case RRM_Z :
+ case RRM_E : tmp = 3; break;
+ case ARM_AB:
+ case ARM_Z :
+ case ARM_E : tmp = 4; break;
+ }
+ bitshft_mem(cpu, tmp, cpu->b, address.u64, prefix, thread);
+ break;
+ case PHE_IMP: push(cpu, cpu->e, size, thread); break; /* PusH E register. */
+ case PLE_IMP: cpu->e = pull(cpu, size, thread); break; /* PuLl E register. */
+ case CPE_IMM: /* CPE Immediate. */
+ case CPE_AB : /* CPE Absolute. */
+ case CPE_Z : /* CPE Zero Matrix. */
+ adc(cpu, cpu->e, ~value.u64, 1, thread);
+ break;
+ case ICE_AB : /* ICE Absolute. */
+ case ICE_Z : /* ICE Zero Matrix. */
+ case ICE_E : /* ICE E Indirect. */
+ break;
+ case LDS_IMM: /* LDS Immediate. */
+ case LDS_AB : /* LDS Absolute. */
+ case LDS_Z : /* LDS Zero Matrix. */
+ case LDS_E : /* LDS E Indirect. */
+ cpu->sp = load(cpu, cpu->sp, address.u64, size, thread);
+ break;
+ case DEE_IMP: cpu->e = idr(cpu, cpu->e, 0, thread); break; /* DEcrement E register. */
+ case INE_IMP: cpu->e = idr(cpu, cpu->e, 1, thread); break; /* INcrement E register. */
+ case DES_IMP: cpu->sp = idr(cpu, cpu->sp, 0, thread); break; /* DEcrement Stack pointer. */
+ case INS_IMP: cpu->sp = idr(cpu, cpu->sp, 1, thread); break; /* INcrement Stack pointer. */
+ case STS_AB: /* STS Absolute. */
+ case STS_Z : /* STS Zero Matrix. */
+ case STS_E : /* STS E Indirect. */
+ store(cpu, address.u64, cpu->sp, prefix, thread);
+ break;
+ case STE_AB: /* STE Absolute. */
+ case STE_Z : /* STE Zero Matrix. */
+ store(cpu, address.u64, cpu->e, prefix, thread);
+ break;
+ case STZ_AB: /* STZ Absolute. */
+ case STZ_Z : /* STZ Zero Matrix. */
+ case STZ_E : /* STZ E Indirect. */
+ store(cpu, address.u64, 0, prefix, thread);
+ break;
+ case SCO_IMM: /* SCO Immediate. */
+ case SCO_AB : /* SCO Absolute. */
+ case SCO_Z : /* SCO Zero Matrix. */
+ case SCO_E : /* SCO E Indirect. */
+ break;
+ case ECO_IMM: /* ECO Immediate. */
+ case ECO_AB : /* ECO Absolute. */
+ case ECO_Z : /* ECO Zero Matrix. */
+ case ECO_E : /* ECO E Indirect. */
+ break;
+ case CLZ_AB: /* CLZ Absolute. */
+ case CLZ_Z : /* CLZ Zero Matrix. */
+ case CLZ_E : /* CLZ E Indirect. */
+ cpu->a = lbcnt(cpu, value.u64, 0, size, thread);
+ break;
+ case CLO_AB: /* CLO Absolute. */
+ case CLO_Z : /* CLO Zero Matrix. */
+ case CLO_E : /* CLO E Indirect. */
+ cpu->a = lbcnt(cpu, value.u64, 1, size, thread);
+ break;
+ case BIT_AB: /* BIT Absolute. */
+ case BIT_Z : /* BIT Zero Matrix. */
+ case BIT_E : /* BIT E Indirect. */
+ bit_test(cpu, cpu->a, value.u64, thread);
+ break;
+ case MMV_IMP: /* Memory MoVe. */
+ cpu->b = mem_move(cpu, cpu->b, cpu->x, cpu->y, 0, size, thread);
+ break;
+ case SWP_A : /* SWaP lower half, with upper half. */
+ cpu->a = swap(cpu, cpu->a, size, thread);
+ break;
+ case SWP_AB: /* SWP Absolute. */
+ case SWP_Z : /* SWP Zero Matrix. */
+ case SWP_E : /* SWP E Indirect. */
+ swap_mem(cpu, address.u64, size, thread);
+ break;
+ case PCN_AB: /* PCN Absolute. */
+ case PCN_Z : /* PCN Zero Matrix. */
+ case PCN_E : /* PCN E Indirect. */
+ cpu->a = popcnt(cpu, value.u64, thread);
+ break;
+ case REP_REL: /* REP Relative. */
+ if (cpu->b != 0) {
+ cpu->b--;
+ cpu->pc = address.u64;
+ }
+ break;
+ case REQ_REL: /* REQ Relative. */
+ if (cpu->b != 0 && getflag(Z)) {
+ cpu->b--;
+ cpu->pc = address.u64;
+ }
+ break;
+ case RNE_REL: /* RNE Relative. */
+ if (cpu->b != 0 && !getflag(Z)) {
+ cpu->b--;
+ cpu->pc = address.u64;
+ }
+ break;
+ case LNG_IMM: /* LNG Immediate. */
+ case LNG_E : /* LNG E Indirect. */
+ if (getflag(N)) {
+ cpu->a = load(cpu, cpu->a, address.u64, size, thread);
+ }
+ break;
+ case LPO_IMM: /* LPO Immediate. */
+ case LPO_E : /* LPO E Indirect. */
+ if (!getflag(N)) {
+ cpu->a = load(cpu, cpu->a, address.u64, size, thread);
+ }
+ break;
+ case LCS_IMM: /* LCS Immediate. */
+ case LCS_E : /* LCS E Indirect. */
+ if (getflag(C)) {
+ cpu->a = load(cpu, cpu->a, address.u64, size, thread);
+ }
+ break;
+ case LCC_IMM: /* LCC Immediate. */
+ case LCC_E : /* LCC E Indirect. */
+ if (!getflag(C)) {
+ cpu->a = load(cpu, cpu->a, address.u64, size, thread);
+ }
+ break;
+ case LEQ_IMM: /* LEQ Immediate. */
+ case LEQ_E : /* LEQ E Indirect. */
+ if (getflag(Z)) {
+ cpu->a = load(cpu, cpu->a, address.u64, size, thread);
+ }
+ break;
+ case LNE_IMM: /* LNE Immediate. */
+ case LNE_E : /* LNE E Indirect. */
+ if (!getflag(Z)) {
+ cpu->a = load(cpu, cpu->a, address.u64, size, thread);
+ }
+ break;
+ case SNG_E : /* SNG E Indirect. */
+ if (getflag(N)) {
+ store(cpu, address.u64, cpu->a, prefix, thread);
+ }
+ break;
+ case SPO_E : /* SPO E Indirect. */
+ if (!getflag(N)) {
+ store(cpu, address.u64, cpu->a, prefix, thread);
+ }
+ break;
+ case SCS_E : /* SCS E Indirect. */
+ if (getflag(C)) {
+ store(cpu, address.u64, cpu->a, prefix, thread);
+ }
+ break;
+ case SCC_E : /* SCC E Indirect. */
+ if (!getflag(C)) {
+ store(cpu, address.u64, cpu->a, prefix, thread);
+ }
+ break;
+ case SEQ_E : /* SEQ E Indirect. */
+ if (getflag(Z)) {
+ store(cpu, address.u64, cpu->a, prefix, thread);
+ }
+ break;
+ case SNE_E : /* SNE E Indirect. */
+ if (!getflag(Z)) {
+ store(cpu, address.u64, cpu->a, prefix, thread);
+ }
+ break;
+
+ }
+}
+
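+/* Execute an instruction from the base opcode set. */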
+static inline void exec_base_inst(struct sux *cpu, uint8_t opcode, uint8_t prefix, union reg value, union reg address, uint8_t size, uint8_t thread) {
+ uint64_t *rem = 0;
+ switch (opcode) {
+ case CPS_IMP: /* Clear Processor Status. */
+ cpu->ps.u8[thread] = 0;
+ break;
+ case ADC_B: /* ADC B register. */
+ value.u64 = cpu->b; /* Falls Through. */
+ case ADC_IMM: /* ADC Immediate. */
+ case ADC_AB: /* ADC Absolute. */
+ case ADC_Z: /* ADC Zero Matrix. */
+ cpu->a = adc(cpu, cpu->a, value.u64, getflag(C), thread);
+ break;
+ case PHP_IMP: push(cpu, cpu->ps.u8[thread], 0, thread); break; /* PusH Processor status to stack. */
+ case PHA_IMP: push(cpu, cpu->a , size, thread); break; /* PusH Accumulator to stack. */
+ case PHB_IMP: push(cpu, cpu->b , size, thread); break; /* PusH B register to stack. */
+ case PHY_IMP: push(cpu, cpu->y , size, thread); break; /* PusH Y register to stack. */
+ case PHX_IMP: push(cpu, cpu->x , size, thread); break; /* PusH X register to stack. */
+ case TAY_IMP: cpu->y = transfer(cpu, cpu->a , value.u64, thread); break; /* Transfer Accumulator to Y. */
+ case TAX_IMP: cpu->x = transfer(cpu, cpu->a , value.u64, thread); break; /* Transfer Accumulator to X. */
+ case TYX_IMP: cpu->x = transfer(cpu, cpu->y , value.u64, thread); break; /* Transfer Y to X. */
+ case TYA_IMP: cpu->a = transfer(cpu, cpu->y , value.u64, thread); break; /* Transfer Y to Accumulator. */
+ case TXA_IMP: cpu->a = transfer(cpu, cpu->x , value.u64, thread); break; /* Transfer X to Accumulator. */
+ case TXY_IMP: cpu->y = transfer(cpu, cpu->x , value.u64, thread); break; /* Transfer X to Y. */
+ case TAB_IMP: cpu->b = transfer(cpu, cpu->a , value.u64, thread); break; /* Transfer Accumulator to B. */
+ case TSX_IMP: cpu->x = transfer(cpu, cpu->sp, value.u64, thread); break; /* Transfer Stack pointer to X. */
+ case TBA_IMP: cpu->a = transfer(cpu, cpu->b , value.u64, thread); break; /* Transfer B to Accumulator. */
+ case TXS_IMP: cpu->sp = transfer(cpu, cpu->x , value.u64, thread); break; /* Transfer X to Stack pointer. */
+ case BRA_REL: /* BRA Relative. */
+ case JMP_AB: /* JMP Absolute. */
+ case JMP_Z: /* JMP Zero Matrix. */
+ case JMP_IN: /* JMP Indirect. */
+ cpu->pc = address.u64;
+ break;
+ case SBC_B: /* SBC B register. */
+ value.u64 = cpu->b; /* Falls Through. */
+ case SBC_IMM: /* SBC Immediate. */
+ case SBC_AB: /* SBC Absolute. */
+ case SBC_Z: /* SBC Zero Matrix. */
+ cpu->a = adc(cpu, cpu->a, ~value.u64, getflag(C), thread);
+ break;
+ case PLP_IMP: cpu->ps.u8[thread] = pull(cpu, 0, thread); break; /* PuLl Processor status from stack. */
+ case PLA_IMP: cpu->a = pull(cpu, size, thread); break; /* PuLl Accumulator from stack. */
+ case PLB_IMP: cpu->b = pull(cpu, size, thread); break; /* PuLl B register from stack. */
+ case PLY_IMP: cpu->y = pull(cpu, size, thread); break; /* PuLl Y register from stack. */
+ case PLX_IMP: cpu->x = pull(cpu, size, thread); break; /* PuLl X register from stack. */
+ case AND_B: /* AND B register. */
+ value.u64 = cpu->b; /* Falls Through. */
+ case AND_IMM: /* AND Immediate. */
+ case AND_AB: /* AND Absolute. */
+ case AND_Z: /* AND Zero Matrix. */
+ cpu->a = and(cpu, cpu->a, value.u64, thread);
+ break;
+ case BPO_REL: /* BPO Relative. */
+ if (!getflag(N)) {
+ cpu->pc = address.u64;
+ }
+ break;
+ case ORA_B: /* ORA B register. */
+ value.u64 = cpu->b; /* Falls Through. */
+ case ORA_IMM: /* ORA Immediate. */
+ case ORA_AB: /* ORA Absolute. */
+ case ORA_Z: /* ORA Zero Matrix. */
+ cpu->a = or(cpu, cpu->a, value.u64, thread);
+ break;
+ case SEI_IMP: /* SEt Interrupt. */
+ setflag(1, I);
+ break;
+ case BNG_REL: /* BNG Relative. */
+ if (getflag(N)) {
+ cpu->pc = address.u64;
+ }
+ break;
+ case XOR_B: /* XOR B register. */
+ value.u64 = cpu->b; /* Falls Through. */
+ case XOR_IMM: /* XOR Immediate. */
+ case XOR_AB: /* XOR Absolute. */
+ case XOR_Z: /* XOR Zero Matrix. */
+ cpu->a = xor(cpu, cpu->a, value.u64, thread);
+ break;
+ case CLI_IMP: /* CLear Interrupt. */
+ setflag(0, I);
+ break;
+ case BCS_REL: /* BCS Relative. */
+ if (getflag(C)) {
+ cpu->pc = address.u64;
+ }
+ break;
+ case LSL_B: /* LSL B register. */
+ value.u64 = cpu->b; /* Falls Through. */
+ case LSL_IMM: /* LSL Immediate. */
+ case LSL_AB: /* LSL Absolute. */
+ case LSL_Z: /* LSL Zero Matrix. */
+ cpu->a = lsl(cpu, cpu->a, value.u64, 8, thread);
+ break;
+ case SEC_IMP: /* SEt Carry flag.*/
+ setflag(1, C);
+ break;
+ case STA_AB: /* STA Absolute. */
+ case STA_Z: /* STA Zero Matrix. */
+ case STA_ZX: /* STA Zero Matrix, Indexed with X. */
+ case STA_ZY: /* STA Zero Matrix, Indexed with Y. */
+ case STA_IN: /* STA Indirect. */
+ case STA_IX: /* STA Indexed Indirect. */
+ case STA_IY: /* STA Indirect Indexed. */
+ store(cpu, address.u64, cpu->a, prefix, thread);
+ break;
+ case STY_AB: /* STY Absolute. */
+ case STY_Z: /* STY Zero Matrix. */
+ case STY_IN: /* STY Indirect. */
+ store(cpu, address.u64, cpu->y, prefix, thread);
+ break;
+ case STX_AB: /* STX Absolute. */
+ case STX_Z: /* STX Zero Matrix. */
+ case STX_IN: /* STX Indirect. */
+ store(cpu, address.u64, cpu->x, prefix, thread);
+ break;
+ case STB_AB: /* STB Absolute. */
+ case STB_Z: /* STB Zero Matrix. */
+ case STB_ZX: /* STB Zero Matrix, Indexed with X. */
+ case STB_ZY: /* STB Zero Matrix, Indexed with Y. */
+ case STB_IN: /* STB Indirect. */
+ case STB_IX: /* STB Indexed Indirect. */
+ case STB_IY: /* STB Indirect Indexed. */
+ store(cpu, address.u64, cpu->b, prefix, thread);
+ break;
+ case BCC_REL: /* BCC Relative. */
+ if (!getflag(C)) {
+ cpu->pc = address.u64;
+ }
+ break;
+ case LSR_B: /* LSR B register. */
+ value.u64 = cpu->b; /* Falls Through. */
+ case LSR_IMM: /* LSR Immediate. */
+ case LSR_AB: /* LSR Absolute. */
+ case LSR_Z: /* LSR Zero Matrix. */
+ cpu->a = lsr(cpu, cpu->a, value.u64, 8, thread);
+ break;
+ case ASR_B: /* ASR B register. */
+ value.u64 = cpu->b; /* Falls Through. */
+ case ASR_IMM: /* ASR Immediate. */
+ case ASR_AB: /* ASR Absolute. */
+ case ASR_Z: /* ASR Zero Matrix. */
+ cpu->a = asr(cpu, cpu->a, value.u64, 8, thread);
+ break;
+ case CLC_IMP: /* CLear Carry flag. */
+ setflag(0, C);
+ break;
+ case LDB_IMM: /* LDB Immediate. */
+ case LDB_AB: /* LDB Absolute. */
+ case LDB_Z: /* LDB Zero Matrix. */
+ case LDB_ZX: /* LDB Zero Matrix, Indexed with X. */
+ case LDB_ZY: /* LDB Zero Matrix, Indexed with Y. */
+ case LDB_IN: /* LDB Indirect. */
+ case LDB_IX: /* LDB Indexed Indirect. */
+ case LDB_IY: /* LDB Indirect Indexed. */
+ cpu->b = load(cpu, cpu->b, address.u64, size, thread);
+ break;
+ case LDA_IMM: /* LDA Immediate. */
+ case LDA_AB: /* LDA Absolute. */
+ case LDA_Z: /* LDA Zero Matrix. */
+ case LDA_ZX: /* LDA Zero Matrix, Indexed with X. */
+ case LDA_ZY: /* LDA Zero Matrix, Indexed with Y. */
+ case LDA_IN: /* LDA Indirect. */
+ case LDA_IX: /* LDA Indexed Indirect. */
+ case LDA_IY: /* LDA Indirect Indexed. */
+ cpu->a = load(cpu, cpu->a, address.u64, size, thread);
+ break;
+ case LDY_IMM: /* LDY Immediate. */
+ case LDY_AB: /* LDY Absolute. */
+ case LDY_Z: /* LDY Zero Matrix. */
+ case LDY_IN: /* LDY Indirect. */
+ cpu->y = load(cpu, cpu->y, address.u64, size, thread);
+ break;
+ case LDX_IMM: /* LDX Immediate. */
+ case LDX_AB: /* LDX Absolute. */
+ case LDX_Z: /* LDX Zero Matrix. */
+ case LDX_IN: /* LDX Indirect. */
+ cpu->x = load(cpu, cpu->x, address.u64, size, thread);
+ break;
+ case BEQ_REL: /* BEQ Relative. */
+ if (getflag(Z)) {
+ cpu->pc = address.u64;
+ }
+ break;
+ case ROL_B: /* ROL B register. */
+ value.u64 = cpu->b; /* Falls Through. */
+ case ROL_IMM: /* ROL Immediate. */
+ case ROL_AB: /* ROL Absolute. */
+ case ROL_Z: /* ROL Zero Matrix. */
+ cpu->a = rol(cpu, cpu->a, value.u64, 8, thread);
+ break;
+ case BNE_REL: /* BNE Relative. */
+ if (!getflag(Z)) {
+ cpu->pc = address.u64;
+ }
+ break;
+ case ROR_B: /* ROR B register. */
+ value.u64 = cpu->b; /* Falls Through. */
+ case ROR_IMM: /* ROR Immediate. */
+ case ROR_AB: /* ROR Absolute. */
+ case ROR_Z: /* ROR Zero Matrix. */
+ cpu->a = ror(cpu, cpu->a, value.u64, 8, thread);
+ break;
+ case BVS_REL: /* BVS Relative. */
+ if (getflag(V)) {
+ cpu->pc = address.u64;
+ }
+ break;
+ case MUL_B: /* MUL B register. */
+ value.u64 = cpu->b; /* Falls Through. */
+ case MUL_IMM: /* MUL Immediate. */
+ case MUL_AB: /* MUL Absolute. */
+ case MUL_Z: /* MUL Zero Matrix. */
+ cpu->a = mul(cpu, cpu->a, value.u64, thread);
+ break;
+ case BVC_REL: /* BVC Relative. */
+ if (!getflag(V)) {
+ cpu->pc = address.u64;
+ }
+ break;
+ case DIV_B: /* DIV B register. */
+ value.u64 = cpu->b; /* Falls Through. */
+ case DIV_IMM: /* DIV Immediate. */
+ case DIV_AB: /* DIV Absolute. */
+ case DIV_Z: /* DIV Zero Matrix. */
+ rem = (opcode != DIV_B) ? &cpu->b : &cpu->x;
+ cpu->a = divd(cpu, cpu->a, value.u64, rem, thread);
+ break;
+ case CLV_IMP: /* CLear oVerflow flag. */
+ setflag(0, V);
+ break;
+ case CPB_IMM: /* CPB Immediate. */
+ case CPB_AB: /* CPB Absolute. */
+ case CPB_Z: /* CPB Zero Matrix. */
+ case CPB_IN: /* CPB Indirect. */
+ case CPB_IX: /* CPB Indexed Indirect. */
+ case CPB_IY: /* CPB Indirect Indexed. */
+ adc(cpu, cpu->b, ~value.u64, 1, thread);
+ break;
+ case CMP_B: /* CMP B register. */
+ value.u64 = cpu->b; /* Falls Through. */
+ case CMP_IMM: /* CMP Immediate. */
+ case CMP_AB: /* CMP Absolute. */
+ case CMP_Z: /* CMP Zero Matrix. */
+ case CMP_IN: /* CMP Indirect. */
+ case CMP_IX: /* CMP Indexed Indirect. */
+ case CMP_IY: /* CMP Indirect Indexed. */
+ adc(cpu, cpu->a, ~value.u64, 1, thread);
+ break;
+ case CPY_IMM: /* CPY Immediate. */
+ case CPY_AB: /* CPY Absolute. */
+ case CPY_Z: /* CPY Zero Matrix. */
+ adc(cpu, cpu->y, ~value.u64, 1, thread);
+ break;
+ case CPX_IMM: /* CPX Immediate. */
+ case CPX_AB: /* CPX Absolute. */
+ case CPX_Z: /* CPX Zero Matrix. */
+ adc(cpu, cpu->x, ~value.u64, 1, thread);
+ break;
+ case INC_IMP: cpu->a = idr(cpu, cpu->a, 1, thread); break;
+ case INB_IMP: cpu->b = idr(cpu, cpu->b, 1, thread); break;
+ case INY_IMP: cpu->y = idr(cpu, cpu->y, 1, thread); break;
+ case INX_IMP: cpu->x = idr(cpu, cpu->x, 1, thread); break;
+ case DEC_IMP: cpu->a = idr(cpu, cpu->a, 0, thread); break;
+ case DEB_IMP: cpu->b = idr(cpu, cpu->b, 0, thread); break;
+ case DEY_IMP: cpu->y = idr(cpu, cpu->y, 0, thread); break;
+ case DEX_IMP: cpu->x = idr(cpu, cpu->x, 0, thread); break;
+ case JSR_IN: /* JSR Indirect. */
+ case JSR_AB: /* Jump to SubRoutine. */
+ case JSR_Z: /* JSR Zero Matrix. */
+ push(cpu, cpu->pc, (size) ? size : 7, thread);
+ cpu->pc = address.u64;
+ break;
+ case INC_AB: /* INC Absolute. */
+ case INC_Z: /* INC Zero Matrix. */
+ idm(cpu, address.u64, prefix, 1, thread);
+ break;
+ case NOP_IMP: /* No OPeration. */
+ break;
+ case RTI_IMP: /* ReTurn from Interrupt routine. */
+ cpu->ps.u8[thread] = pull(cpu, 0, thread);
+ size = 0; /* Falls Through. */
+ case RTS_IMP: /* ReTurn from Subroutine. */
+ cpu->pc = pull(cpu, (size) ? size : 7, thread);
+ break;
+ case DEC_AB: /* DEC Absolute. */
+ case DEC_Z: /* DEC Zero Matrix. */
+ idm(cpu, address.u64, prefix, 0, thread);
+ break;
+ case BRK_IMP: /* BReaK. */
+ case WAI_IMP: /* WAit for Interrupt. */
+ if (opcode == WAI_IMP) {
+ pthread_mutex_lock(&main_mutex);
+ pthread_cond_signal(&main_cond);
+ pthread_mutex_unlock(&main_mutex);
+ pthread_mutex_lock(&mutex);
+ pthread_cond_wait(&cond, &mutex);
+ pthread_mutex_unlock(&mutex);
+ }
+ push(cpu, cpu->pc, 7, thread);
+ push(cpu, cpu->ps.u8[thread], 0, thread);
+ setflag(1, I);
+ value.u64 = read_value(cpu, 0, (opcode == BRK_IMP) ? 0xFFE0 : 0xFFA0, 7, 1, 0);
+ if (opcode == WAI_IMP) {
+ kbd_rdy &= (uint8_t)~(1 << thread);
+ }
+ cpu->pc = value.u64;
+ default:
+ break;
+ }
+}