From 59cd3a3e1ac4970be7dd905485d31e2ae2a38efd Mon Sep 17 00:00:00 2001 From: ergo720 <45463469+ergo720@users.noreply.github.com> Date: Sat, 22 Jun 2024 11:45:07 +0200 Subject: [PATCH] Check for interrupts after the execution of every instruction, instead of only at the end of a code block --- lib86cpu/core/breakpoint.cpp | 24 +- lib86cpu/core/breakpoint.h | 4 +- lib86cpu/core/emitter/emitter_common.h | 6 +- lib86cpu/core/emitter/x64/jit.cpp | 193 ++++++---------- lib86cpu/core/emitter/x64/jit.h | 12 +- lib86cpu/core/emitter/x64/support.cpp | 24 -- lib86cpu/core/fpu_instructions.cpp | 54 ++--- lib86cpu/core/helpers.cpp | 73 +++--- lib86cpu/core/helpers.h | 16 +- lib86cpu/core/instructions.cpp | 301 +++++++++++++------------ lib86cpu/core/instructions.h | 34 +-- lib86cpu/core/internal.h | 34 ++- lib86cpu/core/linux/clock.cpp | 6 +- lib86cpu/core/memory_management.cpp | 128 ++++++----- lib86cpu/core/memory_management.h | 50 ++-- lib86cpu/core/translate.cpp | 260 ++++++++++----------- lib86cpu/core/windows/clock.cpp | 6 +- lib86cpu/dbg/debugger.cpp | 50 ++-- lib86cpu/interface.cpp | 20 +- lib86cpu/lib86cpu_priv.h | 8 +- lib86cpu/support.cpp | 6 +- lib86cpu/support.h | 2 - 22 files changed, 594 insertions(+), 717 deletions(-) diff --git a/lib86cpu/core/breakpoint.cpp b/lib86cpu/core/breakpoint.cpp index a373be2..403917a 100644 --- a/lib86cpu/core/breakpoint.cpp +++ b/lib86cpu/core/breakpoint.cpp @@ -40,12 +40,12 @@ cpu_check_watchpoint_overlap(cpu_t *cpu, addr_t addr, size_t size, int idx) } static void -cpu_check_watchpoints(cpu_t *cpu, addr_t addr, int dr_idx, int type, uint32_t eip) +cpu_check_watchpoints(cpu_t *cpu, addr_t addr, int dr_idx, int type) { bool match = false; int dr7_type = cpu_get_watchpoint_type(cpu, dr_idx); if (type == DR7_TYPE_DATA_W) { - if (((dr7_type == DR7_TYPE_DATA_W) || (dr7_type == DR7_TYPE_DATA_RW)) && !(cpu->cpu_flags & CPU_INHIBIT_DBG_TRAP)) { + if (((dr7_type == DR7_TYPE_DATA_W) || (dr7_type == DR7_TYPE_DATA_RW))) { match = true; } } @@ -54,36 +54,40 @@ cpu_check_watchpoints(cpu_t *cpu, addr_t addr, int dr_idx, int type, uint32_t ei match = true; } } - else if ((type == dr7_type) && !(cpu->cpu_flags & CPU_INHIBIT_DBG_TRAP)) { // either DR7_TYPE_IO_RW or DR7_TYPE_DATA_RW + else if (type == dr7_type) { // either DR7_TYPE_IO_RW or DR7_TYPE_DATA_RW match = true; } if (match) { cpu->cpu_ctx.regs.dr[6] |= (1 << dr_idx); - cpu->cpu_ctx.exp_info.exp_data.fault_addr = addr; + cpu->cpu_ctx.exp_info.exp_data.fault_addr = 0; cpu->cpu_ctx.exp_info.exp_data.code = 0; cpu->cpu_ctx.exp_info.exp_data.idx = EXP_DB; - cpu->cpu_ctx.exp_info.exp_data.eip = eip; - throw host_exp_t::db_exp; + if (type == DR7_TYPE_INSTR) { + throw host_exp_t::db_exp; + } + else { + cpu->raise_int_fn(&cpu->cpu_ctx, CPU_DBG_TRAP_INT); + } } } void -cpu_check_data_watchpoints(cpu_t *cpu, addr_t addr, size_t size, int type, uint32_t eip) +cpu_check_data_watchpoints(cpu_t *cpu, addr_t addr, size_t size, int type) { for (const auto &wp : cpu->wp_data) { if ((wp.watch_addr <= (addr + size - 1)) && (addr <= wp.watch_end)) [[unlikely]] { - cpu_check_watchpoints(cpu, addr, wp.dr_idx, type, eip); + cpu_check_watchpoints(cpu, addr, wp.dr_idx, type); } } } void -cpu_check_io_watchpoints(cpu_t *cpu, port_t port, size_t size, int type, uint32_t eip) +cpu_check_io_watchpoints(cpu_t *cpu, port_t port, size_t size, int type) { for (const auto &wp : cpu->wp_io) { if ((wp.watch_addr <= (port + size - 1)) && (port <= wp.watch_end)) [[unlikely]] { - cpu_check_watchpoints(cpu, port, wp.dr_idx, type, eip); + 
cpu_check_watchpoints(cpu, port, wp.dr_idx, type); } } } diff --git a/lib86cpu/core/breakpoint.h b/lib86cpu/core/breakpoint.h index dfe86fb..6aa51c0 100644 --- a/lib86cpu/core/breakpoint.h +++ b/lib86cpu/core/breakpoint.h @@ -7,8 +7,8 @@ #pragma once -void cpu_check_data_watchpoints(cpu_t *cpu, addr_t addr, size_t size, int type, uint32_t eip); -void cpu_check_io_watchpoints(cpu_t *cpu, port_t port, size_t size, int type, uint32_t eip); +void cpu_check_data_watchpoints(cpu_t *cpu, addr_t addr, size_t size, int type); +void cpu_check_io_watchpoints(cpu_t *cpu, port_t port, size_t size, int type); bool cpu_check_watchpoint_enabled(cpu_t *cpu, int idx); int cpu_get_watchpoint_type(cpu_t *cpu, int idx); size_t cpu_get_watchpoint_length(cpu_t *cpu, int idx); diff --git a/lib86cpu/core/emitter/emitter_common.h b/lib86cpu/core/emitter/emitter_common.h index 5705f32..e5b3e7a 100644 --- a/lib86cpu/core/emitter/emitter_common.h +++ b/lib86cpu/core/emitter/emitter_common.h @@ -18,8 +18,6 @@ #define CPU_CTX_HFLG offsetof(cpu_ctx_t, hflags) #define CPU_CTX_EXP offsetof(cpu_ctx_t, exp_info) #define CPU_CTX_INT offsetof(cpu_ctx_t, int_pending) -#define CPU_CTX_EXIT offsetof(cpu_ctx_t, exit_requested) -#define CPU_CTX_HALTED offsetof(cpu_ctx_t, is_halted) #define CPU_CTX_EAX offsetof(cpu_ctx_t, regs.eax) #define CPU_CTX_ECX offsetof(cpu_ctx_t, regs.ecx) @@ -122,7 +120,6 @@ #define CPU_EXP_ADDR offsetof(cpu_ctx_t, exp_info.exp_data.fault_addr) #define CPU_EXP_CODE offsetof(cpu_ctx_t, exp_info.exp_data.code) #define CPU_EXP_IDX offsetof(cpu_ctx_t, exp_info.exp_data.idx) -#define CPU_EXP_EIP offsetof(cpu_ctx_t, exp_info.exp_data.eip) #define REG_off(reg) get_reg_offset(reg) #define REG_idx(reg) get_reg_idx(reg) @@ -202,7 +199,8 @@ inline constexpr auto all_callable_funcs = std::make_tuple( idivw_helper, idivb_helper, cpuid_helper, - hlt_helper, + hlt_helper, + hlt_helper, fxsave_helper, fxrstor_helper, fpu_update_tag, diff --git a/lib86cpu/core/emitter/x64/jit.cpp b/lib86cpu/core/emitter/x64/jit.cpp index c3a1d82..aadd2e9 100644 --- a/lib86cpu/core/emitter/x64/jit.cpp +++ b/lib86cpu/core/emitter/x64/jit.cpp @@ -366,7 +366,6 @@ static_assert((LOCAL_VARS_off(0) & 15) == 0); // must be 16 byte aligned so that #define PUSH(dst) m_a.push(dst) #define POP(dst) m_a.pop(dst) #define INT3() m_a.int3() -#define PAUSE() m_a.pause() #define BR_UNCOND(dst) m_a.jmp(dst) #define BR_EQ(label) m_a.je(label) @@ -448,10 +447,10 @@ static_assert((LOCAL_VARS_off(0) & 15) == 0); // must be 16 byte aligned so that #define RAISEin_no_param_t() gen_raise_exp_inline() #define RAISEin_no_param_f() gen_raise_exp_inline() -#define RAISEin_t(addr, code, idx, eip) gen_raise_exp_inline(addr, code, idx, eip) -#define RAISEin_f(addr, code, idx, eip) gen_raise_exp_inline(addr, code, idx, eip) -#define RAISEin0_t(idx) gen_raise_exp_inline(0, 0, idx, m_cpu->instr_eip) -#define RAISEin0_f(idx) gen_raise_exp_inline(0, 0, idx, m_cpu->instr_eip) +#define RAISEin_t(addr, code, idx) gen_raise_exp_inline(addr, code, idx) +#define RAISEin_f(addr, code, idx) gen_raise_exp_inline(addr, code, idx) +#define RAISEin0_t(idx) gen_raise_exp_inline(0, 0, idx) +#define RAISEin0_f(idx) gen_raise_exp_inline(0, 0, idx) #define SIZED_REG(reg, size) reg_to_sized_reg.find(reg | size)->second #define GET_REG(op) get_register_op(instr, op) @@ -641,55 +640,46 @@ lc86_jit::gen_exit_func() } } +template +void lc86_jit::gen_interrupt_check() +{ + if constexpr (update_eip) { + MOV(MEMD32(RCX, CPU_CTX_EIP), m_cpu->virt_pc - m_cpu->cpu_ctx.regs.cs_hidden.base); + } + 
m_next_instr = m_a.newLabel(); + MOV(EDX, MEMD32(RCX, CPU_CTX_INT)); + MOV(EAX, MEMD32(RCX, CPU_CTX_EFLAGS)); + OR(EAX, ~(CPU_HW_INT)); + AND(EDX, EAX); // mask out pending hw interrupt when if=0 + BR_EQ(m_next_instr); + BR_UNCOND(m_exit_int); + m_a.bind(m_next_instr); +} + void -lc86_jit::gen_interrupt_check() +lc86_jit::gen_timeout_check() { - Label no_int = m_a.newLabel(); if (m_cpu->cpu_flags & CPU_TIMEOUT) { Label no_timeout = m_a.newLabel(); CALL_F(&cpu_timer_helper); - TEST(EAX, EAX); - BR_EQ(no_int); - TEST(EAX, CPU_HW_INT | CPU_NON_HW_INT); - BR_NE(no_timeout); - MOV(MEMD8(RCX, CPU_CTX_EXIT), 1); // request an exit + TEST(EAX, CPU_TIMEOUT_INT); + BR_EQ(no_timeout); + XOR(EAX, EAX); + gen_epilogue_main(); m_a.bind(no_timeout); } - else { - MOV(EDX, MEMD32(RCX, CPU_CTX_INT)); - TEST(EDX, EDX); - BR_EQ(no_int); - MOV(RAX, &cpu_do_int); - CALL(RAX); - } - XOR(EAX, EAX); - gen_epilogue_main(); - m_a.bind(no_int); } void lc86_jit::gen_no_link_checks() { - if ((m_cpu->disas_ctx.flags & DISAS_FLG_INHIBIT_INT) && (m_cpu->cpu_ctx.hflags & HFLG_INHIBIT_INT)) { - assert(m_cpu->disas_ctx.flags & DISAS_FLG_ONE_INSTR); - - MOV(EAX, MEMD32(RCX, CPU_CTX_HFLG)); - AND(EAX, ~HFLG_INHIBIT_INT); - MOV(MEMD32(RCX, CPU_CTX_HFLG), EAX); - m_cpu->cpu_flags |= CPU_FORCE_INSERT; - } - - if (m_cpu->cpu_flags & CPU_INHIBIT_DBG_TRAP) { - LD_R32(EAX, CPU_CTX_EIP); - gen_raise_exp_inline(0, 0, EXP_DB, EAX); - return; - } + gen_interrupt_check(); if (gen_check_rf_single_step()) { return; } - gen_interrupt_check(); + gen_timeout_check(); } void @@ -717,7 +707,7 @@ lc86_jit::gen_prologue_main() PUSH(RBX); SUB(RSP, get_jit_stack_required()); - m_needs_epilogue = true; + m_exit_int = m_a.newLabel(); } template @@ -742,33 +732,36 @@ lc86_jit::gen_tail_call(x86::Gp addr) void lc86_jit::gen_tc_epilogue() { - // update the eip if we stopped decoding without a terminating instr if (m_cpu->translate_next == 1) { assert((m_cpu->disas_ctx.flags & (DISAS_FLG_PAGE_CROSS | DISAS_FLG_PAGE_CROSS_NEXT | DISAS_FLG_ONE_INSTR)) != 0); assert((m_cpu->tc->flags & TC_FLG_LINK_MASK) == 0); - MOV(MEMD32(RCX, CPU_CTX_EIP), m_cpu->virt_pc - m_cpu->cpu_ctx.regs.cs_hidden.base); - gen_no_link_checks(); + // The interrupt check is already generated by a terminating instr, so skip it + if (gen_check_rf_single_step() == false) { + gen_timeout_check(); + gen_epilogue_main(); + } } - if (m_needs_epilogue) { - gen_epilogue_main(); - } + // Generate the interrupt handling code block which is reached when an interrupt happens between instructions of the tc + m_a.bind(m_exit_int); + MOV(RAX, &cpu_do_int); + CALL(RAX); + XOR(EAX, EAX); + gen_epilogue_main(); } -template -void lc86_jit::gen_raise_exp_inline(T1 fault_addr, T2 code, T3 idx, T4 eip) +template +void lc86_jit::gen_raise_exp_inline(T1 fault_addr, T2 code, T3 idx) { // should be false when generating a conditional exception, true when taking an unconditional exception if constexpr (terminates) { - m_needs_epilogue = false; m_cpu->translate_next = 0; } MOV(MEMD32(RCX, CPU_EXP_ADDR), fault_addr); MOV(MEMD16(RCX, CPU_EXP_CODE), code); MOV(MEMD16(RCX, CPU_EXP_IDX), idx); - MOV(MEMD32(RCX, CPU_EXP_EIP), eip); MOV(RAX, &cpu_raise_exception<>); CALL(RAX); gen_epilogue_main(); @@ -779,7 +772,6 @@ void lc86_jit::gen_raise_exp_inline() { // same as the function above, but it doesn't populate the exception data if constexpr (terminates) { - m_needs_epilogue = false; m_cpu->translate_next = 0; } @@ -793,12 +785,13 @@ lc86_jit::gen_hook(hook_t hook_addr) { CALL_F(hook_addr); gen_link_ret(); + 
m_cpu->translate_next = 0; } void -lc86_jit::gen_raise_exp_inline(uint32_t fault_addr, uint16_t code, uint16_t idx, uint32_t eip) +lc86_jit::gen_raise_exp_inline(uint32_t fault_addr, uint16_t code, uint16_t idx) { - gen_raise_exp_inline(fault_addr, code, idx, eip); + gen_raise_exp_inline(fault_addr, code, idx); } bool @@ -823,8 +816,7 @@ lc86_jit::gen_check_rf_single_step() MOV(EDX, MEMD32(RCX, CPU_CTX_DR6)); OR(EDX, DR6_BS_MASK); MOV(MEMD32(RCX, CPU_CTX_DR6), EDX); - MOV(EDX, MEMD32(RCX, CPU_CTX_EIP)); - RAISEin_f(0, 0, EXP_DB, EDX); + RAISEin_f(0, 0, EXP_DB); return true; } } @@ -840,8 +832,6 @@ void lc86_jit::gen_link_direct(addr_t dst_pc, addr_t *next_pc, T target_pc) // and only emit the taken code path. If it's in a reg, it must be ebx because otherwise a volatile reg might be trashed by the timer and // interrupt calls in gen_no_link_checks - m_needs_epilogue = false; - gen_no_link_checks(); // vec_addr: instr_pc, dst_pc, next_pc @@ -981,8 +971,6 @@ void lc86_jit::gen_link_direct(addr_t dst_pc, addr_t *next_pc, T target_pc) void lc86_jit::gen_link_dst_only() { - m_needs_epilogue = false; - gen_no_link_checks(); m_cpu->tc->flags |= (1 & TC_FLG_NUM_JMP); @@ -995,8 +983,6 @@ lc86_jit::gen_link_dst_only() void lc86_jit::gen_link_indirect() { - m_needs_epilogue = false; - gen_no_link_checks(); MOV(RDX, m_cpu->tc); @@ -1017,8 +1003,6 @@ void lc86_jit::gen_link_dst_cond(T &&lambda) { // condition result should be in ebx; if true, jumps to dst, otherwise jumps to next - m_needs_epilogue = false; - gen_no_link_checks(); if ((m_cpu->virt_pc & ~PAGE_MASK) == (m_cpu->virt_pc + m_cpu->instr_bytes & ~PAGE_MASK)) { @@ -1871,7 +1855,7 @@ void lc86_jit::load_mem(uint8_t size, uint8_t is_priv) { // RCX: cpu_ctx, EDX: addr, R8: instr_eip, R9B: is_priv - // for SIZE128/80 -> RCX: ptr to stack-allocated uint128/80_t, RDX: cpu_ctx, R8: addr, R9: instr_eip, stack: is_priv + // for SIZE128/80 -> RCX: ptr to stack-allocated uint128/80_t, RDX: cpu_ctx, R8: addr switch (size) { @@ -1879,8 +1863,7 @@ lc86_jit::load_mem(uint8_t size, uint8_t is_priv) LEA(RCX, MEMD64(RSP, LOCAL_VARS_off(0))); MOV(R8D, EDX); MOV(RDX, &m_cpu->cpu_ctx); - MOV(R9D, m_cpu->instr_eip); - MOV(MEMD8(RSP, LOCAL_VARS_off(2)), is_priv); + MOV(R9B, is_priv); CALL_F(&mem_read_helper); break; @@ -1888,14 +1871,12 @@ lc86_jit::load_mem(uint8_t size, uint8_t is_priv) LEA(RCX, MEMD64(RSP, LOCAL_VARS_off(0))); MOV(R8D, EDX); MOV(RDX, &m_cpu->cpu_ctx); - MOV(R9D, m_cpu->instr_eip); - MOV(MEMD8(RSP, LOCAL_VARS_off(2)), is_priv); + MOV(R9B, is_priv); CALL_F(&mem_read_helper); break; default: - MOV(R9B, is_priv); - MOV(R8D, m_cpu->instr_eip); + MOV(R8B, is_priv); switch (size) { @@ -1924,10 +1905,9 @@ lc86_jit::load_mem(uint8_t size, uint8_t is_priv) template void lc86_jit::store_mem(T val, uint8_t size, uint8_t is_priv) { - // RCX: cpu_ctx, EDX: addr, R8B/R8W/R8D: val, R9D: instr_eip, stack: is_priv + // RCX: cpu_ctx, EDX: addr - MOV(MEMD32(RSP, STACK_ARGS_off), is_priv); - MOV(R9D, m_cpu->instr_eip); + MOV(R9B, is_priv); bool is_r8 = false; if constexpr (!std::is_integral_v) { @@ -1990,8 +1970,6 @@ lc86_jit::load_io(uint8_t size_mode) { // RCX: cpu_ctx, EDX: port - MOV(R8D, m_cpu->instr_eip); - switch (size_mode) { case SIZE32: @@ -2016,8 +1994,6 @@ lc86_jit::store_io(uint8_t size_mode) { // RCX: cpu_ctx, EDX: port, R8B/R8W/R8D: val - MOV(R9D, m_cpu->instr_eip); - switch (size_mode) { case SIZE32: @@ -3091,7 +3067,6 @@ void lc86_jit::load_sys_seg_reg(decoded_instr *instr) }); Label ok = m_a.newLabel(); - MOV(R8D, m_cpu->instr_eip); MOV(DX, AX); if 
constexpr (idx == LDTR_idx) { CALL_F(&lldt_helper); @@ -3209,7 +3184,6 @@ void lc86_jit::verx(decoded_instr *instr) LD_MEMs(SIZE16); }); - MOV(R8D, m_cpu->instr_eip); MOV(DX, AX); if constexpr (is_verr) { CALL_F(&verrw_helper); @@ -3241,7 +3215,6 @@ void lc86_jit::lxs(decoded_instr *instr) LD_MEMs(SIZE16); if (IS_PE_NOT_VM86()) { - MOV(R8D, m_cpu->instr_eip); MOV(DX, AX); switch (idx) @@ -3392,7 +3365,7 @@ void lc86_jit::int_(decoded_instr *instr) MOV(MEMD32(RCX, CPU_EXP_ADDR), 0); MOV(MEMD16(RCX, CPU_EXP_CODE), 0); MOV(MEMD16(RCX, CPU_EXP_IDX), EXP_OF); - MOV(MEMD32(RCX, CPU_EXP_EIP), m_cpu->instr_eip + m_cpu->instr_bytes); + ST_R32(CPU_CTX_EIP, m_cpu->instr_eip + m_cpu->instr_bytes); MOV(RAX, &cpu_raise_exception); CALL(RAX); gen_epilogue_main(); @@ -3411,12 +3384,11 @@ void lc86_jit::int_(decoded_instr *instr) LIB86CPU_ABORT_msg("Unknown int instruction specified with index %u", idx); } - MOV(MEMD32(RCX, CPU_EXP_EIP), m_cpu->instr_eip + m_cpu->instr_bytes); + ST_R32(CPU_CTX_EIP, m_cpu->instr_eip + m_cpu->instr_bytes); MOV(RAX, &cpu_raise_exception); CALL(RAX); gen_epilogue_main(); - m_needs_epilogue = false; m_cpu->translate_next = 0; } } @@ -4073,7 +4045,6 @@ lc86_jit::call(decoded_instr *instr) if (IS_PE_NOT_VM86()) { Label exp = m_a.newLabel(); - MOV(MEMD32(RSP, STACK_ARGS_off + 8), m_cpu->instr_eip); MOV(MEMD32(RSP, STACK_ARGS_off), ret_eip); MOV(R9B, m_cpu->size_mode); MOV(R8D, call_eip); @@ -4156,7 +4127,6 @@ lc86_jit::call(decoded_instr *instr) LD_MEMs(SIZE16); if (IS_PE_NOT_VM86()) { Label exp = m_a.newLabel(); - MOV(MEMD32(RSP, STACK_ARGS_off + 8), m_cpu->instr_eip); MOV(MEMD32(RSP, STACK_ARGS_off), ret_eip); MOV(R9B, m_cpu->size_mode); MOV(R8D, EBX); @@ -5489,7 +5459,6 @@ lc86_jit::fxrstor(decoded_instr *instr) }, [this](const op_info rm) { - MOV(R8D, m_cpu->instr_eip); CALL_F(&fxrstor_helper); Label ok = m_a.newLabel(); TEST(EAX, EAX); @@ -5519,7 +5488,6 @@ lc86_jit::fxsave(decoded_instr *instr) BR_EQ(ok); RAISEin0_f(EXP_GP); m_a.bind(ok); - MOV(R8D, m_cpu->instr_eip); CALL_F(&fxsave_helper); }); } @@ -5545,35 +5513,16 @@ lc86_jit::hlt(decoded_instr *instr) else { MOV(MEMD32(RCX, CPU_CTX_EIP), m_cpu->instr_eip + m_cpu->instr_bytes); if (m_cpu->cpu_flags & CPU_TIMEOUT) { - Label retry = m_a.newLabel(); - Label no_timeout = m_a.newLabel(); - m_a.bind(retry); - CALL_F(&cpu_timer_helper); - PAUSE(); - TEST(EAX, EAX); - BR_EQ(retry); - TEST(EAX, CPU_NON_HW_INT); - BR_NE(retry); - TEST(EAX, CPU_HW_INT); - BR_NE(no_timeout); - MOV(MEMD8(RCX, CPU_CTX_EXIT), 1); // request an exit - MOV(MEMD8(RCX, CPU_CTX_HALTED), 1); // set halted flag - m_a.bind(no_timeout); + CALL_F(&hlt_helper); } else { - Label retry = m_a.newLabel(); - m_a.bind(retry); - CALL_F(&hlt_helper); - PAUSE(); - TEST(EAX, EAX); - BR_EQ(retry); + CALL_F(&hlt_helper); } XOR(EAX, EAX); gen_epilogue_main(); } - m_needs_epilogue = false; m_cpu->translate_next = 0; } } @@ -6016,7 +5965,6 @@ lc86_jit::iret(decoded_instr *instr) if (IS_PE()) { Label exp = m_a.newLabel(); - MOV(R8D, m_cpu->instr_eip); MOV(DL, m_cpu->size_mode); CALL_F(&lret_pe_helper); TEST(EAX, EAX); @@ -6026,7 +5974,6 @@ lc86_jit::iret(decoded_instr *instr) RAISEin_no_param_f(); } else { - MOV(R8D, m_cpu->instr_eip); MOV(DL, m_cpu->size_mode); CALL_F(&iret_real_helper); gen_link_ret(); @@ -6308,7 +6255,6 @@ lc86_jit::jmp(decoded_instr *instr) uint16_t new_sel = instr->o[OPNUM_SINGLE].ptr.segment; if (IS_PE_NOT_VM86()) { Label exp = m_a.newLabel(); - MOV(MEMD32(RSP, STACK_ARGS_off), m_cpu->instr_eip); MOV(R9D, new_eip); MOV(R8B, m_cpu->size_mode); MOV(DX, 
new_sel); @@ -6368,7 +6314,6 @@ lc86_jit::jmp(decoded_instr *instr) if (IS_PE_NOT_VM86()) { Label exp = m_a.newLabel(); - MOV(MEMD32(RSP, STACK_ARGS_off), m_cpu->instr_eip); MOV(R9D, EBX); MOV(R8B, m_cpu->size_mode); MOV(DX, AX); @@ -6521,6 +6466,7 @@ lc86_jit::lmsw(decoded_instr* instr) ST_R32(CPU_CTX_EIP, m_cpu->instr_eip + m_cpu->instr_bytes); gen_no_link_checks(); + gen_epilogue_main(); m_cpu->translate_next = 0; } } @@ -6776,6 +6722,7 @@ lc86_jit::mov(decoded_instr *instr) if ((cr_idx == CR0_idx) || (cr_idx == CR4_idx)) { ST_R32(CPU_CTX_EIP, m_cpu->instr_eip + m_cpu->instr_bytes); gen_no_link_checks(); + gen_epilogue_main(); } } break; @@ -6860,6 +6807,7 @@ lc86_jit::mov(decoded_instr *instr) } else { gen_no_link_checks(); + gen_epilogue_main(); } m_cpu->translate_next = 0; } @@ -6933,7 +6881,6 @@ lc86_jit::mov(decoded_instr *instr) LD_MEM(); }); if (IS_PE_NOT_VM86()) { - MOV(R8D, m_cpu->instr_eip); MOV(DX, AX); switch (REG_idx(instr->o[OPNUM_DST].reg.value)) @@ -6977,9 +6924,7 @@ lc86_jit::mov(decoded_instr *instr) } if (instr->o[OPNUM_DST].reg.value == ZYDIS_REGISTER_SS) { - MOV(EAX, MEMD32(RCX, CPU_CTX_HFLG)); - OR(EAX, HFLG_INHIBIT_INT); - MOV(MEMD32(RCX, CPU_CTX_HFLG), EAX); + m_a.lock().or_(MEMD32(RCX, CPU_CTX_INT), CPU_MASKED_INT); ST_R32(CPU_CTX_EIP, m_cpu->instr_eip + m_cpu->instr_bytes); gen_link_dst_cond([this] { @@ -7730,7 +7675,6 @@ lc86_jit::pop(decoded_instr *instr) gen_stack_pop<1, 0, false>(); if (IS_PE_NOT_VM86()) { - MOV(R8D, m_cpu->instr_eip); MOV(DX, R11W); switch (sel.first) @@ -7785,9 +7729,7 @@ lc86_jit::pop(decoded_instr *instr) } if (sel.first == SS_idx) { - MOV(EAX, MEMD32(RCX, CPU_CTX_HFLG)); - OR(EAX, HFLG_INHIBIT_INT); - MOV(MEMD32(RCX, CPU_CTX_HFLG), EAX); + m_a.lock().or_(MEMD32(RCX, CPU_CTX_INT), CPU_MASKED_INT); ST_R32(CPU_CTX_EIP, m_cpu->instr_eip + m_cpu->instr_bytes); gen_link_dst_cond([this] { @@ -8319,7 +8261,6 @@ lc86_jit::ret(decoded_instr *instr) case 0xCB: { if (IS_PE_NOT_VM86()) { Label ok = m_a.newLabel(); - MOV(R8D, m_cpu->instr_eip); MOV(DL, m_cpu->size_mode); CALL_F(&lret_pe_helper); TEST(EAX, EAX); @@ -8888,17 +8829,8 @@ lc86_jit::sti(decoded_instr *instr) Label no_inhibition = m_a.newLabel(); TEST(EDX, IF_MASK); BR_NE(no_inhibition); - MOV(EAX, MEMD32(RCX, CPU_CTX_HFLG)); - OR(EAX, HFLG_INHIBIT_INT); - MOV(MEMD32(RCX, CPU_CTX_HFLG), EAX); + m_a.lock().or_(MEMD32(RCX, CPU_CTX_INT), CPU_MASKED_INT); m_a.bind(no_inhibition); - ST_R32(CPU_CTX_EIP, m_cpu->instr_eip + m_cpu->instr_bytes); - - gen_link_dst_cond([this] { - MOV(EBX, MEMD32(RCX, CPU_CTX_HFLG)); - TEST(EBX, HFLG_INHIBIT_INT); - }); - m_cpu->translate_next = 0; } void @@ -9412,4 +9344,7 @@ lc86_jit::xorps(decoded_instr *instr) } } +template void lc86_jit::gen_interrupt_check(); +template void lc86_jit::gen_interrupt_check(); + #endif diff --git a/lib86cpu/core/emitter/x64/jit.h b/lib86cpu/core/emitter/x64/jit.h index 95e43ee..31c1f67 100644 --- a/lib86cpu/core/emitter/x64/jit.h +++ b/lib86cpu/core/emitter/x64/jit.h @@ -33,7 +33,9 @@ class lc86_jit : public Target { void gen_tc_prologue() { start_new_session(); gen_exit_func(); gen_prologue_main(); } void gen_tc_epilogue(); void gen_hook(hook_t hook_addr); - void gen_raise_exp_inline(uint32_t fault_addr, uint16_t code, uint16_t idx, uint32_t eip); + void gen_raise_exp_inline(uint32_t fault_addr, uint16_t code, uint16_t idx); + template + void gen_interrupt_check(); void free_code_block(void *addr) { m_mem.release_sys_mem(addr); } void destroy_all_code() { m_mem.destroy_all_blocks(); } @@ -185,8 +187,8 @@ class lc86_jit : public 
Target { void gen_tail_call(x86::Gp addr); void gen_exit_func(); void gen_aux_funcs(); - void gen_interrupt_check(); void gen_no_link_checks(); + void gen_timeout_check(); bool gen_check_rf_single_step(); template void gen_link_direct(addr_t dst_pc, addr_t *next_pc, T target_addr); @@ -195,8 +197,8 @@ class lc86_jit : public Target { void gen_link_ret(); template void gen_link_dst_cond(T &&lambda); - template - void gen_raise_exp_inline(T1 fault_addr, T2 code, T3 idx, T4 eip); + template + void gen_raise_exp_inline(T1 fault_addr, T2 code, T3 idx); template void gen_raise_exp_inline(); template @@ -287,8 +289,8 @@ class lc86_jit : public Target { cpu_t *m_cpu; CodeHolder m_code; x86::Assembler m_a; - bool m_needs_epilogue; mem_manager m_mem; + Label m_exit_int, m_next_instr; }; #endif diff --git a/lib86cpu/core/emitter/x64/support.cpp b/lib86cpu/core/emitter/x64/support.cpp index 017b1c2..b3d58b8 100644 --- a/lib86cpu/core/emitter/x64/support.cpp +++ b/lib86cpu/core/emitter/x64/support.cpp @@ -5,7 +5,6 @@ */ #include "internal.h" -#include "clock.h" #include #if defined(_MSC_VER) #include @@ -207,26 +206,3 @@ uint128_t::operator<<(int shift) _mm_store_si128(reinterpret_cast<__m128i *>(&low), val); return *this; } - -void -halt_loop(cpu_t *cpu) -{ - while (true) { - uint32_t ret = cpu_timer_helper(&cpu->cpu_ctx); - _mm_pause(); - - if ((ret == CPU_NO_INT) || (ret == CPU_NON_HW_INT)) { - // either nothing changed or it's not a hw int, keep looping in both cases - continue; - } - - if (ret == CPU_HW_INT) { - // hw int, exit the loop and clear the halted state - cpu->cpu_ctx.is_halted = 0; - return; - } - - // timeout, exit the loop - return; - } -} diff --git a/lib86cpu/core/fpu_instructions.cpp b/lib86cpu/core/fpu_instructions.cpp index 03692f3..94469da 100644 --- a/lib86cpu/core/fpu_instructions.cpp +++ b/lib86cpu/core/fpu_instructions.cpp @@ -8,51 +8,51 @@ void -fxsave_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint32_t eip) +fxsave_helper(cpu_ctx_t *cpu_ctx, addr_t addr) { - mem_write_helper(cpu_ctx, addr, cpu_ctx->regs.fctrl, eip, 0); + mem_write_helper(cpu_ctx, addr, cpu_ctx->regs.fctrl, 0); addr += 2; - mem_write_helper(cpu_ctx, addr, read_fstatus(cpu_ctx->cpu), eip, 0); + mem_write_helper(cpu_ctx, addr, read_fstatus(cpu_ctx->cpu), 0); addr += 2; uint8_t ftag_abridged = 0; for (unsigned i = 0; i < 8; ++i) { ftag_abridged |= (((cpu_ctx->regs.ftags[i] == FPU_TAG_EMPTY) ? 
0 : 1) << i); } - mem_write_helper(cpu_ctx, addr, ftag_abridged, eip, 0); + mem_write_helper(cpu_ctx, addr, ftag_abridged, 0); addr += 2; - mem_write_helper(cpu_ctx, addr, cpu_ctx->regs.fop, eip, 0); + mem_write_helper(cpu_ctx, addr, cpu_ctx->regs.fop, 0); addr += 2; - mem_write_helper(cpu_ctx, addr, cpu_ctx->regs.fip, eip, 0); + mem_write_helper(cpu_ctx, addr, cpu_ctx->regs.fip, 0); addr += 4; - mem_write_helper(cpu_ctx, addr, cpu_ctx->regs.fcs, eip, 0); + mem_write_helper(cpu_ctx, addr, cpu_ctx->regs.fcs, 0); addr += 4; - mem_write_helper(cpu_ctx, addr, cpu_ctx->regs.fdp, eip, 0); + mem_write_helper(cpu_ctx, addr, cpu_ctx->regs.fdp, 0); addr += 4; - mem_write_helper(cpu_ctx, addr, cpu_ctx->regs.fds, eip, 0); + mem_write_helper(cpu_ctx, addr, cpu_ctx->regs.fds, 0); addr += 4; if (cpu_ctx->hflags & HFLG_CR4_OSFXSR) { - mem_write_helper(cpu_ctx, addr, cpu_ctx->regs.mxcsr, eip, 0); + mem_write_helper(cpu_ctx, addr, cpu_ctx->regs.mxcsr, 0); addr += 4; - mem_write_helper(cpu_ctx, addr, 0, eip, 0); + mem_write_helper(cpu_ctx, addr, 0, 0); addr += 4; } else { addr += 8; } for (unsigned i = 0; i < 8; ++i) { - mem_write_helper(cpu_ctx, addr, cpu_ctx->regs.fr[i], eip, 0); + mem_write_helper(cpu_ctx, addr, cpu_ctx->regs.fr[i], 0); addr += 16; } if (cpu_ctx->hflags & HFLG_CR4_OSFXSR) { for (unsigned i = 0; i < 8; ++i) { - mem_write_helper(cpu_ctx, addr, cpu_ctx->regs.xmm[i], eip, 0); + mem_write_helper(cpu_ctx, addr, cpu_ctx->regs.xmm[i], 0); addr += 16; } } } uint32_t -fxrstor_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint32_t eip) +fxrstor_helper(cpu_ctx_t *cpu_ctx, addr_t addr) { // must be 16 byte aligned if (addr & 15) { @@ -60,42 +60,42 @@ fxrstor_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint32_t eip) } // PF check for the first and last byte we are going to read - volatile addr_t pf_check1 = get_read_addr(cpu_ctx->cpu, addr, 0, eip); - volatile addr_t pf_check2 = get_read_addr(cpu_ctx->cpu, addr + 288 - 1, 0, eip); + volatile addr_t pf_check1 = get_read_addr(cpu_ctx->cpu, addr, 0); + volatile addr_t pf_check2 = get_read_addr(cpu_ctx->cpu, addr + 288 - 1, 0); // check reserved bits of mxcsr if (cpu_ctx->hflags & HFLG_CR4_OSFXSR) { - uint32_t temp = mem_read_helper(cpu_ctx, addr + 24, eip, 0); + uint32_t temp = mem_read_helper(cpu_ctx, addr + 24, 0); if (temp & ~MXCSR_MASK) { return 1; } cpu_ctx->regs.mxcsr = temp; } - cpu_ctx->regs.fctrl = mem_read_helper(cpu_ctx, addr, eip, 0) | 0x40; + cpu_ctx->regs.fctrl = mem_read_helper(cpu_ctx, addr, 0) | 0x40; cpu_ctx->fpu_data.frp = cpu_ctx->regs.fctrl | FPU_EXP_ALL | 0x40; addr += 2; - write_fstatus(cpu_ctx->cpu, mem_read_helper(cpu_ctx, addr, eip, 0)); + write_fstatus(cpu_ctx->cpu, mem_read_helper(cpu_ctx, addr, 0)); addr += 2; - uint8_t ftag_abridged = mem_read_helper(cpu_ctx, addr, eip, 0); + uint8_t ftag_abridged = mem_read_helper(cpu_ctx, addr, 0); addr += 2; - cpu_ctx->regs.fop = mem_read_helper(cpu_ctx, addr, eip, 0); + cpu_ctx->regs.fop = mem_read_helper(cpu_ctx, addr, 0); addr += 2; - cpu_ctx->regs.fip = mem_read_helper(cpu_ctx, addr, eip, 0); + cpu_ctx->regs.fip = mem_read_helper(cpu_ctx, addr, 0); addr += 4; - cpu_ctx->regs.fcs = mem_read_helper(cpu_ctx, addr, eip, 0); + cpu_ctx->regs.fcs = mem_read_helper(cpu_ctx, addr, 0); addr += 4; - cpu_ctx->regs.fdp = mem_read_helper(cpu_ctx, addr, eip, 0); + cpu_ctx->regs.fdp = mem_read_helper(cpu_ctx, addr, 0); addr += 4; - cpu_ctx->regs.fds = mem_read_helper(cpu_ctx, addr, eip, 0); + cpu_ctx->regs.fds = mem_read_helper(cpu_ctx, addr, 0); addr += (4 + 8); for (unsigned i = 0; i < 8; ++i) { - 
cpu_ctx->regs.fr[i] = mem_read_helper(cpu_ctx, addr, eip, 0); + cpu_ctx->regs.fr[i] = mem_read_helper(cpu_ctx, addr, 0); addr += 16; } if (cpu_ctx->hflags & HFLG_CR4_OSFXSR) { for (unsigned i = 0; i < 8; ++i) { - cpu_ctx->regs.xmm[i] = mem_read_helper(cpu_ctx, addr, eip, 0); + cpu_ctx->regs.xmm[i] = mem_read_helper(cpu_ctx, addr, 0); addr += 16; } } diff --git a/lib86cpu/core/helpers.cpp b/lib86cpu/core/helpers.cpp index 1d64375..d0bf850 100644 --- a/lib86cpu/core/helpers.cpp +++ b/lib86cpu/core/helpers.cpp @@ -8,7 +8,7 @@ void -stack_push_helper(cpu_t *cpu, const uint32_t val, uint32_t size_mode, uint32_t &addr, uint32_t eip) +stack_push_helper(cpu_t *cpu, const uint32_t val, uint32_t size_mode, uint32_t &addr) { assert(size_mode != SIZE8); @@ -17,7 +17,7 @@ stack_push_helper(cpu_t *cpu, const uint32_t val, uint32_t size_mode, uint32_t & case (SIZE32 << 1) | 0: { // sp, push 32 uint16_t sp = addr & 0xFFFF; sp -= 4; - mem_write_helper(&cpu->cpu_ctx, sp + cpu->cpu_ctx.regs.ss_hidden.base, val, eip, 0); + mem_write_helper(&cpu->cpu_ctx, sp + cpu->cpu_ctx.regs.ss_hidden.base, val, 0); addr = sp; } break; @@ -25,7 +25,7 @@ stack_push_helper(cpu_t *cpu, const uint32_t val, uint32_t size_mode, uint32_t & case (SIZE32 << 1) | 1: { // esp, push 32 uint32_t esp = addr; esp -= 4; - mem_write_helper(&cpu->cpu_ctx, esp + cpu->cpu_ctx.regs.ss_hidden.base, val, eip, 0); + mem_write_helper(&cpu->cpu_ctx, esp + cpu->cpu_ctx.regs.ss_hidden.base, val, 0); addr = esp; } break; @@ -33,7 +33,7 @@ stack_push_helper(cpu_t *cpu, const uint32_t val, uint32_t size_mode, uint32_t & case (SIZE16 << 1) | 0: { // sp, push 16 uint16_t sp = addr & 0xFFFF; sp -= 2; - mem_write_helper(&cpu->cpu_ctx, sp + cpu->cpu_ctx.regs.ss_hidden.base, val, eip, 0); + mem_write_helper(&cpu->cpu_ctx, sp + cpu->cpu_ctx.regs.ss_hidden.base, val, 0); addr = sp; } break; @@ -41,7 +41,7 @@ stack_push_helper(cpu_t *cpu, const uint32_t val, uint32_t size_mode, uint32_t & case (SIZE16 << 1) | 1: { // esp, push 16 uint16_t esp = addr; esp -= 2; - mem_write_helper(&cpu->cpu_ctx, esp + cpu->cpu_ctx.regs.ss_hidden.base, val, eip, 0); + mem_write_helper(&cpu->cpu_ctx, esp + cpu->cpu_ctx.regs.ss_hidden.base, val, 0); addr = esp; } break; @@ -52,7 +52,7 @@ stack_push_helper(cpu_t *cpu, const uint32_t val, uint32_t size_mode, uint32_t & } uint32_t -stack_pop_helper(cpu_t *cpu, uint32_t size_mode, uint32_t &addr, uint32_t eip) +stack_pop_helper(cpu_t *cpu, uint32_t size_mode, uint32_t &addr) { assert(size_mode != SIZE8); @@ -62,28 +62,28 @@ stack_pop_helper(cpu_t *cpu, uint32_t size_mode, uint32_t &addr, uint32_t eip) { case (SIZE32 << 1) | 0: { // sp, pop 32 uint16_t sp = addr & 0xFFFF; - ret = mem_read_helper(&cpu->cpu_ctx, sp + cpu->cpu_ctx.regs.ss_hidden.base, eip, 0); + ret = mem_read_helper(&cpu->cpu_ctx, sp + cpu->cpu_ctx.regs.ss_hidden.base, 0); addr = sp + 4; } break; case (SIZE32 << 1) | 1: { // esp, pop 32 uint32_t esp = addr; - ret = mem_read_helper(&cpu->cpu_ctx, esp + cpu->cpu_ctx.regs.ss_hidden.base, eip, 0); + ret = mem_read_helper(&cpu->cpu_ctx, esp + cpu->cpu_ctx.regs.ss_hidden.base, 0); addr = esp + 4; } break; case (SIZE16 << 1) | 0: { // sp, pop 16 uint16_t sp = addr & 0xFFFF; - ret = mem_read_helper(&cpu->cpu_ctx, sp + cpu->cpu_ctx.regs.ss_hidden.base, eip, 0); + ret = mem_read_helper(&cpu->cpu_ctx, sp + cpu->cpu_ctx.regs.ss_hidden.base, 0); addr = sp + 2; } break; case (SIZE16 << 1) | 1: { // esp, pop 16 uint16_t esp = addr; - ret = mem_read_helper(&cpu->cpu_ctx, esp + cpu->cpu_ctx.regs.ss_hidden.base, eip, 0); + ret = 
mem_read_helper(&cpu->cpu_ctx, esp + cpu->cpu_ctx.regs.ss_hidden.base, 0); addr = esp + 2; } break; @@ -96,16 +96,15 @@ stack_pop_helper(cpu_t *cpu, uint32_t size_mode, uint32_t &addr, uint32_t eip) } uint8_t -raise_exp_helper(cpu_t *cpu, uint16_t code, uint16_t idx, uint32_t eip) +raise_exp_helper(cpu_t *cpu, uint16_t code, uint16_t idx) { cpu->cpu_ctx.exp_info.exp_data.code = code; cpu->cpu_ctx.exp_info.exp_data.idx = idx; - cpu->cpu_ctx.exp_info.exp_data.eip = eip; return 1; } template -uint8_t read_seg_desc_helper(cpu_t *cpu, uint16_t sel, addr_t &desc_addr, uint64_t &desc, uint32_t eip) +uint8_t read_seg_desc_helper(cpu_t *cpu, uint16_t sel, addr_t &desc_addr, uint64_t &desc) { uint32_t base, limit; uint16_t idx = sel >> 3; @@ -113,7 +112,7 @@ uint8_t read_seg_desc_helper(cpu_t *cpu, uint16_t sel, addr_t &desc_addr, uint64 if constexpr (is_tss) { if (ti) { - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP, eip); // must be in the gdt + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP); // must be in the gdt } else { base = cpu->cpu_ctx.regs.gdtr_hidden.base; @@ -133,18 +132,18 @@ uint8_t read_seg_desc_helper(cpu_t *cpu, uint16_t sel, addr_t &desc_addr, uint64 desc_addr = base + idx * 8; if (desc_addr + 7 > base + limit) { // sel idx outside of descriptor table - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP); } - desc = mem_read_helper(&cpu->cpu_ctx, desc_addr, eip, 2); + desc = mem_read_helper(&cpu->cpu_ctx, desc_addr, 2); return 0; } void -set_access_flg_seg_desc_helper(cpu_t *cpu, uint64_t desc, addr_t desc_addr, uint32_t eip) +set_access_flg_seg_desc_helper(cpu_t *cpu, uint64_t desc, addr_t desc_addr) { if ((((desc & SEG_DESC_S) >> 44) | ((desc & SEG_DESC_A) >> 39)) == 1) { - mem_write_helper(&cpu->cpu_ctx, desc_addr, desc | SEG_DESC_A, eip, 2); + mem_write_helper(&cpu->cpu_ctx, desc_addr, desc | SEG_DESC_A, 2); } } @@ -171,13 +170,13 @@ read_seg_desc_limit_helper(cpu_t *cpu, uint64_t desc) } uint8_t -check_ss_desc_priv_helper(cpu_t *cpu, uint16_t sel, uint16_t *cs, addr_t &desc_addr, uint64_t &desc, uint32_t eip) +check_ss_desc_priv_helper(cpu_t *cpu, uint16_t sel, uint16_t *cs, addr_t &desc_addr, uint64_t &desc) { if ((sel >> 2) == 0) { // sel == NULL - return raise_exp_helper(cpu, 0, EXP_GP, eip); + return raise_exp_helper(cpu, 0, EXP_GP); } - if (read_seg_desc_helper(cpu, sel, desc_addr, desc, eip)) { + if (read_seg_desc_helper(cpu, sel, desc_addr, desc)) { return 1; } @@ -198,32 +197,32 @@ check_ss_desc_priv_helper(cpu_t *cpu, uint16_t sel, uint16_t *cs, addr_t &desc_a } if (val) { - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP); } uint64_t p = desc & SEG_DESC_P; if (p == 0) { // segment not present - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_SS, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_SS); } return 0; } uint8_t -check_seg_desc_priv_helper(cpu_t *cpu, uint16_t sel, addr_t &desc_addr, uint64_t &desc, uint32_t eip) +check_seg_desc_priv_helper(cpu_t *cpu, uint16_t sel, addr_t &desc_addr, uint64_t &desc) { - if (read_seg_desc_helper(cpu, sel, desc_addr, desc, eip)) { + if (read_seg_desc_helper(cpu, sel, desc_addr, desc)) { return 1; } // check for segment privilege violations if ((desc & SEG_DESC_S) == 0) { - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP); } uint16_t d = (desc & SEG_DESC_DC) >> 43; uint16_t r = (desc & SEG_DESC_R) >> 40; if ((d | r) == 1) { - return 
raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP); } if ((d == 0) || ((desc & SEG_DESC_C) == 0)) { @@ -231,12 +230,12 @@ check_seg_desc_priv_helper(cpu_t *cpu, uint16_t sel, addr_t &desc_addr, uint64_t uint16_t dpl = (desc & SEG_DESC_DPL) >> 45; uint16_t rpl = sel & 3; if ((rpl > dpl) && (cpl > dpl)) { - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP); } } if ((desc & SEG_DESC_P) == 0) { // segment not present - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_NP, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_NP); } return 0; @@ -256,25 +255,25 @@ write_eflags_helper(cpu_t *cpu, uint32_t eflags, uint32_t mask) } uint8_t -read_stack_ptr_from_tss_helper(cpu_t *cpu, uint32_t dpl, uint32_t &esp, uint16_t &ss, uint32_t eip, uint8_t is_priv) +read_stack_ptr_from_tss_helper(cpu_t *cpu, uint32_t dpl, uint32_t &esp, uint16_t &ss, uint8_t is_priv) { uint32_t type = (cpu->cpu_ctx.regs.tr_hidden.flags & SEG_HIDDEN_TSS_TY) >> 11; uint32_t idx = (2 << type) + dpl * (4 << type); if ((idx + (4 << type) - 1) > cpu->cpu_ctx.regs.tr_hidden.limit) { - return raise_exp_helper(cpu, cpu->cpu_ctx.regs.tr & 0xFFFC, EXP_TS, eip); + return raise_exp_helper(cpu, cpu->cpu_ctx.regs.tr & 0xFFFC, EXP_TS); } if (type) { - esp = mem_read_helper(&cpu->cpu_ctx, cpu->cpu_ctx.regs.tr_hidden.base + idx, eip, is_priv); - ss = mem_read_helper(&cpu->cpu_ctx, cpu->cpu_ctx.regs.tr_hidden.base + idx + 4, eip, is_priv); + esp = mem_read_helper(&cpu->cpu_ctx, cpu->cpu_ctx.regs.tr_hidden.base + idx, is_priv); + ss = mem_read_helper(&cpu->cpu_ctx, cpu->cpu_ctx.regs.tr_hidden.base + idx + 4, is_priv); } else { - esp = mem_read_helper(&cpu->cpu_ctx, cpu->cpu_ctx.regs.tr_hidden.base + idx, eip, is_priv); - ss = mem_read_helper(&cpu->cpu_ctx, cpu->cpu_ctx.regs.tr_hidden.base + idx + 2, eip, is_priv); + esp = mem_read_helper(&cpu->cpu_ctx, cpu->cpu_ctx.regs.tr_hidden.base + idx, is_priv); + ss = mem_read_helper(&cpu->cpu_ctx, cpu->cpu_ctx.regs.tr_hidden.base + idx + 2, is_priv); } return 0; } -template uint8_t read_seg_desc_helper(cpu_t *cpu, uint16_t sel, addr_t &desc_addr, uint64_t &desc, uint32_t eip); -template uint8_t read_seg_desc_helper(cpu_t *cpu, uint16_t sel, addr_t &desc_addr, uint64_t &desc, uint32_t eip); +template uint8_t read_seg_desc_helper(cpu_t *cpu, uint16_t sel, addr_t &desc_addr, uint64_t &desc); +template uint8_t read_seg_desc_helper(cpu_t *cpu, uint16_t sel, addr_t &desc_addr, uint64_t &desc); diff --git a/lib86cpu/core/helpers.h b/lib86cpu/core/helpers.h index fde55b4..4d0f4e0 100644 --- a/lib86cpu/core/helpers.h +++ b/lib86cpu/core/helpers.h @@ -7,15 +7,15 @@ #include "memory_management.h" -uint32_t stack_pop_helper(cpu_t *cpu, uint32_t size_mode, uint32_t &addr, uint32_t eip); -void stack_push_helper(cpu_t *cpu, const uint32_t val, uint32_t size_mode, uint32_t &addr, uint32_t eip); -uint8_t raise_exp_helper(cpu_t *cpu, uint16_t code, uint16_t idx, uint32_t eip); -template uint8_t read_seg_desc_helper(cpu_t *cpu, uint16_t sel, addr_t &desc_addr, uint64_t &desc, uint32_t eip); -void set_access_flg_seg_desc_helper(cpu_t *cpu, uint64_t desc, addr_t desc_addr, uint32_t eip); +uint32_t stack_pop_helper(cpu_t *cpu, uint32_t size_mode, uint32_t &addr); +void stack_push_helper(cpu_t *cpu, const uint32_t val, uint32_t size_mode, uint32_t &addr); +uint8_t raise_exp_helper(cpu_t *cpu, uint16_t code, uint16_t idx); +template uint8_t read_seg_desc_helper(cpu_t *cpu, uint16_t sel, addr_t &desc_addr, uint64_t 
&desc); +void set_access_flg_seg_desc_helper(cpu_t *cpu, uint64_t desc, addr_t desc_addr); uint32_t read_seg_desc_base_helper(cpu_t *cpu, uint64_t desc); uint32_t read_seg_desc_flags_helper(cpu_t *cpu, uint64_t desc); uint32_t read_seg_desc_limit_helper(cpu_t *cpu, uint64_t desc); -uint8_t check_ss_desc_priv_helper(cpu_t *cpu, uint16_t sel, uint16_t *cs, addr_t &desc_addr, uint64_t &desc, uint32_t eip); -uint8_t check_seg_desc_priv_helper(cpu_t *cpu, uint16_t sel, addr_t &desc_addr, uint64_t &desc, uint32_t eip); +uint8_t check_ss_desc_priv_helper(cpu_t *cpu, uint16_t sel, uint16_t *cs, addr_t &desc_addr, uint64_t &desc); +uint8_t check_seg_desc_priv_helper(cpu_t *cpu, uint16_t sel, addr_t &desc_addr, uint64_t &desc); void write_eflags_helper(cpu_t *cpu, uint32_t eflags, uint32_t mask); -uint8_t read_stack_ptr_from_tss_helper(cpu_t *cpu, uint32_t dpl, uint32_t &esp, uint16_t &ss, uint32_t eip, uint8_t is_priv = 0); +uint8_t read_stack_ptr_from_tss_helper(cpu_t *cpu, uint32_t dpl, uint32_t &esp, uint16_t &ss, uint8_t is_priv = 0); diff --git a/lib86cpu/core/instructions.cpp b/lib86cpu/core/instructions.cpp index 487a8e3..db06dc9 100644 --- a/lib86cpu/core/instructions.cpp +++ b/lib86cpu/core/instructions.cpp @@ -6,6 +6,7 @@ #include "instructions.h" #include "debugger.h" +#include "clock.h" template @@ -131,7 +132,7 @@ validate_seg_helper(cpu_t *cpu) } template -uint32_t lret_pe_helper(cpu_ctx_t *cpu_ctx, uint8_t size_mode, uint32_t eip) +uint32_t lret_pe_helper(cpu_ctx_t *cpu_ctx, uint8_t size_mode) { cpu_t *cpu = cpu_ctx->cpu; uint32_t cpl = cpu->cpu_ctx.hflags & HFLG_CPL; @@ -145,31 +146,31 @@ uint32_t lret_pe_helper(cpu_ctx_t *cpu_ctx, uint8_t size_mode, uint32_t eip) if (eflags & VM_MASK) { // return from vm86 mode if (((eflags & IOPL_MASK) >> 12) == 3) { - iret_real_helper(cpu_ctx, size_mode, eip); + iret_real_helper(cpu_ctx, size_mode); return 0; } - return raise_exp_helper(cpu, 0, EXP_GP, eip); + return raise_exp_helper(cpu, 0, EXP_GP); } if (eflags & NT_MASK) { LIB86CPU_ABORT_msg("Task returns are not supported in iret instructions yet"); } - ret_eip = stack_pop_helper(cpu, size_mode, esp, eip); - cs = stack_pop_helper(cpu, size_mode, esp, eip); - temp_eflags = stack_pop_helper(cpu, size_mode, esp, eip); + ret_eip = stack_pop_helper(cpu, size_mode, esp); + cs = stack_pop_helper(cpu, size_mode, esp); + temp_eflags = stack_pop_helper(cpu, size_mode, esp); if (temp_eflags & VM_MASK) { // return to vm86 mode uint32_t new_esp, ss, es, ds, fs, gs; - new_esp = stack_pop_helper(cpu, SIZE32, esp, eip); - ss = stack_pop_helper(cpu, SIZE32, esp, eip); - es = stack_pop_helper(cpu, SIZE32, esp, eip); - ds = stack_pop_helper(cpu, SIZE32, esp, eip); - fs = stack_pop_helper(cpu, SIZE32, esp, eip); - gs = stack_pop_helper(cpu, SIZE32, esp, eip); + new_esp = stack_pop_helper(cpu, SIZE32, esp); + ss = stack_pop_helper(cpu, SIZE32, esp); + es = stack_pop_helper(cpu, SIZE32, esp); + ds = stack_pop_helper(cpu, SIZE32, esp); + fs = stack_pop_helper(cpu, SIZE32, esp); + gs = stack_pop_helper(cpu, SIZE32, esp); write_eflags_helper(cpu, temp_eflags, TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK); @@ -203,55 +204,55 @@ uint32_t lret_pe_helper(cpu_ctx_t *cpu_ctx, uint8_t size_mode, uint32_t eip) } } else { - ret_eip = stack_pop_helper(cpu, size_mode, esp, eip); - cs = stack_pop_helper(cpu, size_mode, esp, eip); + ret_eip = stack_pop_helper(cpu, size_mode, esp); + cs = stack_pop_helper(cpu, size_mode, esp); } if ((cs >> 2) == 0) { // sel == NULL - return 
raise_exp_helper(cpu, 0, EXP_GP, eip); + return raise_exp_helper(cpu, 0, EXP_GP); } addr_t desc_addr; uint64_t cs_desc; - if (read_seg_desc_helper(cpu, cs, desc_addr, cs_desc, eip)) { + if (read_seg_desc_helper(cpu, cs, desc_addr, cs_desc)) { return 1; } uint32_t s = (cs_desc & SEG_DESC_S) >> 44; // !(sys desc) uint32_t d = (cs_desc & SEG_DESC_DC) >> 42; // !(data desc) if ((s | d) != 3) { - return raise_exp_helper(cpu, cs & 0xFFFC, EXP_GP, eip); + return raise_exp_helper(cpu, cs & 0xFFFC, EXP_GP); } uint32_t rpl = cs & 3; if (rpl < cpl) { // rpl < cpl - return raise_exp_helper(cpu, cs & 0xFFFC, EXP_GP, eip); + return raise_exp_helper(cpu, cs & 0xFFFC, EXP_GP); } uint64_t c = cs_desc & SEG_DESC_C; uint32_t dpl = (cs_desc & SEG_DESC_DPL) >> 45; if (c && (dpl > rpl)) { // conf && dpl > rpl - return raise_exp_helper(cpu, cs & 0xFFFC, EXP_GP, eip); + return raise_exp_helper(cpu, cs & 0xFFFC, EXP_GP); } uint64_t p = cs_desc & SEG_DESC_P; if (p == 0) { // segment not present - return raise_exp_helper(cpu, cs & 0xFFFC, EXP_NP, eip); + return raise_exp_helper(cpu, cs & 0xFFFC, EXP_NP); } if (rpl > cpl) { // less privileged - uint32_t ret_esp = stack_pop_helper(cpu, size_mode, esp, eip); - uint16_t ss = stack_pop_helper(cpu, size_mode, esp, eip); + uint32_t ret_esp = stack_pop_helper(cpu, size_mode, esp); + uint16_t ss = stack_pop_helper(cpu, size_mode, esp); addr_t ss_desc_addr; uint64_t ss_desc; - if (check_ss_desc_priv_helper(cpu, ss, &cs, ss_desc_addr, ss_desc, eip)) { + if (check_ss_desc_priv_helper(cpu, ss, &cs, ss_desc_addr, ss_desc)) { return 1; } - set_access_flg_seg_desc_helper(cpu, ss_desc, ss_desc_addr, eip); - set_access_flg_seg_desc_helper(cpu, cs_desc, desc_addr, eip); + set_access_flg_seg_desc_helper(cpu, ss_desc, ss_desc_addr); + set_access_flg_seg_desc_helper(cpu, cs_desc, desc_addr); write_seg_reg_helper(cpu, ss, read_seg_desc_base_helper(cpu, ss_desc), read_seg_desc_limit_helper(cpu, ss_desc), read_seg_desc_flags_helper(cpu, ss_desc)); write_seg_reg_helper(cpu, cs, read_seg_desc_base_helper(cpu, cs_desc), read_seg_desc_limit_helper(cpu, cs_desc), read_seg_desc_flags_helper(cpu, cs_desc)); @@ -272,7 +273,7 @@ uint32_t lret_pe_helper(cpu_ctx_t *cpu_ctx, uint8_t size_mode, uint32_t eip) else { // same privilege - set_access_flg_seg_desc_helper(cpu, cs_desc, desc_addr, eip); + set_access_flg_seg_desc_helper(cpu, cs_desc, desc_addr); uint32_t stack_mask; if (cpu->cpu_ctx.hflags & HFLG_SS32) { @@ -294,15 +295,15 @@ uint32_t lret_pe_helper(cpu_ctx_t *cpu_ctx, uint8_t size_mode, uint32_t eip) } void -iret_real_helper(cpu_ctx_t *cpu_ctx, uint8_t size_mode, uint32_t eip) +iret_real_helper(cpu_ctx_t *cpu_ctx, uint8_t size_mode) { cpu_t *cpu = cpu_ctx->cpu; uint32_t esp = cpu->cpu_ctx.regs.esp; uint32_t eflags_mask; - uint32_t ret_eip = stack_pop_helper(cpu, size_mode, esp, eip); - uint16_t cs = stack_pop_helper(cpu, size_mode, esp, eip); - uint32_t temp_eflags = stack_pop_helper(cpu, size_mode, esp, eip); + uint32_t ret_eip = stack_pop_helper(cpu, size_mode, esp); + uint16_t cs = stack_pop_helper(cpu, size_mode, esp); + uint32_t temp_eflags = stack_pop_helper(cpu, size_mode, esp); if (cpu->cpu_ctx.regs.eflags & VM_MASK) { // vm86 mode masks iopl, real mode doesn't @@ -325,18 +326,18 @@ iret_real_helper(cpu_ctx_t *cpu_ctx, uint8_t size_mode, uint32_t eip) } uint32_t -ljmp_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint8_t size_mode, uint32_t jmp_eip, uint32_t eip) +ljmp_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint8_t size_mode, uint32_t jmp_eip) { cpu_t *cpu = cpu_ctx->cpu; uint16_t 
cpl = cpu->cpu_ctx.hflags & HFLG_CPL; if ((sel >> 2) == 0) { // sel == NULL - return raise_exp_helper(cpu, 0, EXP_GP, eip); + return raise_exp_helper(cpu, 0, EXP_GP); } addr_t desc_addr; uint64_t desc; - if (read_seg_desc_helper(cpu, sel, desc_addr, desc, eip)) { + if (read_seg_desc_helper(cpu, sel, desc_addr, desc)) { return 1; } @@ -344,7 +345,7 @@ ljmp_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint8_t size_mode, uint32_t jmp // non-system desc if ((desc & SEG_DESC_DC) == 0) { // !(data desc) - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP); } uint16_t dpl = (desc & SEG_DESC_DPL) >> 45; @@ -353,7 +354,7 @@ ljmp_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint8_t size_mode, uint32_t jmp // conforming if (dpl > cpl) { // dpl > cpl - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP); } } else { @@ -361,17 +362,17 @@ ljmp_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint8_t size_mode, uint32_t jmp uint16_t rpl = sel & 3; if ((rpl > cpl) || (dpl != cpl)) { // rpl > cpl || dpl != cpl - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP); } } // common path for conf/non-conf if ((desc & SEG_DESC_P) == 0) { // segment not present - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_NP, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_NP); } - set_access_flg_seg_desc_helper(cpu, desc, desc_addr, eip); + set_access_flg_seg_desc_helper(cpu, desc, desc_addr); write_seg_reg_helper(cpu, (sel & 0xFFFC) | cpl, read_seg_desc_base_helper(cpu, desc), read_seg_desc_limit_helper(cpu, desc), read_seg_desc_flags_helper(cpu, desc)); cpu_ctx->regs.eip = size_mode == SIZE16 ? jmp_eip & 0xFFFF : jmp_eip; } @@ -391,7 +392,7 @@ ljmp_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint8_t size_mode, uint32_t jmp break; default: - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP); } sys_ty >>= 3; @@ -399,19 +400,19 @@ ljmp_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint8_t size_mode, uint32_t jmp uint16_t rpl = sel & 3; if ((dpl < cpl) || (rpl > dpl)) { // dpl < cpl || rpl > dpl - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP); } if ((desc & SEG_DESC_P) == 0) { // segment not present - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_NP, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_NP); } uint16_t code_sel = (desc & 0xFFFF0000) >> 16; if ((code_sel >> 2) == 0) { // code_sel == NULL - return raise_exp_helper(cpu, 0, EXP_GP, eip); + return raise_exp_helper(cpu, 0, EXP_GP); } - if (read_seg_desc_helper(cpu, code_sel, desc_addr, desc, eip)) { // read code desc pointed to by the call gate sel + if (read_seg_desc_helper(cpu, code_sel, desc_addr, desc)) { // read code desc pointed to by the call gate sel return 1; } @@ -419,14 +420,14 @@ ljmp_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint8_t size_mode, uint32_t jmp if (((((desc & SEG_DESC_S) >> 43) | ((desc & SEG_DESC_DC) >> 43)) != 3) || // !(code desc) || (conf && dpl > cpl) || (non-conf && dpl != cpl) ((desc & SEG_DESC_C) && (dpl > cpl)) || (((desc & SEG_DESC_C) == 0) && (dpl != cpl))) { - return raise_exp_helper(cpu, code_sel & 0xFFFC, EXP_GP, eip); + return raise_exp_helper(cpu, code_sel & 0xFFFC, EXP_GP); } if ((desc & SEG_DESC_P) == 0) { // segment not present - return raise_exp_helper(cpu, code_sel & 0xFFFC, EXP_NP, eip); + return raise_exp_helper(cpu, 
code_sel & 0xFFFC, EXP_NP); } - set_access_flg_seg_desc_helper(cpu, desc, desc_addr, eip); + set_access_flg_seg_desc_helper(cpu, desc, desc_addr); write_seg_reg_helper(cpu, (sel & 0xFFFC) | cpl, read_seg_desc_base_helper(cpu, desc), read_seg_desc_limit_helper(cpu, desc), read_seg_desc_flags_helper(cpu, desc)); if (sys_ty == 0) { @@ -439,18 +440,18 @@ ljmp_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint8_t size_mode, uint32_t jmp } uint32_t -lcall_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t call_eip, uint8_t size_mode, uint32_t ret_eip, uint32_t eip) +lcall_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t call_eip, uint8_t size_mode, uint32_t ret_eip) { cpu_t *cpu = cpu_ctx->cpu; uint16_t cpl = cpu->cpu_ctx.hflags & HFLG_CPL; if ((sel >> 2) == 0) { // sel == NULL - return raise_exp_helper(cpu, 0, EXP_GP, eip); + return raise_exp_helper(cpu, 0, EXP_GP); } addr_t cs_desc_addr; uint64_t cs_desc; - if (read_seg_desc_helper(cpu, sel, cs_desc_addr, cs_desc, eip)) { + if (read_seg_desc_helper(cpu, sel, cs_desc_addr, cs_desc)) { return 1; } @@ -458,7 +459,7 @@ lcall_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t call_eip, uint8_t siz // non-system desc if ((cs_desc & SEG_DESC_DC) == 0) { // !(data desc) - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP); } uint16_t dpl = (cs_desc & SEG_DESC_DPL) >> 45; @@ -467,7 +468,7 @@ lcall_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t call_eip, uint8_t siz // conforming if (dpl > cpl) { // dpl > cpl - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP); } } else { @@ -475,20 +476,20 @@ lcall_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t call_eip, uint8_t siz uint16_t rpl = sel & 3; if ((rpl > cpl) || (dpl != cpl)) { // rpl > cpl || dpl != cpl - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP); } } // common path for conf/non-conf if ((cs_desc & SEG_DESC_P) == 0) { // segment not present - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_NP, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_NP); } uint32_t esp = cpu->cpu_ctx.regs.esp; - stack_push_helper(cpu, cpu_ctx->regs.cs, size_mode, esp, eip); - stack_push_helper(cpu, ret_eip, size_mode, esp, eip); - set_access_flg_seg_desc_helper(cpu, cs_desc, cs_desc_addr, eip); + stack_push_helper(cpu, cpu_ctx->regs.cs, size_mode, esp); + stack_push_helper(cpu, ret_eip, size_mode, esp); + set_access_flg_seg_desc_helper(cpu, cs_desc, cs_desc_addr); write_seg_reg_helper(cpu, (sel & 0xFFFC) | cpl, read_seg_desc_base_helper(cpu, cs_desc), read_seg_desc_limit_helper(cpu, cs_desc), read_seg_desc_flags_helper(cpu, cs_desc)); cpu->cpu_ctx.regs.esp = esp; cpu_ctx->regs.eip = call_eip; // call_eip is already appropriately masked by the caller @@ -509,7 +510,7 @@ lcall_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t call_eip, uint8_t siz break; default: - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP); } sys_ty >>= 3; @@ -517,34 +518,34 @@ lcall_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t call_eip, uint8_t siz uint16_t rpl = sel & 3; if ((dpl < cpl) || (rpl > dpl)) { // dpl < cpl || rpl > dpl - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP); } if ((cs_desc & SEG_DESC_P) == 0) { // segment not present - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_NP, eip); + return 
raise_exp_helper(cpu, sel & 0xFFFC, EXP_NP); } uint32_t num_param = (cs_desc >> 32) & 0x1F; uint32_t new_eip = ((cs_desc & 0xFFFF000000000000) >> 32) | (cs_desc & 0xFFFF); uint16_t code_sel = (cs_desc & 0xFFFF0000) >> 16; if ((code_sel >> 2) == 0) { // code_sel == NULL - return raise_exp_helper(cpu, 0, EXP_GP, eip); + return raise_exp_helper(cpu, 0, EXP_GP); } addr_t code_desc_addr; uint64_t code_desc; - if (read_seg_desc_helper(cpu, code_sel, code_desc_addr, code_desc, eip)) { // read code desc pointed to by the call gate sel + if (read_seg_desc_helper(cpu, code_sel, code_desc_addr, code_desc)) { // read code desc pointed to by the call gate sel return 1; } dpl = (code_desc & SEG_DESC_DPL) >> 45; if (((((code_desc & SEG_DESC_S) >> 43) | ((code_desc & SEG_DESC_DC) >> 43)) != 3) || // !(code desc) || dpl > cpl (dpl > cpl)) { - return raise_exp_helper(cpu, code_sel & 0xFFFC, EXP_GP, eip); + return raise_exp_helper(cpu, code_sel & 0xFFFC, EXP_GP); } if ((code_desc & SEG_DESC_P) == 0) { // segment not present - return raise_exp_helper(cpu, code_sel & 0xFFFC, EXP_NP, eip); + return raise_exp_helper(cpu, code_sel & 0xFFFC, EXP_NP); } uint32_t eip_mask, esp; @@ -552,17 +553,17 @@ lcall_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t call_eip, uint8_t siz // more privileged uint16_t ss; - if (read_stack_ptr_from_tss_helper(cpu, dpl, esp, ss, eip)) { + if (read_stack_ptr_from_tss_helper(cpu, dpl, esp, ss)) { return 1; } if ((ss >> 2) == 0) { // sel == NULL - return raise_exp_helper(cpu, ss & 0xFFFC, EXP_TS, eip); + return raise_exp_helper(cpu, ss & 0xFFFC, EXP_TS); } addr_t ss_desc_addr; uint64_t ss_desc; - if (read_seg_desc_helper(cpu, ss, ss_desc_addr, ss_desc, eip)) { // read data (stack) desc pointed to by ss + if (read_seg_desc_helper(cpu, ss, ss_desc_addr, ss_desc)) { // read data (stack) desc pointed to by ss return 1; } @@ -572,11 +573,11 @@ lcall_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t call_eip, uint8_t siz uint16_t dpl_ss = (ss_desc & SEG_DESC_DPL) >> 42; // dpl(ss) == dpl(code) uint16_t rpl_ss = (ss & 3) << 5; // rpl(ss) == dpl(code) if (((((s | d) | w) | dpl_ss) | rpl_ss) ^ ((5 | (dpl << 3)) | (dpl << 5))) { - return raise_exp_helper(cpu, ss & 0xFFFC, EXP_TS, eip); + return raise_exp_helper(cpu, ss & 0xFFFC, EXP_TS); } if ((ss_desc & SEG_DESC_P) == 0) { // segment not present - return raise_exp_helper(cpu, ss & 0xFFFC, EXP_SS, eip); + return raise_exp_helper(cpu, ss & 0xFFFC, EXP_SS); } uint32_t stack_mask = ss_desc & SEG_DESC_DB ? 
0xFFFFFFFF : 0xFFFF; @@ -585,39 +586,39 @@ lcall_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t call_eip, uint8_t siz if (sys_ty) { // 32 bit push eip_mask = 0xFFFFFFFF; esp -= 4; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.ss, eip, 2); // push ss + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.ss, 2); // push ss esp -= 4; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.esp, eip, 2); // push esp + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.esp, 2); // push esp while (i >= 0) { - uint32_t param32 = mem_read_helper(cpu_ctx, cpu_ctx->regs.ss_hidden.base + ((cpu_ctx->regs.esp + i * 4) & stack_mask), eip, 2); // read param from src stack + uint32_t param32 = mem_read_helper(cpu_ctx, cpu_ctx->regs.ss_hidden.base + ((cpu_ctx->regs.esp + i * 4) & stack_mask), 2); // read param from src stack esp -= 4; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), param32, eip, 2); // push param to dst stack + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), param32, 2); // push param to dst stack --i; } esp -= 4; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.cs, eip, 2); // push cs + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.cs, 2); // push cs esp -= 4; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), ret_eip, eip, 2); // push eip + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), ret_eip, 2); // push eip } else { // 16 bit push eip_mask = 0xFFFF; esp -= 2; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.ss, eip, 2); // push ss + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.ss, 2); // push ss esp -= 2; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.esp, eip, 2); // push sp + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.esp, 2); // push sp while (i >= 0) { - uint16_t param16 = mem_read_helper(cpu_ctx, cpu_ctx->regs.ss_hidden.base + ((cpu_ctx->regs.esp + i * 2) & stack_mask), eip, 2); // read param from src stack + uint16_t param16 = mem_read_helper(cpu_ctx, cpu_ctx->regs.ss_hidden.base + ((cpu_ctx->regs.esp + i * 2) & stack_mask), 2); // read param from src stack esp -= 2; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), param16, eip, 2); // push param to dst stack + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), param16, 2); // push param to dst stack --i; } esp -= 2; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.cs, eip, 2); // push cs + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.cs, 2); // push cs esp -= 2; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), ret_eip, eip, 2); // push ip + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), ret_eip, 2); // push ip } - set_access_flg_seg_desc_helper(cpu, ss_desc, ss_desc_addr, eip); + set_access_flg_seg_desc_helper(cpu, ss_desc, ss_desc_addr); write_seg_reg_helper(cpu, (ss & 0xFFFC) | dpl, read_seg_desc_base_helper(cpu, ss_desc), read_seg_desc_limit_helper(cpu, ss_desc), read_seg_desc_flags_helper(cpu, ss_desc)); } else { @@ -629,20 +630,20 @@ lcall_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t call_eip, uint8_t siz if (sys_ty) { // 32 bit push eip_mask = 0xFFFFFFFF; esp -= 4; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.cs, eip, 0); // push cs + mem_write_helper(cpu_ctx, stack_base + (esp & 
stack_mask), cpu_ctx->regs.cs, 0); // push cs esp -= 4; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), ret_eip, eip, 0); // push eip + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), ret_eip, 0); // push eip } else { // 16 bit push eip_mask = 0xFFFF; esp -= 2; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.cs, eip, 0); // push cs + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.cs, 0); // push cs esp -= 2; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), ret_eip, eip, 0); // push ip + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), ret_eip, 0); // push ip } } - set_access_flg_seg_desc_helper(cpu, code_desc, code_desc_addr, eip); + set_access_flg_seg_desc_helper(cpu, code_desc, code_desc_addr); write_seg_reg_helper(cpu, (code_sel & 0xFFFC) | dpl, read_seg_desc_base_helper(cpu, code_desc), read_seg_desc_limit_helper(cpu, code_desc), read_seg_desc_flags_helper(cpu, code_desc)); cpu->cpu_ctx.regs.esp = esp; cpu_ctx->regs.eip = (new_eip & ~eip_mask) | (new_eip & eip_mask); @@ -652,18 +653,18 @@ lcall_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t call_eip, uint8_t siz } template -uint32_t mov_sel_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t eip) +uint32_t mov_sel_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel) { cpu_t *cpu = cpu_ctx->cpu; if constexpr (reg == SS_idx) { addr_t desc_addr; uint64_t desc; - if (check_ss_desc_priv_helper(cpu, sel, nullptr, desc_addr, desc, eip)) { + if (check_ss_desc_priv_helper(cpu, sel, nullptr, desc_addr, desc)) { return 1; } - set_access_flg_seg_desc_helper(cpu, desc, desc_addr, eip); + set_access_flg_seg_desc_helper(cpu, desc, desc_addr); write_seg_reg_helper(cpu, sel, read_seg_desc_base_helper(cpu, desc), read_seg_desc_limit_helper(cpu, desc), read_seg_desc_flags_helper(cpu, desc)); } else { @@ -674,11 +675,11 @@ uint32_t mov_sel_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t eip) addr_t desc_addr; uint64_t desc; - if (check_seg_desc_priv_helper(cpu, sel, desc_addr, desc, eip)) { + if (check_seg_desc_priv_helper(cpu, sel, desc_addr, desc)) { return 1; } - set_access_flg_seg_desc_helper(cpu, desc, desc_addr, eip); + set_access_flg_seg_desc_helper(cpu, desc, desc_addr); write_seg_reg_helper(cpu, sel /* & rpl?? 
*/, read_seg_desc_base_helper(cpu, desc), read_seg_desc_limit_helper(cpu, desc), read_seg_desc_flags_helper(cpu, desc)); } @@ -686,7 +687,7 @@ uint32_t mov_sel_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t eip) } template -void verrw_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t eip) +void verrw_helper(cpu_ctx_t *cpu_ctx, uint16_t sel) { cpu_t *cpu = cpu_ctx->cpu; @@ -697,7 +698,7 @@ void verrw_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t eip) addr_t desc_addr; uint64_t desc; - if (read_seg_desc_helper(cpu, sel, desc_addr, desc, eip)) { // gdt or ldt limit exceeded + if (read_seg_desc_helper(cpu, sel, desc_addr, desc)) { // gdt or ldt limit exceeded // NOTE: ignore possible gp exp raised by read_seg_desc_helper cpu_ctx->lazy_eflags.result |= 0x100; return; @@ -732,38 +733,38 @@ void verrw_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t eip) } uint32_t -ltr_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t eip) +ltr_helper(cpu_ctx_t *cpu_ctx, uint16_t sel) { cpu_t *cpu = cpu_ctx->cpu; if ((sel >> 2) == 0) { // sel == NULL - return raise_exp_helper(cpu, 0, EXP_GP, eip); + return raise_exp_helper(cpu, 0, EXP_GP); } addr_t desc_addr; uint64_t desc; - if (read_seg_desc_helper(cpu, sel, desc_addr, desc, eip)) { + if (read_seg_desc_helper(cpu, sel, desc_addr, desc)) { return 1; } uint8_t s = (desc & SEG_DESC_S) >> 40; uint8_t ty = (desc & SEG_DESC_TY) >> 40; if (!(((s | ty) == SEG_DESC_TSS16AV) || ((s | ty) == SEG_DESC_TSS32AV))) { // must be an available tss - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP); } if ((desc & SEG_DESC_P) == 0) { // tss not present - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_NP, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_NP); } - mem_write_helper(cpu_ctx, desc_addr, desc | SEG_DESC_BY, eip, 2); + mem_write_helper(cpu_ctx, desc_addr, desc | SEG_DESC_BY, 2); write_seg_reg_helper(cpu, sel, read_seg_desc_base_helper(cpu, desc), read_seg_desc_limit_helper(cpu, desc), read_seg_desc_flags_helper(cpu, desc)); return 0; } uint32_t -lldt_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t eip) +lldt_helper(cpu_ctx_t *cpu_ctx, uint16_t sel) { cpu_t *cpu = cpu_ctx->cpu; @@ -774,18 +775,18 @@ lldt_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t eip) addr_t desc_addr; uint64_t desc; - if (read_seg_desc_helper(cpu, sel, desc_addr, desc, eip)) { + if (read_seg_desc_helper(cpu, sel, desc_addr, desc)) { return 1; } uint8_t s = (desc & SEG_DESC_S) >> 40; uint8_t ty = (desc & SEG_DESC_TY) >> 40; if ((s | ty) != SEG_DESC_LDT) { // must be ldt type - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_GP); } if ((desc & SEG_DESC_P) == 0) { // ldt not present - return raise_exp_helper(cpu, sel & 0xFFFC, EXP_NP, eip); + return raise_exp_helper(cpu, sel & 0xFFFC, EXP_NP); } write_seg_reg_helper(cpu, sel, read_seg_desc_base_helper(cpu, desc), read_seg_desc_limit_helper(cpu, desc), read_seg_desc_flags_helper(cpu, desc)); @@ -1251,16 +1252,16 @@ msr_write_helper(cpu_ctx_t *cpu_ctx) } uint32_t -divd_helper(cpu_ctx_t *cpu_ctx, uint32_t d, uint32_t eip) +divd_helper(cpu_ctx_t *cpu_ctx, uint32_t d) { uint64_t D = (static_cast(cpu_ctx->regs.eax)) | (static_cast(cpu_ctx->regs.edx) << 32); if (d == 0) { - return raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE, eip); + return raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE); } uint64_t q = (D / d); uint64_t r = (D % d); if (q > 0xFFFFFFFF) { - return raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE, eip); + return 
raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE); } cpu_ctx->regs.eax = q; cpu_ctx->regs.edx = r; @@ -1269,16 +1270,16 @@ divd_helper(cpu_ctx_t *cpu_ctx, uint32_t d, uint32_t eip) } uint32_t -divw_helper(cpu_ctx_t *cpu_ctx, uint16_t d, uint32_t eip) +divw_helper(cpu_ctx_t *cpu_ctx, uint16_t d) { uint32_t D = (cpu_ctx->regs.eax & 0xFFFF) | ((cpu_ctx->regs.edx & 0xFFFF) << 16); if (d == 0) { - return raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE, eip); + return raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE); } uint32_t q = (D / d); uint32_t r = (D % d); if (q > 0xFFFF) { - return raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE, eip); + return raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE); } q &= 0xFFFF; r &= 0xFFFF; @@ -1289,16 +1290,16 @@ divw_helper(cpu_ctx_t *cpu_ctx, uint16_t d, uint32_t eip) } uint32_t -divb_helper(cpu_ctx_t *cpu_ctx, uint8_t d, uint32_t eip) +divb_helper(cpu_ctx_t *cpu_ctx, uint8_t d) { uint16_t D = cpu_ctx->regs.eax & 0xFFFF; if (d == 0) { - return raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE, eip); + return raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE); } uint16_t q = (D / d); uint16_t r = (D % d); if (q > 0xFF) { - return raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE, eip); + return raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE); } q &= 0xFF; r &= 0xFF; @@ -1308,17 +1309,17 @@ divb_helper(cpu_ctx_t *cpu_ctx, uint8_t d, uint32_t eip) } uint32_t -idivd_helper(cpu_ctx_t *cpu_ctx, uint32_t d, uint32_t eip) +idivd_helper(cpu_ctx_t *cpu_ctx, uint32_t d) { int64_t D = static_cast((static_cast(cpu_ctx->regs.eax)) | (static_cast(cpu_ctx->regs.edx) << 32)); int32_t d0 = static_cast(d); if (d0 == 0) { - return raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE, eip); + return raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE); } int64_t q = (D / d0); int64_t r = (D % d0); if (q != static_cast(q)) { - return raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE, eip); + return raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE); } cpu_ctx->regs.eax = q; cpu_ctx->regs.edx = r; @@ -1327,17 +1328,17 @@ idivd_helper(cpu_ctx_t *cpu_ctx, uint32_t d, uint32_t eip) } uint32_t -idivw_helper(cpu_ctx_t *cpu_ctx, uint16_t d, uint32_t eip) +idivw_helper(cpu_ctx_t *cpu_ctx, uint16_t d) { int32_t D = static_cast((cpu_ctx->regs.eax & 0xFFFF) | ((cpu_ctx->regs.edx & 0xFFFF) << 16)); int16_t d0 = static_cast(d); if (d0 == 0) { - return raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE, eip); + return raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE); } int32_t q = (D / d0); int32_t r = (D % d0); if (q != static_cast(q)) { - return raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE, eip); + return raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE); } q &= 0xFFFF; r &= 0xFFFF; @@ -1348,17 +1349,17 @@ idivw_helper(cpu_ctx_t *cpu_ctx, uint16_t d, uint32_t eip) } uint32_t -idivb_helper(cpu_ctx_t *cpu_ctx, uint8_t d, uint32_t eip) +idivb_helper(cpu_ctx_t *cpu_ctx, uint8_t d) { int16_t D = static_cast(cpu_ctx->regs.eax & 0xFFFF); int8_t d0 = static_cast(d); if (d0 == 0) { - return raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE, eip); + return raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE); } int16_t q = (D / d0); int16_t r = (D % d0); if (q != static_cast(q)) { - return raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE, eip); + return raise_exp_helper(cpu_ctx->cpu, 0, EXP_DE); } q &= 0xFF; r &= 0xFF; @@ -1401,39 +1402,45 @@ cpuid_helper(cpu_ctx_t *cpu_ctx) } } -uint32_t -hlt_helper(cpu_ctx_t *cpu_ctx) +template +void hlt_helper(cpu_ctx_t *cpu_ctx) { - uint32_t int_flg = cpu_ctx->cpu->read_int_fn(cpu_ctx); - if (int_flg & CPU_ABORT_INT) { - // abort interrupts are checked so that the client can still terminate the emulation with cpu_exit, in 
the case hw interrupts were - // masked by the guest or not sent by the client - throw lc86_exp_abort("Received abort signal, terminating the emulation", lc86_status::success); - } + while (true) { + uint32_t int_ret = cpu_do_int(cpu_ctx, cpu_ctx->cpu->read_int_fn(cpu_ctx)); + uint32_t timeout_ret = should_check_timeout ? cpu_timer_helper(cpu_ctx) : 0; + uint32_t ret = int_ret | timeout_ret; - if (((int_flg & CPU_HW_INT) | (cpu_ctx->regs.eflags & IF_MASK) | (cpu_ctx->hflags & HFLG_INHIBIT_INT)) == (CPU_HW_INT | IF_MASK)) { - cpu_ctx->exp_info.exp_data.fault_addr = 0; - cpu_ctx->exp_info.exp_data.code = 0; - cpu_ctx->exp_info.exp_data.idx = cpu_ctx->cpu->int_data.first(cpu_ctx->cpu->int_data.second); - cpu_ctx->exp_info.exp_data.eip = cpu_ctx->regs.eip; - cpu_raise_exception(cpu_ctx); - return 1; - } + if ((ret & (CPU_HW_INT | CPU_TIMEOUT_INT)) == CPU_NO_INT) { + // either nothing changed or it's not a hw int, keep looping in both cases + continue; + } - return 0; + if (ret & CPU_HW_INT) { + // hw int, exit the loop and clear the halted state + cpu_ctx->cpu->is_halted = 0; + return; + } + + // timeout, exit the loop and set the halted state + cpu_ctx->cpu->is_halted = 1; + return; + } } -template JIT_API uint32_t lret_pe_helper(cpu_ctx_t *cpu_ctx, uint8_t size_mode, uint32_t eip); -template JIT_API uint32_t lret_pe_helper(cpu_ctx_t *cpu_ctx, uint8_t size_mode, uint32_t eip); +template JIT_API uint32_t lret_pe_helper(cpu_ctx_t *cpu_ctx, uint8_t size_mode); +template JIT_API uint32_t lret_pe_helper(cpu_ctx_t *cpu_ctx, uint8_t size_mode); + +template JIT_API void verrw_helper(cpu_ctx_t *cpu_ctx, uint16_t sel); +template JIT_API void verrw_helper(cpu_ctx_t *cpu_ctx, uint16_t sel); -template JIT_API void verrw_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t eip); -template JIT_API void verrw_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t eip); +template JIT_API void hlt_helper(cpu_ctx_t *cpu_ctx); +template JIT_API void hlt_helper(cpu_ctx_t *cpu_ctx); -template JIT_API uint32_t mov_sel_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t eip); -template JIT_API uint32_t mov_sel_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t eip); -template JIT_API uint32_t mov_sel_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t eip); -template JIT_API uint32_t mov_sel_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t eip); -template JIT_API uint32_t mov_sel_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t eip); +template JIT_API uint32_t mov_sel_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel); +template JIT_API uint32_t mov_sel_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel); +template JIT_API uint32_t mov_sel_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel); +template JIT_API uint32_t mov_sel_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel); +template JIT_API uint32_t mov_sel_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel); template JIT_API uint32_t update_crN_helper<0>(cpu_ctx_t* cpu_ctx, uint32_t new_cr, uint8_t idx); template JIT_API uint32_t update_crN_helper<1>(cpu_ctx_t* cpu_ctx, uint32_t new_cr, uint8_t idx); diff --git a/lib86cpu/core/instructions.h b/lib86cpu/core/instructions.h index 1d5e672..84030d5 100644 --- a/lib86cpu/core/instructions.h +++ b/lib86cpu/core/instructions.h @@ -7,26 +7,26 @@ #include "helpers.h" -template JIT_API uint32_t lret_pe_helper(cpu_ctx_t *cpu_ctx, uint8_t size_mode, uint32_t eip); -JIT_API void iret_real_helper(cpu_ctx_t *cpu_ctx, uint8_t size_mode, uint32_t eip); -JIT_API uint32_t ljmp_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint8_t size_mode, uint32_t jmp_eip, 
uint32_t eip); -JIT_API uint32_t lcall_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t call_eip, uint8_t size_mode, uint32_t ret_eip, uint32_t eip); -template JIT_API void verrw_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t eip); -template JIT_API uint32_t mov_sel_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t eip); -JIT_API uint32_t ltr_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t eip); -JIT_API uint32_t lldt_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t eip); +template JIT_API uint32_t lret_pe_helper(cpu_ctx_t *cpu_ctx, uint8_t size_mode); +JIT_API void iret_real_helper(cpu_ctx_t *cpu_ctx, uint8_t size_mode); +JIT_API uint32_t ljmp_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint8_t size_mode, uint32_t jmp_eip); +JIT_API uint32_t lcall_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel, uint32_t call_eip, uint8_t size_mode, uint32_t ret_eip); +template JIT_API void verrw_helper(cpu_ctx_t *cpu_ctx, uint16_t sel); +template JIT_API uint32_t mov_sel_pe_helper(cpu_ctx_t *cpu_ctx, uint16_t sel); +JIT_API uint32_t ltr_helper(cpu_ctx_t *cpu_ctx, uint16_t sel); +JIT_API uint32_t lldt_helper(cpu_ctx_t *cpu_ctx, uint16_t sel); template JIT_API uint32_t update_crN_helper(cpu_ctx_t *cpu_ctx, uint32_t new_cr, uint8_t idx); JIT_API void update_drN_helper(cpu_ctx_t *cpu_ctx, uint8_t dr_idx, uint32_t new_dr); -JIT_API uint32_t divd_helper(cpu_ctx_t *cpu_ctx, uint32_t d, uint32_t eip); -JIT_API uint32_t divw_helper(cpu_ctx_t *cpu_ctx, uint16_t d, uint32_t eip); -JIT_API uint32_t divb_helper(cpu_ctx_t *cpu_ctx, uint8_t d, uint32_t eip); -JIT_API uint32_t idivd_helper(cpu_ctx_t *cpu_ctx, uint32_t d, uint32_t eip); -JIT_API uint32_t idivw_helper(cpu_ctx_t *cpu_ctx, uint16_t d, uint32_t eip); -JIT_API uint32_t idivb_helper(cpu_ctx_t *cpu_ctx, uint8_t d, uint32_t eip); +JIT_API uint32_t divd_helper(cpu_ctx_t *cpu_ctx, uint32_t d); +JIT_API uint32_t divw_helper(cpu_ctx_t *cpu_ctx, uint16_t d); +JIT_API uint32_t divb_helper(cpu_ctx_t *cpu_ctx, uint8_t d); +JIT_API uint32_t idivd_helper(cpu_ctx_t *cpu_ctx, uint32_t d); +JIT_API uint32_t idivw_helper(cpu_ctx_t *cpu_ctx, uint16_t d); +JIT_API uint32_t idivb_helper(cpu_ctx_t *cpu_ctx, uint8_t d); JIT_API void cpuid_helper(cpu_ctx_t *cpu_ctx); JIT_API void cpu_rdtsc_helper(cpu_ctx_t *cpu_ctx); JIT_API uint32_t msr_read_helper(cpu_ctx_t *cpu_ctx); JIT_API uint32_t msr_write_helper(cpu_ctx_t *cpu_ctx); -JIT_API uint32_t hlt_helper(cpu_ctx_t *cpu_ctx); -JIT_API void fxsave_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint32_t eip); -JIT_API uint32_t fxrstor_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint32_t eip); +template JIT_API void hlt_helper(cpu_ctx_t *cpu_ctx); +JIT_API void fxsave_helper(cpu_ctx_t *cpu_ctx, addr_t addr); +JIT_API uint32_t fxrstor_helper(cpu_ctx_t *cpu_ctx, addr_t addr); diff --git a/lib86cpu/core/internal.h b/lib86cpu/core/internal.h index 21b5162..a1827ba 100644 --- a/lib86cpu/core/internal.h +++ b/lib86cpu/core/internal.h @@ -13,7 +13,7 @@ template -void tc_invalidate(cpu_ctx_t * cpu_ctx, addr_t phys_addr, [[maybe_unused]] uint8_t size = 0, [[maybe_unused]] uint32_t eip = 0); +void tc_invalidate(cpu_ctx_t *cpu_ctx, addr_t phys_addr, [[maybe_unused]] uint8_t size = 0); template void tc_should_clear_cache_and_tlb(cpu_t *cpu, addr_t start, addr_t end); void tc_cache_clear(cpu_t *cpu); @@ -22,7 +22,6 @@ addr_t get_pc(cpu_ctx_t *cpu_ctx); template JIT_API translated_code_t *cpu_raise_exception(cpu_ctx_t *cpu_ctx); JIT_API uint32_t cpu_do_int(cpu_ctx_t *cpu_ctx, uint32_t int_flg); -void halt_loop(cpu_t *cpu); JIT_API void 
tlb_invalidate_(cpu_ctx_t *cpu_ctx, addr_t addr); @@ -46,7 +45,6 @@ JIT_API void tlb_invalidate_(cpu_ctx_t *cpu_ctx, addr_t addr); #define TRAMP_SHIFT 6 #define CR4_OSFXSR_SHIFT 9 #define CR0_TS_SHIFT 10 -#define INHIBIT_INT_SHIFT 14 #define CR0_MP_SHIFT 15 #define CR4_VME_SHIFT 19 #define CR4_PVI_SHIFT 20 @@ -57,27 +55,29 @@ JIT_API void tlb_invalidate_(cpu_ctx_t *cpu_ctx, addr_t addr); #define HFLG_PE_MODE (1 << PE_MODE_SHIFT) #define HFLG_CR0_EM (1 << CR0_EM_SHIFT) #define HFLG_TRAMP (1 << TRAMP_SHIFT) -#define HFLG_INHIBIT_INT (1 << INHIBIT_INT_SHIFT) #define HFLG_CR0_MP (1 << CR0_MP_SHIFT) #define HFLG_CR0_TS (1 << CR0_TS_SHIFT) #define HFLG_CR4_OSFXSR (1 << CR4_OSFXSR_SHIFT) #define HFLG_CR4_VME (1 << CR4_VME_SHIFT) #define HFLG_CR4_PVI (1 << CR4_PVI_SHIFT) -#define HFLG_CONST (HFLG_CPL | HFLG_CS32 | HFLG_SS32 | HFLG_PE_MODE | HFLG_CR0_EM | HFLG_TRAMP | HFLG_INHIBIT_INT | HFLG_CR0_MP | HFLG_CR0_TS \ +#define HFLG_CONST (HFLG_CPL | HFLG_CS32 | HFLG_SS32 | HFLG_PE_MODE | HFLG_CR0_EM | HFLG_TRAMP | HFLG_CR0_MP | HFLG_CR0_TS \ | HFLG_CR4_OSFXSR | HFLG_CR4_VME | HFLG_CR4_PVI) -#define HFLG_SAVED_MASK (HFLG_CPL | HFLG_CS32 | HFLG_SS32 | HFLG_PE_MODE | HFLG_CR0_EM | HFLG_INHIBIT_INT | HFLG_CR0_MP | HFLG_CR0_TS | HFLG_CR4_OSFXSR | HFLG_CR4_VME | HFLG_CR4_PVI) +#define HFLG_SAVED_MASK (HFLG_CPL | HFLG_CS32 | HFLG_SS32 | HFLG_PE_MODE | HFLG_CR0_EM | HFLG_CR0_MP | HFLG_CR0_TS | HFLG_CR4_OSFXSR | HFLG_CR4_VME | HFLG_CR4_PVI) // cpu interrupt flags -#define CPU_NO_INT 0 -#define CPU_HW_INT (1 << 0) -#define CPU_ABORT_INT (1 << 1) -#define CPU_A20_INT (1 << 2) -#define CPU_REGION_INT (1 << 3) -#define CPU_TIMEOUT_INT (1 << 4) // never set, only returned as a status -#define CPU_SUSPEND_INT (1 << 5) -#define CPU_HANDLER_INT (1 << 6) -#define CPU_NON_HW_INT (CPU_ABORT_INT | CPU_A20_INT | CPU_REGION_INT | CPU_SUSPEND_INT | CPU_HANDLER_INT) -#define CPU_ALL_INT (CPU_HW_INT | CPU_NON_HW_INT) +#define CPU_NO_INT 0 +#define CPU_HALT_TC_INT (1 << 0) +#define CPU_ABORT_INT (1 << 1) +#define CPU_A20_INT (1 << 2) +#define CPU_REGION_INT (1 << 3) +#define CPU_TIMEOUT_INT (1 << 4) // never set, only returned as a status +#define CPU_SUSPEND_INT (1 << 5) +#define CPU_HANDLER_INT (1 << 6) +#define CPU_MASKED_INT (1 << 7) +#define CPU_DBG_TRAP_INT (1 << 8) +#define CPU_HW_INT (1 << 9) // must use the same bit position as if flag so that it can be ANDed with it +#define CPU_NON_HW_INT (CPU_A20_INT | CPU_REGION_INT | CPU_SUSPEND_INT | CPU_HANDLER_INT | CPU_MASKED_INT | CPU_DBG_TRAP_INT | CPU_HALT_TC_INT) +#define CPU_ALL_INT (CPU_HW_INT | CPU_NON_HW_INT | CPU_ABORT_INT | CPU_TIMEOUT_INT) // mmu flags #define MMU_IS_WRITE (1 << 0) @@ -88,11 +88,9 @@ JIT_API void tlb_invalidate_(cpu_ctx_t *cpu_ctx, addr_t addr); #define DISAS_FLG_CS32 (1 << 0) #define DISAS_FLG_SS32 (1 << 1) #define DISAS_FLG_PAGE_CROSS (1 << 2) -#define DISAS_FLG_INHIBIT_INT (1 << 3) #define DISAS_FLG_PAGE_CROSS_NEXT (1 << 5) #define DISAS_FLG_PE HFLG_PE_MODE // (1 << 4) #define DISAS_FLG_FETCH_FAULT DISAS_FLG_PAGE_CROSS // (1 << 2) -#define DISAS_FLG_DBG_FAULT DISAS_FLG_PAGE_CROSS // (1 << 2) #define DISAS_FLG_ONE_INSTR CPU_DISAS_ONE // (1 << 7) // tc struct flags/offsets diff --git a/lib86cpu/core/linux/clock.cpp b/lib86cpu/core/linux/clock.cpp index a1c09cd..7426dda 100644 --- a/lib86cpu/core/linux/clock.cpp +++ b/lib86cpu/core/linux/clock.cpp @@ -42,13 +42,9 @@ cpu_timer_set_now(cpu_t *cpu) uint32_t cpu_timer_helper(cpu_ctx_t *cpu_ctx) { - // always check for interrupts first. 
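// --- [editor's sketch] ------------------------------------------------------
// The interrupt-flag constants redefined in internal.h above move CPU_HW_INT to
// bit 9, i.e. the same bit position as EFLAGS.IF, "so that it can be ANDed with
// it". The snippet below is a minimal, self-contained illustration of why the
// shared bit position is convenient: one AND answers "is a hardware interrupt
// pending *and* does the guest currently allow it?". IF_MASK is assumed to be
// (1 << 9) as in the architectural EFLAGS layout, and deliver_hw_int is a name
// invented for this sketch; the exact expression used by cpu_do_int may differ.
#include <cassert>
#include <cstdint>

#define IF_MASK    (1U << 9)   // EFLAGS.IF
#define CPU_HW_INT (1U << 9)   // deliberately the same bit as IF
static_assert(CPU_HW_INT == IF_MASK, "the trick relies on the bits lining up");

static bool deliver_hw_int(uint32_t int_flg, uint32_t eflags)
{
    // pending hardware interrupt AND interrupts enabled, in a single test
    return (int_flg & eflags & IF_MASK) != 0;
}

int main()
{
    assert(deliver_hw_int(CPU_HW_INT, IF_MASK));  // pending, IF set   -> deliver
    assert(!deliver_hw_int(CPU_HW_INT, 0));       // pending, IF clear -> hold
    assert(!deliver_hw_int(0, IF_MASK));          // nothing pending   -> hold
    return 0;
}
// ----------------------------------------------------------------------------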
Otherwise, if the cpu consistently timeouts at every code block, it will never check for interrupts - if (uint32_t ret = cpu_do_int(cpu_ctx, cpu_ctx->cpu->read_int_fn(cpu_ctx))) { - return ret; - } - uint64_t elapsed_us = get_current_time() - cpu_ctx->cpu->timer.last_time; if (elapsed_us >= cpu_ctx->cpu->timer.timeout_time) { + cpu_ctx->cpu->exit_requested = true; return CPU_TIMEOUT_INT; } diff --git a/lib86cpu/core/memory_management.cpp b/lib86cpu/core/memory_management.cpp index 44a03d4..a1ee820 100644 --- a/lib86cpu/core/memory_management.cpp +++ b/lib86cpu/core/memory_management.cpp @@ -247,7 +247,7 @@ check_page_privilege(cpu_t *cpu, uint8_t pde_priv, uint8_t pte_priv) template static inline void -mmu_raise_page_fault(cpu_t *cpu, addr_t addr, uint32_t eip, disas_ctx_t *disas_ctx, uint8_t err_code, uint8_t is_write, uint8_t cpu_lv) +mmu_raise_page_fault(cpu_t *cpu, addr_t addr, disas_ctx_t *disas_ctx, uint8_t err_code, uint8_t is_write, uint8_t cpu_lv) { // NOTE: the u/s bit of the error code should reflect the actual cpl even if the memory access is privileged if constexpr (raise_host_exp) { @@ -255,7 +255,6 @@ mmu_raise_page_fault(cpu_t *cpu, addr_t addr, uint32_t eip, disas_ctx_t *disas_c cpu->cpu_ctx.exp_info.exp_data.fault_addr = addr; cpu->cpu_ctx.exp_info.exp_data.code = err_code | (is_write << 1) | cpu_lv; cpu->cpu_ctx.exp_info.exp_data.idx = EXP_PF; - cpu->cpu_ctx.exp_info.exp_data.eip = eip; throw host_exp_t::pf_exp; } else { @@ -263,13 +262,12 @@ mmu_raise_page_fault(cpu_t *cpu, addr_t addr, uint32_t eip, disas_ctx_t *disas_c disas_ctx->exp_data.fault_addr = addr; disas_ctx->exp_data.code = err_code | (is_write << 1) | cpu_lv; disas_ctx->exp_data.idx = EXP_PF; - disas_ctx->exp_data.eip = eip; } } // NOTE: flags: bit 0 -> is_write, bit 1 -> is_priv, bit 4 -> set_code template -addr_t mmu_translate_addr(cpu_t *cpu, addr_t addr, uint32_t flags, uint32_t eip, disas_ctx_t *disas_ctx = nullptr) +addr_t mmu_translate_addr(cpu_t *cpu, addr_t addr, uint32_t flags, disas_ctx_t *disas_ctx = nullptr) { uint32_t is_write = flags & MMU_IS_WRITE; uint32_t set_code = flags & MMU_SET_CODE; @@ -293,7 +291,7 @@ addr_t mmu_translate_addr(cpu_t *cpu, addr_t addr, uint32_t flags, uint32_t eip, uint32_t pde = as_memory_dispatch_read(cpu, pde_addr, pde_region); if (!(pde & PTE_PRESENT)) { - mmu_raise_page_fault(cpu, addr, eip, disas_ctx, err_code, is_write, cpu_lv); + mmu_raise_page_fault(cpu, addr, disas_ctx, err_code, is_write, cpu_lv); return 0; } @@ -320,7 +318,7 @@ addr_t mmu_translate_addr(cpu_t *cpu, addr_t addr, uint32_t flags, uint32_t eip, } } err_code = 1; - mmu_raise_page_fault(cpu, addr, eip, disas_ctx, err_code, is_write, cpu_lv); + mmu_raise_page_fault(cpu, addr, disas_ctx, err_code, is_write, cpu_lv); return 0; } @@ -330,7 +328,7 @@ addr_t mmu_translate_addr(cpu_t *cpu, addr_t addr, uint32_t flags, uint32_t eip, uint32_t pte = as_memory_dispatch_read(cpu, pte_addr, pte_region); if (!(pte & PTE_PRESENT)) { - mmu_raise_page_fault(cpu, addr, eip, disas_ctx, err_code, is_write, cpu_lv); + mmu_raise_page_fault(cpu, addr, disas_ctx, err_code, is_write, cpu_lv); return 0; } @@ -362,7 +360,7 @@ addr_t mmu_translate_addr(cpu_t *cpu, addr_t addr, uint32_t flags, uint32_t eip, } err_code = 1; - mmu_raise_page_fault(cpu, addr, eip, disas_ctx, err_code, is_write, cpu_lv); + mmu_raise_page_fault(cpu, addr, disas_ctx, err_code, is_write, cpu_lv); return 0; } } @@ -370,13 +368,13 @@ addr_t mmu_translate_addr(cpu_t *cpu, addr_t addr, uint32_t flags, uint32_t eip, // These functions below only get 
the address of a single byte and thus do not need to check for a page boundary crossing. They return a corrected // physical address taking into account memory aliasing and region start offset addr_t -get_read_addr_slow(cpu_t* cpu, addr_t addr, uint8_t is_priv, uint32_t eip) +get_read_addr_slow(cpu_t* cpu, addr_t addr, uint8_t is_priv) { - return mmu_translate_addr(cpu, addr, is_priv, eip); + return mmu_translate_addr(cpu, addr, is_priv); } addr_t -get_read_addr(cpu_t *cpu, addr_t addr, uint8_t is_priv, uint32_t eip) +get_read_addr(cpu_t *cpu, addr_t addr, uint8_t is_priv) { uint32_t idx = (addr >> PAGE_SHIFT) & DTLB_IDX_MASK; uint64_t mem_access = tlb_access[0][(cpu->cpu_ctx.hflags & HFLG_CPL) >> is_priv]; @@ -388,19 +386,19 @@ get_read_addr(cpu_t *cpu, addr_t addr, uint8_t is_priv, uint32_t eip) } } - return get_read_addr_slow(cpu, addr, is_priv, eip); + return get_read_addr_slow(cpu, addr, is_priv); } addr_t -get_write_addr_slow(cpu_t* cpu, addr_t addr, uint8_t is_priv, uint32_t eip, bool* is_code) +get_write_addr_slow(cpu_t* cpu, addr_t addr, uint8_t is_priv, bool* is_code) { - addr_t phys_addr = mmu_translate_addr(cpu, addr, MMU_IS_WRITE | is_priv, eip); + addr_t phys_addr = mmu_translate_addr(cpu, addr, MMU_IS_WRITE | is_priv); *is_code = cpu->smc[phys_addr >> PAGE_SHIFT]; return phys_addr; } addr_t -get_write_addr(cpu_t *cpu, addr_t addr, uint8_t is_priv, uint32_t eip, bool *is_code) +get_write_addr(cpu_t *cpu, addr_t addr, uint8_t is_priv, bool *is_code) { // this also needs to check for the dirty flag, to catch the case where the first access to the page is a read and then a write happens, so that // we give the mmu the chance to set the dirty flag in the pte @@ -413,7 +411,7 @@ get_write_addr(cpu_t *cpu, addr_t addr, uint8_t is_priv, uint32_t eip, bool *is_ if (((cpu->dtlb[idx][i].entry & mem_access) ^ tag) == 0) { if (!(cpu->dtlb[idx][i].entry & TLB_DIRTY)) { cpu->dtlb[idx][i].entry |= TLB_DIRTY; - mmu_translate_addr(cpu, addr, MMU_IS_WRITE | is_priv, eip); + mmu_translate_addr(cpu, addr, MMU_IS_WRITE | is_priv); } addr_t phys_addr = (cpu->dtlb[idx][i].entry & ~PAGE_MASK) | (addr & PAGE_MASK); *is_code = cpu->smc[phys_addr >> PAGE_SHIFT]; @@ -421,11 +419,11 @@ get_write_addr(cpu_t *cpu, addr_t addr, uint8_t is_priv, uint32_t eip, bool *is_ } } - return get_write_addr_slow(cpu, addr, MMU_IS_WRITE | is_priv, eip, is_code); + return get_write_addr_slow(cpu, addr, MMU_IS_WRITE | is_priv, is_code); } addr_t -get_code_addr(cpu_t *cpu, addr_t addr, uint32_t eip) +get_code_addr(cpu_t *cpu, addr_t addr) { // this is only used for ram fetching, so we don't need to check for privileged accesses @@ -439,11 +437,11 @@ get_code_addr(cpu_t *cpu, addr_t addr, uint32_t eip) } } - return mmu_translate_addr(cpu, addr, MMU_SET_CODE, eip); + return mmu_translate_addr(cpu, addr, MMU_SET_CODE); } template -addr_t get_code_addr(cpu_t *cpu, addr_t addr, uint32_t eip, disas_ctx_t *disas_ctx) +addr_t get_code_addr(cpu_t *cpu, addr_t addr, disas_ctx_t *disas_ctx) { // overloaded get_code_addr that does not throw host exceptions, used in cpu_translate and by the debugger // NOTE: the debugger should not set the smc, since it doesn't execute the instructions @@ -458,7 +456,7 @@ addr_t get_code_addr(cpu_t *cpu, addr_t addr, uint32_t eip, disas_ctx_t *disas_c } } - return mmu_translate_addr(cpu, addr, set_smc ? MMU_SET_CODE : 0, eip, disas_ctx); + return mmu_translate_addr(cpu, addr, set_smc ? 
MMU_SET_CODE : 0, disas_ctx); } uint64_t @@ -496,7 +494,7 @@ ram_fetch(cpu_t *cpu, disas_ctx_t *disas_ctx, uint8_t *buffer) // NOTE: annoyingly, this check is already done in cpu_main_loop. If that raises a debug exception, we won't even reach here, // and if it doesn't this check is useless. Perhaps find a way to avoid redoing the check here. Note that this can be skipped only the first // time this is called by decode_instr! - cpu_check_data_watchpoints(cpu, disas_ctx->virt_pc, 1, DR7_TYPE_INSTR, disas_ctx->virt_pc - cpu->cpu_ctx.regs.cs_hidden.base); + cpu_check_data_watchpoints(cpu, disas_ctx->virt_pc, 1, DR7_TYPE_INSTR); if ((disas_ctx->virt_pc & ~PAGE_MASK) != ((disas_ctx->virt_pc + X86_MAX_INSTR_LENGTH - 1) & ~PAGE_MASK)) { size_t bytes_to_read, bytes_in_first_page; @@ -508,7 +506,7 @@ ram_fetch(cpu_t *cpu, disas_ctx_t *disas_ctx, uint8_t *buffer) return; } - addr_t addr = get_code_addr(cpu, disas_ctx->virt_pc + bytes_in_first_page, disas_ctx->virt_pc - cpu->cpu_ctx.regs.cs_hidden.base, disas_ctx); + addr_t addr = get_code_addr(cpu, disas_ctx->virt_pc + bytes_in_first_page, disas_ctx); if (disas_ctx->exp_data.idx == EXP_PF) { // a page fault will be raised when fetching from the second page disas_ctx->instr_buff_size = bytes_in_first_page; @@ -527,7 +525,7 @@ ram_fetch(cpu_t *cpu, disas_ctx_t *disas_ctx, uint8_t *buffer) // memory read helper invoked by the jitted code template -T mem_read_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint32_t eip, uint8_t is_priv) +T mem_read_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint8_t is_priv) { uint32_t page_idx1 = addr & ~PAGE_MASK; uint32_t page_idx2 = (addr + sizeof(T) - 1) & ~PAGE_MASK; @@ -541,7 +539,7 @@ T mem_read_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint32_t eip, uint8_t is_priv // reads that cross pages always result in tlb misses for (unsigned i = 0; i < DTLB_NUM_LINES; ++i) { if ((((cpu_ctx->cpu->dtlb[idx][i].entry & mem_access) | page_idx1) ^ tag) == 0) { - cpu_check_data_watchpoints(cpu_ctx->cpu, addr, sizeof(T), DR7_TYPE_DATA_RW, eip); + cpu_check_data_watchpoints(cpu_ctx->cpu, addr, sizeof(T), DR7_TYPE_DATA_RW); tlb_t *tlb = &cpu_ctx->cpu->dtlb[idx][i]; addr_t phys_addr = (tlb->entry & ~PAGE_MASK) | (addr & PAGE_MASK); @@ -589,12 +587,12 @@ T mem_read_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint32_t eip, uint8_t is_priv } // tlb miss - return mem_read_slow(cpu_ctx->cpu, addr, eip, is_priv); + return mem_read_slow(cpu_ctx->cpu, addr, is_priv); } // memory write helper invoked by the jitted code template -void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, T val, uint32_t eip, uint8_t is_priv) +void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, T val, uint8_t is_priv) { // if dont_write is true, then no write will happen and we only check if the access would fault. 
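// --- [editor's sketch] ------------------------------------------------------
// Stand-alone illustration of the dont_write probe described in the comment
// above: translate the first and the last byte of the would-be store so that a
// page fault, if any, is raised while guest memory is still untouched. The
// names toy_translate_for_write and probe_write are invented for this sketch;
// the real helper goes through mmu_translate_addr and the TLB as shown in this
// file.
#include <cstddef>
#include <cstdint>
#include <stdexcept>

using addr_t = uint32_t;
static constexpr addr_t PAGE_MASK = 0xFFF;

// Toy MMU: pretend the page at 0x2000 is not writable.
static addr_t toy_translate_for_write(addr_t addr)
{
    if ((addr & ~PAGE_MASK) == 0x2000) {
        throw std::runtime_error("#PF: page not writable");
    }
    return addr; // identity mapping is enough for the example
}

// Probe an N-byte store without performing it. Translating both end bytes
// covers both pages of a straddling access.
static void probe_write(addr_t addr, std::size_t size)
{
    toy_translate_for_write(addr);
    if ((addr & ~PAGE_MASK) != ((addr + size - 1) & ~PAGE_MASK)) {
        toy_translate_for_write(addr + size - 1);
    }
}

int main()
{
    probe_write(0x1FF0, 4);     // entirely inside a writable page: no fault
    try {
        probe_write(0x1FFE, 4); // straddles into the read-only page: faults, nothing written
    }
    catch (const std::runtime_error &) {}
    return 0;
}
// ----------------------------------------------------------------------------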
This is used by the ENTER instruction to check // if a stack push with the final value of (e)sp will cause a page fault @@ -616,13 +614,13 @@ void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, T val, uint32_t eip, uint return; } - cpu_check_data_watchpoints(cpu_ctx->cpu, addr, sizeof(T), DR7_TYPE_DATA_W, eip); + cpu_check_data_watchpoints(cpu_ctx->cpu, addr, sizeof(T), DR7_TYPE_DATA_W); tlb_t *tlb = &cpu_ctx->cpu->dtlb[idx][i]; addr_t phys_addr = (tlb->entry & ~PAGE_MASK) | (addr & PAGE_MASK); if (cpu_ctx->cpu->smc[phys_addr >> PAGE_SHIFT]) { - tc_invalidate(cpu_ctx, phys_addr, sizeof(T), eip); + tc_invalidate(cpu_ctx, phys_addr, sizeof(T)); } // tlb hit, check the region type @@ -674,63 +672,63 @@ void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, T val, uint32_t eip, uint if constexpr (dont_write) { // If the tlb misses, then the access might still be valid if the mmu can translate the address if ((sizeof(T) != 1) && ((addr & ~PAGE_MASK) != ((addr + sizeof(T) - 1) & ~PAGE_MASK))) { - volatile addr_t phys_addr_s = mmu_translate_addr(cpu_ctx->cpu, addr, MMU_IS_WRITE | is_priv, eip); - volatile addr_t phys_addr_e = mmu_translate_addr(cpu_ctx->cpu, addr + sizeof(T) - 1, MMU_IS_WRITE | is_priv, eip); + volatile addr_t phys_addr_s = mmu_translate_addr(cpu_ctx->cpu, addr, MMU_IS_WRITE | is_priv); + volatile addr_t phys_addr_e = mmu_translate_addr(cpu_ctx->cpu, addr + sizeof(T) - 1, MMU_IS_WRITE | is_priv); } else { - volatile addr_t phys_addr = mmu_translate_addr(cpu_ctx->cpu, addr, MMU_IS_WRITE | is_priv, eip); + volatile addr_t phys_addr = mmu_translate_addr(cpu_ctx->cpu, addr, MMU_IS_WRITE | is_priv); } } else { // tlb miss - mem_write_slow(cpu_ctx->cpu, addr, val, eip, is_priv); + mem_write_slow(cpu_ctx->cpu, addr, val, is_priv); } } // io read helper invoked by the jitted code template -T io_read_helper(cpu_ctx_t *cpu_ctx, port_t port, uint32_t eip) +T io_read_helper(cpu_ctx_t *cpu_ctx, port_t port) { - cpu_check_io_watchpoints(cpu_ctx->cpu, port, sizeof(T), DR7_TYPE_IO_RW, eip); + cpu_check_io_watchpoints(cpu_ctx->cpu, port, sizeof(T), DR7_TYPE_IO_RW); return io_read(cpu_ctx->cpu, port); } // io write helper invoked by the jitted code template -void io_write_helper(cpu_ctx_t *cpu_ctx, port_t port, T val, uint32_t eip) +void io_write_helper(cpu_ctx_t *cpu_ctx, port_t port, T val) { - cpu_check_io_watchpoints(cpu_ctx->cpu, port, sizeof(T), DR7_TYPE_IO_RW, eip); + cpu_check_io_watchpoints(cpu_ctx->cpu, port, sizeof(T), DR7_TYPE_IO_RW); io_write(cpu_ctx->cpu, port, val); } -template JIT_API uint8_t mem_read_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint32_t eip, uint8_t is_priv); -template JIT_API uint16_t mem_read_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint32_t eip, uint8_t is_priv); -template JIT_API uint32_t mem_read_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint32_t eip, uint8_t is_priv); -template JIT_API uint64_t mem_read_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint32_t eip, uint8_t is_priv); -template JIT_API uint80_t mem_read_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint32_t eip, uint8_t is_priv); -template JIT_API uint128_t mem_read_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint32_t eip, uint8_t is_priv); -template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint8_t val, uint32_t eip, uint8_t is_priv); -template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint16_t val, uint32_t eip, uint8_t is_priv); -template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint32_t val, uint32_t eip, uint8_t is_priv); -template JIT_API void 
mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint64_t val, uint32_t eip, uint8_t is_priv); -template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint80_t val, uint32_t eip, uint8_t is_priv); -template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint128_t val, uint32_t eip, uint8_t is_priv); -template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint8_t val, uint32_t eip, uint8_t is_priv); -template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint16_t val, uint32_t eip, uint8_t is_priv); -template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint32_t val, uint32_t eip, uint8_t is_priv); -template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint64_t val, uint32_t eip, uint8_t is_priv); -template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint80_t val, uint32_t eip, uint8_t is_priv); -template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint128_t val, uint32_t eip, uint8_t is_priv); - -template JIT_API uint8_t io_read_helper(cpu_ctx_t *cpu_ctx, port_t port, uint32_t eip); -template JIT_API uint16_t io_read_helper(cpu_ctx_t *cpu_ctx, port_t port, uint32_t eip); -template JIT_API uint32_t io_read_helper(cpu_ctx_t *cpu_ctx, port_t port, uint32_t eip); -template JIT_API void io_write_helper(cpu_ctx_t *cpu_ctx, port_t port, uint8_t val, uint32_t eip); -template JIT_API void io_write_helper(cpu_ctx_t *cpu_ctx, port_t port, uint16_t val, uint32_t eip); -template JIT_API void io_write_helper(cpu_ctx_t *cpu_ctx, port_t port, uint32_t val, uint32_t eip); - -template addr_t get_code_addr(cpu_t *cpu, addr_t addr, uint32_t eip, disas_ctx_t *disas_ctx); -template addr_t get_code_addr(cpu_t *cpu, addr_t addr, uint32_t eip, disas_ctx_t *disas_ctx); +template JIT_API uint8_t mem_read_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint8_t is_priv); +template JIT_API uint16_t mem_read_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint8_t is_priv); +template JIT_API uint32_t mem_read_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint8_t is_priv); +template JIT_API uint64_t mem_read_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint8_t is_priv); +template JIT_API uint80_t mem_read_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint8_t is_priv); +template JIT_API uint128_t mem_read_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint8_t is_priv); +template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint8_t val, uint8_t is_priv); +template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint16_t val, uint8_t is_priv); +template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint32_t val, uint8_t is_priv); +template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint64_t val, uint8_t is_priv); +template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint80_t val, uint8_t is_priv); +template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint128_t val, uint8_t is_priv); +template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint8_t val, uint8_t is_priv); +template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint16_t val, uint8_t is_priv); +template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint32_t val, uint8_t is_priv); +template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint64_t val, uint8_t is_priv); +template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint80_t val, uint8_t is_priv); +template JIT_API void 
mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint128_t val, uint8_t is_priv); + +template JIT_API uint8_t io_read_helper(cpu_ctx_t *cpu_ctx, port_t port); +template JIT_API uint16_t io_read_helper(cpu_ctx_t *cpu_ctx, port_t port); +template JIT_API uint32_t io_read_helper(cpu_ctx_t *cpu_ctx, port_t port); +template JIT_API void io_write_helper(cpu_ctx_t *cpu_ctx, port_t port, uint8_t val); +template JIT_API void io_write_helper(cpu_ctx_t *cpu_ctx, port_t port, uint16_t val); +template JIT_API void io_write_helper(cpu_ctx_t *cpu_ctx, port_t port, uint32_t val); + +template addr_t get_code_addr(cpu_t *cpu, addr_t addr, disas_ctx_t *disas_ctx); +template addr_t get_code_addr(cpu_t *cpu, addr_t addr, disas_ctx_t *disas_ctx); template void tlb_flush(cpu_t *cpu); template void tlb_flush(cpu_t *cpu); diff --git a/lib86cpu/core/memory_management.h b/lib86cpu/core/memory_management.h index b90a498..2e0ed17 100644 --- a/lib86cpu/core/memory_management.h +++ b/lib86cpu/core/memory_management.h @@ -20,20 +20,20 @@ while (region->aliased_region) { \ template void tlb_flush(cpu_t * cpu); inline void *get_rom_host_ptr(const memory_region_t *rom, addr_t addr); inline void *get_ram_host_ptr(cpu_t *cpu, const memory_region_t *ram, addr_t addr); -addr_t get_read_addr_slow(cpu_t * cpu, addr_t addr, uint8_t is_priv, uint32_t eip); -addr_t get_write_addr_slow(cpu_t * cpu, addr_t addr, uint8_t is_priv, uint32_t eip, bool* is_code); -addr_t get_read_addr(cpu_t *cpu, addr_t addr, uint8_t is_priv, uint32_t eip); -addr_t get_write_addr(cpu_t *cpu, addr_t addr, uint8_t is_priv, uint32_t eip, bool *is_code); -addr_t get_code_addr(cpu_t *cpu, addr_t addr, uint32_t eip); -template addr_t get_code_addr(cpu_t * cpu, addr_t addr, uint32_t eip, disas_ctx_t *disas_ctx); +addr_t get_read_addr_slow(cpu_t * cpu, addr_t addr, uint8_t is_priv); +addr_t get_write_addr_slow(cpu_t * cpu, addr_t addr, uint8_t is_priv, bool* is_code); +addr_t get_read_addr(cpu_t *cpu, addr_t addr, uint8_t is_priv); +addr_t get_write_addr(cpu_t *cpu, addr_t addr, uint8_t is_priv, bool *is_code); +addr_t get_code_addr(cpu_t *cpu, addr_t addr); +template addr_t get_code_addr(cpu_t * cpu, addr_t addr, disas_ctx_t *disas_ctx); template T ram_read(cpu_t *cpu, void *ram_ptr); template void ram_write(cpu_t *cpu, void *ram_ptr, T value); void ram_fetch(cpu_t *cpu, disas_ctx_t *disas_ctx, uint8_t *buffer); uint64_t as_ram_dispatch_read(cpu_t *cpu, addr_t addr, uint64_t size, const memory_region_t *region, uint8_t *buffer); -template JIT_API T mem_read_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint32_t eip, uint8_t is_priv); -template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, T val, uint32_t eip, uint8_t is_priv); -template JIT_API T io_read_helper(cpu_ctx_t * cpu_ctx, port_t port, uint32_t eip); -template JIT_API void io_write_helper(cpu_ctx_t * cpu_ctx, port_t port, T val, uint32_t eip); +template JIT_API T mem_read_helper(cpu_ctx_t *cpu_ctx, addr_t addr, uint8_t is_priv); +template JIT_API void mem_write_helper(cpu_ctx_t *cpu_ctx, addr_t addr, T val, uint8_t is_priv); +template JIT_API T io_read_helper(cpu_ctx_t * cpu_ctx, port_t port); +template JIT_API void io_write_helper(cpu_ctx_t * cpu_ctx, port_t port, T val); inline constexpr uint64_t tlb_access[2][4] = { { TLB_SUP_READ, TLB_SUP_READ, TLB_SUP_READ, TLB_USER_READ }, @@ -248,14 +248,14 @@ void ram_write(cpu_t *cpu, void *ram_ptr, T value) * memory accessors */ template -T mem_read_slow(cpu_t *cpu, addr_t addr, uint32_t eip, uint8_t is_priv) +T mem_read_slow(cpu_t *cpu, addr_t 
addr, uint8_t is_priv) { if ((sizeof(T) != 1) && ((addr & ~PAGE_MASK) != ((addr + sizeof(T) - 1) & ~PAGE_MASK))) { T value = 0; uint8_t i = 0; - addr_t phys_addr_s = get_read_addr_slow(cpu, addr, is_priv, eip); - addr_t phys_addr_e = get_read_addr_slow(cpu, addr + sizeof(T) - 1, is_priv, eip); - cpu_check_data_watchpoints(cpu, addr, sizeof(T), DR7_TYPE_DATA_RW, eip); + addr_t phys_addr_s = get_read_addr_slow(cpu, addr, is_priv); + addr_t phys_addr_e = get_read_addr_slow(cpu, addr + sizeof(T) - 1, is_priv); + cpu_check_data_watchpoints(cpu, addr, sizeof(T), DR7_TYPE_DATA_RW); addr_t phys_addr = phys_addr_s; uint8_t bytes_in_page = ((addr + sizeof(T) - 1) & ~PAGE_MASK) - addr; while (i < sizeof(T)) { @@ -270,28 +270,28 @@ T mem_read_slow(cpu_t *cpu, addr_t addr, uint32_t eip, uint8_t is_priv) return value; } else { - addr_t phys_addr = get_read_addr_slow(cpu, addr, is_priv, eip); - cpu_check_data_watchpoints(cpu, addr, sizeof(T), DR7_TYPE_DATA_RW, eip); + addr_t phys_addr = get_read_addr_slow(cpu, addr, is_priv); + cpu_check_data_watchpoints(cpu, addr, sizeof(T), DR7_TYPE_DATA_RW); return as_memory_dispatch_read(cpu, phys_addr, as_memory_search_addr(cpu, phys_addr)); } } template -void mem_write_slow(cpu_t *cpu, addr_t addr, T value, uint32_t eip, uint8_t is_priv) +void mem_write_slow(cpu_t *cpu, addr_t addr, T value, uint8_t is_priv) { if ((sizeof(T) != 1) && ((addr & ~PAGE_MASK) != ((addr + sizeof(T) - 1) & ~PAGE_MASK))) { bool is_code1, is_code2; uint8_t i = 0; - addr_t phys_addr_s = get_write_addr_slow(cpu, addr, is_priv, eip, &is_code1); - addr_t phys_addr_e = get_write_addr_slow(cpu, addr + sizeof(T) - 1, is_priv, eip, &is_code2); - cpu_check_data_watchpoints(cpu, addr, sizeof(T), DR7_TYPE_DATA_W, eip); + addr_t phys_addr_s = get_write_addr_slow(cpu, addr, is_priv, &is_code1); + addr_t phys_addr_e = get_write_addr_slow(cpu, addr + sizeof(T) - 1, is_priv, &is_code2); + cpu_check_data_watchpoints(cpu, addr, sizeof(T), DR7_TYPE_DATA_W); addr_t phys_addr = phys_addr_s; uint8_t bytes_in_page = ((addr + sizeof(T) - 1) & ~PAGE_MASK) - addr; if (is_code1) { - tc_invalidate(&cpu->cpu_ctx, phys_addr_s, bytes_in_page, eip); + tc_invalidate(&cpu->cpu_ctx, phys_addr_s, bytes_in_page); } if (is_code2) { - tc_invalidate(&cpu->cpu_ctx, phys_addr_e, sizeof(T) - bytes_in_page, eip); + tc_invalidate(&cpu->cpu_ctx, phys_addr_e, sizeof(T) - bytes_in_page); } while (i < sizeof(T)) { const memory_region_t *region = as_memory_search_addr(cpu, phys_addr); @@ -305,10 +305,10 @@ void mem_write_slow(cpu_t *cpu, addr_t addr, T value, uint32_t eip, uint8_t is_p } else { bool is_code; - addr_t phys_addr = get_write_addr_slow(cpu, addr, is_priv, eip, &is_code); - cpu_check_data_watchpoints(cpu, addr, sizeof(T), DR7_TYPE_DATA_W, eip); + addr_t phys_addr = get_write_addr_slow(cpu, addr, is_priv, &is_code); + cpu_check_data_watchpoints(cpu, addr, sizeof(T), DR7_TYPE_DATA_W); if (is_code) { - tc_invalidate(&cpu->cpu_ctx, phys_addr, sizeof(T), eip); + tc_invalidate(&cpu->cpu_ctx, phys_addr, sizeof(T)); } as_memory_dispatch_write(cpu, phys_addr, value, as_memory_search_addr(cpu, phys_addr)); } diff --git a/lib86cpu/core/translate.cpp b/lib86cpu/core/translate.cpp index 2f2af98..f553026 100644 --- a/lib86cpu/core/translate.cpp +++ b/lib86cpu/core/translate.cpp @@ -72,7 +72,6 @@ check_dbl_exp(cpu_ctx_t *cpu_ctx) if ((old_contributory && curr_contributory) || (cpu_ctx->exp_info.old_exp == EXP_PF && (curr_contributory || (idx == EXP_PF)))) { cpu_ctx->exp_info.exp_data.code = 0; - cpu_ctx->exp_info.exp_data.eip = 0; idx = 
EXP_DF; } @@ -96,7 +95,7 @@ translated_code_t *cpu_raise_exception(cpu_ctx_t *cpu_ctx) uint32_t fault_addr = cpu_ctx->exp_info.exp_data.fault_addr; uint16_t code = cpu_ctx->exp_info.exp_data.code; uint16_t idx = cpu_ctx->exp_info.exp_data.idx; - uint32_t eip = cpu_ctx->exp_info.exp_data.eip; + uint32_t eip = cpu_ctx->regs.eip; uint32_t old_eflags = read_eflags(cpu); if (cpu_ctx->hflags & HFLG_PE_MODE) { @@ -113,8 +112,8 @@ translated_code_t *cpu_raise_exception(cpu_ctx_t *cpu_ctx) } if ((is_intn == 2) && (((cpu_ctx->regs.eflags & VM_MASK) | (cpu_ctx->hflags & HFLG_CR4_VME)) == (VM_MASK | HFLG_CR4_VME))) { - uint16_t offset = mem_read_helper(cpu_ctx, cpu_ctx->regs.tr_hidden.base + 102, eip, 0); - uint8_t io_int_table_byte = mem_read_helper(cpu_ctx, cpu_ctx->regs.tr_hidden.base + offset - 32 + idx / 8, eip, 0); + uint16_t offset = mem_read_helper(cpu_ctx, cpu_ctx->regs.tr_hidden.base + 102, 0); + uint8_t io_int_table_byte = mem_read_helper(cpu_ctx, cpu_ctx->regs.tr_hidden.base + offset - 32 + idx / 8, 0); if ((io_int_table_byte & (1 << (idx % 8))) == 0) { if (iopl < 3) { old_eflags = ((old_eflags & VIF_MASK) >> 10) | (old_eflags & ~(IF_MASK | IOPL_MASK)) | IOPL_MASK; @@ -123,12 +122,12 @@ translated_code_t *cpu_raise_exception(cpu_ctx_t *cpu_ctx) uint32_t stack_mask = cpu_ctx->hflags & HFLG_SS32 ? 0xFFFFFFFF : 0xFFFF; uint32_t stack_base = cpu_ctx->regs.ss_hidden.base; esp -= 2; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), old_eflags, eip, 0); + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), old_eflags, 0); esp -= 2; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.cs, eip, 0); + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.cs, 0); esp -= 2; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), eip, eip, 0); - uint32_t vec_entry = mem_read_helper(cpu_ctx, idx * 4, eip, 0); + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), eip, 0); + uint32_t vec_entry = mem_read_helper(cpu_ctx, idx * 4, 0); uint32_t eflags_mask = TF_MASK; cpu_ctx->regs.esp = (cpu_ctx->regs.esp & ~stack_mask) | (esp & stack_mask); cpu_ctx->regs.cs = vec_entry >> 16; @@ -138,8 +137,6 @@ translated_code_t *cpu_raise_exception(cpu_ctx_t *cpu_ctx) eflags_mask |= (IF_MASK | VIF_MASK); } cpu_ctx->regs.eflags &= ~eflags_mask; - cpu_ctx->hflags &= ~HFLG_INHIBIT_INT; - cpu_ctx->cpu->cpu_flags &= ~CPU_INHIBIT_DBG_TRAP; cpu_ctx->exp_info.old_exp = EXP_INVALID; if (idx == EXP_PF) { cpu_ctx->regs.cr2 = fault_addr; @@ -165,7 +162,7 @@ translated_code_t *cpu_raise_exception(cpu_ctx_t *cpu_ctx) return cpu_raise_exception(cpu_ctx); } - uint64_t desc = mem_read_helper(cpu_ctx, cpu_ctx->regs.idtr_hidden.base + idx * 8, eip, 2); + uint64_t desc = mem_read_helper(cpu_ctx, cpu_ctx->regs.idtr_hidden.base + idx * 8, 2); uint16_t type = (desc >> 40) & 0x1F; uint32_t new_eip, eflags; switch (type) @@ -215,7 +212,7 @@ translated_code_t *cpu_raise_exception(cpu_ctx_t *cpu_ctx) addr_t code_desc_addr; uint64_t code_desc; - if (read_seg_desc_helper(cpu, sel, code_desc_addr, code_desc, eip)) { + if (read_seg_desc_helper(cpu, sel, code_desc_addr, code_desc)) { cpu_ctx->exp_info.exp_data.code += ext_flg; return cpu_raise_exception(cpu_ctx); } @@ -237,7 +234,7 @@ translated_code_t *cpu_raise_exception(cpu_ctx_t *cpu_ctx) dpl = cpl; } - set_access_flg_seg_desc_helper(cpu, code_desc, code_desc_addr, eip); + set_access_flg_seg_desc_helper(cpu, code_desc, code_desc_addr); const auto &exp_has_code = [idx]() -> uint8_t { @@ -277,7 +274,7 @@ translated_code_t 
*cpu_raise_exception(cpu_ctx_t *cpu_ctx) { addr_t ss_desc_addr; - if (read_stack_ptr_from_tss_helper(cpu, dpl, new_esp, new_ss, eip, is_vm86 ? 2 : 0)) { + if (read_stack_ptr_from_tss_helper(cpu, dpl, new_esp, new_ss, is_vm86 ? 2 : 0)) { cpu_ctx->exp_info.exp_data.code += ext_flg; return true; } @@ -288,7 +285,7 @@ translated_code_t *cpu_raise_exception(cpu_ctx_t *cpu_ctx) return true; } - if (read_seg_desc_helper(cpu, new_ss, ss_desc_addr, ss_desc, eip)) { + if (read_seg_desc_helper(cpu, new_ss, ss_desc_addr, ss_desc)) { cpu_ctx->exp_info.exp_data.code += ext_flg; cpu_ctx->exp_info.exp_data.idx = EXP_TS; return true; @@ -307,7 +304,7 @@ translated_code_t *cpu_raise_exception(cpu_ctx_t *cpu_ctx) return true; } - set_access_flg_seg_desc_helper(cpu, ss_desc, ss_desc_addr, eip); + set_access_flg_seg_desc_helper(cpu, ss_desc, ss_desc_addr); return false; }; @@ -333,26 +330,26 @@ translated_code_t *cpu_raise_exception(cpu_ctx_t *cpu_ctx) constexpr uint32_t push_size = sizeof(T); esp -= push_size; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.gs, eip, 2); + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.gs, 2); esp -= push_size; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.fs, eip, 2); + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.fs, 2); esp -= push_size; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.ds, eip, 2); + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.ds, 2); esp -= push_size; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.es, eip, 2); + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.es, 2); esp -= push_size; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.ss, eip, 2); + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.ss, 2); esp -= push_size; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.esp, eip, 2); + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.esp, 2); esp -= push_size; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), old_eflags, eip, 2); + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), old_eflags, 2); esp -= push_size; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.cs, eip, 2); + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.cs, 2); esp -= push_size; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), eip, eip, 2); + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), eip, 2); if (exp_has_code()) { esp -= push_size; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), code, eip, 2); + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), code, 2); } }; @@ -381,8 +378,6 @@ translated_code_t *cpu_raise_exception(cpu_ctx_t *cpu_ctx) cpu_ctx->regs.eflags = (eflags & ~(VM_MASK | RF_MASK | NT_MASK | TF_MASK)); cpu_ctx->regs.esp = (cpu_ctx->regs.esp & ~stack_mask) | (esp & stack_mask); cpu_ctx->regs.eip = new_eip; - cpu_ctx->hflags &= ~HFLG_INHIBIT_INT; - cpu_ctx->cpu->cpu_flags &= ~CPU_INHIBIT_DBG_TRAP; cpu_ctx->exp_info.old_exp = EXP_INVALID; if (idx == EXP_PF) { cpu_ctx->regs.cr2 = fault_addr; @@ -434,19 +429,19 @@ translated_code_t *cpu_raise_exception(cpu_ctx_t *cpu_ctx) if constexpr (stack_switch) { esp -= push_size; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.ss, eip, is_priv); + mem_write_helper(cpu_ctx, stack_base + 
(esp & stack_mask), cpu_ctx->regs.ss, is_priv); esp -= push_size; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.esp, eip, is_priv); + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.esp, is_priv); } esp -= push_size; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), old_eflags, eip, is_priv); + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), old_eflags, is_priv); esp -= push_size; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.cs, eip, is_priv); + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.cs, is_priv); esp -= push_size; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), eip, eip, is_priv); + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), eip, is_priv); if (has_code) { esp -= push_size; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), code, eip, is_priv); + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), code, is_priv); } }; @@ -496,16 +491,16 @@ translated_code_t *cpu_raise_exception(cpu_ctx_t *cpu_ctx) return cpu_raise_exception(cpu_ctx); } - uint32_t vec_entry = mem_read_helper(cpu_ctx, cpu_ctx->regs.idtr_hidden.base + idx * 4, eip, 0); + uint32_t vec_entry = mem_read_helper(cpu_ctx, cpu_ctx->regs.idtr_hidden.base + idx * 4, 0); uint32_t stack_mask = 0xFFFF; uint32_t stack_base = cpu_ctx->regs.ss_hidden.base; uint32_t esp = cpu_ctx->regs.esp; esp -= 2; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), old_eflags, eip, 0); + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), old_eflags, 0); esp -= 2; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.cs, eip, 0); + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), cpu_ctx->regs.cs, 0); esp -= 2; - mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), eip, eip, 0); + mem_write_helper(cpu_ctx, stack_base + (esp & stack_mask), eip, 0); cpu_ctx->regs.eflags &= ~(AC_MASK | RF_MASK | IF_MASK | TF_MASK); cpu_ctx->regs.esp = (cpu_ctx->regs.esp & ~stack_mask) | (esp & stack_mask); @@ -514,8 +509,6 @@ translated_code_t *cpu_raise_exception(cpu_ctx_t *cpu_ctx) cpu_ctx->regs.eip = vec_entry & 0xFFFF; } - cpu_ctx->hflags &= ~HFLG_INHIBIT_INT; - cpu_ctx->cpu->cpu_flags &= ~CPU_INHIBIT_DBG_TRAP; cpu_ctx->exp_info.old_exp = EXP_INVALID; if (idx == EXP_DB) { cpu_ctx->regs.dr[7] &= ~DR7_GD_MASK; @@ -551,16 +544,10 @@ tc_hash(addr_t pc) } template -void tc_invalidate(cpu_ctx_t *cpu_ctx, addr_t phys_addr, [[maybe_unused]] uint8_t size, [[maybe_unused]] uint32_t eip) +void tc_invalidate(cpu_ctx_t *cpu_ctx, addr_t phys_addr, [[maybe_unused]] uint8_t size) { bool halt_tc = false; - if constexpr (!remove_hook) { - if (cpu_ctx->cpu->cpu_flags & CPU_ALLOW_CODE_WRITE) { - return; - } - } - // find all tc's in the page phys_addr belongs to auto it_map = cpu_ctx->cpu->tc_page_map.find(phys_addr >> PAGE_SHIFT); if (it_map != cpu_ctx->cpu->tc_page_map.end()) { @@ -635,13 +622,10 @@ void tc_invalidate(cpu_ctx_t *cpu_ctx, addr_t phys_addr, [[maybe_unused]] uint8_ if (it->get() == tc_in_page) { try { if (it->get()->cs_base == cpu_ctx->regs.cs_hidden.base && - it->get()->pc == get_code_addr(cpu_ctx->cpu, get_pc(cpu_ctx), cpu_ctx->regs.eip) && + it->get()->pc == get_code_addr(cpu_ctx->cpu, get_pc(cpu_ctx)) && it->get()->guest_flags == flags) { // worst case: the write overlaps with the tc we are currently executing halt_tc = true; - if constexpr (!remove_hook) { - cpu_ctx->cpu->cpu_flags |= (CPU_DISAS_ONE | CPU_ALLOW_CODE_WRITE); - } } } 
catch (host_exp_t type) { @@ -676,16 +660,12 @@ void tc_invalidate(cpu_ctx_t *cpu_ctx, addr_t phys_addr, [[maybe_unused]] uint8_ } if (halt_tc) { - // in this case the tc we were executing must be interrupted and to do that, we must return to the translator with an exception - if constexpr (!remove_hook) { - cpu_ctx->regs.eip = eip; - } - throw host_exp_t::halt_tc; + cpu_ctx->cpu->raise_int_fn(cpu_ctx, CPU_HALT_TC_INT); } } -template void tc_invalidate(cpu_ctx_t * cpu_ctx, addr_t phys_addr, [[maybe_unused]] uint8_t size, [[maybe_unused]] uint32_t eip); -template void tc_invalidate(cpu_ctx_t * cpu_ctx, addr_t phys_addr, [[maybe_unused]] uint8_t size, [[maybe_unused]] uint32_t eip); +template void tc_invalidate(cpu_ctx_t *cpu_ctx, addr_t phys_addr, [[maybe_unused]] uint8_t size); +template void tc_invalidate(cpu_ctx_t *cpu_ctx, addr_t phys_addr, [[maybe_unused]] uint8_t size); static translated_code_t * tc_cache_search(cpu_t *cpu, addr_t pc) @@ -904,8 +884,7 @@ cpu_translate(cpu_t *cpu) catch (host_exp_t type) { // this happens on instr breakpoints (not int3) assert(type == host_exp_t::db_exp); - cpu->jit->gen_raise_exp_inline(0, 0, EXP_DB, cpu->instr_eip); - disas_ctx->flags |= DISAS_FLG_DBG_FAULT; + cpu->jit->gen_raise_exp_inline(0, 0, EXP_DB); return; } @@ -935,16 +914,16 @@ cpu_translate(cpu_t *cpu) case ZYDIS_STATUS_DECODING_ERROR: // illegal and/or undefined instruction, or lock prefix used on an instruction which does not accept it or used as source operand, // or the instruction encodes a register that cannot be used (e.g. mov cs, edx) - cpu->jit->gen_raise_exp_inline(0, 0, EXP_UD, cpu->instr_eip); + cpu->jit->gen_raise_exp_inline(0, 0, EXP_UD); return; case ZYDIS_STATUS_NO_MORE_DATA: // buffer < 15 bytes - cpu->cpu_flags &= ~(CPU_DISAS_ONE | CPU_ALLOW_CODE_WRITE); + cpu->cpu_flags &= ~CPU_DISAS_ONE; if (disas_ctx->exp_data.idx == EXP_PF) { // buffer size reduced because of page fault on second page disas_ctx->flags |= DISAS_FLG_FETCH_FAULT; - cpu->jit->gen_raise_exp_inline(disas_ctx->exp_data.fault_addr, disas_ctx->exp_data.code, disas_ctx->exp_data.idx, disas_ctx->exp_data.eip); + cpu->jit->gen_raise_exp_inline(disas_ctx->exp_data.fault_addr, disas_ctx->exp_data.code, disas_ctx->exp_data.idx); return; } else { @@ -954,14 +933,14 @@ cpu_translate(cpu_t *cpu) case ZYDIS_STATUS_INSTRUCTION_TOO_LONG: { // instruction length > 15 bytes - cpu->cpu_flags &= ~(CPU_DISAS_ONE | CPU_ALLOW_CODE_WRITE); - volatile addr_t addr = get_code_addr(cpu, disas_ctx->virt_pc + X86_MAX_INSTR_LENGTH, disas_ctx->virt_pc - cpu->cpu_ctx.regs.cs_hidden.base, disas_ctx); + cpu->cpu_flags &= ~CPU_DISAS_ONE; + volatile addr_t addr = get_code_addr(cpu, disas_ctx->virt_pc + X86_MAX_INSTR_LENGTH, disas_ctx); if (disas_ctx->exp_data.idx == EXP_PF) { disas_ctx->flags |= DISAS_FLG_FETCH_FAULT; - cpu->jit->gen_raise_exp_inline(disas_ctx->exp_data.fault_addr, disas_ctx->exp_data.code, disas_ctx->exp_data.idx, disas_ctx->exp_data.eip); + cpu->jit->gen_raise_exp_inline(disas_ctx->exp_data.fault_addr, disas_ctx->exp_data.code, disas_ctx->exp_data.idx); } else { - cpu->jit->gen_raise_exp_inline(0, 0, EXP_GP, disas_ctx->virt_pc - cpu->cpu_ctx.regs.cs_hidden.base); + cpu->jit->gen_raise_exp_inline(0, 0, EXP_GP); } return; } @@ -1627,6 +1606,11 @@ cpu_translate(cpu_t *cpu) cpu->virt_pc += cpu->instr_bytes; cpu->tc->size += cpu->instr_bytes; + // Only generate an interrupt check if the current instruction didn't terminate this tc. 
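// --- [editor's sketch] ------------------------------------------------------
// Conceptual, host-side model of what the per-instruction interrupt check buys:
// instructions that terminate a translation block already test for interrupts,
// and gen_interrupt_check() adds the same kind of test after every other
// instruction, so a pending event is noticed after at most one more guest
// instruction instead of only at the end of the block. This is *not* the x86-64
// the JIT emits; int_pending, service_interrupts and execute_one_instruction
// are names invented for this sketch.
#include <atomic>
#include <cstdint>

struct toy_cpu_ctx {
    std::atomic<uint32_t> int_pending{0}; // set asynchronously, e.g. by raise_int_fn
    uint32_t eip = 0;
};

static void execute_one_instruction(toy_cpu_ctx &ctx) { ctx.eip += 1; }   // pretend every instr is 1 byte
static void service_interrupts(toy_cpu_ctx &ctx) { ctx.int_pending = 0; } // stand-in for cpu_do_int

static void run_block(toy_cpu_ctx &ctx, int instr_in_block)
{
    for (int i = 0; i < instr_in_block; ++i) {
        execute_one_instruction(ctx);
        // the emitted check reduces to a cheap load + test on the hot path
        if (ctx.int_pending.load(std::memory_order_relaxed)) {
            service_interrupts(ctx);
            return; // leave the block early; execution resumes from ctx.eip
        }
    }
}

int main()
{
    toy_cpu_ctx ctx;
    ctx.int_pending = 1;   // an event arrives before the block starts
    run_block(ctx, 100);
    return ctx.eip == 1 ? 0 : 1; // the block stopped after a single instruction
}
// ----------------------------------------------------------------------------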
Terminating instructions already check for interrupts + if (cpu->translate_next == 1) { + cpu->jit->gen_interrupt_check(); + } + } while ((cpu->translate_next | (disas_ctx->flags & (DISAS_FLG_PAGE_CROSS | DISAS_FLG_ONE_INSTR | DISAS_FLG_PAGE_CROSS_NEXT))) == 1); } @@ -1639,25 +1623,24 @@ cpu_do_int(cpu_ctx_t *cpu_ctx, uint32_t int_flg) throw lc86_exp_abort("Received abort signal, terminating the emulation", lc86_status::success); } - if (int_flg & CPU_SUSPEND_INT) { - cpu_ctx->cpu->clear_int_fn(cpu_ctx, CPU_SUSPEND_INT); - cpu_ctx->cpu->is_suspended.test_and_set(); - if (cpu_ctx->cpu->suspend_should_throw.load() && cpu_ctx->cpu->suspend_flg.test()) { - throw lc86_exp_abort("Received pause signal, suspending the emulation", lc86_status::paused); - } - else { - cpu_ctx->cpu->suspend_flg.wait(true); - } - cpu_ctx->cpu->is_suspended.clear(); - if (cpu_ctx->cpu->state_loaded) { - cpu_ctx->cpu->state_loaded = false; - return CPU_NON_HW_INT; + if (int_flg & CPU_NON_HW_INT) { + cpu_t *cpu = cpu_ctx->cpu; + uint32_t int_clear_flg = CPU_MASKED_INT | CPU_HALT_TC_INT; + if (int_flg & CPU_DBG_TRAP_INT) { + int_clear_flg |= CPU_DBG_TRAP_INT; + if (cpu_ctx->exp_info.exp_data.idx != EXP_DB) { + // This happens when another exception is generated by the instruction after a debug trap exception was detected by a memory handler. Since the info of the trap + // was overwritten by the new exception, we forget the trap here + // FIXME: this is wrong; the Intel docs specify the priority among different exceptions/interrupts when they happen simultaneously in the same + // instruction, and debug traps have a higher priority than almost all the others. However, since this feature was never implemented before, it's not + // a regression for now + LOG(log_level::warn, "Forgetting debug trap exception"); + } + else { + cpu_raise_exception(cpu_ctx); + } } - } - if (int_flg & (CPU_A20_INT | CPU_REGION_INT | CPU_HANDLER_INT)) { - cpu_t *cpu = cpu_ctx->cpu; - uint32_t int_clear_flg = 0; if (int_flg & CPU_HANDLER_INT) { int_clear_flg |= CPU_HANDLER_INT; std::for_each(cpu->regions_updated.begin(), cpu->regions_updated.end(), [cpu](const auto &data) { @@ -1714,15 +1697,31 @@ cpu_do_int(cpu_ctx_t *cpu_ctx, uint32_t int_flg) tlb_flush(cpu); cpu->regions_changed.clear(); } + + if (int_flg & CPU_SUSPEND_INT) { + int_clear_flg |= CPU_SUSPEND_INT; + cpu_ctx->cpu->is_suspended.test_and_set(); + if (cpu_ctx->cpu->suspend_should_throw.load() && cpu_ctx->cpu->suspend_flg.test()) { + cpu_ctx->cpu->clear_int_fn(cpu_ctx, int_clear_flg); + throw lc86_exp_abort("Received pause signal, suspending the emulation", lc86_status::paused); + } + else { + cpu_ctx->cpu->suspend_flg.wait(true); + } + cpu_ctx->cpu->is_suspended.clear(); + if (cpu_ctx->cpu->state_loaded) { + cpu_ctx->cpu->state_loaded = false; + } + } + cpu_ctx->cpu->clear_int_fn(cpu_ctx, int_clear_flg); return CPU_NON_HW_INT; } - if (((int_flg & CPU_HW_INT) | (cpu_ctx->regs.eflags & IF_MASK) | (cpu_ctx->hflags & HFLG_INHIBIT_INT)) == (IF_MASK | CPU_HW_INT)) { + if (((int_flg & CPU_HW_INT) | (cpu_ctx->regs.eflags & IF_MASK)) == (IF_MASK | CPU_HW_INT)) { cpu_ctx->exp_info.exp_data.fault_addr = 0; cpu_ctx->exp_info.exp_data.code = 0; cpu_ctx->exp_info.exp_data.idx = cpu_ctx->cpu->int_data.first(cpu_ctx->cpu->int_data.second); - cpu_ctx->exp_info.exp_data.eip = cpu_ctx->regs.eip; cpu_raise_exception(cpu_ctx); return CPU_HW_INT; } @@ -1743,7 +1742,7 @@ void cpu_suppress_trampolines(cpu_t *cpu) } } -template +template void cpu_main_loop(cpu_t *cpu, T &&lambda) { translated_code_t *prev_tc
= nullptr, *ptr_tc = nullptr; @@ -1755,8 +1754,8 @@ void cpu_main_loop(cpu_t *cpu, T &&lambda) retry: try { virt_pc = get_pc(&cpu->cpu_ctx); - cpu_check_data_watchpoints(cpu, virt_pc, 1, DR7_TYPE_INSTR, cpu->cpu_ctx.regs.eip); - pc = get_code_addr(cpu, virt_pc, cpu->cpu_ctx.regs.eip); + cpu_check_data_watchpoints(cpu, virt_pc, 1, DR7_TYPE_INSTR); + pc = get_code_addr(cpu, virt_pc); } catch (host_exp_t type) { assert((type == host_exp_t::pf_exp) || (type == host_exp_t::db_exp)); @@ -1778,11 +1777,7 @@ void cpu_main_loop(cpu_t *cpu, T &&lambda) goto retry; } - if constexpr (!is_trap) { - // if we are executing a trapped instr, we must always emit a new tc to run it and not consider other tc's in the cache. Doing so avoids having to invalidate - // the tc in the cache that contains the trapped instr - ptr_tc = tc_cache_search(cpu, pc); - } + ptr_tc = tc_cache_search(cpu, pc); if (ptr_tc == nullptr) { @@ -1796,37 +1791,28 @@ void cpu_main_loop(cpu_t *cpu, T &&lambda) cpu->disas_ctx.flags = ((cpu->cpu_ctx.hflags & HFLG_CS32) >> CS32_SHIFT) | ((cpu->cpu_ctx.hflags & HFLG_SS32) >> (SS32_SHIFT - 1)) | (cpu->cpu_ctx.hflags & HFLG_PE_MODE) | - ((cpu->cpu_ctx.hflags & HFLG_INHIBIT_INT) >> 11) | (cpu->cpu_flags & CPU_DISAS_ONE) | ((cpu->cpu_flags & CPU_SINGLE_STEP) >> 3) | ((cpu->cpu_ctx.regs.eflags & RF_MASK) >> 9) | // if rf is set, we need to clear it after the first instr executed - ((cpu->cpu_ctx.regs.eflags & TF_MASK) >> 1) | // if tf is set, we need to raise a DB exp after every instruction - ((cpu->cpu_ctx.hflags & HFLG_INHIBIT_INT) >> 7); // if interrupts are inhibited, we need to enable them after the first instr executed + ((cpu->cpu_ctx.regs.eflags & TF_MASK) >> 1); // if tf is set, we need to raise a DB exp after every instruction cpu->disas_ctx.virt_pc = virt_pc; cpu->disas_ctx.pc = pc; - if constexpr (is_trap) { - // don't take hooks if we are executing a trapped instr. 
Otherwise, if the trapped instr is also hooked, we will take the hook instead of executing it - cpu_translate(cpu); + const auto it = cpu->hook_map.find(cpu->disas_ctx.virt_pc); + bool take_hook; + if constexpr (is_tramp) { + take_hook = (it != cpu->hook_map.end()) && !(cpu->cpu_ctx.hflags & HFLG_TRAMP); } else { - const auto it = cpu->hook_map.find(cpu->disas_ctx.virt_pc); - bool take_hook; - if constexpr (is_tramp) { - take_hook = (it != cpu->hook_map.end()) && !(cpu->cpu_ctx.hflags & HFLG_TRAMP); - } - else { - take_hook = it != cpu->hook_map.end(); - } + take_hook = it != cpu->hook_map.end(); + } - if (take_hook) { - cpu->instr_eip = cpu->disas_ctx.virt_pc - cpu->cpu_ctx.regs.cs_hidden.base; - cpu->jit->gen_hook(it->second); - } - else { - // start guest code translation - cpu_translate(cpu); - } + if (take_hook) { + cpu->jit->gen_hook(it->second); + } + else { + // start guest code translation + cpu_translate(cpu); } cpu->jit->gen_tc_epilogue(); @@ -1855,7 +1841,7 @@ void cpu_main_loop(cpu_t *cpu, T &&lambda) uint32_t cpu_flags = cpu->cpu_flags; cpu_suppress_trampolines(cpu); - cpu->cpu_flags &= ~(CPU_DISAS_ONE | CPU_ALLOW_CODE_WRITE | CPU_FORCE_INSERT); + cpu->cpu_flags &= ~(CPU_DISAS_ONE | CPU_FORCE_INSERT); prev_tc = tc_run_code(&cpu->cpu_ctx, ptr_tc); if (!(cpu_flags & CPU_FORCE_INSERT)) { cpu->jit->free_code_block(reinterpret_cast(ptr_tc->jmp_offset[2])); @@ -1900,30 +1886,18 @@ tc_run_code(cpu_ctx_t *cpu_ctx, translated_code_t *tc) return cpu_raise_exception(cpu_ctx); } catch (host_exp_t type) { - assert((type == host_exp_t::pf_exp) || (type == host_exp_t::db_exp)); + assert(type == host_exp_t::pf_exp); - // page fault or debug exception while delivering another exception + // page fault exception while delivering another exception goto retry_exp; } } break; - case host_exp_t::db_exp: { - // debug exception trap (mem/io r/w watch) while executing the translated code. - // We set CPU_INHIBIT_DBG_TRAP, so that we can execute the trapped instruction without triggering again a de exp, - // and then jump to the debug handler. Note that eip points to the trapped instr, so we can execute it. 
- assert(cpu_ctx->exp_info.exp_data.idx == EXP_DB); - - cpu_ctx->cpu->cpu_flags |= (CPU_DISAS_ONE | CPU_INHIBIT_DBG_TRAP); - cpu_ctx->regs.eip = cpu_ctx->exp_info.exp_data.eip; - // run the main loop only once, since we only execute the trapped instr - int i = 0; - cpu_main_loop(cpu_ctx->cpu, [&i]() { return i++ == 0; }); - return nullptr; - } - - case host_exp_t::halt_tc: - return nullptr; + case host_exp_t::db_exp: + // because debug trap exceptions are handled at runtime with the debug interrupt, this cannot happen, so it must be a bug + LIB86CPU_ABORT_msg("Unexpected debug trap exception while running code"); + break; default: LIB86CPU_ABORT_msg("Unknown host exception in %s", __func__); @@ -1971,20 +1945,20 @@ lc86_status cpu_start(cpu_t *cpu) try { if constexpr (run_forever) { - cpu_main_loop(cpu, []() { return true; }); + cpu_main_loop(cpu, []() { return true; }); } else { cpu_timer_set_now(cpu); - cpu->cpu_ctx.exit_requested = 0; - if (cpu->cpu_ctx.is_halted) { + cpu->exit_requested = false; + if (cpu->is_halted) { // if the cpu was previously halted, then we must keep waiting until the next hw int - halt_loop(cpu); - if (cpu->cpu_ctx.is_halted) { + hlt_helper(&cpu->cpu_ctx); + if (cpu->is_halted) { // if it is still halted, then it must be a timeout return set_last_error(lc86_status::timeout); } } - cpu_main_loop(cpu, [cpu]() { return !cpu->cpu_ctx.exit_requested; }); + cpu_main_loop(cpu, [cpu]() { return !cpu->exit_requested; }); cpu->cpu_thr_id = std::thread::id(); return set_last_error(lc86_status::timeout); } @@ -2009,7 +1983,7 @@ cpu_exec_trampoline(cpu_t *cpu, const uint32_t ret_eip) { // set the trampoline flag, so that we can call the trampoline tc instead of the hook tc cpu->cpu_ctx.hflags |= HFLG_TRAMP; - cpu_main_loop(cpu, [cpu, ret_eip]() { return cpu->cpu_ctx.regs.eip != ret_eip; }); + cpu_main_loop(cpu, [cpu, ret_eip]() { return cpu->cpu_ctx.regs.eip != ret_eip; }); } void @@ -2018,7 +1992,7 @@ dbg_exec_original_instr(cpu_t *cpu) cpu->cpu_flags |= CPU_DISAS_ONE; // run the main loop only once, since we only execute the original instr that was replaced by int3 int i = 0; - cpu_main_loop(cpu, [&i]() { return i++ == 0; }); + cpu_main_loop(cpu, [&i]() { return i++ == 0; }); } template JIT_API translated_code_t *cpu_raise_exception<0, true>(cpu_ctx_t *cpu_ctx); diff --git a/lib86cpu/core/windows/clock.cpp b/lib86cpu/core/windows/clock.cpp index bf9e750..b7033a2 100644 --- a/lib86cpu/core/windows/clock.cpp +++ b/lib86cpu/core/windows/clock.cpp @@ -44,15 +44,11 @@ cpu_timer_set_now(cpu_t *cpu) uint32_t cpu_timer_helper(cpu_ctx_t *cpu_ctx) { - // always check for interrupts first. 
Otherwise, if the cpu consistently timeouts at every code block, it will never check for interrupts - if (uint32_t ret = cpu_do_int(cpu_ctx, cpu_ctx->cpu->read_int_fn(cpu_ctx))) { - return ret; - } - uint64_t elapsed_us = get_current_time() - cpu_ctx->cpu->timer.last_time; elapsed_us *= 1000000; elapsed_us /= cpu_ctx->cpu->timer.host_freq; if (elapsed_us >= cpu_ctx->cpu->timer.timeout_time) { + cpu_ctx->cpu->exit_requested = true; return CPU_TIMEOUT_INT; } diff --git a/lib86cpu/dbg/debugger.cpp b/lib86cpu/dbg/debugger.cpp index e0679e2..4e59588 100644 --- a/lib86cpu/dbg/debugger.cpp +++ b/lib86cpu/dbg/debugger.cpp @@ -229,7 +229,7 @@ dbg_add_exp_hook(cpu_ctx_t *cpu_ctx) } unsigned exp_idx = EXP_BP; for (int i = 0; i < 2; ++i) { - uint64_t desc = mem_read_helper(cpu_ctx, cpu_ctx->regs.idtr_hidden.base + exp_idx * 8, cpu_ctx->regs.eip, 2); + uint64_t desc = mem_read_helper(cpu_ctx, cpu_ctx->regs.idtr_hidden.base + exp_idx * 8, 2); uint16_t type = (desc >> 40) & 0x1F; uint32_t new_eip, new_base; switch (type) @@ -273,7 +273,7 @@ dbg_add_exp_hook(cpu_ctx_t *cpu_ctx) LOG(log_level::warn, "Failed to install hook for the exception handler: GDT or LDT limit exceeded"); return; } - desc = mem_read_helper(cpu_ctx, base + sel_idx * 8, cpu_ctx->regs.eip, 2); + desc = mem_read_helper(cpu_ctx, base + sel_idx * 8, 2); if ((desc & SEG_DESC_P) == 0) { LOG(log_level::warn, "Failed to install hook for the exception handler: GDT or LDT descriptor not present"); return; @@ -293,9 +293,9 @@ dbg_add_exp_hook(cpu_ctx_t *cpu_ctx) LOG(log_level::warn, "Failed to install hook for the exception handler: IDT limit exceeded"); return; } - uint32_t vec_bp_entry = mem_read_helper(cpu_ctx, cpu_ctx->regs.idtr_hidden.base + EXP_BP * 4, cpu_ctx->regs.eip, 0); + uint32_t vec_bp_entry = mem_read_helper(cpu_ctx, cpu_ctx->regs.idtr_hidden.base + EXP_BP * 4, 0); cpu_ctx->cpu->bp_addr = ((vec_bp_entry >> 16) << 4) + (vec_bp_entry & 0xFFFF); - uint32_t vec_db_entry = mem_read_helper(cpu_ctx, cpu_ctx->regs.idtr_hidden.base + EXP_DB * 4, cpu_ctx->regs.eip, 0); + uint32_t vec_db_entry = mem_read_helper(cpu_ctx, cpu_ctx->regs.idtr_hidden.base + EXP_DB * 4, 0); cpu_ctx->cpu->db_addr = ((vec_db_entry >> 16) << 4) + (vec_db_entry & 0xFFFF); } } @@ -330,7 +330,7 @@ dbg_disas_code_block(cpu_t *cpu, disas_ctx_t *disas_ctx, ZydisDecoder *decoder, addr_t next_pc = disas_ctx->virt_pc + bytes; if ((disas_ctx->virt_pc & ~PAGE_MASK) != ((next_pc - 1) & ~PAGE_MASK)) { // page crossing, needs to translate virt_pc again - disas_ctx->pc = get_code_addr(cpu, next_pc, disas_ctx->virt_pc - cpu->cpu_ctx.regs.cs_hidden.base, disas_ctx); + disas_ctx->pc = get_code_addr(cpu, next_pc, disas_ctx); if (disas_ctx->exp_data.idx == EXP_PF) { // page fault in the new page, cannot display remaining instr disas_ctx->virt_pc = next_pc; @@ -359,7 +359,7 @@ dbg_disas_code_block(cpu_t *cpu, addr_t pc, unsigned instr_num) ((cpu->cpu_ctx.hflags & HFLG_SS32) >> (SS32_SHIFT - 1)) | (cpu->cpu_ctx.hflags & HFLG_PE_MODE); disas_ctx.virt_pc = pc; - disas_ctx.pc = get_code_addr(cpu, disas_ctx.virt_pc, cpu->cpu_ctx.regs.eip, &disas_ctx); + disas_ctx.pc = get_code_addr(cpu, disas_ctx.virt_pc, &disas_ctx); if (disas_ctx.exp_data.idx == EXP_PF) { // page fault, cannot display instr return {}; @@ -386,6 +386,7 @@ dbg_ram_read(cpu_t *cpu, uint8_t *buff) uint32_t actual_size; if (!LC86_SUCCESS(mem_read_block_virt(cpu, mem_pc, PAGE_SIZE, buff, &actual_size))) { std::memset(&buff[actual_size], 0, PAGE_SIZE - actual_size); + LOG(log_level::info, "Failed to read at address 0x%08" PRIX32, 
mem_pc); } } @@ -402,7 +403,7 @@ dbg_ram_write(uint8_t *data, size_t off, uint8_t val) try { bool is_code; - addr_t phys_addr = get_write_addr(g_cpu, addr, 2, addr - g_cpu->cpu_ctx.regs.cs_hidden.base, &is_code); + addr_t phys_addr = get_write_addr(g_cpu, addr, 2, &is_code); const memory_region_t *region = as_memory_search_addr(g_cpu, phys_addr); retry: @@ -411,7 +412,8 @@ dbg_ram_write(uint8_t *data, size_t off, uint8_t val) case mem_type::ram: ram_write(g_cpu, get_ram_host_ptr(g_cpu, region, phys_addr), val); if (is_code) { - tc_invalidate(&g_cpu->cpu_ctx, phys_addr, 1, g_cpu->cpu_ctx.regs.eip); + tc_invalidate(&g_cpu->cpu_ctx, phys_addr, 1); + g_cpu->clear_int_fn(&g_cpu->cpu_ctx, CPU_HALT_TC_INT); } // also update the read mem buffer used by dbg_ram_read data[off] = val; @@ -431,10 +433,8 @@ dbg_ram_write(uint8_t *data, size_t off, uint8_t val) } } catch (host_exp_t type) { - // just fallthrough - if (type == host_exp_t::halt_tc) { - g_cpu->cpu_flags &= ~(CPU_DISAS_ONE | CPU_ALLOW_CODE_WRITE); - } + // NOTE: debug exceptions cannot happen here because we are not accessing any memory here, only translating an address + LOG(log_level::info, "Failed to write to address 0x%08" PRIX32, addr); } (g_cpu->cpu_ctx.regs.cr0 &= ~CR0_WP_MASK) |= old_wp; @@ -446,7 +446,7 @@ dbg_single_step_handler(cpu_ctx_t *cpu_ctx) // NOTE1: this is called from the emulation thread // NOTE2: since the cpu has just pushed the ret_eip on the stack of the exception handler and no other guest code runs before we are called // in this hook, then mem_read_helper cannot raise page faults now - uint32_t ret_eip = mem_read_helper(cpu_ctx, cpu_ctx->regs.esp, 0, 0); + uint32_t ret_eip = mem_read_helper(cpu_ctx, cpu_ctx->regs.esp, 0); addr_t pc = cpu_ctx->regs.cs_hidden.base + ret_eip; if (cpu_ctx->cpu->cpu_flags & CPU_SINGLE_STEP) { // disable all breakpoints so that we can show the original instructions in the disassembler @@ -464,13 +464,13 @@ dbg_single_step_handler(cpu_ctx_t *cpu_ctx) try { // execute an iret instruction so that we can correctly return to the interrupted code if (cpu_ctx->hflags & HFLG_PE_MODE) { - if (lret_pe_helper(cpu_ctx, (cpu_ctx->hflags & HFLG_CS32) ? SIZE32 : SIZE16, cpu_ctx->regs.eip)) { + if (lret_pe_helper(cpu_ctx, (cpu_ctx->hflags & HFLG_CS32) ? SIZE32 : SIZE16)) { // we can't handle an exception here, so abort LIB86CPU_ABORT_msg("Unhandled exception while returning from a single step"); } } else { - iret_real_helper(cpu_ctx, (cpu_ctx->hflags & HFLG_CS32) ? SIZE32 : SIZE16, cpu_ctx->regs.eip); + iret_real_helper(cpu_ctx, (cpu_ctx->hflags & HFLG_CS32) ? 
SIZE32 : SIZE16); } } catch (host_exp_t type) { @@ -490,7 +490,7 @@ dbg_sw_breakpoint_handler(cpu_ctx_t *cpu_ctx) // NOTE1: this is called from the emulation thread // NOTE2: since the cpu has just pushed the ret_eip on the stack of the exception handler and no other guest code runs before we are called // in this hook, then mem_read_helper cannot raise page faults now - uint32_t ret_eip = mem_read_helper(cpu_ctx, cpu_ctx->regs.esp, 0, 0); + uint32_t ret_eip = mem_read_helper(cpu_ctx, cpu_ctx->regs.esp, 0); addr_t pc = cpu_ctx->regs.cs_hidden.base + ret_eip - 1; // if this is our int3, it will always be one byte large if (break_list.contains(pc)) { // disable all breakpoints so that we can show the original instructions in the disassembler @@ -506,13 +506,13 @@ dbg_sw_breakpoint_handler(cpu_ctx_t *cpu_ctx) try { // execute an iret instruction so that we can correctly return to the interrupted code if (cpu_ctx->hflags & HFLG_PE_MODE) { - if (lret_pe_helper(cpu_ctx, (cpu_ctx->hflags & HFLG_CS32) ? SIZE32 : SIZE16, cpu_ctx->regs.eip)) { + if (lret_pe_helper(cpu_ctx, (cpu_ctx->hflags & HFLG_CS32) ? SIZE32 : SIZE16)) { // we can't handle an exception here, so abort LIB86CPU_ABORT_msg("Unhandled exception while returning from a breakpoint"); } } else { - iret_real_helper(cpu_ctx, (cpu_ctx->hflags & HFLG_CS32) ? SIZE32 : SIZE16, cpu_ctx->regs.eip); + iret_real_helper(cpu_ctx, (cpu_ctx->hflags & HFLG_CS32) ? SIZE32 : SIZE16); } cpu_ctx->regs.eip = ret_eip - 1; dbg_remove_sw_breakpoints(cpu_ctx->cpu, pc); @@ -555,7 +555,7 @@ dbg_insert_sw_breakpoint(cpu_t *cpu, addr_t addr) try { bool is_code; - addr_t phys_addr = get_write_addr(cpu, addr, 0, cpu->cpu_ctx.regs.eip, &is_code); + addr_t phys_addr = get_write_addr(cpu, addr, 0, &is_code); if (as_memory_search_addr(cpu, phys_addr)->type == mem_type::ram) { inserted = true; } @@ -607,8 +607,8 @@ dbg_apply_sw_breakpoints(cpu_t *cpu) addr_t addr = elem.first; // the mem accesses below cannot raise page faults since break_list can only contain valid pages because of the checks done in dbg_insert_sw_breakpoint - uint8_t original_byte = mem_read_helper(&cpu->cpu_ctx, addr, cpu->cpu_ctx.regs.eip, 0); - mem_write_helper(&cpu->cpu_ctx, addr, 0xCC, cpu->cpu_ctx.regs.eip, 0); + uint8_t original_byte = mem_read_helper(&cpu->cpu_ctx, addr, 0); + mem_write_helper(&cpu->cpu_ctx, addr, 0xCC, 0); break_list.insert_or_assign(addr, original_byte); } }); @@ -619,8 +619,8 @@ dbg_apply_sw_breakpoints(cpu_t *cpu, addr_t addr) { dbg_update_sw_breakpoints(cpu, [addr](cpu_t *cpu) { // the mem accesses below cannot raise page faults since break_list can only contain valid pages because of the checks done in dbg_insert_sw_breakpoint - uint8_t original_byte = mem_read_helper(&cpu->cpu_ctx, addr, cpu->cpu_ctx.regs.eip, 0); - mem_write_helper(&cpu->cpu_ctx, addr, 0xCC, cpu->cpu_ctx.regs.eip, 0); + uint8_t original_byte = mem_read_helper(&cpu->cpu_ctx, addr, 0); + mem_write_helper(&cpu->cpu_ctx, addr, 0xCC, 0); break_list.insert_or_assign(addr, original_byte); }); } @@ -633,7 +633,7 @@ dbg_remove_sw_breakpoints(cpu_t *cpu) const auto &[addr, original_byte] = elem; try { - mem_write_helper(&cpu->cpu_ctx, addr, original_byte, cpu->cpu_ctx.regs.eip, 0); + mem_write_helper(&cpu->cpu_ctx, addr, original_byte, 0); } catch (host_exp_t type) { LIB86CPU_ABORT_msg("Unhandled page fault while removing a sw breakpoint"); @@ -648,7 +648,7 @@ dbg_remove_sw_breakpoints(cpu_t *cpu, addr_t addr) dbg_update_sw_breakpoints(cpu, [addr](cpu_t *cpu) { if (auto it = break_list.find(addr); it != 
break_list.end()) { try { - mem_write_helper(&cpu->cpu_ctx, addr, it->second, cpu->cpu_ctx.regs.eip, 0); + mem_write_helper(&cpu->cpu_ctx, addr, it->second, 0); } catch (host_exp_t type) { LIB86CPU_ABORT_msg("Unhandled page fault while removing a sw breakpoint"); diff --git a/lib86cpu/interface.cpp b/lib86cpu/interface.cpp index 1aa4042..8ecbbf8 100644 --- a/lib86cpu/interface.cpp +++ b/lib86cpu/interface.cpp @@ -559,7 +559,7 @@ uint8_t * get_host_ptr(cpu_t *cpu, addr_t addr) { try { - addr_t phys_addr = get_read_addr(cpu, addr, 0, 0); + addr_t phys_addr = get_read_addr(cpu, addr, 0); const memory_region_t* region = as_memory_search_addr(cpu, phys_addr); switch (region->type) @@ -847,7 +847,7 @@ lc86_status mem_read_block(cpu_t *cpu, addr_t addr, uint32_t size, uint8_t *out, addr_t phys_addr; uint32_t bytes_to_read = std::min(PAGE_SIZE - page_offset, size_left); if constexpr (is_virt) { - phys_addr = get_read_addr(cpu, addr, 0, 0); + phys_addr = get_read_addr(cpu, addr, 0); } else { phys_addr = addr; @@ -944,14 +944,14 @@ lc86_status mem_write_handler(cpu_t *cpu, addr_t addr, uint32_t size, const void addr_t phys_addr; uint32_t bytes_to_write = std::min(PAGE_SIZE - page_offset, size_left); if constexpr (is_virt) { - phys_addr = get_write_addr(cpu, addr, 0, 0, &is_code); + phys_addr = get_write_addr(cpu, addr, 0, &is_code); if (is_code) { - tc_invalidate(&cpu->cpu_ctx, phys_addr, bytes_to_write, cpu->cpu_ctx.regs.eip); + tc_invalidate(&cpu->cpu_ctx, phys_addr, bytes_to_write); } } else { phys_addr = addr; - tc_invalidate(&cpu->cpu_ctx, phys_addr, bytes_to_write, cpu->cpu_ctx.regs.eip); + tc_invalidate(&cpu->cpu_ctx, phys_addr, bytes_to_write); } const memory_region_t *region = as_memory_search_addr(cpu, phys_addr); @@ -1062,7 +1062,7 @@ template static lc86_status io_read_handler(cpu_t *cpu, port_t port, T &out) { try { - out = io_read_helper(&cpu->cpu_ctx, port, 0); + out = io_read_helper(&cpu->cpu_ctx, port); return lc86_status::success; } catch (host_exp_t type) { @@ -1100,7 +1100,7 @@ template static lc86_status io_write_handler(cpu_t *cpu, port_t port, T val) { try { - io_write_helper(&cpu->cpu_ctx, port, val, 0); + io_write_helper(&cpu->cpu_ctx, port, val); return lc86_status::success; } catch (host_exp_t type) { @@ -1182,8 +1182,8 @@ hook_add(cpu_t *cpu, addr_t addr, hook_t hook_addr) // of the translation of a new code block) try { - addr_t phys_addr = get_code_addr(cpu, addr, cpu->cpu_ctx.regs.eip); - tc_invalidate(&cpu->cpu_ctx, phys_addr, 1, cpu->cpu_ctx.regs.eip); + addr_t phys_addr = get_code_addr(cpu, addr); + tc_invalidate(&cpu->cpu_ctx, phys_addr, 1); } catch (host_exp_t type) { return set_last_error(lc86_status::guest_exp); @@ -1210,7 +1210,7 @@ hook_remove(cpu_t *cpu, addr_t addr) try { bool is_code; - addr_t phys_addr = get_write_addr(cpu, addr, 2, cpu->cpu_ctx.regs.eip, &is_code); + addr_t phys_addr = get_write_addr(cpu, addr, 2, &is_code); cpu->hook_map.erase(it); tc_invalidate(&cpu->cpu_ctx, phys_addr); } diff --git a/lib86cpu/lib86cpu_priv.h b/lib86cpu/lib86cpu_priv.h index 5590c43..0bb356c 100644 --- a/lib86cpu/lib86cpu_priv.h +++ b/lib86cpu/lib86cpu_priv.h @@ -56,7 +56,6 @@ enum class mem_type { enum class host_exp_t : int { pf_exp, db_exp, - halt_tc, }; template @@ -93,7 +92,6 @@ struct exp_data_t { uint32_t fault_addr; // addr that caused the exception uint16_t code; // error code used by the exception (if any) uint16_t idx; // index number of the exception - uint32_t eip; // eip to return to after the exception is serviced }; struct exp_info_t { @@ -163,8 +161,6 
@@ struct cpu_ctx_t { uint32_t hflags; exp_info_t exp_info; uint32_t int_pending; - uint8_t exit_requested; - uint8_t is_halted; fpu_data_t fpu_data; }; @@ -199,8 +195,8 @@ struct cpu_t { tlb_t itlb[ITLB_NUM_SETS][ITLB_NUM_LINES]; // instruction tlb tlb_t dtlb[DTLB_NUM_SETS][DTLB_NUM_LINES]; // data tlb uint16_t num_tc; // num of tc actually emitted, tc's might not be present in the code cache - uint8_t microcode_updated; - bool state_loaded; + uint8_t microcode_updated, is_halted; + bool state_loaded, exit_requested; struct _tsc_clock { uint64_t offset; uint64_t last_host_ticks; diff --git a/lib86cpu/support.cpp b/lib86cpu/support.cpp index 3d119e4..2676ebe 100644 --- a/lib86cpu/support.cpp +++ b/lib86cpu/support.cpp @@ -14,7 +14,7 @@ #endif // This should be updated whenever cpu members that need to be saved are added/removed -#define SAVE_STATE_ID 6 +#define SAVE_STATE_ID 7 void @@ -100,7 +100,7 @@ cpu_save_state(cpu_t *cpu, cpu_save_state_t *cpu_state, ram_save_state_t *ram_st cpu_state->eflags_aux = cpu->cpu_ctx.lazy_eflags.auxbits; cpu_state->ftop = cpu->cpu_ctx.fpu_data.ftop; cpu_state->frp = cpu->cpu_ctx.fpu_data.frp; - cpu_state->is_halted = cpu->cpu_ctx.is_halted; + cpu_state->is_halted = cpu->is_halted; cpu_state->microcode_updated = cpu->microcode_updated; cpu_state->hflags = (cpu->cpu_ctx.hflags & HFLG_SAVED_MASK); cpu_state->a20_mask = cpu->a20_mask; @@ -130,7 +130,7 @@ cpu_load_state(cpu_t *cpu, cpu_save_state_t *cpu_state, ram_save_state_t *ram_st cpu->cpu_ctx.lazy_eflags.auxbits = cpu_state->eflags_aux; cpu->cpu_ctx.fpu_data.ftop = cpu_state->ftop; cpu->cpu_ctx.fpu_data.frp = cpu_state->frp; - cpu->cpu_ctx.is_halted = cpu->cpu_ctx.is_halted; + cpu->is_halted = cpu_state->is_halted; cpu->microcode_updated = cpu_state->microcode_updated; cpu->cpu_ctx.hflags = cpu_state->hflags; cpu->cpu_flags &= CPU_PRESERVED_FLG_MASK; diff --git a/lib86cpu/support.h b/lib86cpu/support.h index dde5977..95c2353 100644 --- a/lib86cpu/support.h +++ b/lib86cpu/support.h @@ -12,9 +12,7 @@ // these flags are ORed with the flags in lib86cpu.h, so avoid conflicts #define CPU_TIMEOUT (1 << 3) -#define CPU_INHIBIT_DBG_TRAP (1 << 4) #define CPU_DISAS_ONE (1 << 7) -#define CPU_ALLOW_CODE_WRITE (1 << 8) #define CPU_FORCE_INSERT (1 << 9) #define CPU_SINGLE_STEP (1 << 10) #define CPU_PRESERVED_FLG_MASK (CPU_SYNTAX_MASK | CPU_TIMEOUT | CPU_SINGLE_STEP | CPU_DBG_PRESENT | CPU_ABORT_ON_HLT)
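
To make the control flow introduced by the per-instruction check easier to follow, here is a minimal, self-contained C++ sketch of what a translated block effectively does after this patch: commit eip, test int_pending, and leave the block as soon as it is set. All names in the sketch (toy_cpu_ctx, run_block, service_interrupt) are simplified stand-ins, not lib86cpu's real types or the actual emitted x86-64.

#include <cstddef>
#include <cstdint>
#include <cinttypes>
#include <cstdio>

struct toy_cpu_ctx {
    uint32_t eip = 0;
    uint32_t int_pending = 0; // set asynchronously by the interrupt-raising path in the real code
};

// Stand-in for the work the dispatcher would do once a pending interrupt is observed.
static void service_interrupt(toy_cpu_ctx &ctx) {
    std::printf("servicing pending interrupt at eip=0x%08" PRIX32 "\n", ctx.eip);
    ctx.int_pending = 0;
}

// Stand-in for one translated block: every instruction is followed by the check,
// instead of a single check at the end of the block.
static void run_block(toy_cpu_ctx &ctx, const uint32_t *instr_len, std::size_t count) {
    for (std::size_t i = 0; i < count; ++i) {
        // ... the translated instruction body would run here ...
        ctx.eip += instr_len[i];   // commit the guest eip first, as the emitted check does
        if (ctx.int_pending) {     // per-instruction check
            service_interrupt(ctx);
            return;                // leave the block; execution resumes from the stored eip
        }
    }
}

int main() {
    toy_cpu_ctx ctx;
    const uint32_t lens[] = { 2, 3, 1, 5 };
    ctx.int_pending = 1;           // pretend a device raised an interrupt mid-block
    run_block(ctx, lens, 4);
}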
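
The watchpoint change follows the same pattern: a matching data/io watch no longer throws out of the memory helper; it records the hit and raises a debug-trap interrupt, which the per-instruction check delivers after the access completes. The sketch below only models that shape; DBG_TRAP_INT, toy_ctx and the helper names are hypothetical stand-ins for the real constants and functions.

#include <cstdint>

// Hypothetical flag value standing in for the real pending-interrupt bit.
constexpr uint32_t DBG_TRAP_INT = 1u << 0;

struct toy_ctx {
    uint32_t int_pending = 0;
    uint32_t dr6 = 0;
};

// Memory-write path: on a watchpoint hit, record it and defer delivery so the guest write still completes.
void write_with_watchpoint(toy_ctx &ctx, int dr_idx, bool hit) {
    // ... perform the guest write here ...
    if (hit) {
        ctx.dr6 |= (1u << dr_idx);       // remember which debug register matched
        ctx.int_pending |= DBG_TRAP_INT; // request a #DB after the instruction retires
    }
}

// The per-instruction check then sees the pending trap and delivers it, matching the
// trap-after-execution semantics of data watchpoints.
bool take_debug_trap(toy_ctx &ctx) {
    if (ctx.int_pending & DBG_TRAP_INT) {
        ctx.int_pending &= ~DBG_TRAP_INT;
        return true; // the caller would raise EXP_DB here
    }
    return false;
}

int main() {
    toy_ctx ctx;
    write_with_watchpoint(ctx, 1, true);
    return take_debug_trap(ctx) ? 0 : 1;
}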
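
The suspend handling that moved into the CPU_NON_HW_INT branch of cpu_do_int is, at its core, a wait/notify handshake on atomic flags. The standalone example below reproduces only that handshake with C++20 std::atomic_flag; it deliberately omits the parts of the real code that clear the pending-interrupt bits and that may throw lc86_exp_abort when a throwing pause was requested.

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

std::atomic_flag suspend_flg;   // set by the controlling thread to request a pause
std::atomic_flag is_suspended;  // set by the emulation thread while it is parked

// What the emulation thread does when it services the suspend request.
void emulation_thread_service_suspend() {
    is_suspended.test_and_set();
    suspend_flg.wait(true);     // block until the controller clears the flag
    is_suspended.clear();
}

int main() {
    suspend_flg.test_and_set();                       // request the pause before the "cpu" runs
    std::thread emu(emulation_thread_service_suspend);
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    std::printf("emulation parked: %d\n", is_suspended.test());
    suspend_flg.clear();                              // resume
    suspend_flg.notify_one();
    emu.join();
}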
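
Finally, the timer change: on timeout the helper now flags exit_requested on cpu_t, and the main-loop predicate in cpu_start observes it. A small sketch of that shape, with simplified names (toy_cpu, TIMEOUT_INT) rather than the real members and constants:

#include <cstdint>

struct toy_cpu {
    bool exit_requested = false;
    uint64_t elapsed_us = 0;
    uint64_t timeout_us = 0;
};

// Hypothetical constant standing in for the real timeout-interrupt bit.
constexpr uint32_t TIMEOUT_INT = 1u << 1;

// On timeout, flag exit_requested instead of relying on a separate exit field inside cpu_ctx_t.
uint32_t timer_helper(toy_cpu &cpu) {
    if (cpu.elapsed_us >= cpu.timeout_us) {
        cpu.exit_requested = true;
        return TIMEOUT_INT;
    }
    return 0;
}

int main() {
    toy_cpu cpu{ false, 2000, 1000 };
    timer_helper(cpu);
    // the main-loop predicate is effectively: keep running while !exit_requested
    return cpu.exit_requested ? 0 : 1;
}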