summaryrefslogtreecommitdiff
path: root/zjit/src/backend
diff options
context:
space:
mode:
Diffstat (limited to 'zjit/src/backend')
-rw-r--r-- zjit/src/backend/arm64/mod.rs  | 49
-rw-r--r-- zjit/src/backend/lir.rs        | 26
-rw-r--r-- zjit/src/backend/x86_64/mod.rs | 67
3 files changed, 81 insertions(+), 61 deletions(-)
diff --git a/zjit/src/backend/arm64/mod.rs b/zjit/src/backend/arm64/mod.rs
index f7e871523e..dd1eb52d34 100644
--- a/zjit/src/backend/arm64/mod.rs
+++ b/zjit/src/backend/arm64/mod.rs
@@ -211,11 +211,6 @@ impl Assembler
vec![X1_REG, X9_REG, X10_REG, X11_REG, X12_REG, X13_REG, X14_REG, X15_REG]
}
- /// Get the address that the current frame returns to
- pub fn return_addr_opnd() -> Opnd {
- Opnd::Reg(X30_REG)
- }
-
/// Split platform-specific instructions
/// The transformations done here are meant to make our lives simpler in later
/// stages of the compilation pipeline.
@@ -1345,14 +1340,30 @@ impl Assembler
}
}
-/*
#[cfg(test)]
mod tests {
use super::*;
- use crate::disasm::*;
+ use crate::assertions::assert_disasm;
+
+ static TEMP_REGS: [Reg; 5] = [X1_REG, X9_REG, X10_REG, X14_REG, X15_REG];
fn setup_asm() -> (Assembler, CodeBlock) {
- (Assembler::new(0), CodeBlock::new_dummy(1024))
+ (Assembler::new(), CodeBlock::new_dummy())
+ }
+
+ #[test]
+ fn test_mul_with_immediate() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let out = asm.mul(Opnd::Reg(TEMP_REGS[1]), 3.into());
+ asm.mov(Opnd::Reg(TEMP_REGS[0]), out);
+ asm.compile_with_num_regs(&mut cb, 2);
+
+ assert_disasm!(cb, "600080d2207d009be10300aa", {"
+ 0x0: mov x0, #3
+ 0x4: mul x0, x9, x0
+ 0x8: mov x1, x0
+ "});
}
#[test]
@@ -1361,7 +1372,7 @@ mod tests {
let opnd = asm.add(Opnd::Reg(X0_REG), Opnd::Reg(X1_REG));
asm.store(Opnd::mem(64, Opnd::Reg(X2_REG), 0), opnd);
- asm.compile_with_regs(&mut cb, None, vec![X3_REG]);
+ asm.compile_with_regs(&mut cb, vec![X3_REG]);
// Assert that only 2 instructions were written.
assert_eq!(8, cb.get_write_pos());
@@ -1425,6 +1436,7 @@ mod tests {
asm.compile_with_num_regs(&mut cb, 0);
}
+ /*
#[test]
fn test_emit_lea_label() {
let (mut asm, mut cb) = setup_asm();
@@ -1438,6 +1450,7 @@ mod tests {
asm.compile_with_num_regs(&mut cb, 1);
}
+ */
#[test]
fn test_emit_load_mem_disp_fits_into_load() {
@@ -1648,6 +1661,7 @@ mod tests {
asm.compile_with_num_regs(&mut cb, 2);
}
+ /*
#[test]
fn test_bcond_straddling_code_pages() {
const LANDING_PAGE: usize = 65;
@@ -1784,20 +1798,5 @@ mod tests {
0x8: mov x1, x11
"});
}
-
- #[test]
- fn test_mul_with_immediate() {
- let (mut asm, mut cb) = setup_asm();
-
- let out = asm.mul(Opnd::Reg(TEMP_REGS[1]), 3.into());
- asm.mov(Opnd::Reg(TEMP_REGS[0]), out);
- asm.compile_with_num_regs(&mut cb, 2);
-
- assert_disasm!(cb, "6b0080d22b7d0b9be1030baa", {"
- 0x0: mov x11, #3
- 0x4: mul x11, x9, x11
- 0x8: mov x1, x11
- "});
- }
+ */
}
-*/
diff --git a/zjit/src/backend/lir.rs b/zjit/src/backend/lir.rs
index e9ae8730f6..f46b35ded5 100644
--- a/zjit/src/backend/lir.rs
+++ b/zjit/src/backend/lir.rs
@@ -1,8 +1,8 @@
use std::collections::HashMap;
use std::fmt;
use std::mem::take;
-use crate::cruby::{Qundef, RUBY_OFFSET_CFP_PC, RUBY_OFFSET_CFP_SP, SIZEOF_VALUE_I32, VM_ENV_DATA_SIZE};
-use crate::state::ZJITState;
+use crate::codegen::local_size_and_idx_to_ep_offset;
+use crate::cruby::{Qundef, RUBY_OFFSET_CFP_PC, RUBY_OFFSET_CFP_SP, SIZEOF_VALUE_I32};
use crate::{cruby::VALUE};
use crate::backend::current::*;
use crate::virtualmem::CodePtr;
@@ -1751,6 +1751,15 @@ impl Assembler
ret
}
+ /// Compile with a limited number of registers. Used only for unit tests.
+ #[cfg(test)]
+ pub fn compile_with_num_regs(self, cb: &mut CodeBlock, num_regs: usize) -> (CodePtr, Vec<u32>)
+ {
+ let mut alloc_regs = Self::get_alloc_regs();
+ let alloc_regs = alloc_regs.drain(0..num_regs).collect();
+ self.compile_with_regs(cb, alloc_regs).unwrap()
+ }
+
/// Compile Target::SideExit and convert it into Target::CodePtr for all instructions
#[must_use]
pub fn compile_side_exits(&mut self) -> Option<()> {
@@ -1788,7 +1797,7 @@ impl Assembler
asm_comment!(self, "write locals: {locals:?}");
for (idx, &opnd) in locals.iter().enumerate() {
let opnd = split_store_source(self, opnd);
- self.store(Opnd::mem(64, SP, (-(VM_ENV_DATA_SIZE as i32) - locals.len() as i32 + idx as i32) * SIZEOF_VALUE_I32), opnd);
+ self.store(Opnd::mem(64, SP, (-local_size_and_idx_to_ep_offset(locals.len(), idx) - 1) * SIZEOF_VALUE_I32), opnd);
}
asm_comment!(self, "save cfp->pc");
@@ -1800,10 +1809,6 @@ impl Assembler
let cfp_sp = Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP);
self.store(cfp_sp, Opnd::Reg(Assembler::SCRATCH_REG));
- asm_comment!(self, "rewind caller frames");
- self.mov(C_ARG_OPNDS[0], Assembler::return_addr_opnd());
- self.ccall(Self::rewind_caller_frames as *const u8, vec![]);
-
asm_comment!(self, "exit to the interpreter");
self.frame_teardown();
self.mov(C_RET_OPND, Opnd::UImm(Qundef.as_u64()));
@@ -1814,13 +1819,6 @@ impl Assembler
}
Some(())
}
-
- #[unsafe(no_mangle)]
- extern "C" fn rewind_caller_frames(addr: *const u8) {
- if ZJITState::is_iseq_return_addr(addr) {
- unimplemented!("Can't side-exit from JIT-JIT call: rewind_caller_frames is not implemented yet");
- }
- }
}
impl fmt::Debug for Assembler {
diff --git a/zjit/src/backend/x86_64/mod.rs b/zjit/src/backend/x86_64/mod.rs
index cf62cdd7f5..d83fc184f9 100644
--- a/zjit/src/backend/x86_64/mod.rs
+++ b/zjit/src/backend/x86_64/mod.rs
@@ -109,11 +109,6 @@ impl Assembler
vec![RAX_REG, RCX_REG, RDX_REG, RSI_REG, RDI_REG, R8_REG, R9_REG, R10_REG, R11_REG]
}
- /// Get the address that the current frame returns to
- pub fn return_addr_opnd() -> Opnd {
- Opnd::mem(64, Opnd::Reg(RSP_REG), 0)
- }
-
// These are the callee-saved registers in the x86-64 SysV ABI
// RBX, RSP, RBP, and R12–R15
@@ -298,19 +293,24 @@ impl Assembler
let opnd1 = asm.load(*src);
asm.mov(*dest, opnd1);
},
- (Opnd::Mem(_), Opnd::UImm(value)) => {
- // 32-bit values will be sign-extended
- if imm_num_bits(*value as i64) > 32 {
+ (Opnd::Mem(Mem { num_bits, .. }), Opnd::UImm(value)) => {
+ // For 64 bit destinations, 32-bit values will be sign-extended
+ if *num_bits == 64 && imm_num_bits(*value as i64) > 32 {
let opnd1 = asm.load(*src);
asm.mov(*dest, opnd1);
} else {
asm.mov(*dest, *src);
}
},
- (Opnd::Mem(_), Opnd::Imm(value)) => {
- if imm_num_bits(*value) > 32 {
+ (Opnd::Mem(Mem { num_bits, .. }), Opnd::Imm(value)) => {
+ // For 64 bit destinations, 32-bit values will be sign-extended
+ if *num_bits == 64 && imm_num_bits(*value) > 32 {
let opnd1 = asm.load(*src);
asm.mov(*dest, opnd1);
+ } else if uimm_num_bits(*value as u64) <= *num_bits {
+ // If the bit string is short enough for the destination, use the unsigned representation.
+ // Note that 64-bit and negative values are ruled out.
+ asm.mov(*dest, Opnd::UImm(*value as u64));
} else {
asm.mov(*dest, *src);
}
@@ -859,20 +859,17 @@ impl Assembler
}
}
-/*
#[cfg(test)]
mod tests {
- use crate::disasm::assert_disasm;
- #[cfg(feature = "disasm")]
- use crate::disasm::{unindent, disasm_addr_range};
-
+ use crate::assertions::assert_disasm;
use super::*;
fn setup_asm() -> (Assembler, CodeBlock) {
- (Assembler::new(0), CodeBlock::new_dummy(1024))
+ (Assembler::new(), CodeBlock::new_dummy())
}
#[test]
+ #[ignore]
fn test_emit_add_lt_32_bits() {
let (mut asm, mut cb) = setup_asm();
@@ -883,6 +880,7 @@ mod tests {
}
#[test]
+ #[ignore]
fn test_emit_add_gt_32_bits() {
let (mut asm, mut cb) = setup_asm();
@@ -893,6 +891,7 @@ mod tests {
}
#[test]
+ #[ignore]
fn test_emit_and_lt_32_bits() {
let (mut asm, mut cb) = setup_asm();
@@ -903,6 +902,7 @@ mod tests {
}
#[test]
+ #[ignore]
fn test_emit_and_gt_32_bits() {
let (mut asm, mut cb) = setup_asm();
@@ -957,6 +957,7 @@ mod tests {
}
#[test]
+ #[ignore]
fn test_emit_or_lt_32_bits() {
let (mut asm, mut cb) = setup_asm();
@@ -967,6 +968,7 @@ mod tests {
}
#[test]
+ #[ignore]
fn test_emit_or_gt_32_bits() {
let (mut asm, mut cb) = setup_asm();
@@ -977,6 +979,7 @@ mod tests {
}
#[test]
+ #[ignore]
fn test_emit_sub_lt_32_bits() {
let (mut asm, mut cb) = setup_asm();
@@ -987,6 +990,7 @@ mod tests {
}
#[test]
+ #[ignore]
fn test_emit_sub_gt_32_bits() {
let (mut asm, mut cb) = setup_asm();
@@ -1017,6 +1021,7 @@ mod tests {
}
#[test]
+ #[ignore]
fn test_emit_xor_lt_32_bits() {
let (mut asm, mut cb) = setup_asm();
@@ -1027,6 +1032,7 @@ mod tests {
}
#[test]
+ #[ignore]
fn test_emit_xor_gt_32_bits() {
let (mut asm, mut cb) = setup_asm();
@@ -1050,6 +1056,7 @@ mod tests {
}
#[test]
+ #[ignore]
fn test_merge_lea_mem() {
let (mut asm, mut cb) = setup_asm();
@@ -1064,6 +1071,7 @@ mod tests {
}
#[test]
+ #[ignore]
fn test_replace_cmp_0() {
let (mut asm, mut cb) = setup_asm();
@@ -1216,6 +1224,7 @@ mod tests {
}
#[test]
+ #[ignore]
fn test_reorder_c_args_with_insn_out() {
let (mut asm, mut cb) = setup_asm();
@@ -1259,15 +1268,16 @@ mod tests {
asm.compile_with_num_regs(&mut cb, 1);
- assert_disasm!(cb, "48837b1001b804000000480f4f03488903", {"
+ assert_disasm!(cb, "48837b1001bf04000000480f4f3b48893b", {"
0x0: cmp qword ptr [rbx + 0x10], 1
- 0x5: mov eax, 4
- 0xa: cmovg rax, qword ptr [rbx]
- 0xe: mov qword ptr [rbx], rax
+ 0x5: mov edi, 4
+ 0xa: cmovg rdi, qword ptr [rbx]
+ 0xe: mov qword ptr [rbx], rdi
"});
}
#[test]
+ #[ignore]
fn test_csel_split() {
let (mut asm, mut cb) = setup_asm();
@@ -1284,6 +1294,19 @@ mod tests {
0x13: mov qword ptr [rbx], rax
"});
}
-}
-*/
+ #[test]
+ fn test_mov_m32_imm32() {
+ let (mut asm, mut cb) = setup_asm();
+
+ let shape_opnd = Opnd::mem(32, C_RET_OPND, 0);
+ asm.mov(shape_opnd, Opnd::UImm(0x8000_0001));
+ asm.mov(shape_opnd, Opnd::Imm(0x8000_0001));
+
+ asm.compile_with_num_regs(&mut cb, 0);
+ assert_disasm!(cb, "c70001000080c70001000080", {"
+ 0x0: mov dword ptr [rax], 0x80000001
+ 0x6: mov dword ptr [rax], 0x80000001
+ "});
+ }
+}