-rwxr-xr-x  .github/actions/compilers/entrypoint.sh  17
-rw-r--r--  .github/actions/setup/directories/action.yml  2
-rw-r--r--  .github/workflows/zjit-macos.yml  43
-rw-r--r--  .github/workflows/zjit-ubuntu.yml  43
-rw-r--r--  bootstraptest/test_yjit.rb  2
-rw-r--r--  common.mk  3
-rw-r--r--  ext/date/date_core.c  16
-rw-r--r--  ext/socket/lib/socket.rb  34
-rw-r--r--  file.c  4
-rw-r--r--  gc.c  10
-rw-r--r--  gc/mmtk/src/abi.rs  7
-rw-r--r--  hash.c  9
-rw-r--r--  imemo.c  2
-rw-r--r--  include/ruby/internal/fl_type.h  35
-rw-r--r--  internal/class.h  27
-rw-r--r--  internal/string.h  1
-rw-r--r--  internal/thread.h  3
-rw-r--r--  lib/bundler/cli.rb  2
-rw-r--r--  lib/rubygems/ssl_certs/rubygems.org/GlobalSign.pem (renamed from lib/rubygems/ssl_certs/rubygems.org/GlobalSignRootCA_R3.pem)  0
-rw-r--r--  lib/rubygems/ssl_certs/rubygems.org/GlobalSignRootCA.pem  21
-rw-r--r--  misc/lldb_rb/commands/print_flags_command.py  2
-rw-r--r--  namespace.c  3
-rw-r--r--  object.c  4
-rw-r--r--  prism_compile.c  17
-rw-r--r--  ractor.c  130
-rw-r--r--  ractor_sync.c  1
-rw-r--r--  re.c  15
-rw-r--r--  ruby_atomic.h  6
-rw-r--r--  rubyparser.h  2
-rw-r--r--  scheduler.c  29
-rw-r--r--  shape.c  137
-rw-r--r--  shape.h  73
-rw-r--r--  spec/bundler/bundler/friendly_errors_spec.rb  3
-rw-r--r--  st.c  9
-rw-r--r--  string.c  11
-rw-r--r--  struct.c  3
-rw-r--r--  test/ruby/namespace/instance_variables.rb  21
-rw-r--r--  test/ruby/test_compile_prism.rb  3
-rw-r--r--  test/ruby/test_data.rb  6
-rw-r--r--  test/ruby/test_namespace.rb  20
-rw-r--r--  test/ruby/test_object_id.rb  46
-rw-r--r--  test/ruby/test_ractor.rb  35
-rw-r--r--  test/ruby/test_struct.rb  6
-rw-r--r--  test/ruby/test_variable.rb  13
-rw-r--r--  test/ruby/test_zjit.rb  54
-rw-r--r--  test/socket/test_socket.rb  26
-rw-r--r--  thread.c  73
-rw-r--r--  thread_none.c  6
-rw-r--r--  thread_pthread.c  25
-rw-r--r--  thread_win32.c  6
-rw-r--r--  time.c  50
-rw-r--r--  variable.c  273
-rw-r--r--  vm.c  4
-rw-r--r--  vm_callinfo.h  5
-rw-r--r--  vm_insnhelper.c  4
-rw-r--r--  yjit.c  8
-rw-r--r--  yjit/bindgen/src/main.rs  3
-rw-r--r--  yjit/src/codegen.rs  10
-rw-r--r--  yjit/src/cruby.rs  12
-rw-r--r--  yjit/src/cruby_bindings.inc.rs  27
-rw-r--r--  zjit/bindgen/src/main.rs  1
-rw-r--r--  zjit/src/backend/arm64/mod.rs  5
-rw-r--r--  zjit/src/backend/lir.rs  17
-rw-r--r--  zjit/src/backend/x86_64/mod.rs  5
-rw-r--r--  zjit/src/codegen.rs  121
-rw-r--r--  zjit/src/cruby.rs  12
-rw-r--r--  zjit/src/cruby_bindings.inc.rs  24
-rw-r--r--  zjit/src/hir.rs  15
-rw-r--r--  zjit/src/state.rs  30
69 files changed, 1071 insertions, 621 deletions
diff --git a/.github/actions/compilers/entrypoint.sh b/.github/actions/compilers/entrypoint.sh
index 503143b293..ad9fa87a11 100755
--- a/.github/actions/compilers/entrypoint.sh
+++ b/.github/actions/compilers/entrypoint.sh
@@ -85,7 +85,6 @@ setup_launchable() {
export LAUNCHABLE_SESSION_DIR=${builddir}
local github_ref="${GITHUB_REF//\//_}"
local build_name="${github_ref}"_"${GITHUB_PR_HEAD_SHA}"
- btests+=--launchable-test-reports="${btest_report_path}"
launchable record build --name "${build_name}" || true
launchable record session \
--build "${build_name}" \
@@ -98,9 +97,8 @@ setup_launchable() {
--flavor cppflags="${INPUT_CPPFLAGS}" \
--test-suite btest \
> "${builddir}"/${btest_session_file} \
- || true
+ && btests+=--launchable-test-reports="${btest_report_path}" || :
if [ "$INPUT_CHECK" = "true" ]; then
- tests+=--launchable-test-reports="${test_report_path}"
launchable record session \
--build "${build_name}" \
--flavor test_task=test-all \
@@ -112,9 +110,8 @@ setup_launchable() {
--flavor cppflags="${INPUT_CPPFLAGS}" \
--test-suite test-all \
> "${builddir}"/${test_all_session_file} \
- || true
+ && tests+=--launchable-test-reports="${test_report_path}" || :
mkdir "${builddir}"/"${test_spec_report_path}"
- spec_opts+=--launchable-test-reports="${test_spec_report_path}"
launchable record session \
--build "${build_name}" \
--flavor test_task=test-spec \
@@ -126,7 +123,7 @@ setup_launchable() {
--flavor cppflags="${INPUT_CPPFLAGS}" \
--test-suite test-spec \
> "${builddir}"/${test_spec_session_file} \
- || true
+ && spec_opts+=--launchable-test-reports="${test_spec_report_path}" || :
fi
}
launchable_record_test() {
@@ -145,11 +142,13 @@ if [ "$LAUNCHABLE_ENABLED" = "true" ]; then
test_all_session_file='launchable_test_all_session.txt'
btest_session_file='launchable_btest_session.txt'
test_spec_session_file='launchable_test_spec_session.txt'
- setup_launchable & setup_pid=$!
- (sleep 180; echo "setup_launchable timed out; killing"; kill "$setup_pid" 2> /dev/null) & sleep_pid=$!
+ setup_pid=$$
+ (sleep 180; echo "setup_launchable timed out; killing"; kill -INT "$setup_pid" 2> /dev/null) & sleep_pid=$!
launchable_failed=false
- wait -f "$setup_pid" || launchable_failed=true
+ trap "launchable_failed=true" INT
+ setup_launchable
kill "$sleep_pid" 2> /dev/null
+ trap - INT
echo "::endgroup::"
$launchable_failed || trap launchable_record_test EXIT
fi
diff --git a/.github/actions/setup/directories/action.yml b/.github/actions/setup/directories/action.yml
index 48e2c64a96..f16ce21e0e 100644
--- a/.github/actions/setup/directories/action.yml
+++ b/.github/actions/setup/directories/action.yml
@@ -183,3 +183,5 @@ runs:
${{ steps.clean.outputs.distclean }}
${{ steps.clean.outputs.remained-files }}
${{ steps.clean.outputs.final }}
+ # rmdir randomly fails due to launchable files
+ continue-on-error: true
diff --git a/.github/workflows/zjit-macos.yml b/.github/workflows/zjit-macos.yml
index eb7dacd4e2..fa161b31a2 100644
--- a/.github/workflows/zjit-macos.yml
+++ b/.github/workflows/zjit-macos.yml
@@ -42,6 +42,9 @@ jobs:
configure: '--enable-zjit=dev'
tests: '../src/test/ruby/test_zjit.rb'
+ - test_task: 'btest'
+ configure: '--enable-zjit=dev'
+
env:
GITPULLOPTIONS: --no-tags origin ${{ github.ref }}
RUN_OPTS: ${{ matrix.zjit_opts }}
@@ -100,6 +103,45 @@ jobs:
ruby -ne 'raise "Disassembly seems broken in dev build (output has too few lines)" unless $_.to_i > 10'
if: ${{ contains(matrix.configure, 'jit=dev') }}
+ - name: btest
+ run: |
+ RUST_BACKTRACE=1 ruby --disable=gems ../src/bootstraptest/runner.rb --ruby="./miniruby -I../src/lib -I. -I.ext/common --zjit-call-threshold=1" \
+ ../src/bootstraptest/test_attr.rb \
+ ../src/bootstraptest/test_constant_cache.rb \
+ ../src/bootstraptest/test_env.rb \
+ ../src/bootstraptest/test_finalizer.rb \
+ ../src/bootstraptest/test_flip.rb \
+ ../src/bootstraptest/test_literal.rb \
+ ../src/bootstraptest/test_literal_suffix.rb \
+ ../src/bootstraptest/test_string.rb \
+ ../src/bootstraptest/test_struct.rb \
+ ../src/bootstraptest/test_yjit_30k_ifelse.rb \
+ ../src/bootstraptest/test_yjit_30k_methods.rb
+ # ../src/bootstraptest/test_autoload.rb \
+ # ../src/bootstraptest/test_block.rb \
+ # ../src/bootstraptest/test_class.rb \
+ # ../src/bootstraptest/test_eval.rb \
+ # ../src/bootstraptest/test_exception.rb \
+ # ../src/bootstraptest/test_fiber.rb \
+ # ../src/bootstraptest/test_flow.rb \
+ # ../src/bootstraptest/test_fork.rb \
+ # ../src/bootstraptest/test_gc.rb \
+ # ../src/bootstraptest/test_insns.rb \
+ # ../src/bootstraptest/test_io.rb \
+ # ../src/bootstraptest/test_jump.rb \
+ # ../src/bootstraptest/test_load.rb \
+ # ../src/bootstraptest/test_marshal.rb \
+ # ../src/bootstraptest/test_massign.rb \
+ # ../src/bootstraptest/test_method.rb \
+ # ../src/bootstraptest/test_objectspace.rb \
+ # ../src/bootstraptest/test_proc.rb \
+ # ../src/bootstraptest/test_ractor.rb \
+ # ../src/bootstraptest/test_syntax.rb \
+ # ../src/bootstraptest/test_thread.rb \
+ # ../src/bootstraptest/test_yjit.rb \
+ # ../src/bootstraptest/test_yjit_rust_port.rb \
+ if: ${{ matrix.test_task == 'btest' }}
+
- name: make ${{ matrix.test_task }}
run: >-
make -s ${{ matrix.test_task }} ${TESTS:+TESTS="$TESTS"}
@@ -113,6 +155,7 @@ jobs:
PRECHECK_BUNDLED_GEMS: 'no'
TESTS: ${{ matrix.tests }}
continue-on-error: ${{ matrix.continue-on-test_task || false }}
+ if: ${{ matrix.test_task != 'btest' }}
result:
if: ${{ always() }}
diff --git a/.github/workflows/zjit-ubuntu.yml b/.github/workflows/zjit-ubuntu.yml
index d5b6c71f31..7a6c1dfe0b 100644
--- a/.github/workflows/zjit-ubuntu.yml
+++ b/.github/workflows/zjit-ubuntu.yml
@@ -44,6 +44,9 @@ jobs:
configure: '--enable-zjit=dev'
tests: '../src/test/ruby/test_zjit.rb'
+ - test_task: 'btest'
+ configure: '--enable-zjit=dev'
+
env:
GITPULLOPTIONS: --no-tags origin ${{ github.ref }}
RUN_OPTS: ${{ matrix.zjit_opts }}
@@ -122,6 +125,45 @@ jobs:
run: ./miniruby --zjit -v | grep "+ZJIT"
if: ${{ matrix.configure != '--disable-zjit' }}
+ - name: btest
+ run: |
+ RUST_BACKTRACE=1 ruby --disable=gems ../src/bootstraptest/runner.rb --ruby="./miniruby -I../src/lib -I. -I.ext/common --zjit-call-threshold=1" \
+ ../src/bootstraptest/test_attr.rb \
+ ../src/bootstraptest/test_constant_cache.rb \
+ ../src/bootstraptest/test_env.rb \
+ ../src/bootstraptest/test_finalizer.rb \
+ ../src/bootstraptest/test_flip.rb \
+ ../src/bootstraptest/test_literal.rb \
+ ../src/bootstraptest/test_literal_suffix.rb \
+ ../src/bootstraptest/test_massign.rb \
+ ../src/bootstraptest/test_string.rb \
+ ../src/bootstraptest/test_struct.rb \
+ ../src/bootstraptest/test_yjit_30k_ifelse.rb \
+ ../src/bootstraptest/test_yjit_30k_methods.rb
+ # ../src/bootstraptest/test_autoload.rb \
+ # ../src/bootstraptest/test_block.rb \
+ # ../src/bootstraptest/test_class.rb \
+ # ../src/bootstraptest/test_eval.rb \
+ # ../src/bootstraptest/test_exception.rb \
+ # ../src/bootstraptest/test_fiber.rb \
+ # ../src/bootstraptest/test_flow.rb \
+ # ../src/bootstraptest/test_fork.rb \
+ # ../src/bootstraptest/test_gc.rb \
+ # ../src/bootstraptest/test_insns.rb \
+ # ../src/bootstraptest/test_io.rb \
+ # ../src/bootstraptest/test_jump.rb \
+ # ../src/bootstraptest/test_load.rb \
+ # ../src/bootstraptest/test_marshal.rb \
+ # ../src/bootstraptest/test_method.rb \
+ # ../src/bootstraptest/test_objectspace.rb \
+ # ../src/bootstraptest/test_proc.rb \
+ # ../src/bootstraptest/test_ractor.rb \
+ # ../src/bootstraptest/test_syntax.rb \
+ # ../src/bootstraptest/test_thread.rb \
+ # ../src/bootstraptest/test_yjit.rb \
+ # ../src/bootstraptest/test_yjit_rust_port.rb \
+ if: ${{ matrix.test_task == 'btest' }}
+
- name: make ${{ matrix.test_task }}
run: >-
make -s ${{ matrix.test_task }} ${TESTS:+TESTS="$TESTS"}
@@ -137,6 +179,7 @@ jobs:
LIBCLANG_PATH: ${{ matrix.libclang_path }}
TESTS: ${{ matrix.tests }}
continue-on-error: ${{ matrix.continue-on-test_task || false }}
+ if: ${{ matrix.test_task != 'btest' }}
result:
if: ${{ always() }}
diff --git a/bootstraptest/test_yjit.rb b/bootstraptest/test_yjit.rb
index 8d02998254..d480369c75 100644
--- a/bootstraptest/test_yjit.rb
+++ b/bootstraptest/test_yjit.rb
@@ -220,7 +220,7 @@ assert_equal 'Sub', %q{
call(Sub.new('o')).class
}
-# String#dup with FL_EXIVAR
+# String#dup with generic ivars
assert_equal '["str", "ivar"]', %q{
def str_dup(str) = str.dup
str = "str"
diff --git a/common.mk b/common.mk
index 98f4baf938..f94ad33d88 100644
--- a/common.mk
+++ b/common.mk
@@ -15119,6 +15119,8 @@ re.$(OBJEXT): {$(VPATH)}missing.h
re.$(OBJEXT): {$(VPATH)}node.h
re.$(OBJEXT): {$(VPATH)}onigmo.h
re.$(OBJEXT): {$(VPATH)}oniguruma.h
+re.$(OBJEXT): {$(VPATH)}ractor.h
+re.$(OBJEXT): {$(VPATH)}ractor_core.h
re.$(OBJEXT): {$(VPATH)}re.c
re.$(OBJEXT): {$(VPATH)}re.h
re.$(OBJEXT): {$(VPATH)}regenc.h
@@ -15134,6 +15136,7 @@ re.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
re.$(OBJEXT): {$(VPATH)}thread_native.h
re.$(OBJEXT): {$(VPATH)}util.h
re.$(OBJEXT): {$(VPATH)}vm_core.h
+re.$(OBJEXT): {$(VPATH)}vm_debug.h
re.$(OBJEXT): {$(VPATH)}vm_opts.h
regcomp.$(OBJEXT): $(hdrdir)/ruby.h
regcomp.$(OBJEXT): $(hdrdir)/ruby/ruby.h
diff --git a/ext/date/date_core.c b/ext/date/date_core.c
index d01b99206f..44dbf4fbcf 100644
--- a/ext/date/date_core.c
+++ b/ext/date/date_core.c
@@ -7517,10 +7517,7 @@ d_lite_marshal_dump_old(VALUE self)
m_of_in_day(dat),
DBL2NUM(m_sg(dat)));
- if (FL_TEST(self, FL_EXIVAR)) {
- rb_copy_generic_ivar(a, self);
- FL_SET(a, FL_EXIVAR);
- }
+ rb_copy_generic_ivar(a, self);
return a;
}
@@ -7542,10 +7539,8 @@ d_lite_marshal_dump(VALUE self)
INT2FIX(m_of(dat)),
DBL2NUM(m_sg(dat)));
- if (FL_TEST(self, FL_EXIVAR)) {
- rb_copy_generic_ivar(a, self);
- FL_SET(a, FL_EXIVAR);
- }
+
+ rb_copy_generic_ivar(a, self);
return a;
}
@@ -7618,10 +7613,7 @@ d_lite_marshal_load(VALUE self, VALUE a)
HAVE_JD | HAVE_DF);
}
- if (FL_TEST(a, FL_EXIVAR)) {
- rb_copy_generic_ivar(self, a);
- FL_SET(self, FL_EXIVAR);
- }
+ rb_copy_generic_ivar(self, a);
return self;
}
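
A small Ruby-level sketch of the behavior these hunks preserve: instance variables attached to a Date still survive a Marshal round-trip, now copied unconditionally by rb_copy_generic_ivar instead of being gated on the removed FL_EXIVAR flag.

    require "date"

    d = Date.new(2025, 7, 1)
    d.instance_variable_set(:@note, "ivar")    # stored as a generic ivar
    copy = Marshal.load(Marshal.dump(d))
    copy.instance_variable_get(:@note)         # => "ivar"
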
diff --git a/ext/socket/lib/socket.rb b/ext/socket/lib/socket.rb
index 60dd45bd4f..7c3f6f5b91 100644
--- a/ext/socket/lib/socket.rb
+++ b/ext/socket/lib/socket.rb
@@ -643,6 +643,7 @@ class Socket < BasicSocket
#
# [:resolv_timeout] Specifies the timeout in seconds from when the hostname resolution starts.
# [:connect_timeout] This method sequentially attempts connecting to all candidate destination addresses.<br>The +connect_timeout+ specifies the timeout in seconds from the start of the connection attempt to the last candidate.<br>By default, all connection attempts continue until the timeout occurs.<br>When +fast_fallback:false+ is explicitly specified,<br>a timeout is set for each connection attempt and any connection attempt that exceeds its timeout will be canceled.
+ # [:open_timeout] Specifies the timeout in seconds from the start of the method execution.<br>If this timeout is reached while there are still addresses that have not yet been attempted for connection, no further attempts will be made.
# [:fast_fallback] Enables the Happy Eyeballs Version 2 algorithm (enabled by default).
#
# If a block is given, the block is called with the socket.
@@ -656,11 +657,16 @@ class Socket < BasicSocket
# sock.close_write
# puts sock.read
# }
- def self.tcp(host, port, local_host = nil, local_port = nil, connect_timeout: nil, resolv_timeout: nil, fast_fallback: tcp_fast_fallback, &) # :yield: socket
+ def self.tcp(host, port, local_host = nil, local_port = nil, connect_timeout: nil, resolv_timeout: nil, open_timeout: nil, fast_fallback: tcp_fast_fallback, &) # :yield: socket
+
+ if open_timeout && (connect_timeout || resolv_timeout)
+ raise ArgumentError, "Cannot specify open_timeout along with connect_timeout or resolv_timeout"
+ end
+
sock = if fast_fallback && !(host && ip_address?(host))
- tcp_with_fast_fallback(host, port, local_host, local_port, connect_timeout:, resolv_timeout:)
+ tcp_with_fast_fallback(host, port, local_host, local_port, connect_timeout:, resolv_timeout:, open_timeout:)
else
- tcp_without_fast_fallback(host, port, local_host, local_port, connect_timeout:, resolv_timeout:)
+ tcp_without_fast_fallback(host, port, local_host, local_port, connect_timeout:, resolv_timeout:, open_timeout:)
end
if block_given?
@@ -674,7 +680,7 @@ class Socket < BasicSocket
end
end
- def self.tcp_with_fast_fallback(host, port, local_host = nil, local_port = nil, connect_timeout: nil, resolv_timeout: nil)
+ def self.tcp_with_fast_fallback(host, port, local_host = nil, local_port = nil, connect_timeout: nil, resolv_timeout: nil, open_timeout: nil)
if local_host || local_port
local_addrinfos = Addrinfo.getaddrinfo(local_host, local_port, nil, :STREAM, timeout: resolv_timeout)
resolving_family_names = local_addrinfos.map { |lai| ADDRESS_FAMILIES.key(lai.afamily) }.uniq
@@ -692,6 +698,7 @@ class Socket < BasicSocket
resolution_delay_expires_at = nil
connection_attempt_delay_expires_at = nil
user_specified_connect_timeout_at = nil
+ user_specified_open_timeout_at = open_timeout ? now + open_timeout : nil
last_error = nil
last_error_from_thread = false
@@ -784,7 +791,10 @@ class Socket < BasicSocket
ends_at =
if resolution_store.any_addrinfos?
- resolution_delay_expires_at || connection_attempt_delay_expires_at
+ [(resolution_delay_expires_at || connection_attempt_delay_expires_at),
+ user_specified_open_timeout_at].compact.min
+ elsif user_specified_open_timeout_at
+ user_specified_open_timeout_at
else
[user_specified_resolv_timeout_at, user_specified_connect_timeout_at].compact.max
end
@@ -885,6 +895,8 @@ class Socket < BasicSocket
end
end
+ raise(Errno::ETIMEDOUT, 'user specified timeout') if expired?(now, user_specified_open_timeout_at)
+
if resolution_store.empty_addrinfos?
if connecting_sockets.empty? && resolution_store.resolved_all_families?
if last_error_from_thread
@@ -912,7 +924,7 @@ class Socket < BasicSocket
end
end
- def self.tcp_without_fast_fallback(host, port, local_host, local_port, connect_timeout:, resolv_timeout:)
+ def self.tcp_without_fast_fallback(host, port, local_host, local_port, connect_timeout:, resolv_timeout:, open_timeout:)
last_error = nil
ret = nil
@@ -921,7 +933,10 @@ class Socket < BasicSocket
local_addr_list = Addrinfo.getaddrinfo(local_host, local_port, nil, :STREAM, nil)
end
- Addrinfo.foreach(host, port, nil, :STREAM, timeout: resolv_timeout) {|ai|
+ timeout = open_timeout ? open_timeout : resolv_timeout
+ starts_at = current_clock_time
+
+ Addrinfo.foreach(host, port, nil, :STREAM, timeout:) {|ai|
if local_addr_list
local_addr = local_addr_list.find {|local_ai| local_ai.afamily == ai.afamily }
next unless local_addr
@@ -929,9 +944,10 @@ class Socket < BasicSocket
local_addr = nil
end
begin
+ timeout = open_timeout ? open_timeout - (current_clock_time - starts_at) : connect_timeout
sock = local_addr ?
- ai.connect_from(local_addr, timeout: connect_timeout) :
- ai.connect(timeout: connect_timeout)
+ ai.connect_from(local_addr, timeout:) :
+ ai.connect(timeout:)
rescue SystemCallError
last_error = $!
next
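
A minimal usage sketch of the new open_timeout option (host and port are placeholders): it bounds the whole sequence of name resolution and connection attempts, raises Errno::ETIMEDOUT ("user specified timeout") on expiry, and cannot be combined with connect_timeout or resolv_timeout (ArgumentError).

    require "socket"

    # The entire resolve-and-connect sequence must finish within 5 seconds.
    Socket.tcp("example.com", 80, open_timeout: 5) do |sock|
      sock.close_write
      puts sock.read
    end
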
diff --git a/file.c b/file.c
index 322df6dbec..936e0cdb95 100644
--- a/file.c
+++ b/file.c
@@ -662,7 +662,7 @@ rb_stat_dev(VALUE self)
#if RUBY_USE_STATX
unsigned int m = get_stat(self)->stx_dev_major;
unsigned int n = get_stat(self)->stx_dev_minor;
- return DEVT2NUM(makedev(m, n));
+ return ULL2NUM(makedev(m, n));
#elif SIZEOF_STRUCT_STAT_ST_DEV <= SIZEOF_DEV_T
return DEVT2NUM(get_stat(self)->st_dev);
#elif SIZEOF_STRUCT_STAT_ST_DEV <= SIZEOF_LONG
@@ -833,7 +833,7 @@ rb_stat_rdev(VALUE self)
#if RUBY_USE_STATX
unsigned int m = get_stat(self)->stx_rdev_major;
unsigned int n = get_stat(self)->stx_rdev_minor;
- return DEVT2NUM(makedev(m, n));
+ return ULL2NUM(makedev(m, n));
#elif !defined(HAVE_STRUCT_STAT_ST_RDEV)
return Qnil;
#elif SIZEOF_STRUCT_STAT_ST_RDEV <= SIZEOF_DEV_T
diff --git a/gc.c b/gc.c
index aefe8a116b..eacd8dae86 100644
--- a/gc.c
+++ b/gc.c
@@ -2070,9 +2070,8 @@ rb_gc_obj_free_vm_weak_references(VALUE obj)
{
obj_free_object_id(obj);
- if (FL_TEST_RAW(obj, FL_EXIVAR)) {
+ if (rb_obj_exivar_p(obj)) {
rb_free_generic_ivar((VALUE)obj);
- FL_UNSET_RAW(obj, FL_EXIVAR);
}
switch (BUILTIN_TYPE(obj)) {
@@ -2317,7 +2316,7 @@ rb_obj_memsize_of(VALUE obj)
return 0;
}
- if (FL_TEST(obj, FL_EXIVAR)) {
+ if (rb_obj_exivar_p(obj)) {
size += rb_generic_ivar_memsize(obj);
}
@@ -3142,7 +3141,7 @@ rb_gc_mark_children(void *objspace, VALUE obj)
{
struct gc_mark_classext_foreach_arg foreach_args;
- if (FL_TEST_RAW(obj, FL_EXIVAR)) {
+ if (rb_obj_exivar_p(obj)) {
rb_mark_generic_ivar(obj);
}
@@ -4011,8 +4010,7 @@ vm_weak_table_gen_fields_foreach(st_data_t key, st_data_t value, st_data_t data)
case ST_DELETE:
free_gen_fields_tbl((VALUE)key, (struct gen_fields_tbl *)value);
-
- FL_UNSET((VALUE)key, FL_EXIVAR);
+ RBASIC_SET_SHAPE_ID((VALUE)key, ROOT_SHAPE_ID);
return ST_DELETE;
case ST_REPLACE: {
diff --git a/gc/mmtk/src/abi.rs b/gc/mmtk/src/abi.rs
index b425d9e50d..81e24679f0 100644
--- a/gc/mmtk/src/abi.rs
+++ b/gc/mmtk/src/abi.rs
@@ -12,9 +12,6 @@ pub const GC_THREAD_KIND_WORKER: libc::c_int = 1;
const HIDDEN_SIZE_MASK: usize = 0x0000FFFFFFFFFFFF;
-// Should keep in sync with C code.
-const RUBY_FL_EXIVAR: usize = 1 << 10;
-
// An opaque type for the C counterpart.
#[allow(non_camel_case_types)]
pub struct st_table;
@@ -93,10 +90,6 @@ impl RubyObjectAccess {
unsafe { self.flags_field().load::<usize>() }
}
- pub fn has_exivar_flag(&self) -> bool {
- (self.load_flags() & RUBY_FL_EXIVAR) != 0
- }
-
pub fn prefix_size() -> usize {
// Currently, a hidden size field of word size is placed before each object.
OBJREF_OFFSET
diff --git a/hash.c b/hash.c
index 2cc6828bb0..be26e0eb3f 100644
--- a/hash.c
+++ b/hash.c
@@ -1597,10 +1597,11 @@ VALUE
rb_hash_dup(VALUE hash)
{
const VALUE flags = RBASIC(hash)->flags;
- VALUE ret = hash_dup(hash, rb_obj_class(hash),
- flags & (FL_EXIVAR|RHASH_PROC_DEFAULT));
- if (flags & FL_EXIVAR)
+ VALUE ret = hash_dup(hash, rb_obj_class(hash), flags & RHASH_PROC_DEFAULT);
+
+ if (rb_obj_exivar_p(hash)) {
rb_copy_generic_ivar(ret, hash);
+ }
return ret;
}
@@ -2920,7 +2921,7 @@ hash_aset(st_data_t *key, st_data_t *val, struct update_arg *arg, int existing)
VALUE
rb_hash_key_str(VALUE key)
{
- if (!RB_FL_ANY_RAW(key, FL_EXIVAR) && RBASIC_CLASS(key) == rb_cString) {
+ if (!rb_obj_exivar_p(key) && RBASIC_CLASS(key) == rb_cString) {
return rb_fstring(key);
}
else {
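
A short sketch of what the rb_hash_dup change keeps working: a Hash carrying generic instance variables still has them copied on dup; the check merely moved from the FL_EXIVAR flag to the shape-based rb_obj_exivar_p.

    h = { a: 1 }
    h.instance_variable_set(:@tag, "orig")   # stored as a generic ivar on the Hash
    copy = h.dup
    copy.instance_variable_get(:@tag)        # => "orig", via rb_copy_generic_ivar
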
diff --git a/imemo.c b/imemo.c
index 6477be9d78..ebea6f6f25 100644
--- a/imemo.c
+++ b/imemo.c
@@ -155,11 +155,13 @@ rb_imemo_class_fields_clone(VALUE fields_obj)
if (rb_shape_too_complex_p(shape_id)) {
clone = rb_imemo_class_fields_new_complex(CLASS_OF(fields_obj), 0);
+ RBASIC_SET_SHAPE_ID(clone, shape_id);
st_table *src_table = rb_imemo_class_fields_complex_tbl(fields_obj);
st_replace(rb_imemo_class_fields_complex_tbl(clone), src_table);
}
else {
clone = imemo_class_fields_new(CLASS_OF(fields_obj), RSHAPE_CAPACITY(shape_id));
+ RBASIC_SET_SHAPE_ID(clone, shape_id);
MEMCPY(rb_imemo_class_fields_ptr(clone), rb_imemo_class_fields_ptr(fields_obj), VALUE, RSHAPE_LEN(shape_id));
}
diff --git a/include/ruby/internal/fl_type.h b/include/ruby/internal/fl_type.h
index 701118ef25..e52ccecedd 100644
--- a/include/ruby/internal/fl_type.h
+++ b/include/ruby/internal/fl_type.h
@@ -253,6 +253,21 @@ ruby_fl_type {
= 0,
/**
+ * @deprecated This flag was an implementation detail that should never have
+ * been exposed. Exists here for backwards
+ * compatibility only. You can safely forget about it.
+ */
+ RUBY_FL_EXIVAR
+
+#if defined(RBIMPL_HAVE_ENUM_ATTRIBUTE)
+ RBIMPL_ATTR_DEPRECATED(("FL_EXIVAR is an outdated implementation detail, it should not be used."))
+#elif defined(_MSC_VER)
+# pragma deprecated(RUBY_FL_EXIVAR)
+#endif
+
+ = 0,
+
+ /**
* This flag has something to do with Ractor. Multiple Ractors run without
* protecting each other. Sharing an object among Ractors are basically
* dangerous, disabled by default. This flag is used to bypass that
@@ -286,18 +301,12 @@ ruby_fl_type {
*/
RUBY_FL_UNUSED9 = (1<<9),
- /**
- * This flag has something to do with instance variables. 3rd parties need
- * not know, but there are several ways to store an object's instance
- * variables. Objects with this flag use so-called "generic" backend
- * storage. This distinction is purely an implementation detail. People
- * need not be aware of this working behind-the-scene.
- *
- * @internal
- *
- * As of writing everything except ::RObject and RModule use this scheme.
- */
- RUBY_FL_EXIVAR = (1<<10),
+ /**
+ * This flag is no longer in use
+ *
+ * @internal
+ */
+ RUBY_FL_UNUSED10 = (1<<10),
/**
* This flag has something to do with data immutability. When this flag is
@@ -399,7 +408,7 @@ enum {
# pragma deprecated(RUBY_FL_DUPPED)
#endif
- = (int)RUBY_T_MASK | (int)RUBY_FL_EXIVAR
+ = (int)RUBY_T_MASK
};
#undef RBIMPL_HAVE_ENUM_ATTRIBUTE
diff --git a/internal/class.h b/internal/class.h
index ff3486472a..2250d3f343 100644
--- a/internal/class.h
+++ b/internal/class.h
@@ -403,10 +403,6 @@ RCLASS_EXT_WRITABLE_LOOKUP(VALUE obj, const rb_namespace_t *ns)
if (ext)
return ext;
- if (!rb_shape_obj_too_complex_p(obj)) {
- rb_evict_ivars_to_hash(obj); // fallback to ivptr for ivars from shapes
- }
-
RB_VM_LOCKING() {
// re-check the classext is not created to avoid the multi-thread race
ext = RCLASS_EXT_TABLE_LOOKUP_INTERNAL(obj, ns);
@@ -525,17 +521,10 @@ RCLASS_WRITE_SUPER(VALUE klass, VALUE super)
}
static inline VALUE
-RCLASS_FIELDS_OBJ(VALUE obj)
+RCLASS_WRITABLE_ENSURE_FIELDS_OBJ(VALUE obj)
{
RUBY_ASSERT(RB_TYPE_P(obj, RUBY_T_CLASS) || RB_TYPE_P(obj, RUBY_T_MODULE));
- return RCLASSEXT_FIELDS_OBJ(RCLASS_EXT_READABLE(obj));
-}
-
-static inline VALUE
-RCLASS_ENSURE_FIELDS_OBJ(VALUE obj)
-{
- RUBY_ASSERT(RB_TYPE_P(obj, RUBY_T_CLASS) || RB_TYPE_P(obj, RUBY_T_MODULE));
- rb_classext_t *ext = RCLASS_EXT_READABLE(obj);
+ rb_classext_t *ext = RCLASS_EXT_WRITABLE(obj);
if (!ext->fields_obj) {
RB_OBJ_WRITE(obj, &ext->fields_obj, rb_imemo_class_fields_new(obj, 1));
}
@@ -553,22 +542,26 @@ static inline void
RCLASSEXT_SET_FIELDS_OBJ(VALUE obj, rb_classext_t *ext, VALUE fields_obj)
{
RUBY_ASSERT(RB_TYPE_P(obj, RUBY_T_CLASS) || RB_TYPE_P(obj, RUBY_T_MODULE));
- RB_OBJ_WRITE(obj, &ext->fields_obj, fields_obj);
+
+ VALUE old_fields_obj = ext->fields_obj;
+ RUBY_ATOMIC_VALUE_SET(ext->fields_obj, fields_obj);
+ RB_OBJ_WRITTEN(obj, old_fields_obj, fields_obj);
}
static inline void
-RCLASS_SET_FIELDS_OBJ(VALUE obj, VALUE fields_obj)
+RCLASS_WRITABLE_SET_FIELDS_OBJ(VALUE obj, VALUE fields_obj)
{
RUBY_ASSERT(RB_TYPE_P(obj, RUBY_T_CLASS) || RB_TYPE_P(obj, RUBY_T_MODULE));
- RCLASSEXT_SET_FIELDS_OBJ(obj, RCLASS_EXT_PRIME(obj), fields_obj);
+ RCLASSEXT_SET_FIELDS_OBJ(obj, RCLASS_EXT_WRITABLE(obj), fields_obj);
}
static inline uint32_t
RCLASS_FIELDS_COUNT(VALUE obj)
{
RUBY_ASSERT(RB_TYPE_P(obj, RUBY_T_CLASS) || RB_TYPE_P(obj, RUBY_T_MODULE));
- VALUE fields_obj = RCLASS_FIELDS_OBJ(obj);
+
+ VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
if (fields_obj) {
if (rb_shape_obj_too_complex_p(fields_obj)) {
return (uint32_t)rb_st_table_size(rb_imemo_class_fields_complex_tbl(fields_obj));
diff --git a/internal/string.h b/internal/string.h
index 50561924f2..d6fea62061 100644
--- a/internal/string.h
+++ b/internal/string.h
@@ -30,6 +30,7 @@ enum ruby_rstring_private_flags {
#endif
/* string.c */
+VALUE rb_str_dup_m(VALUE str);
VALUE rb_fstring(VALUE);
VALUE rb_fstring_cstr(const char *str);
VALUE rb_fstring_enc_new(const char *ptr, long len, rb_encoding *enc);
diff --git a/internal/thread.h b/internal/thread.h
index 8403ac2663..928126c3b0 100644
--- a/internal/thread.h
+++ b/internal/thread.h
@@ -83,6 +83,8 @@ RUBY_SYMBOL_EXPORT_END
int rb_threadptr_execute_interrupts(struct rb_thread_struct *th, int blocking_timing);
bool rb_thread_mn_schedulable(VALUE thread);
+bool rb_thread_resolve_unblock_function(rb_unblock_function_t **unblock_function, void **data2, struct rb_thread_struct *thread);
+
// interrupt exec
typedef VALUE (rb_interrupt_exec_func_t)(void *data);
@@ -90,6 +92,7 @@ typedef VALUE (rb_interrupt_exec_func_t)(void *data);
enum rb_interrupt_exec_flag {
rb_interrupt_exec_flag_none = 0x00,
rb_interrupt_exec_flag_value_data = 0x01,
+ rb_interrupt_exec_flag_new_thread = 0x02,
};
// interrupt the target_th and run func.
diff --git a/lib/bundler/cli.rb b/lib/bundler/cli.rb
index 51f71af501..c0c7d9f899 100644
--- a/lib/bundler/cli.rb
+++ b/lib/bundler/cli.rb
@@ -130,7 +130,7 @@ module Bundler
if man_pages.include?(command)
man_page = man_pages[command]
- if Bundler.which("man") && !man_path.match?(%r{^file:/.+!/META-INF/jruby.home/.+})
+ if Bundler.which("man") && !man_path.match?(%r{^(?:file:/.+!|uri:classloader:)/META-INF/jruby.home/.+})
Kernel.exec("man", man_page)
else
puts File.read("#{man_path}/#{File.basename(man_page)}.ronn")
diff --git a/lib/rubygems/ssl_certs/rubygems.org/GlobalSignRootCA_R3.pem b/lib/rubygems/ssl_certs/rubygems.org/GlobalSign.pem
index 8afb219058..8afb219058 100644
--- a/lib/rubygems/ssl_certs/rubygems.org/GlobalSignRootCA_R3.pem
+++ b/lib/rubygems/ssl_certs/rubygems.org/GlobalSign.pem
diff --git a/lib/rubygems/ssl_certs/rubygems.org/GlobalSignRootCA.pem b/lib/rubygems/ssl_certs/rubygems.org/GlobalSignRootCA.pem
deleted file mode 100644
index f4ce4ca43d..0000000000
--- a/lib/rubygems/ssl_certs/rubygems.org/GlobalSignRootCA.pem
+++ /dev/null
@@ -1,21 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG
-A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv
-b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw
-MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i
-YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT
-aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ
-jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp
-xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp
-1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG
-snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ
-U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8
-9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E
-BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B
-AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz
-yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE
-38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP
-AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad
-DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME
-HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A==
------END CERTIFICATE-----
diff --git a/misc/lldb_rb/commands/print_flags_command.py b/misc/lldb_rb/commands/print_flags_command.py
index 2b056dd098..bc494ae01a 100644
--- a/misc/lldb_rb/commands/print_flags_command.py
+++ b/misc/lldb_rb/commands/print_flags_command.py
@@ -17,7 +17,7 @@ class PrintFlagsCommand(RbBaseCommand):
flags = [
"RUBY_FL_WB_PROTECTED", "RUBY_FL_PROMOTED", "RUBY_FL_FINALIZE",
- "RUBY_FL_SHAREABLE", "RUBY_FL_EXIVAR", "RUBY_FL_FREEZE",
+ "RUBY_FL_SHAREABLE", "RUBY_FL_FREEZE",
"RUBY_FL_USER0", "RUBY_FL_USER1", "RUBY_FL_USER2", "RUBY_FL_USER3", "RUBY_FL_USER4",
"RUBY_FL_USER5", "RUBY_FL_USER6", "RUBY_FL_USER7", "RUBY_FL_USER8", "RUBY_FL_USER9",
"RUBY_FL_USER10", "RUBY_FL_USER11", "RUBY_FL_USER12", "RUBY_FL_USER13", "RUBY_FL_USER14",
diff --git a/namespace.c b/namespace.c
index 44afdd8f21..af7fb4459c 100644
--- a/namespace.c
+++ b/namespace.c
@@ -450,9 +450,6 @@ namespace_initialize(VALUE namespace)
// If a code in the namespace adds a constant, the constant will be visible even from root/main.
RCLASS_SET_PRIME_CLASSEXT_WRITABLE(namespace, true);
- // fallback to ivptr for ivars from shapes to manipulate the constant table
- rb_evict_ivars_to_hash(namespace);
-
// Get a clean constant table of Object even by writable one
// because ns was just created, so it has not touched any constants yet.
object_classext = RCLASS_EXT_WRITABLE_IN_NS(rb_cObject, ns);
diff --git a/object.c b/object.c
index a4da42d12f..03474389fd 100644
--- a/object.c
+++ b/object.c
@@ -373,9 +373,9 @@ init_copy(VALUE dest, VALUE obj)
if (OBJ_FROZEN(dest)) {
rb_raise(rb_eTypeError, "[bug] frozen object (%s) allocated", rb_obj_classname(dest));
}
- RBASIC(dest)->flags &= ~(T_MASK|FL_EXIVAR);
+ RBASIC(dest)->flags &= ~T_MASK;
// Copies the shape id from obj to dest
- RBASIC(dest)->flags |= RBASIC(obj)->flags & (T_MASK|FL_EXIVAR);
+ RBASIC(dest)->flags |= RBASIC(obj)->flags & T_MASK;
switch (BUILTIN_TYPE(obj)) {
case T_IMEMO:
rb_bug("Unreacheable");
diff --git a/prism_compile.c b/prism_compile.c
index c71c1429b2..2ae6c1db9e 100644
--- a/prism_compile.c
+++ b/prism_compile.c
@@ -5164,6 +5164,20 @@ pm_compile_target_node(rb_iseq_t *iseq, const pm_node_t *node, LINK_ANCHOR *cons
break;
}
+ case PM_SPLAT_NODE: {
+ // Splat nodes capture all values into an array. They can be used
+ // as targets in assignments or for loops.
+ //
+ // for *x in []; end
+ //
+ const pm_splat_node_t *cast = (const pm_splat_node_t *) node;
+
+ if (cast->expression != NULL) {
+ pm_compile_target_node(iseq, cast->expression, parents, writes, cleanup, scope_node, state);
+ }
+
+ break;
+ }
default:
rb_bug("Unexpected node type: %s", pm_node_type_to_str(PM_NODE_TYPE(node)));
break;
@@ -5277,7 +5291,8 @@ pm_compile_for_node_index(rb_iseq_t *iseq, const pm_node_t *node, LINK_ANCHOR *c
case PM_INSTANCE_VARIABLE_TARGET_NODE:
case PM_CONSTANT_PATH_TARGET_NODE:
case PM_CALL_TARGET_NODE:
- case PM_INDEX_TARGET_NODE: {
+ case PM_INDEX_TARGET_NODE:
+ case PM_SPLAT_NODE: {
// For other targets, we need to potentially compile the parent or
// owning expression of this target, then retrieve the value, expand it,
// and then compile the necessary writes.
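
A tiny Ruby sketch of the splat-target form the new PM_SPLAT_NODE case compiles: each element of the collection is splat-assigned into the target, so it always arrives wrapped in an array.

    for *x in [1, [2, 3]]
      p x   # => [1], then [2, 3]
    end
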
diff --git a/ractor.c b/ractor.c
index 177906ea1c..cce376c543 100644
--- a/ractor.c
+++ b/ractor.c
@@ -1357,8 +1357,25 @@ make_shareable_check_shareable(VALUE obj)
}
}
- if (RB_TYPE_P(obj, T_IMEMO)) {
+ switch (TYPE(obj)) {
+ case T_IMEMO:
return traverse_skip;
+ case T_OBJECT:
+ {
+ // If a T_OBJECT is shared and has no free capacity, we can't safely store the object_id inline,
+ // as it would require moving the object content into an external buffer.
+ // This is only a problem for T_OBJECT, given other types have external fields and can do RCU.
+ // To avoid this issue, we proactively create the object_id.
+ shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
+ attr_index_t capacity = RSHAPE_CAPACITY(shape_id);
+ attr_index_t free_capacity = capacity - RSHAPE_LEN(shape_id);
+ if (!rb_shape_has_object_id(shape_id) && capacity && !free_capacity) {
+ rb_obj_id(obj);
+ }
+ }
+ break;
+ default:
+ break;
}
if (!RB_OBJ_FROZEN_RAW(obj)) {
@@ -1639,7 +1656,7 @@ obj_traverse_replace_i(VALUE obj, struct obj_traverse_replace_data *data)
else if (data->replacement != _val) { RB_OBJ_WRITE(obj, &v, data->replacement); } \
} while (0)
- if (UNLIKELY(FL_TEST_RAW(obj, FL_EXIVAR))) {
+ if (UNLIKELY(rb_obj_exivar_p(obj))) {
struct gen_fields_tbl *fields_tbl;
rb_ivar_generic_fields_tbl_lookup(obj, &fields_tbl);
@@ -1868,7 +1885,7 @@ move_leave(VALUE obj, struct obj_traverse_replace_data *data)
rb_gc_obj_id_moved(data->replacement);
- if (UNLIKELY(FL_TEST_RAW(obj, FL_EXIVAR))) {
+ if (UNLIKELY(rb_obj_exivar_p(obj))) {
rb_replace_generic_ivar(data->replacement, obj);
}
@@ -2241,6 +2258,28 @@ struct cross_ractor_require {
ID name;
};
+static void
+cross_ractor_require_mark(void *ptr)
+{
+ struct cross_ractor_require *crr = (struct cross_ractor_require *)ptr;
+ rb_gc_mark(crr->port);
+ rb_gc_mark(crr->result);
+ rb_gc_mark(crr->exception);
+ rb_gc_mark(crr->feature);
+ rb_gc_mark(crr->module);
+}
+
+static const rb_data_type_t cross_ractor_require_data_type = {
+ "ractor/cross_ractor_require",
+ {
+ cross_ractor_require_mark,
+ RUBY_DEFAULT_FREE,
+ NULL, // memsize
+ NULL, // compact
+ },
+ 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
+};
+
static VALUE
require_body(VALUE data)
{
@@ -2287,8 +2326,11 @@ require_result_copy_resuce(VALUE data, VALUE errinfo)
}
static VALUE
-ractor_require_protect(struct cross_ractor_require *crr, VALUE (*func)(VALUE))
+ractor_require_protect(VALUE crr_obj, VALUE (*func)(VALUE))
{
+ struct cross_ractor_require *crr;
+ TypedData_Get_Struct(crr_obj, struct cross_ractor_require, &cross_ractor_require_data_type, crr);
+
// catch any error
rb_rescue2(func, (VALUE)crr,
require_rescue, (VALUE)crr, rb_eException, 0);
@@ -2297,43 +2339,49 @@ ractor_require_protect(struct cross_ractor_require *crr, VALUE (*func)(VALUE))
require_result_copy_resuce, (VALUE)crr, rb_eException, 0);
ractor_port_send(GET_EC(), crr->port, Qtrue, Qfalse);
+ RB_GC_GUARD(crr_obj);
return Qnil;
}
static VALUE
-ractor_require_func(void *data)
+ractor_require_func(void *crr_obj)
{
- struct cross_ractor_require *crr = (struct cross_ractor_require *)data;
- return ractor_require_protect(crr, require_body);
+ return ractor_require_protect((VALUE)crr_obj, require_body);
}
VALUE
rb_ractor_require(VALUE feature)
{
- // TODO: make feature shareable
- struct cross_ractor_require crr = {
- .feature = feature, // TODO: ractor
- .port = ractor_port_new(GET_RACTOR()),
- .result = Qundef,
- .exception = Qundef,
- };
+ struct cross_ractor_require *crr;
+ VALUE crr_obj = TypedData_Make_Struct(0, struct cross_ractor_require, &cross_ractor_require_data_type, crr);
+ FL_SET_RAW(crr_obj, RUBY_FL_SHAREABLE);
+
+ // Convert feature to proper file path and make it shareable as fstring
+ crr->feature = rb_fstring(FilePathValue(feature));
+ crr->port = ractor_port_new(GET_RACTOR());
+ crr->result = Qundef;
+ crr->exception = Qundef;
rb_execution_context_t *ec = GET_EC();
rb_ractor_t *main_r = GET_VM()->ractor.main_ractor;
- rb_ractor_interrupt_exec(main_r, ractor_require_func, &crr, 0);
+ rb_ractor_interrupt_exec(main_r, ractor_require_func, (void *)crr_obj, rb_interrupt_exec_flag_value_data);
// wait for require done
- ractor_port_receive(ec, crr.port);
- ractor_port_close(ec, crr.port);
+ ractor_port_receive(ec, crr->port);
+ ractor_port_close(ec, crr->port);
+
+ VALUE exc = crr->exception;
+ VALUE result = crr->result;
+ RB_GC_GUARD(crr_obj);
- if (crr.exception != Qundef) {
- ractor_reset_belonging(crr.exception);
- rb_exc_raise(crr.exception);
+ if (exc != Qundef) {
+ ractor_reset_belonging(exc);
+ rb_exc_raise(exc);
}
else {
- RUBY_ASSERT(crr.result != Qundef);
- ractor_reset_belonging(crr.result);
- return crr.result;
+ RUBY_ASSERT(result != Qundef);
+ ractor_reset_belonging(result);
+ return result;
}
}
@@ -2352,36 +2400,40 @@ autoload_load_body(VALUE data)
}
static VALUE
-ractor_autoload_load_func(void *data)
+ractor_autoload_load_func(void *crr_obj)
{
- struct cross_ractor_require *crr = (struct cross_ractor_require *)data;
- return ractor_require_protect(crr, autoload_load_body);
+ return ractor_require_protect((VALUE)crr_obj, autoload_load_body);
}
VALUE
rb_ractor_autoload_load(VALUE module, ID name)
{
- struct cross_ractor_require crr = {
- .module = module,
- .name = name,
- .port = ractor_port_new(GET_RACTOR()),
- .result = Qundef,
- .exception = Qundef,
- };
+ struct cross_ractor_require *crr;
+ VALUE crr_obj = TypedData_Make_Struct(0, struct cross_ractor_require, &cross_ractor_require_data_type, crr);
+ FL_SET_RAW(crr_obj, RUBY_FL_SHAREABLE);
+ crr->module = module;
+ crr->name = name;
+ crr->port = ractor_port_new(GET_RACTOR());
+ crr->result = Qundef;
+ crr->exception = Qundef;
rb_execution_context_t *ec = GET_EC();
rb_ractor_t *main_r = GET_VM()->ractor.main_ractor;
- rb_ractor_interrupt_exec(main_r, ractor_autoload_load_func, &crr, 0);
+ rb_ractor_interrupt_exec(main_r, ractor_autoload_load_func, (void *)crr_obj, rb_interrupt_exec_flag_value_data);
// wait for require done
- ractor_port_receive(ec, crr.port);
- ractor_port_close(ec, crr.port);
+ ractor_port_receive(ec, crr->port);
+ ractor_port_close(ec, crr->port);
+
+ VALUE exc = crr->exception;
+ VALUE result = crr->result;
+ RB_GC_GUARD(crr_obj);
- if (crr.exception != Qundef) {
- rb_exc_raise(crr.exception);
+ if (exc != Qundef) {
+ rb_exc_raise(exc);
}
else {
- return crr.result;
+ return result;
}
}
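
A hedged Ruby-level sketch of the path rb_ractor_require serves ("json" is just an illustrative feature): a require issued from a non-main Ractor is forwarded to the main Ractor, and the in-flight state (feature, port, result, exception) now lives in a GC-marked TypedData object rather than a stack struct, so it stays reachable while the main Ractor performs the load.

    Ractor.new do
      require "json"            # delegated to the main Ractor via rb_ractor_require
      JSON.generate(ok: 1)
    end
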
diff --git a/ractor_sync.c b/ractor_sync.c
index 0fcc293504..204c800a06 100644
--- a/ractor_sync.c
+++ b/ractor_sync.c
@@ -1197,6 +1197,7 @@ ractor_send_basket(rb_execution_context_t *ec, const struct ractor_port *rp, str
RUBY_DEBUG_LOG("closed:%u@r%u", (unsigned int)ractor_port_id(rp), rb_ractor_id(rp->r));
if (raise_on_error) {
+ ractor_basket_free(b);
rb_raise(rb_eRactorClosedError, "The port was already closed");
}
}
diff --git a/re.c b/re.c
index 3cf99c1210..e666a7c3d4 100644
--- a/re.c
+++ b/re.c
@@ -28,6 +28,7 @@
#include "ruby/encoding.h"
#include "ruby/re.h"
#include "ruby/util.h"
+#include "ractor_core.h"
VALUE rb_eRegexpError, rb_eRegexpTimeoutError;
@@ -3499,12 +3500,16 @@ static VALUE reg_cache;
VALUE
rb_reg_regcomp(VALUE str)
{
- if (reg_cache && RREGEXP_SRC_LEN(reg_cache) == RSTRING_LEN(str)
- && ENCODING_GET(reg_cache) == ENCODING_GET(str)
- && memcmp(RREGEXP_SRC_PTR(reg_cache), RSTRING_PTR(str), RSTRING_LEN(str)) == 0)
- return reg_cache;
+ if (rb_ractor_main_p()) {
+ if (reg_cache && RREGEXP_SRC_LEN(reg_cache) == RSTRING_LEN(str)
+ && ENCODING_GET(reg_cache) == ENCODING_GET(str)
+ && memcmp(RREGEXP_SRC_PTR(reg_cache), RSTRING_PTR(str), RSTRING_LEN(str)) == 0)
+ return reg_cache;
- return reg_cache = rb_reg_new_str(str, 0);
+ return reg_cache = rb_reg_new_str(str, 0);
+ } else {
+ return rb_reg_new_str(str, 0);
+ }
}
static st_index_t reg_hash(VALUE re);
diff --git a/ruby_atomic.h b/ruby_atomic.h
index f5f32191af..04c5d6d9f8 100644
--- a/ruby_atomic.h
+++ b/ruby_atomic.h
@@ -36,8 +36,10 @@ rbimpl_atomic_load_relaxed(volatile rb_atomic_t *ptr)
}
#define ATOMIC_LOAD_RELAXED(var) rbimpl_atomic_load_relaxed(&(var))
+typedef RBIMPL_ALIGNAS(8) uint64_t rbimpl_atomic_uint64_t;
+
static inline uint64_t
-rbimpl_atomic_u64_load_relaxed(const volatile uint64_t *value)
+rbimpl_atomic_u64_load_relaxed(const volatile rbimpl_atomic_uint64_t *value)
{
#if defined(HAVE_GCC_ATOMIC_BUILTINS_64)
return __atomic_load_n(value, __ATOMIC_RELAXED);
@@ -54,7 +56,7 @@ rbimpl_atomic_u64_load_relaxed(const volatile uint64_t *value)
#define ATOMIC_U64_LOAD_RELAXED(var) rbimpl_atomic_u64_load_relaxed(&(var))
static inline void
-rbimpl_atomic_u64_set_relaxed(volatile uint64_t *address, uint64_t value)
+rbimpl_atomic_u64_set_relaxed(volatile rbimpl_atomic_uint64_t *address, uint64_t value)
{
#if defined(HAVE_GCC_ATOMIC_BUILTINS_64)
__atomic_store_n(address, value, __ATOMIC_RELAXED);
diff --git a/rubyparser.h b/rubyparser.h
index 16f5cac81f..7525069fcb 100644
--- a/rubyparser.h
+++ b/rubyparser.h
@@ -1153,7 +1153,7 @@ typedef struct RNode_ERROR {
#define RNODE_FILE(node) ((rb_node_file_t *)(node))
#define RNODE_ENCODING(node) ((rb_node_encoding_t *)(node))
-/* FL : 0..4: T_TYPES, 5: KEEP_WB, 6: PROMOTED, 7: FINALIZE, 8: UNUSED, 9: UNUSED, 10: EXIVAR, 11: FREEZE */
+/* FL : 0..4: T_TYPES, 5: KEEP_WB, 6: PROMOTED, 7: FINALIZE, 8..10: UNUSED, 11: FREEZE */
/* NODE_FL: 0..4: UNUSED, 5: UNUSED, 6: UNUSED, 7: NODE_FL_NEWLINE,
* 8..14: nd_type,
* 15..: nd_line
diff --git a/scheduler.c b/scheduler.c
index 11faca01d3..83b9681cc3 100644
--- a/scheduler.c
+++ b/scheduler.c
@@ -63,8 +63,10 @@ typedef enum {
struct rb_fiber_scheduler_blocking_operation {
void *(*function)(void *);
void *data;
+
rb_unblock_function_t *unblock_function;
void *data2;
+
int flags;
struct rb_fiber_scheduler_blocking_operation_state *state;
@@ -208,7 +210,10 @@ rb_fiber_scheduler_blocking_operation_execute(rb_fiber_scheduler_blocking_operat
return -1; // Invalid blocking operation
}
- // Atomically check if we can transition from QUEUED to EXECUTING
+ // Resolve sentinel values for unblock_function and data2:
+ rb_thread_resolve_unblock_function(&blocking_operation->unblock_function, &blocking_operation->data2, GET_THREAD());
+
+ // Atomically check if we can transition from QUEUED to EXECUTING
rb_atomic_t expected = RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_QUEUED;
if (RUBY_ATOMIC_CAS(blocking_operation->status, expected, RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_EXECUTING) != expected) {
// Already cancelled or in wrong state
@@ -1124,25 +1129,33 @@ rb_fiber_scheduler_blocking_operation_cancel(rb_fiber_scheduler_blocking_operati
rb_atomic_t current_state = RUBY_ATOMIC_LOAD(blocking_operation->status);
- switch (current_state) {
+ switch (current_state) {
case RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_QUEUED:
- // Work hasn't started - just mark as cancelled
+ // Work hasn't started - just mark as cancelled:
if (RUBY_ATOMIC_CAS(blocking_operation->status, current_state, RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_CANCELLED) == current_state) {
- return 0; // Successfully cancelled before execution
+ // Successfully cancelled before execution:
+ return 0;
}
// Fall through if state changed between load and CAS
case RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_EXECUTING:
// Work is running - mark cancelled AND call unblock function
- RUBY_ATOMIC_SET(blocking_operation->status, RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_CANCELLED);
- if (blocking_operation->unblock_function) {
+ if (RUBY_ATOMIC_CAS(blocking_operation->status, current_state, RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_CANCELLED) != current_state) {
+ // State changed between load and CAS - operation may have completed:
+ return 0;
+ }
+ // Otherwise, we successfully marked it as cancelled, so we can call the unblock function:
+ rb_unblock_function_t *unblock_function = blocking_operation->unblock_function;
+ if (unblock_function) {
+ RUBY_ASSERT(unblock_function != (rb_unblock_function_t *)-1 && "unblock_function is still sentinel value -1, should have been resolved earlier");
blocking_operation->unblock_function(blocking_operation->data2);
}
- return 1; // Cancelled during execution (unblock function called)
+ // Cancelled during execution (unblock function called):
+ return 1;
case RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_COMPLETED:
case RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_CANCELLED:
- // Already finished or cancelled
+ // Already finished or cancelled:
return 0;
}
diff --git a/shape.c b/shape.c
index 6f187552de..06dcb8d610 100644
--- a/shape.c
+++ b/shape.c
@@ -33,9 +33,7 @@
#define MAX_SHAPE_ID (SHAPE_BUFFER_SIZE - 1)
#define ANCESTOR_SEARCH_MAX_DEPTH 2
-static ID id_frozen;
-static ID id_t_object;
-ID ruby_internal_object_id; // extern
+static ID id_object_id;
#define LEAF 0
#define BLACK 0x0
@@ -48,8 +46,8 @@ redblack_left(redblack_node_t *node)
return LEAF;
}
else {
- RUBY_ASSERT(node->l < GET_SHAPE_TREE()->cache_size);
- redblack_node_t *left = &GET_SHAPE_TREE()->shape_cache[node->l - 1];
+ RUBY_ASSERT(node->l < rb_shape_tree.cache_size);
+ redblack_node_t *left = &rb_shape_tree.shape_cache[node->l - 1];
return left;
}
}
@@ -61,8 +59,8 @@ redblack_right(redblack_node_t *node)
return LEAF;
}
else {
- RUBY_ASSERT(node->r < GET_SHAPE_TREE()->cache_size);
- redblack_node_t *right = &GET_SHAPE_TREE()->shape_cache[node->r - 1];
+ RUBY_ASSERT(node->r < rb_shape_tree.cache_size);
+ redblack_node_t *right = &rb_shape_tree.shape_cache[node->r - 1];
return right;
}
}
@@ -120,7 +118,7 @@ redblack_id_for(redblack_node_t *node)
return 0;
}
else {
- redblack_node_t *redblack_nodes = GET_SHAPE_TREE()->shape_cache;
+ redblack_node_t *redblack_nodes = rb_shape_tree.shape_cache;
redblack_id_t id = (redblack_id_t)(node - redblack_nodes);
return id + 1;
}
@@ -129,7 +127,7 @@ redblack_id_for(redblack_node_t *node)
static redblack_node_t *
redblack_new(char color, ID key, rb_shape_t *value, redblack_node_t *left, redblack_node_t *right)
{
- if (GET_SHAPE_TREE()->cache_size + 1 >= REDBLACK_CACHE_SIZE) {
+ if (rb_shape_tree.cache_size + 1 >= REDBLACK_CACHE_SIZE) {
// We're out of cache, just quit
return LEAF;
}
@@ -137,8 +135,8 @@ redblack_new(char color, ID key, rb_shape_t *value, redblack_node_t *left, redbl
RUBY_ASSERT(left == LEAF || left->key < key);
RUBY_ASSERT(right == LEAF || right->key > key);
- redblack_node_t *redblack_nodes = GET_SHAPE_TREE()->shape_cache;
- redblack_node_t *node = &redblack_nodes[(GET_SHAPE_TREE()->cache_size)++];
+ redblack_node_t *redblack_nodes = rb_shape_tree.shape_cache;
+ redblack_node_t *node = &redblack_nodes[(rb_shape_tree.cache_size)++];
node->key = key;
node->value = (rb_shape_t *)((uintptr_t)value | color);
node->l = redblack_id_for(left);
@@ -288,20 +286,20 @@ redblack_insert(redblack_node_t *tree, ID key, rb_shape_t *value)
}
#endif
-rb_shape_tree_t *rb_shape_tree_ptr = NULL;
+rb_shape_tree_t rb_shape_tree = { 0 };
static VALUE shape_tree_obj = Qfalse;
rb_shape_t *
rb_shape_get_root_shape(void)
{
- return GET_SHAPE_TREE()->root_shape;
+ return rb_shape_tree.root_shape;
}
static void
shape_tree_mark(void *data)
{
rb_shape_t *cursor = rb_shape_get_root_shape();
- rb_shape_t *end = RSHAPE(GET_SHAPE_TREE()->next_shape_id - 1);
+ rb_shape_t *end = RSHAPE(rb_shape_tree.next_shape_id - 1);
while (cursor < end) {
if (cursor->edges && !SINGLE_CHILD_P(cursor->edges)) {
rb_gc_mark_movable(cursor->edges);
@@ -314,7 +312,7 @@ static void
shape_tree_compact(void *data)
{
rb_shape_t *cursor = rb_shape_get_root_shape();
- rb_shape_t *end = RSHAPE(GET_SHAPE_TREE()->next_shape_id - 1);
+ rb_shape_t *end = RSHAPE(rb_shape_tree.next_shape_id - 1);
while (cursor < end) {
if (cursor->edges && !SINGLE_CHILD_P(cursor->edges)) {
cursor->edges = rb_gc_location(cursor->edges);
@@ -326,7 +324,7 @@ shape_tree_compact(void *data)
static size_t
shape_tree_memsize(const void *data)
{
- return GET_SHAPE_TREE()->cache_size * sizeof(redblack_node_t);
+ return rb_shape_tree.cache_size * sizeof(redblack_node_t);
}
static const rb_data_type_t shape_tree_type = {
@@ -349,14 +347,14 @@ static inline shape_id_t
raw_shape_id(rb_shape_t *shape)
{
RUBY_ASSERT(shape);
- return (shape_id_t)(shape - GET_SHAPE_TREE()->shape_list);
+ return (shape_id_t)(shape - rb_shape_tree.shape_list);
}
static inline shape_id_t
shape_id(rb_shape_t *shape, shape_id_t previous_shape_id)
{
RUBY_ASSERT(shape);
- shape_id_t raw_id = (shape_id_t)(shape - GET_SHAPE_TREE()->shape_list);
+ shape_id_t raw_id = (shape_id_t)(shape - rb_shape_tree.shape_list);
return raw_id | (previous_shape_id & SHAPE_ID_FLAGS_MASK);
}
@@ -373,22 +371,13 @@ rb_shape_each_shape_id(each_shape_callback callback, void *data)
{
rb_shape_t *start = rb_shape_get_root_shape();
rb_shape_t *cursor = start;
- rb_shape_t *end = RSHAPE(GET_SHAPE_TREE()->next_shape_id);
+ rb_shape_t *end = RSHAPE(rb_shape_tree.next_shape_id);
while (cursor < end) {
callback((shape_id_t)(cursor - start), data);
cursor += 1;
}
}
-RUBY_FUNC_EXPORTED rb_shape_t *
-rb_shape_lookup(shape_id_t shape_id)
-{
- uint32_t offset = (shape_id & SHAPE_ID_OFFSET_MASK);
- RUBY_ASSERT(offset != INVALID_SHAPE_ID);
-
- return &GET_SHAPE_TREE()->shape_list[offset];
-}
-
RUBY_FUNC_EXPORTED shape_id_t
rb_obj_shape_id(VALUE obj)
{
@@ -397,7 +386,7 @@ rb_obj_shape_id(VALUE obj)
}
if (BUILTIN_TYPE(obj) == T_CLASS || BUILTIN_TYPE(obj) == T_MODULE) {
- VALUE fields_obj = RCLASS_FIELDS_OBJ(obj);
+ VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
if (fields_obj) {
return RBASIC_SHAPE_ID(fields_obj);
}
@@ -423,14 +412,14 @@ rb_shape_depth(shape_id_t shape_id)
static rb_shape_t *
shape_alloc(void)
{
- shape_id_t shape_id = (shape_id_t)RUBY_ATOMIC_FETCH_ADD(GET_SHAPE_TREE()->next_shape_id, 1);
+ shape_id_t shape_id = (shape_id_t)RUBY_ATOMIC_FETCH_ADD(rb_shape_tree.next_shape_id, 1);
if (shape_id == (MAX_SHAPE_ID + 1)) {
// TODO: Make an OutOfShapesError ??
rb_bug("Out of shapes");
}
- return &GET_SHAPE_TREE()->shape_list[shape_id];
+ return &rb_shape_tree.shape_list[shape_id];
}
static rb_shape_t *
@@ -494,7 +483,7 @@ redblack_cache_ancestors(rb_shape_t *shape)
static attr_index_t
shape_grow_capa(attr_index_t current_capa)
{
- const attr_index_t *capacities = GET_SHAPE_TREE()->capacities;
+ const attr_index_t *capacities = rb_shape_tree.capacities;
// First try to use the next size that will be embeddable in a larger object slot.
attr_index_t capa;
@@ -573,7 +562,7 @@ retry:
if (!res) {
// If we're not allowed to create a new variation, of if we're out of shapes
// we return TOO_COMPLEX_SHAPE.
- if (!new_variations_allowed || GET_SHAPE_TREE()->next_shape_id > MAX_SHAPE_ID) {
+ if (!new_variations_allowed || rb_shape_tree.next_shape_id > MAX_SHAPE_ID) {
res = NULL;
}
else {
@@ -649,7 +638,7 @@ get_next_shape_internal(rb_shape_t *shape, ID id, enum shape_type shape_type, bo
if (!res) {
// If we're not allowed to create a new variation, of if we're out of shapes
// we return TOO_COMPLEX_SHAPE.
- if (!new_variations_allowed || GET_SHAPE_TREE()->next_shape_id > MAX_SHAPE_ID) {
+ if (!new_variations_allowed || rb_shape_tree.next_shape_id > MAX_SHAPE_ID) {
res = NULL;
}
else {
@@ -723,7 +712,7 @@ shape_transition_object_id(shape_id_t original_shape_id)
RUBY_ASSERT(!rb_shape_has_object_id(original_shape_id));
bool dont_care;
- rb_shape_t *shape = get_next_shape_internal(RSHAPE(original_shape_id), ruby_internal_object_id, SHAPE_OBJ_ID, &dont_care, true);
+ rb_shape_t *shape = get_next_shape_internal(RSHAPE(original_shape_id), id_object_id, SHAPE_OBJ_ID, &dont_care, true);
if (!shape) {
shape = RSHAPE(ROOT_SHAPE_WITH_OBJ_ID);
}
@@ -1155,7 +1144,7 @@ rb_shape_copy_complex_ivars(VALUE dest, VALUE obj, shape_id_t src_shape_id, st_t
// obj is TOO_COMPLEX so we can copy its iv_hash
st_table *table = st_copy(fields_table);
if (rb_shape_has_object_id(src_shape_id)) {
- st_data_t id = (st_data_t)ruby_internal_object_id;
+ st_data_t id = (st_data_t)id_object_id;
st_delete(table, &id, NULL);
}
rb_obj_init_too_complex(dest, table);
@@ -1245,9 +1234,26 @@ rb_shape_verify_consistency(VALUE obj, shape_id_t shape_id)
}
}
+ // Make sure SHAPE_ID_HAS_IVAR_MASK is valid.
+ if (rb_shape_too_complex_p(shape_id)) {
+ RUBY_ASSERT(shape_id & SHAPE_ID_HAS_IVAR_MASK);
+ }
+ else {
+ attr_index_t ivar_count = RSHAPE_LEN(shape_id);
+ if (has_object_id) {
+ ivar_count--;
+ }
+ if (ivar_count) {
+ RUBY_ASSERT(shape_id & SHAPE_ID_HAS_IVAR_MASK);
+ }
+ else {
+ RUBY_ASSERT(!(shape_id & SHAPE_ID_HAS_IVAR_MASK));
+ }
+ }
+
uint8_t flags_heap_index = rb_shape_heap_index(shape_id);
if (RB_TYPE_P(obj, T_OBJECT)) {
- size_t shape_id_slot_size = GET_SHAPE_TREE()->capacities[flags_heap_index - 1] * sizeof(VALUE) + sizeof(struct RBasic);
+ size_t shape_id_slot_size = rb_shape_tree.capacities[flags_heap_index - 1] * sizeof(VALUE) + sizeof(struct RBasic);
size_t actual_slot_size = rb_gc_obj_slot_size(obj);
if (shape_id_slot_size != actual_slot_size) {
@@ -1397,7 +1403,7 @@ rb_shape_root_shape(VALUE self)
static VALUE
rb_shape_shapes_available(VALUE self)
{
- return INT2NUM(MAX_SHAPE_ID - (GET_SHAPE_TREE()->next_shape_id - 1));
+ return INT2NUM(MAX_SHAPE_ID - (rb_shape_tree.next_shape_id - 1));
}
static VALUE
@@ -1405,7 +1411,7 @@ rb_shape_exhaust(int argc, VALUE *argv, VALUE self)
{
rb_check_arity(argc, 0, 1);
int offset = argc == 1 ? NUM2INT(argv[0]) : 0;
- GET_SHAPE_TREE()->next_shape_id = MAX_SHAPE_ID - offset + 1;
+ rb_shape_tree.next_shape_id = MAX_SHAPE_ID - offset + 1;
return Qnil;
}
@@ -1461,7 +1467,7 @@ static VALUE
rb_shape_find_by_id(VALUE mod, VALUE id)
{
shape_id_t shape_id = NUM2UINT(id);
- if (shape_id >= GET_SHAPE_TREE()->next_shape_id) {
+ if (shape_id >= rb_shape_tree.next_shape_id) {
rb_raise(rb_eArgError, "Shape ID %d is out of bounds\n", shape_id);
}
return shape_id_t_to_rb_cShape(shape_id);
@@ -1475,8 +1481,6 @@ rb_shape_find_by_id(VALUE mod, VALUE id)
void
Init_default_shapes(void)
{
- rb_shape_tree_ptr = xcalloc(1, sizeof(rb_shape_tree_t));
-
size_t *heap_sizes = rb_gc_heap_sizes();
size_t heaps_count = 0;
while (heap_sizes[heaps_count]) {
@@ -1488,45 +1492,43 @@ Init_default_shapes(void)
for (index = 0; index < heaps_count; index++) {
capacities[index] = (heap_sizes[index] - sizeof(struct RBasic)) / sizeof(VALUE);
}
- GET_SHAPE_TREE()->capacities = capacities;
+ rb_shape_tree.capacities = capacities;
#ifdef HAVE_MMAP
size_t shape_list_mmap_size = rb_size_mul_or_raise(SHAPE_BUFFER_SIZE, sizeof(rb_shape_t), rb_eRuntimeError);
- rb_shape_tree_ptr->shape_list = (rb_shape_t *)mmap(NULL, shape_list_mmap_size,
+ rb_shape_tree.shape_list = (rb_shape_t *)mmap(NULL, shape_list_mmap_size,
PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (GET_SHAPE_TREE()->shape_list == MAP_FAILED) {
- GET_SHAPE_TREE()->shape_list = 0;
+ if (rb_shape_tree.shape_list == MAP_FAILED) {
+ rb_shape_tree.shape_list = 0;
}
else {
- ruby_annotate_mmap(rb_shape_tree_ptr->shape_list, shape_list_mmap_size, "Ruby:Init_default_shapes:shape_list");
+ ruby_annotate_mmap(rb_shape_tree.shape_list, shape_list_mmap_size, "Ruby:Init_default_shapes:shape_list");
}
#else
- GET_SHAPE_TREE()->shape_list = xcalloc(SHAPE_BUFFER_SIZE, sizeof(rb_shape_t));
+ rb_shape_tree.shape_list = xcalloc(SHAPE_BUFFER_SIZE, sizeof(rb_shape_t));
#endif
- if (!GET_SHAPE_TREE()->shape_list) {
+ if (!rb_shape_tree.shape_list) {
rb_memerror();
}
- id_frozen = rb_make_internal_id();
- id_t_object = rb_make_internal_id();
- ruby_internal_object_id = rb_make_internal_id();
+ id_object_id = rb_make_internal_id();
#ifdef HAVE_MMAP
size_t shape_cache_mmap_size = rb_size_mul_or_raise(REDBLACK_CACHE_SIZE, sizeof(redblack_node_t), rb_eRuntimeError);
- rb_shape_tree_ptr->shape_cache = (redblack_node_t *)mmap(NULL, shape_cache_mmap_size,
+ rb_shape_tree.shape_cache = (redblack_node_t *)mmap(NULL, shape_cache_mmap_size,
PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- rb_shape_tree_ptr->cache_size = 0;
+ rb_shape_tree.cache_size = 0;
// If mmap fails, then give up on the redblack tree cache.
// We set the cache size such that the redblack node allocators think
// the cache is full.
- if (GET_SHAPE_TREE()->shape_cache == MAP_FAILED) {
- GET_SHAPE_TREE()->shape_cache = 0;
- GET_SHAPE_TREE()->cache_size = REDBLACK_CACHE_SIZE;
+ if (rb_shape_tree.shape_cache == MAP_FAILED) {
+ rb_shape_tree.shape_cache = 0;
+ rb_shape_tree.cache_size = REDBLACK_CACHE_SIZE;
}
else {
- ruby_annotate_mmap(rb_shape_tree_ptr->shape_cache, shape_cache_mmap_size, "Ruby:Init_default_shapes:shape_cache");
+ ruby_annotate_mmap(rb_shape_tree.shape_cache, shape_cache_mmap_size, "Ruby:Init_default_shapes:shape_cache");
}
#endif
@@ -1537,21 +1539,24 @@ Init_default_shapes(void)
rb_shape_t *root = rb_shape_alloc_with_parent_id(0, INVALID_SHAPE_ID);
root->capacity = 0;
root->type = SHAPE_ROOT;
- GET_SHAPE_TREE()->root_shape = root;
- RUBY_ASSERT(raw_shape_id(GET_SHAPE_TREE()->root_shape) == ROOT_SHAPE_ID);
+ rb_shape_tree.root_shape = root;
+ RUBY_ASSERT(raw_shape_id(rb_shape_tree.root_shape) == ROOT_SHAPE_ID);
+ RUBY_ASSERT(!(raw_shape_id(rb_shape_tree.root_shape) & SHAPE_ID_HAS_IVAR_MASK));
- rb_shape_t *root_with_obj_id = rb_shape_alloc_with_parent_id(0, ROOT_SHAPE_ID);
- root_with_obj_id->type = SHAPE_OBJ_ID;
- root_with_obj_id->edge_name = ruby_internal_object_id;
- root_with_obj_id->next_field_index++;
+ bool dontcare;
+ rb_shape_t *root_with_obj_id = get_next_shape_internal(root, id_object_id, SHAPE_OBJ_ID, &dontcare, true);
RUBY_ASSERT(raw_shape_id(root_with_obj_id) == ROOT_SHAPE_WITH_OBJ_ID);
+ RUBY_ASSERT(root_with_obj_id->type == SHAPE_OBJ_ID);
+ RUBY_ASSERT(root_with_obj_id->edge_name == id_object_id);
+ RUBY_ASSERT(root_with_obj_id->next_field_index == 1);
+ RUBY_ASSERT(!(raw_shape_id(root_with_obj_id) & SHAPE_ID_HAS_IVAR_MASK));
+ (void)root_with_obj_id;
}
void
rb_shape_free_all(void)
{
- xfree((void *)GET_SHAPE_TREE()->capacities);
- xfree(GET_SHAPE_TREE());
+ xfree((void *)rb_shape_tree.capacities);
}
void
diff --git a/shape.h b/shape.h
index ac50e58f71..b23fda4e29 100644
--- a/shape.h
+++ b/shape.h
@@ -23,6 +23,10 @@ STATIC_ASSERT(shape_id_num_bits, SHAPE_ID_NUM_BITS == sizeof(shape_id_t) * CHAR_
#define SHAPE_ID_HEAP_INDEX_MAX ((1 << SHAPE_ID_HEAP_INDEX_BITS) - 1)
#define SHAPE_ID_HEAP_INDEX_MASK (SHAPE_ID_HEAP_INDEX_MAX << SHAPE_ID_HEAP_INDEX_OFFSET)
+// This mask allows checking whether a shape_id contains any ivar.
+// It relies on ROOT_SHAPE_WITH_OBJ_ID == 1.
+#define SHAPE_ID_HAS_IVAR_MASK (SHAPE_ID_FL_TOO_COMPLEX | (SHAPE_ID_OFFSET_MASK - 1))
+
// The interpreter doesn't care about frozen status or slot size when reading ivars.
// So we normalize shape_id by clearing these bits to improve cache hits.
// JITs however might care about it.
@@ -45,8 +49,6 @@ typedef uint32_t redblack_id_t;
#define ROOT_TOO_COMPLEX_WITH_OBJ_ID (ROOT_SHAPE_WITH_OBJ_ID | SHAPE_ID_FL_TOO_COMPLEX | SHAPE_ID_FL_HAS_OBJECT_ID)
#define SPECIAL_CONST_SHAPE_ID (ROOT_SHAPE_ID | SHAPE_ID_FL_FROZEN)
-extern ID ruby_internal_object_id;
-
typedef struct redblack_node redblack_node_t;
struct rb_shape {
@@ -92,7 +94,10 @@ typedef struct {
redblack_node_t *shape_cache;
unsigned int cache_size;
} rb_shape_tree_t;
-RUBY_EXTERN rb_shape_tree_t *rb_shape_tree_ptr;
+
+RUBY_SYMBOL_EXPORT_BEGIN
+RUBY_EXTERN rb_shape_tree_t rb_shape_tree;
+RUBY_SYMBOL_EXPORT_END
union rb_attr_index_cache {
uint64_t pack;
@@ -102,13 +107,6 @@ union rb_attr_index_cache {
} unpack;
};
-static inline rb_shape_tree_t *
-rb_current_shape_tree(void)
-{
- return rb_shape_tree_ptr;
-}
-#define GET_SHAPE_TREE() rb_current_shape_tree()
-
static inline shape_id_t
RBASIC_SHAPE_ID(VALUE obj)
{
@@ -138,8 +136,6 @@ RBASIC_SET_SHAPE_ID(VALUE obj, shape_id_t shape_id)
{
RUBY_ASSERT(!RB_SPECIAL_CONST_P(obj));
RUBY_ASSERT(!RB_TYPE_P(obj, T_IMEMO) || IMEMO_TYPE_P(obj, imemo_class_fields));
- RUBY_ASSERT(rb_shape_verify_consistency(obj, shape_id));
-
#if RBASIC_SHAPE_ID_FIELD
RBASIC(obj)->shape_id = (VALUE)shape_id;
#else
@@ -147,13 +143,20 @@ RBASIC_SET_SHAPE_ID(VALUE obj, shape_id_t shape_id)
RBASIC(obj)->flags &= SHAPE_FLAG_MASK;
RBASIC(obj)->flags |= ((VALUE)(shape_id) << SHAPE_FLAG_SHIFT);
#endif
+ RUBY_ASSERT(rb_shape_verify_consistency(obj, shape_id));
}
-#define RSHAPE rb_shape_lookup
+static inline rb_shape_t *
+RSHAPE(shape_id_t shape_id)
+{
+ uint32_t offset = (shape_id & SHAPE_ID_OFFSET_MASK);
+ RUBY_ASSERT(offset != INVALID_SHAPE_ID);
+
+ return &rb_shape_tree.shape_list[offset];
+}
int32_t rb_shape_id_offset(void);
-RUBY_FUNC_EXPORTED rb_shape_t *rb_shape_lookup(shape_id_t shape_id);
RUBY_FUNC_EXPORTED shape_id_t rb_obj_shape_id(VALUE obj);
shape_id_t rb_shape_get_next_iv_shape(shape_id_t shape_id, ID id);
bool rb_shape_get_iv_index(shape_id_t shape_id, ID id, attr_index_t *value);
@@ -238,7 +241,7 @@ RSHAPE_EMBEDDED_CAPACITY(shape_id_t shape_id)
{
uint8_t heap_index = rb_shape_heap_index(shape_id);
if (heap_index) {
- return GET_SHAPE_TREE()->capacities[heap_index - 1];
+ return rb_shape_tree.capacities[heap_index - 1];
}
return 0;
}
@@ -327,6 +330,46 @@ rb_shape_obj_has_id(VALUE obj)
return rb_shape_has_object_id(RBASIC_SHAPE_ID(obj));
}
+static inline bool
+rb_shape_has_ivars(shape_id_t shape_id)
+{
+ return shape_id & SHAPE_ID_HAS_IVAR_MASK;
+}
+
+static inline bool
+rb_shape_obj_has_ivars(VALUE obj)
+{
+ return rb_shape_has_ivars(RBASIC_SHAPE_ID(obj));
+}
+
+static inline bool
+rb_shape_has_fields(shape_id_t shape_id)
+{
+ return shape_id & (SHAPE_ID_OFFSET_MASK | SHAPE_ID_FL_TOO_COMPLEX);
+}
+
+static inline bool
+rb_shape_obj_has_fields(VALUE obj)
+{
+ return rb_shape_has_fields(RBASIC_SHAPE_ID(obj));
+}
+
+static inline bool
+rb_obj_exivar_p(VALUE obj)
+{
+ switch (TYPE(obj)) {
+ case T_NONE:
+ case T_OBJECT:
+ case T_CLASS:
+ case T_MODULE:
+ case T_IMEMO:
+ return false;
+ default:
+ break;
+ }
+ return rb_shape_obj_has_fields(obj);
+}
+
// For ext/objspace
RUBY_SYMBOL_EXPORT_BEGIN
typedef void each_shape_callback(shape_id_t shape_id, void *data);
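A standalone sketch may help make the SHAPE_ID_HAS_IVAR_MASK trick above concrete. The following is a minimal model, not CRuby code: the bit layout and names (OFFSET_MASK, FL_TOO_COMPLEX, has_ivars) are simplified assumptions; only the idea is taken from the patch.

/*
 * Minimal sketch (not CRuby code) of the SHAPE_ID_HAS_IVAR_MASK check.
 * Because ROOT_SHAPE_WITH_OBJ_ID == 1, clearing bit 0 of the offset mask makes
 * both "no fields" (offset 0) and "only the hidden object_id field" (offset 1)
 * read as "no ivars", while any later shape or a too-complex shape reads as
 * "has ivars".
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint32_t shape_id_t;

#define OFFSET_MASK        0x0000ffffu      /* hypothetical: low bits hold the shape offset */
#define FL_TOO_COMPLEX     (1u << 18)       /* hypothetical flag bit */
#define ROOT_SHAPE_ID      0u
#define ROOT_WITH_OBJ_ID   1u               /* the trick relies on this being exactly 1 */
#define HAS_IVAR_MASK      (FL_TOO_COMPLEX | (OFFSET_MASK - 1u))

static bool
has_ivars(shape_id_t shape_id)
{
    return (shape_id & HAS_IVAR_MASK) != 0;
}

int
main(void)
{
    assert(!has_ivars(ROOT_SHAPE_ID));                      /* nothing stored */
    assert(!has_ivars(ROOT_WITH_OBJ_ID));                   /* only object_id, no ivar */
    assert(has_ivars(2));                                   /* first real ivar shape */
    assert(has_ivars(ROOT_WITH_OBJ_ID | FL_TOO_COMPLEX));   /* too-complex always counts */
    return 0;
}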
diff --git a/spec/bundler/bundler/friendly_errors_spec.rb b/spec/bundler/bundler/friendly_errors_spec.rb
index e0310344fd..d6a9d4813d 100644
--- a/spec/bundler/bundler/friendly_errors_spec.rb
+++ b/spec/bundler/bundler/friendly_errors_spec.rb
@@ -2,7 +2,8 @@
require "bundler"
require "bundler/friendly_errors"
-require "cgi"
+require "cgi/escape"
+require "cgi/util" unless defined?(CGI::EscapeExt)
RSpec.describe Bundler, "friendly errors" do
context "with invalid YAML in .gemrc" do
diff --git a/st.c b/st.c
index f11e9efaf9..70da7daf83 100644
--- a/st.c
+++ b/st.c
@@ -1495,7 +1495,16 @@ st_update(st_table *tab, st_data_t key,
value = entry->record;
}
old_key = key;
+
+ unsigned int rebuilds_num = tab->rebuilds_num;
+
retval = (*func)(&key, &value, arg, existing);
+
+    // We need to make sure the callback didn't cause a table rebuild.
+    // Ideally we would make sure no table operations happened at all.
+ assert(rebuilds_num == tab->rebuilds_num);
+ (void)rebuilds_num;
+
switch (retval) {
case ST_CONTINUE:
if (! existing) {
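The new assertion in st_update guards against the callback invalidating the entry pointer captured before it ran. Below is a minimal sketch of the same pattern; the types and names (table_t, update_cb, table_update) are illustrative stand-ins, not st.c's API.

/*
 * Sketch of the rebuild guard: snapshot a rebuild counter before invoking the
 * user callback and assert it did not change afterwards.
 */
#include <assert.h>

typedef struct {
    unsigned int rebuilds_num;   /* bumped whenever the bucket array is rebuilt */
    /* ... buckets, entries ... */
} table_t;

typedef int (*update_cb)(long *value, void *arg);

static int
table_update(table_t *tab, long *entry_value, update_cb func, void *arg)
{
    unsigned int rebuilds_num = tab->rebuilds_num;

    int retval = func(entry_value, arg);

    /* If the callback inserted or deleted enough entries to force a rebuild,
     * entry_value may now point into freed memory, so fail loudly instead. */
    assert(rebuilds_num == tab->rebuilds_num);
    (void)rebuilds_num;

    return retval;
}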
diff --git a/string.c b/string.c
index 3ddd64ef25..d9ffb29b8e 100644
--- a/string.c
+++ b/string.c
@@ -388,12 +388,7 @@ fstring_hash(VALUE str)
static inline bool
BARE_STRING_P(VALUE str)
{
- if (RBASIC_CLASS(str) != rb_cString) return false;
-
- if (FL_TEST_RAW(str, FL_EXIVAR)) {
- return rb_ivar_count(str) == 0;
- }
- return true;
+ return RBASIC_CLASS(str) == rb_cString && !rb_shape_obj_has_ivars(str);
}
static inline st_index_t
@@ -490,7 +485,7 @@ build_fstring(VALUE str, struct fstr_update_arg *arg)
RUBY_ASSERT(RB_TYPE_P(str, T_STRING));
RUBY_ASSERT(OBJ_FROZEN(str));
RUBY_ASSERT(!FL_TEST_RAW(str, STR_FAKESTR));
- RUBY_ASSERT(!FL_TEST_RAW(str, FL_EXIVAR));
+ RUBY_ASSERT(!rb_obj_exivar_p(str));
RUBY_ASSERT(RBASIC_CLASS(str) == rb_cString);
RUBY_ASSERT(!rb_objspace_garbage_object_p(str));
@@ -2316,7 +2311,7 @@ VALUE
rb_str_dup_m(VALUE str)
{
if (LIKELY(BARE_STRING_P(str))) {
- return str_duplicate(rb_obj_class(str), str);
+ return str_duplicate(rb_cString, str);
}
else {
return rb_obj_dup(str);
diff --git a/struct.c b/struct.c
index 7cfc1f2a16..74ca9369a6 100644
--- a/struct.c
+++ b/struct.c
@@ -52,7 +52,8 @@ struct_ivar_get(VALUE c, ID id)
RUBY_ASSERT(RB_TYPE_P(c, T_CLASS));
ivar = rb_attr_get(c, id);
if (!NIL_P(ivar)) {
- return rb_ivar_set(orig, id, ivar);
+ if (!OBJ_FROZEN(orig)) rb_ivar_set(orig, id, ivar);
+ return ivar;
}
}
}
diff --git a/test/ruby/namespace/instance_variables.rb b/test/ruby/namespace/instance_variables.rb
new file mode 100644
index 0000000000..1562ad5d45
--- /dev/null
+++ b/test/ruby/namespace/instance_variables.rb
@@ -0,0 +1,21 @@
+class String
+ class << self
+ attr_reader :str_ivar1
+
+ def str_ivar2
+ @str_ivar2
+ end
+ end
+
+ @str_ivar1 = 111
+ @str_ivar2 = 222
+end
+
+class StringDelegator < BasicObject
+private
+ def method_missing(...)
+ ::String.public_send(...)
+ end
+end
+
+StringDelegatorObj = StringDelegator.new
diff --git a/test/ruby/test_compile_prism.rb b/test/ruby/test_compile_prism.rb
index 819d0d35aa..86f7f0b14f 100644
--- a/test/ruby/test_compile_prism.rb
+++ b/test/ruby/test_compile_prism.rb
@@ -1053,6 +1053,9 @@ module Prism
assert_prism_eval("for foo, in [1,2,3] do end")
assert_prism_eval("for i, j in {a: 'b'} do; i; j; end")
+
+ # Test splat node as index in for loop
+ assert_prism_eval("for *x in [[1,2], [3,4]] do; x; end")
end
############################################################################
diff --git a/test/ruby/test_data.rb b/test/ruby/test_data.rb
index bb38f8ec91..dd698fdcc4 100644
--- a/test/ruby/test_data.rb
+++ b/test/ruby/test_data.rb
@@ -280,4 +280,10 @@ class TestData < Test::Unit::TestCase
assert_not_same(test, loaded)
assert_predicate(loaded, :frozen?)
end
+
+ def test_frozen_subclass
+ test = Class.new(Data.define(:a)).freeze.new(a: 0)
+ assert_kind_of(Data, test)
+ assert_equal([:a], test.members)
+ end
end
diff --git a/test/ruby/test_namespace.rb b/test/ruby/test_namespace.rb
index 395f244c8e..f13063be48 100644
--- a/test/ruby/test_namespace.rb
+++ b/test/ruby/test_namespace.rb
@@ -222,6 +222,26 @@ class TestNamespace < Test::Unit::TestCase
end;
end
+ def test_instance_variable
+ pend unless Namespace.enabled?
+
+ @n.require_relative('namespace/instance_variables')
+
+ assert_equal [], String.instance_variables
+ assert_equal [:@str_ivar1, :@str_ivar2], @n::StringDelegatorObj.instance_variables
+ assert_equal 111, @n::StringDelegatorObj.str_ivar1
+ assert_equal 222, @n::StringDelegatorObj.str_ivar2
+ assert_equal 222, @n::StringDelegatorObj.instance_variable_get(:@str_ivar2)
+
+ @n::StringDelegatorObj.instance_variable_set(:@str_ivar3, 333)
+ assert_equal 333, @n::StringDelegatorObj.instance_variable_get(:@str_ivar3)
+ @n::StringDelegatorObj.remove_instance_variable(:@str_ivar1)
+ assert_nil @n::StringDelegatorObj.str_ivar1
+ assert_equal [:@str_ivar2, :@str_ivar3], @n::StringDelegatorObj.instance_variables
+
+ assert_equal [], String.instance_variables
+ end
+
def test_methods_added_in_namespace_are_invisible_globally
pend unless Namespace.enabled?
diff --git a/test/ruby/test_object_id.rb b/test/ruby/test_object_id.rb
index 44421ea256..9c0099517b 100644
--- a/test/ruby/test_object_id.rb
+++ b/test/ruby/test_object_id.rb
@@ -198,3 +198,49 @@ class TestObjectIdTooComplexGeneric < TestObjectId
end
end
end
+
+class TestObjectIdRactor < Test::Unit::TestCase
+ def test_object_id_race_free
+ assert_separately([], "#{<<~"begin;"}\n#{<<~'end;'}")
+ begin;
+ Warning[:experimental] = false
+ class MyClass
+ attr_reader :a, :b, :c
+ def initialize
+ @a = @b = @c = nil
+ end
+ end
+ N = 10_000
+ objs = Ractor.make_shareable(N.times.map { MyClass.new })
+ results = 4.times.map{
+ Ractor.new(objs) { |objs|
+ vars = []
+ ids = []
+ objs.each do |obj|
+ vars << obj.a << obj.b << obj.c
+ ids << obj.object_id
+ end
+ [vars, ids]
+ }
+ }.map(&:value)
+ assert_equal 1, results.uniq.size
+ end;
+ end
+
+ def test_external_object_id_ractor_move
+ assert_separately([], "#{<<~"begin;"}\n#{<<~'end;'}")
+ begin;
+ Warning[:experimental] = false
+ class MyClass
+ attr_reader :a, :b, :c
+ def initialize
+ @a = @b = @c = nil
+ end
+ end
+ obj = Ractor.make_shareable(MyClass.new)
+ object_id = obj.object_id
+ obj = Ractor.new { Ractor.receive }.send(obj, move: true).value
+ assert_equal object_id, obj.object_id
+ end;
+ end
+end
diff --git a/test/ruby/test_ractor.rb b/test/ruby/test_ractor.rb
index b423993df1..3fc891da23 100644
--- a/test/ruby/test_ractor.rb
+++ b/test/ruby/test_ractor.rb
@@ -79,6 +79,26 @@ class TestRactor < Test::Unit::TestCase
end;
end
+ def test_class_instance_variables
+ assert_ractor(<<~'RUBY')
+ # Once we're in multi-ractor mode, the codepaths
+ # for class instance variables are a bit different.
+ Ractor.new {}.value
+
+ class TestClass
+ @a = 1
+ @b = 2
+ @c = 3
+ @d = 4
+ end
+
+ assert_equal 4, TestClass.remove_instance_variable(:@d)
+ assert_nil TestClass.instance_variable_get(:@d)
+ assert_equal 4, TestClass.instance_variable_set(:@d, 4)
+ assert_equal 4, TestClass.instance_variable_get(:@d)
+ RUBY
+ end
+
def test_require_raises_and_no_ractor_belonging_issue
assert_ractor(<<~'RUBY')
require "tempfile"
@@ -98,6 +118,21 @@ class TestRactor < Test::Unit::TestCase
RUBY
end
+ def test_require_non_string
+ assert_ractor(<<~'RUBY')
+ require "tempfile"
+ require "pathname"
+ f = Tempfile.new(["file_to_require_from_ractor", ".rb"])
+ f.write("")
+ f.flush
+ result = Ractor.new(f.path) do |path|
+ require Pathname.new(path)
+ "success"
+ end.value
+ assert_equal "success", result
+ RUBY
+ end
+
def assert_make_shareable(obj)
refute Ractor.shareable?(obj), "object was already shareable"
Ractor.make_shareable(obj)
diff --git a/test/ruby/test_struct.rb b/test/ruby/test_struct.rb
index ecd8ed196c..db591c306e 100644
--- a/test/ruby/test_struct.rb
+++ b/test/ruby/test_struct.rb
@@ -550,6 +550,12 @@ module TestStruct
CODE
end
+ def test_frozen_subclass
+ test = Class.new(@Struct.new(:a)).freeze.new(a: 0)
+ assert_kind_of(@Struct, test)
+ assert_equal([:a], test.members)
+ end
+
class TopStruct < Test::Unit::TestCase
include TestStruct
diff --git a/test/ruby/test_variable.rb b/test/ruby/test_variable.rb
index 49fec2d40e..d8d0a1a393 100644
--- a/test/ruby/test_variable.rb
+++ b/test/ruby/test_variable.rb
@@ -407,6 +407,19 @@ class TestVariable < Test::Unit::TestCase
}
end
+ def test_exivar_resize_with_compaction_stress
+ objs = 10_000.times.map do
+ ExIvar.new
+ end
+ EnvUtil.under_gc_compact_stress do
+ 10.times do
+ x = ExIvar.new
+ x.instance_variable_set(:@resize, 1)
+ x
+ end
+ end
+ end
+
def test_local_variables_with_kwarg
bug11674 = '[ruby-core:71437] [Bug #11674]'
v = with_kwargs_11(v1:1,v2:2,v3:3,v4:4,v5:5,v6:6,v7:7,v8:8,v9:9,v10:10,v11:11)
diff --git a/test/ruby/test_zjit.rb b/test/ruby/test_zjit.rb
index 47a9f6f7dc..6095b0b734 100644
--- a/test/ruby/test_zjit.rb
+++ b/test/ruby/test_zjit.rb
@@ -102,12 +102,39 @@ class TestZJIT < Test::Unit::TestCase
}, call_threshold: 2
end
+ def test_opt_plus_type_guard_exit_with_locals
+ assert_compiles '[6, 6.0]', %q{
+ def test(a)
+ local = 3
+ 1 + a + local
+ end
+ test(1) # profile opt_plus
+ [test(2), test(2.0)]
+ }, call_threshold: 2
+ end
+
def test_opt_plus_type_guard_nested_exit
- omit 'rewind_caller_frames is not implemented yet'
- assert_compiles '[3, 3.0]', %q{
+ assert_compiles '[4, 4.0]', %q{
def side_exit(n) = 1 + n
def jit_frame(n) = 1 + side_exit(n)
def entry(n) = jit_frame(n)
+ entry(2) # profile send
+ [entry(2), entry(2.0)]
+ }, call_threshold: 2
+ end
+
+ def test_opt_plus_type_guard_nested_exit_with_locals
+ assert_compiles '[9, 9.0]', %q{
+ def side_exit(n)
+ local = 2
+ 1 + n + local
+ end
+ def jit_frame(n)
+ local = 3
+ 1 + side_exit(n) + local
+ end
+ def entry(n) = jit_frame(n)
+ entry(2) # profile send
[entry(2), entry(2.0)]
}, call_threshold: 2
end
@@ -130,7 +157,6 @@ class TestZJIT < Test::Unit::TestCase
end
def test_opt_mult_overflow
- omit 'side exits are not implemented yet'
assert_compiles '[6, -6, 9671406556917033397649408, -9671406556917033397649408, 21267647932558653966460912964485513216]', %q{
def test(a, b)
a * b
@@ -610,6 +636,22 @@ class TestZJIT < Test::Unit::TestCase
}
end
+ def test_send_backtrace
+ backtrace = [
+ "-e:2:in 'Object#jit_frame1'",
+ "-e:3:in 'Object#entry'",
+ "-e:5:in 'block in <main>'",
+ "-e:6:in '<main>'",
+ ]
+ assert_compiles backtrace.inspect, %q{
+ def jit_frame2 = caller # 1
+ def jit_frame1 = jit_frame2 # 2
+ def entry = jit_frame1 # 3
+ entry # profile send # 4
+ entry # 5
+ }, call_threshold: 2
+ end
+
# tool/ruby_vm/views/*.erb relies on the zjit instructions a) being contiguous and
# b) being reliably ordered after all the other instructions.
def test_instruction_order
@@ -631,11 +673,7 @@ class TestZJIT < Test::Unit::TestCase
pipe_fd = 3
script = <<~RUBY
- _test_proc = -> {
- RubyVM::ZJIT.assert_compiles
- #{test_script}
- }
- ret_val = _test_proc.call
+ ret_val = (_test_proc = -> { RubyVM::ZJIT.assert_compiles; #{test_script.lstrip} }).call
result = {
ret_val:,
#{ unless insns.empty?
diff --git a/test/socket/test_socket.rb b/test/socket/test_socket.rb
index 165990dd64..4b85d43291 100644
--- a/test/socket/test_socket.rb
+++ b/test/socket/test_socket.rb
@@ -937,6 +937,32 @@ class TestSocket < Test::Unit::TestCase
RUBY
end
+ def test_tcp_socket_open_timeout
+ opts = %w[-rsocket -W1]
+ assert_separately opts, <<~RUBY
+ Addrinfo.define_singleton_method(:getaddrinfo) do |_, _, family, *_|
+ if family == Socket::AF_INET6
+ sleep
+ else
+ [Addrinfo.tcp("127.0.0.1", 12345)]
+ end
+ end
+
+ assert_raise(Errno::ETIMEDOUT) do
+ Socket.tcp("localhost", 12345, open_timeout: 0.01)
+ end
+ RUBY
+ end
+
+ def test_tcp_socket_open_timeout_with_other_timeouts
+ opts = %w[-rsocket -W1]
+ assert_separately opts, <<~RUBY
+ assert_raise(ArgumentError) do
+        Socket.tcp("localhost", 12345, open_timeout: 0.01, resolv_timeout: 0.01)
+ end
+ RUBY
+ end
+
def test_tcp_socket_one_hostname_resolution_succeeded_at_least
opts = %w[-rsocket -W1]
assert_separately opts, <<~RUBY
diff --git a/thread.c b/thread.c
index a637c8ec7c..41bd6c9ec6 100644
--- a/thread.c
+++ b/thread.c
@@ -519,12 +519,8 @@ thread_cleanup_func(void *th_ptr, int atfork)
th->locking_mutex = Qfalse;
thread_cleanup_func_before_exec(th_ptr);
- /*
- * Unfortunately, we can't release native threading resource at fork
- * because libc may have unstable locking state therefore touching
- * a threading resource may cause a deadlock.
- */
if (atfork) {
+ native_thread_destroy_atfork(th->nt);
th->nt = NULL;
return;
}
@@ -1544,6 +1540,29 @@ blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
#endif
}
+/*
+ * Resolve sentinel unblock function values to their actual function pointers
+ * and appropriate data2 values. This centralizes the logic for handling
+ * RUBY_UBF_IO and RUBY_UBF_PROCESS sentinel values.
+ *
+ * @param unblock_function Pointer to unblock function pointer (modified in place)
+ * @param data2 Pointer to data2 pointer (modified in place)
+ * @param thread Thread context for resolving data2 when needed
+ * @return true if sentinel values were resolved, false otherwise
+ */
+bool
+rb_thread_resolve_unblock_function(rb_unblock_function_t **unblock_function, void **data2, struct rb_thread_struct *thread)
+{
+ rb_unblock_function_t *ubf = *unblock_function;
+
+ if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
+ *unblock_function = ubf_select;
+ *data2 = thread;
+ return true;
+ }
+ return false;
+}
+
void *
rb_nogvl(void *(*func)(void *), void *data1,
rb_unblock_function_t *ubf, void *data2,
@@ -1570,11 +1589,9 @@ rb_nogvl(void *(*func)(void *), void *data1,
bool is_main_thread = vm->ractor.main_thread == th;
int saved_errno = 0;
- if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
- ubf = ubf_select;
- data2 = th;
- }
- else if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
+ rb_thread_resolve_unblock_function(&ubf, &data2, th);
+
+ if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
vm->ubf_async_safe = 1;
}
@@ -6210,7 +6227,11 @@ threadptr_interrupt_exec_exec(rb_thread_t *th)
RUBY_DEBUG_LOG("task:%p", task);
if (task) {
- (*task->func)(task->data);
+ if (task->flags & rb_interrupt_exec_flag_new_thread) {
+ rb_thread_create(task->func, task->data);
+ } else {
+ (*task->func)(task->data);
+ }
ruby_xfree(task);
}
else {
@@ -6233,43 +6254,15 @@ threadptr_interrupt_exec_cleanup(rb_thread_t *th)
rb_native_mutex_unlock(&th->interrupt_lock);
}
-struct interrupt_ractor_new_thread_data {
- rb_interrupt_exec_func_t *func;
- void *data;
-};
-
-static VALUE
-interrupt_ractor_new_thread_func(void *data)
-{
- struct interrupt_ractor_new_thread_data d = *(struct interrupt_ractor_new_thread_data *)data;
- ruby_xfree(data);
-
- d.func(d.data);
- return Qnil;
-}
-
-static VALUE
-interrupt_ractor_func(void *data)
-{
- rb_thread_create(interrupt_ractor_new_thread_func, data);
- return Qnil;
-}
-
// native thread safe
// func/data should be native thread safe
void
rb_ractor_interrupt_exec(struct rb_ractor_struct *target_r,
rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
{
- struct interrupt_ractor_new_thread_data *d = ALLOC(struct interrupt_ractor_new_thread_data);
-
RUBY_DEBUG_LOG("flags:%d", (int)flags);
- d->func = func;
- d->data = data;
rb_thread_t *main_th = target_r->threads.main;
- rb_threadptr_interrupt_exec(main_th, interrupt_ractor_func, d, flags);
-
- // TODO MEMO: we can create a new thread in a ractor, but not sure how to do that now.
+ rb_threadptr_interrupt_exec(main_th, func, data, flags | rb_interrupt_exec_flag_new_thread);
}
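rb_thread_resolve_unblock_function above centralizes a substitution that rb_nogvl previously did inline. The following is a minimal model of that sentinel-resolution pattern; the names (ubf_t, UBF_IO, UBF_PROCESS, default_ubf, resolve_ubf) are hypothetical stand-ins for CRuby's.

/*
 * Callers may pass special marker values instead of a real unblock function;
 * they are swapped for a default handler plus per-thread data in one place.
 */
#include <stdbool.h>

typedef void (*ubf_t)(void *);

#define UBF_IO      ((ubf_t)-1)   /* sentinel: "unblock a thread stuck in I/O" */
#define UBF_PROCESS ((ubf_t)-2)   /* sentinel: "unblock a thread waiting on a process" */

static void
default_ubf(void *thread_data)
{
    (void)thread_data;            /* e.g. poke a wakeup pipe or send a signal */
}

/* Returns true when a sentinel was replaced, mirroring the helper's contract. */
static bool
resolve_ubf(ubf_t *ubf, void **data2, void *thread)
{
    if (*ubf == UBF_IO || *ubf == UBF_PROCESS) {
        *ubf = default_ubf;
        *data2 = thread;
        return true;
    }
    return false;
}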
diff --git a/thread_none.c b/thread_none.c
index d535d9af4c..38686e17c1 100644
--- a/thread_none.c
+++ b/thread_none.c
@@ -137,6 +137,12 @@ ruby_mn_threads_params(void)
{
}
+static void
+native_thread_destroy_atfork(struct rb_native_thread *nt)
+{
+ /* no-op */
+}
+
static int
native_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
{
diff --git a/thread_pthread.c b/thread_pthread.c
index f9352bbb56..377e1d9f64 100644
--- a/thread_pthread.c
+++ b/thread_pthread.c
@@ -1817,6 +1817,27 @@ native_thread_assign(struct rb_native_thread *nt, rb_thread_t *th)
}
static void
+native_thread_destroy_atfork(struct rb_native_thread *nt)
+{
+ if (nt) {
+ /* We can't call rb_native_cond_destroy here because according to the
+ * specs of pthread_cond_destroy:
+ *
+ * Attempting to destroy a condition variable upon which other threads
+ * are currently blocked results in undefined behavior.
+ *
+ * Specifically, glibc's pthread_cond_destroy waits on all the other
+ * listeners. Since after forking all the threads are dead, the condition
+ * variable's listeners will never wake up, so it will hang forever.
+ */
+
+ RB_ALTSTACK_FREE(nt->altstack);
+ ruby_xfree(nt->nt_context);
+ ruby_xfree(nt);
+ }
+}
+
+static void
native_thread_destroy(struct rb_native_thread *nt)
{
if (nt) {
@@ -1826,9 +1847,7 @@ native_thread_destroy(struct rb_native_thread *nt)
rb_native_cond_destroy(&nt->cond.intr);
}
- RB_ALTSTACK_FREE(nt->altstack);
- ruby_xfree(nt->nt_context);
- ruby_xfree(nt);
+ native_thread_destroy_atfork(nt);
}
}
diff --git a/thread_win32.c b/thread_win32.c
index ed8a99dd88..576f617e8d 100644
--- a/thread_win32.c
+++ b/thread_win32.c
@@ -617,6 +617,12 @@ native_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
th->ec->machine.stack_maxsize = size - space;
}
+static void
+native_thread_destroy_atfork(struct rb_native_thread *nt)
+{
+ /* no-op */
+}
+
#ifndef InterlockedExchangePointer
#define InterlockedExchangePointer(t, v) \
(void *)InterlockedExchange((long *)(t), (long)(v))
diff --git a/time.c b/time.c
index 1b02cf4259..0e91521db1 100644
--- a/time.c
+++ b/time.c
@@ -249,6 +249,7 @@ divmodv(VALUE n, VALUE d, VALUE *q, VALUE *r)
# define FIXWV2WINT(w) FIX2LONG(WIDEVAL_GET(w))
#endif
+#define SIZEOF_WIDEINT SIZEOF_INT64_T
#define POSFIXWVABLE(wi) ((wi) < FIXWV_MAX+1)
#define NEGFIXWVABLE(wi) ((wi) >= FIXWV_MIN)
#define FIXWV_P(w) FIXWINT_P(WIDEVAL_GET(w))
@@ -1891,7 +1892,7 @@ time_mark(void *ptr)
{
struct time_object *tobj = ptr;
if (!FIXWV_P(tobj->timew)) {
- rb_gc_mark_movable(WIDEVAL_GET(tobj->timew));
+ rb_gc_mark_movable(w2v(tobj->timew));
}
rb_gc_mark_movable(tobj->vtm.year);
rb_gc_mark_movable(tobj->vtm.subsecx);
@@ -1904,7 +1905,7 @@ time_compact(void *ptr)
{
struct time_object *tobj = ptr;
if (!FIXWV_P(tobj->timew)) {
- WIDEVAL_GET(tobj->timew) = rb_gc_location(WIDEVAL_GET(tobj->timew));
+ WIDEVAL_GET(tobj->timew) = WIDEVAL_WRAP(rb_gc_location(w2v(tobj->timew)));
}
tobj->vtm.year = rb_gc_location(tobj->vtm.year);
@@ -1968,11 +1969,11 @@ time_modify(VALUE time)
}
static wideval_t
-timenano2timew(time_t sec, long nsec)
+timenano2timew(wideint_t sec, long nsec)
{
wideval_t timew;
- timew = rb_time_magnify(TIMET2WV(sec));
+ timew = rb_time_magnify(WINT2WV(sec));
if (nsec)
timew = wadd(timew, wmulquoll(WINT2WV(nsec), TIME_SCALE, 1000000000));
return timew;
@@ -2747,15 +2748,15 @@ only_year:
}
static void
-subsec_normalize(time_t *secp, long *subsecp, const long maxsubsec)
+subsec_normalize(wideint_t *secp, long *subsecp, const long maxsubsec)
{
- time_t sec = *secp;
+ wideint_t sec = *secp;
long subsec = *subsecp;
long sec2;
if (UNLIKELY(subsec >= maxsubsec)) { /* subsec positive overflow */
sec2 = subsec / maxsubsec;
- if (TIMET_MAX - sec2 < sec) {
+ if (WIDEINT_MAX - sec2 < sec) {
rb_raise(rb_eRangeError, "out of Time range");
}
subsec -= sec2 * maxsubsec;
@@ -2763,16 +2764,12 @@ subsec_normalize(time_t *secp, long *subsecp, const long maxsubsec)
}
else if (UNLIKELY(subsec < 0)) { /* subsec negative overflow */
sec2 = NDIV(subsec, maxsubsec); /* negative div */
- if (sec < TIMET_MIN - sec2) {
+ if (sec < WIDEINT_MIN - sec2) {
rb_raise(rb_eRangeError, "out of Time range");
}
subsec -= sec2 * maxsubsec;
sec += sec2;
}
-#ifndef NEGATIVE_TIME_T
- if (sec < 0)
- rb_raise(rb_eArgError, "time must be positive");
-#endif
*secp = sec;
*subsecp = subsec;
}
@@ -2780,13 +2777,6 @@ subsec_normalize(time_t *secp, long *subsecp, const long maxsubsec)
#define time_usec_normalize(secp, usecp) subsec_normalize(secp, usecp, 1000000)
#define time_nsec_normalize(secp, nsecp) subsec_normalize(secp, nsecp, 1000000000)
-static wideval_t
-nsec2timew(time_t sec, long nsec)
-{
- time_nsec_normalize(&sec, &nsec);
- return timenano2timew(sec, nsec);
-}
-
static VALUE
time_new_timew(VALUE klass, wideval_t timew)
{
@@ -2800,25 +2790,39 @@ time_new_timew(VALUE klass, wideval_t timew)
return time;
}
+static wideint_t
+TIMETtoWIDEINT(time_t t)
+{
+#if SIZEOF_TIME_T * CHAR_BIT - (SIGNEDNESS_OF_TIME_T < 0) > \
+ SIZEOF_WIDEINT * CHAR_BIT - 1
+ /* compare in bit size without sign bit */
+ if (t > WIDEINT_MAX) rb_raise(rb_eArgError, "out of Time range");
+#endif
+ return (wideint_t)t;
+}
+
VALUE
rb_time_new(time_t sec, long usec)
{
- time_usec_normalize(&sec, &usec);
- return time_new_timew(rb_cTime, timenano2timew(sec, usec * 1000));
+ wideint_t isec = TIMETtoWIDEINT(sec);
+ time_usec_normalize(&isec, &usec);
+ return time_new_timew(rb_cTime, timenano2timew(isec, usec * 1000));
}
/* returns localtime time object */
VALUE
rb_time_nano_new(time_t sec, long nsec)
{
- return time_new_timew(rb_cTime, nsec2timew(sec, nsec));
+ wideint_t isec = TIMETtoWIDEINT(sec);
+ time_nsec_normalize(&isec, &nsec);
+ return time_new_timew(rb_cTime, timenano2timew(isec, nsec));
}
VALUE
rb_time_timespec_new(const struct timespec *ts, int offset)
{
struct time_object *tobj;
- VALUE time = time_new_timew(rb_cTime, nsec2timew(ts->tv_sec, ts->tv_nsec));
+ VALUE time = rb_time_nano_new(ts->tv_sec, ts->tv_nsec);
if (-86400 < offset && offset < 86400) { /* fixoff */
GetTimeval(time, tobj);
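The widened subsec_normalize above carries out-of-range sub-second values into the seconds component before checking for Time range overflow. Here is a standalone sketch of that carry/borrow arithmetic, using int64_t in place of wideint_t and omitting the range checks; it is not CRuby code.

/*
 * Carry a too-large nanosecond count into seconds, or borrow from seconds when
 * the nanosecond count is negative, so that 0 <= nsec < 10^9 afterwards.
 */
#include <assert.h>
#include <stdint.h>

static void
nsec_normalize(int64_t *secp, long *nsecp)
{
    const long MAX = 1000000000L;
    int64_t sec = *secp;
    long nsec = *nsecp;

    if (nsec >= MAX) {                            /* positive overflow: carry into seconds */
        long carry = nsec / MAX;
        nsec -= carry * MAX;
        sec += carry;
    }
    else if (nsec < 0) {                          /* negative: borrow from seconds */
        long borrow = (nsec - (MAX - 1)) / MAX;   /* floor division for negatives */
        nsec -= borrow * MAX;
        sec += borrow;
    }
    *secp = sec;
    *nsecp = nsec;
}

int
main(void)
{
    int64_t sec = 10; long nsec = 1500000000L;
    nsec_normalize(&sec, &nsec);
    assert(sec == 11 && nsec == 500000000L);

    sec = 10; nsec = -1L;
    nsec_normalize(&sec, &nsec);
    assert(sec == 9 && nsec == 999999999L);
    return 0;
}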
diff --git a/variable.c b/variable.c
index 3c8b2c6cc2..6bd9f69d06 100644
--- a/variable.c
+++ b/variable.c
@@ -1255,20 +1255,23 @@ rb_mark_generic_ivar(VALUE obj)
void
rb_free_generic_ivar(VALUE obj)
{
- st_data_t key = (st_data_t)obj, value;
+ if (rb_obj_exivar_p(obj)) {
+ st_data_t key = (st_data_t)obj, value;
- bool too_complex = rb_shape_obj_too_complex_p(obj);
+ bool too_complex = rb_shape_obj_too_complex_p(obj);
- RB_VM_LOCKING() {
- if (st_delete(generic_fields_tbl_no_ractor_check(obj), &key, &value)) {
- struct gen_fields_tbl *fields_tbl = (struct gen_fields_tbl *)value;
+ RB_VM_LOCKING() {
+ if (st_delete(generic_fields_tbl_no_ractor_check(obj), &key, &value)) {
+ struct gen_fields_tbl *fields_tbl = (struct gen_fields_tbl *)value;
- if (UNLIKELY(too_complex)) {
- st_free_table(fields_tbl->as.complex.table);
- }
+ if (UNLIKELY(too_complex)) {
+ st_free_table(fields_tbl->as.complex.table);
+ }
- xfree(fields_tbl);
+ xfree(fields_tbl);
+ }
}
+ RBASIC_SET_SHAPE_ID(obj, ROOT_SHAPE_ID);
}
}
@@ -1307,7 +1310,7 @@ rb_obj_field_get(VALUE obj, shape_id_t target_shape_id)
if (BUILTIN_TYPE(obj) == T_CLASS || BUILTIN_TYPE(obj) == T_MODULE) {
ASSERT_vm_locking();
- VALUE field_obj = RCLASS_FIELDS_OBJ(obj);
+ VALUE field_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
if (field_obj) {
return rb_obj_field_get(field_obj, target_shape_id);
}
@@ -1325,7 +1328,7 @@ rb_obj_field_get(VALUE obj, shape_id_t target_shape_id)
fields_hash = ROBJECT_FIELDS_HASH(obj);
break;
default:
- RUBY_ASSERT(FL_TEST_RAW(obj, FL_EXIVAR));
+ RUBY_ASSERT(rb_obj_exivar_p(obj));
struct gen_fields_tbl *fields_tbl = NULL;
rb_ivar_generic_fields_tbl_lookup(obj, &fields_tbl);
RUBY_ASSERT(fields_tbl);
@@ -1356,7 +1359,7 @@ rb_obj_field_get(VALUE obj, shape_id_t target_shape_id)
fields = ROBJECT_FIELDS(obj);
break;
default:
- RUBY_ASSERT(FL_TEST_RAW(obj, FL_EXIVAR));
+ RUBY_ASSERT(rb_obj_exivar_p(obj));
struct gen_fields_tbl *fields_tbl = NULL;
rb_ivar_generic_fields_tbl_lookup(obj, &fields_tbl);
RUBY_ASSERT(fields_tbl);
@@ -1371,44 +1374,36 @@ rb_ivar_lookup(VALUE obj, ID id, VALUE undef)
{
if (SPECIAL_CONST_P(obj)) return undef;
- if (BUILTIN_TYPE(obj) == T_CLASS || BUILTIN_TYPE(obj) == T_MODULE) {
- VALUE val = undef;
- RB_VM_LOCK_ENTER();
- {
- VALUE fields_obj = RCLASS_FIELDS_OBJ(obj);
- if (fields_obj) {
- val = rb_ivar_lookup(fields_obj, id, undef);
- }
- }
- RB_VM_LOCK_LEAVE();
-
- if (val != undef &&
- rb_is_instance_id(id) &&
- UNLIKELY(!rb_ractor_main_p()) &&
- !rb_ractor_shareable_p(val)) {
- rb_raise(rb_eRactorIsolationError,
- "can not get unshareable values from instance variables of classes/modules from non-main Ractors");
- }
- return val;
- }
-
shape_id_t shape_id;
- VALUE * ivar_list;
- shape_id = RBASIC_SHAPE_ID(obj);
+ VALUE *ivar_list;
switch (BUILTIN_TYPE(obj)) {
case T_CLASS:
case T_MODULE:
{
- rb_bug("Unreachable");
+ VALUE val = undef;
+ VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
+ if (fields_obj) {
+ val = rb_ivar_lookup(fields_obj, id, undef);
+ }
+
+ if (val != undef &&
+ rb_is_instance_id(id) &&
+ UNLIKELY(!rb_ractor_main_p()) &&
+ !rb_ractor_shareable_p(val)) {
+ rb_raise(rb_eRactorIsolationError,
+ "can not get unshareable values from instance variables of classes/modules from non-main Ractors");
+ }
+ return val;
}
case T_IMEMO:
// Handled like T_OBJECT
{
RUBY_ASSERT(IMEMO_TYPE_P(obj, imemo_class_fields));
+ shape_id = RBASIC_SHAPE_ID(obj);
if (rb_shape_too_complex_p(shape_id)) {
- st_table * iv_table = rb_imemo_class_fields_complex_tbl(obj);
+ st_table *iv_table = rb_imemo_class_fields_complex_tbl(obj);
VALUE val;
if (rb_st_lookup(iv_table, (st_data_t)id, (st_data_t *)&val)) {
return val;
@@ -1424,8 +1419,9 @@ rb_ivar_lookup(VALUE obj, ID id, VALUE undef)
}
case T_OBJECT:
{
+ shape_id = RBASIC_SHAPE_ID(obj);
if (rb_shape_too_complex_p(shape_id)) {
- st_table * iv_table = ROBJECT_FIELDS_HASH(obj);
+ st_table *iv_table = ROBJECT_FIELDS_HASH(obj);
VALUE val;
if (rb_st_lookup(iv_table, (st_data_t)id, (st_data_t *)&val)) {
return val;
@@ -1440,7 +1436,8 @@ rb_ivar_lookup(VALUE obj, ID id, VALUE undef)
break;
}
default:
- if (FL_TEST_RAW(obj, FL_EXIVAR)) {
+ shape_id = RBASIC_SHAPE_ID(obj);
+ if (rb_obj_exivar_p(obj)) {
struct gen_fields_tbl *fields_tbl;
rb_gen_fields_tbl_get(obj, id, &fields_tbl);
@@ -1492,15 +1489,18 @@ rb_ivar_delete(VALUE obj, ID id, VALUE undef)
if (BUILTIN_TYPE(obj) == T_CLASS || BUILTIN_TYPE(obj) == T_MODULE) {
IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(id);
- VALUE fields_obj = RCLASS_FIELDS_OBJ(obj);
+ VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
if (fields_obj) {
- RB_VM_LOCK_ENTER();
- {
+ if (rb_multi_ractor_p()) {
+ fields_obj = rb_imemo_class_fields_clone(fields_obj);
+ val = rb_ivar_delete(fields_obj, id, undef);
+ RCLASS_WRITABLE_SET_FIELDS_OBJ(obj, fields_obj);
+ }
+ else {
val = rb_ivar_delete(fields_obj, id, undef);
}
- RB_VM_LOCK_LEAVE();
- return val;
}
+ return val;
}
shape_id_t old_shape_id = rb_obj_shape_id(obj);
@@ -1545,13 +1545,18 @@ rb_ivar_delete(VALUE obj, ID id, VALUE undef)
RUBY_ASSERT(removed_shape_id != INVALID_SHAPE_ID);
- attr_index_t new_fields_count = RSHAPE_LEN(next_shape_id);
-
attr_index_t removed_index = RSHAPE_INDEX(removed_shape_id);
val = fields[removed_index];
- size_t trailing_fields = new_fields_count - removed_index;
- MEMMOVE(&fields[removed_index], &fields[removed_index + 1], VALUE, trailing_fields);
+ attr_index_t new_fields_count = RSHAPE_LEN(next_shape_id);
+ if (new_fields_count) {
+ size_t trailing_fields = new_fields_count - removed_index;
+
+ MEMMOVE(&fields[removed_index], &fields[removed_index + 1], VALUE, trailing_fields);
+ }
+ else {
+ rb_free_generic_ivar(obj);
+ }
if (RB_TYPE_P(obj, T_OBJECT) &&
!RB_FL_TEST_RAW(obj, ROBJECT_EMBED) &&
@@ -1614,8 +1619,7 @@ static shape_id_t
obj_transition_too_complex(VALUE obj, st_table *table)
{
if (BUILTIN_TYPE(obj) == T_CLASS || BUILTIN_TYPE(obj) == T_MODULE) {
- RUBY_ASSERT(RCLASS_FIELDS_OBJ(obj));
- return obj_transition_too_complex(RCLASS_FIELDS_OBJ(obj), table);
+ return obj_transition_too_complex(RCLASS_WRITABLE_ENSURE_FIELDS_OBJ(obj), table);
}
RUBY_ASSERT(!rb_shape_obj_too_complex_p(obj));
@@ -1814,56 +1818,40 @@ general_field_set(VALUE obj, shape_id_t target_shape_id, VALUE val, void *data,
struct gen_fields_lookup_ensure_size {
VALUE obj;
ID id;
- struct gen_fields_tbl *fields_tbl;
shape_id_t shape_id;
bool resize;
};
-static int
-generic_fields_lookup_ensure_size(st_data_t *k, st_data_t *v, st_data_t u, int existing)
-{
- ASSERT_vm_locking();
-
- struct gen_fields_lookup_ensure_size *fields_lookup = (struct gen_fields_lookup_ensure_size *)u;
- struct gen_fields_tbl *fields_tbl = existing ? (struct gen_fields_tbl *)*v : NULL;
-
- if (!existing || fields_lookup->resize) {
- if (existing) {
- RUBY_ASSERT(RSHAPE_TYPE_P(fields_lookup->shape_id, SHAPE_IVAR) || RSHAPE_TYPE_P(fields_lookup->shape_id, SHAPE_OBJ_ID));
- RUBY_ASSERT(RSHAPE_CAPACITY(RSHAPE_PARENT(fields_lookup->shape_id)) < RSHAPE_CAPACITY(fields_lookup->shape_id));
- }
- else {
- FL_SET_RAW((VALUE)*k, FL_EXIVAR);
- }
-
- fields_tbl = gen_fields_tbl_resize(fields_tbl, RSHAPE_CAPACITY(fields_lookup->shape_id));
- *v = (st_data_t)fields_tbl;
- }
-
- RUBY_ASSERT(FL_TEST((VALUE)*k, FL_EXIVAR));
-
- fields_lookup->fields_tbl = fields_tbl;
- if (fields_lookup->shape_id) {
- rb_obj_set_shape_id(fields_lookup->obj, fields_lookup->shape_id);
- }
-
- return ST_CONTINUE;
-}
-
static VALUE *
generic_ivar_set_shape_fields(VALUE obj, void *data)
{
RUBY_ASSERT(!rb_shape_obj_too_complex_p(obj));
struct gen_fields_lookup_ensure_size *fields_lookup = data;
+ struct gen_fields_tbl *fields_tbl = NULL;
+    // We can't use st_update because resizing the fields table can trigger GC,
+    // which may modify the st_table and even rebuild it.
RB_VM_LOCKING() {
- st_update(generic_fields_tbl(obj, fields_lookup->id, false), (st_data_t)obj, generic_fields_lookup_ensure_size, (st_data_t)fields_lookup);
- }
+ st_table *tbl = generic_fields_tbl(obj, fields_lookup->id, false);
+ int existing = st_lookup(tbl, (st_data_t)obj, (st_data_t *)&fields_tbl);
- FL_SET_RAW(obj, FL_EXIVAR);
+ if (!existing || fields_lookup->resize) {
+ if (existing) {
+ RUBY_ASSERT(RSHAPE_TYPE_P(fields_lookup->shape_id, SHAPE_IVAR) || RSHAPE_TYPE_P(fields_lookup->shape_id, SHAPE_OBJ_ID));
+ RUBY_ASSERT(RSHAPE_CAPACITY(RSHAPE_PARENT(fields_lookup->shape_id)) < RSHAPE_CAPACITY(fields_lookup->shape_id));
+ }
- return fields_lookup->fields_tbl->as.shape.fields;
+ fields_tbl = gen_fields_tbl_resize(fields_tbl, RSHAPE_CAPACITY(fields_lookup->shape_id));
+ st_insert(tbl, (st_data_t)obj, (st_data_t)fields_tbl);
+ }
+
+ if (fields_lookup->shape_id) {
+ rb_obj_set_shape_id(fields_lookup->obj, fields_lookup->shape_id);
+ }
+ }
+
+ return fields_tbl->as.shape.fields;
}
static void
@@ -1886,7 +1874,6 @@ static shape_id_t
generic_ivar_set_transition_too_complex(VALUE obj, void *_data)
{
shape_id_t new_shape_id = rb_evict_fields_to_hash(obj);
- FL_SET_RAW(obj, FL_EXIVAR);
return new_shape_id;
}
@@ -1903,8 +1890,6 @@ generic_ivar_set_too_complex_table(VALUE obj, void *data)
RB_VM_LOCKING() {
st_insert(generic_fields_tbl(obj, fields_lookup->id, false), (st_data_t)obj, (st_data_t)fields_tbl);
}
-
- FL_SET_RAW(obj, FL_EXIVAR);
}
RUBY_ASSERT(rb_shape_obj_too_complex_p(obj));
@@ -2062,7 +2047,7 @@ rb_obj_set_shape_id(VALUE obj, shape_id_t shape_id)
if (BUILTIN_TYPE(obj) == T_CLASS || BUILTIN_TYPE(obj) == T_MODULE) {
// Avoid creating the fields_obj just to freeze the class
if (!(shape_id == SPECIAL_CONST_SHAPE_ID && old_shape_id == ROOT_SHAPE_ID)) {
- RBASIC_SET_SHAPE_ID(RCLASS_ENSURE_FIELDS_OBJ(obj), shape_id);
+ RBASIC_SET_SHAPE_ID(RCLASS_WRITABLE_ENSURE_FIELDS_OBJ(obj), shape_id);
}
}
// FIXME: How to do multi-shape?
@@ -2128,8 +2113,6 @@ rb_ivar_set_internal(VALUE obj, ID id, VALUE val)
ivar_set(obj, id, val);
}
-static void class_field_set(VALUE obj, shape_id_t target_shape_id, VALUE val);
-
void
rb_obj_field_set(VALUE obj, shape_id_t target_shape_id, VALUE val)
{
@@ -2139,8 +2122,8 @@ rb_obj_field_set(VALUE obj, shape_id_t target_shape_id, VALUE val)
break;
case T_CLASS:
case T_MODULE:
- ASSERT_vm_locking();
- class_field_set(obj, target_shape_id, val);
+        // The only field is object_id, and T_CLASS handles it differently.
+ rb_bug("Unreachable");
break;
default:
generic_field_set(obj, target_shape_id, val);
@@ -2200,8 +2183,8 @@ rb_ivar_defined(VALUE obj, ID id)
switch (BUILTIN_TYPE(obj)) {
case T_CLASS:
case T_MODULE:
- RB_VM_LOCKING() {
- VALUE fields_obj = RCLASS_FIELDS_OBJ(obj);
+ {
+ VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
if (fields_obj) {
defined = ivar_defined0(fields_obj, id);
}
@@ -2347,8 +2330,8 @@ rb_copy_generic_ivar(VALUE dest, VALUE obj)
rb_check_frozen(dest);
- if (!FL_TEST(obj, FL_EXIVAR)) {
- goto clear;
+ if (!rb_obj_exivar_p(obj)) {
+ return;
}
unsigned long src_num_ivs = rb_ivar_count(obj);
@@ -2362,8 +2345,6 @@ rb_copy_generic_ivar(VALUE dest, VALUE obj)
if (gen_fields_tbl_count(obj, obj_fields_tbl) == 0)
goto clear;
- FL_SET(dest, FL_EXIVAR);
-
if (rb_shape_too_complex_p(src_shape_id)) {
rb_shape_copy_complex_ivars(dest, obj, src_shape_id, obj_fields_tbl->as.complex.table);
return;
@@ -2387,7 +2368,6 @@ rb_copy_generic_ivar(VALUE dest, VALUE obj)
if (!RSHAPE_LEN(dest_shape_id)) {
rb_obj_set_shape_id(dest, dest_shape_id);
- FL_UNSET(dest, FL_EXIVAR);
return;
}
@@ -2412,25 +2392,16 @@ rb_copy_generic_ivar(VALUE dest, VALUE obj)
return;
clear:
- if (FL_TEST(dest, FL_EXIVAR)) {
- RBASIC_SET_SHAPE_ID(dest, ROOT_SHAPE_ID);
- rb_free_generic_ivar(dest);
- FL_UNSET(dest, FL_EXIVAR);
- }
+ rb_free_generic_ivar(dest);
}
void
rb_replace_generic_ivar(VALUE clone, VALUE obj)
{
- RUBY_ASSERT(FL_TEST(obj, FL_EXIVAR));
-
RB_VM_LOCKING() {
st_data_t fields_tbl, obj_data = (st_data_t)obj;
if (st_delete(generic_fields_tbl_, &obj_data, &fields_tbl)) {
- FL_UNSET_RAW(obj, FL_EXIVAR);
-
st_insert(generic_fields_tbl_, (st_data_t)clone, fields_tbl);
- FL_SET_RAW(clone, FL_EXIVAR);
}
else {
rb_bug("unreachable");
@@ -2453,16 +2424,16 @@ rb_field_foreach(VALUE obj, rb_ivar_foreach_callback_func *func, st_data_t arg,
break;
case T_CLASS:
case T_MODULE:
- IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(0);
- RB_VM_LOCKING() {
- VALUE fields_obj = RCLASS_FIELDS_OBJ(obj);
+ {
+ IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(0);
+ VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
if (fields_obj) {
class_fields_each(fields_obj, func, arg, ivar_only);
}
}
break;
default:
- if (FL_TEST_RAW(obj, FL_EXIVAR)) {
+ if (rb_obj_exivar_p(obj)) {
gen_fields_each(obj, func, arg, ivar_only);
}
break;
@@ -2488,7 +2459,7 @@ rb_ivar_count(VALUE obj)
case T_CLASS:
case T_MODULE:
{
- VALUE fields_obj = RCLASS_FIELDS_OBJ(obj);
+ VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
if (!fields_obj) {
return 0;
}
@@ -2498,7 +2469,7 @@ rb_ivar_count(VALUE obj)
return RBASIC_FIELDS_COUNT(fields_obj);
}
default:
- if (FL_TEST(obj, FL_EXIVAR)) {
+ if (rb_obj_exivar_p(obj)) {
struct gen_fields_tbl *fields_tbl;
if (rb_gen_fields_tbl_get(obj, 0, &fields_tbl)) {
@@ -4702,15 +4673,16 @@ rb_iv_set(VALUE obj, const char *name, VALUE val)
return rb_ivar_set(obj, id, val);
}
-static int
-class_ivar_set(VALUE obj, ID id, VALUE val)
+static bool
+class_fields_ivar_set(VALUE klass, VALUE fields_obj, ID id, VALUE val, bool concurrent, VALUE *new_fields_obj)
{
bool existing = true;
- const VALUE original_fields_obj = RCLASS_FIELDS_OBJ(obj);
- VALUE fields_obj = original_fields_obj ? original_fields_obj : rb_imemo_class_fields_new(obj, 1);
+ const VALUE original_fields_obj = fields_obj;
+ fields_obj = original_fields_obj ? original_fields_obj : rb_imemo_class_fields_new(klass, 1);
- shape_id_t next_shape_id = 0;
shape_id_t current_shape_id = RBASIC_SHAPE_ID(fields_obj);
+ shape_id_t next_shape_id = current_shape_id;
+
if (UNLIKELY(rb_shape_too_complex_p(current_shape_id))) {
goto too_complex;
}
@@ -4727,7 +4699,7 @@ class_ivar_set(VALUE obj, ID id, VALUE val)
next_shape_id = rb_shape_transition_add_ivar(fields_obj, id);
if (UNLIKELY(rb_shape_too_complex_p(next_shape_id))) {
attr_index_t current_len = RSHAPE_LEN(current_shape_id);
- fields_obj = rb_imemo_class_fields_new_complex(obj, current_len + 1);
+ fields_obj = rb_imemo_class_fields_new_complex(klass, current_len + 1);
if (current_len) {
rb_obj_copy_fields_to_hash_table(original_fields_obj, rb_imemo_class_fields_complex_tbl(fields_obj));
RBASIC_SET_SHAPE_ID(fields_obj, next_shape_id);
@@ -4738,10 +4710,12 @@ class_ivar_set(VALUE obj, ID id, VALUE val)
attr_index_t next_capacity = RSHAPE_CAPACITY(next_shape_id);
attr_index_t current_capacity = RSHAPE_CAPACITY(current_shape_id);
- if (UNLIKELY(next_capacity != current_capacity)) {
- RUBY_ASSERT(next_capacity > current_capacity);
- // We allocate a new fields_obj so that we're embedded as long as possible
- fields_obj = rb_imemo_class_fields_new(obj, next_capacity);
+ if (concurrent || next_capacity != current_capacity) {
+ RUBY_ASSERT(concurrent || next_capacity > current_capacity);
+
+ // We allocate a new fields_obj even when concurrency isn't a concern
+ // so that we're embedded as long as possible.
+ fields_obj = rb_imemo_class_fields_new(klass, next_capacity);
if (original_fields_obj) {
MEMCPY(rb_imemo_class_fields_ptr(fields_obj), rb_imemo_class_fields_ptr(original_fields_obj), VALUE, RSHAPE_LEN(current_shape_id));
}
@@ -4753,20 +4727,12 @@ class_ivar_set(VALUE obj, ID id, VALUE val)
VALUE *fields = rb_imemo_class_fields_ptr(fields_obj);
RB_OBJ_WRITE(fields_obj, &fields[index], val);
+
if (!existing) {
RBASIC_SET_SHAPE_ID(fields_obj, next_shape_id);
}
- if (fields_obj != original_fields_obj) {
- RCLASS_SET_FIELDS_OBJ(obj, fields_obj);
- // TODO: What should we set as the T_CLASS shape_id?
- // In most case we can replicate the single `fields_obj` shape
- // but in namespaced case?
- // Perhaps INVALID_SHAPE_ID?
- RBASIC_SET_SHAPE_ID(obj, next_shape_id);
- }
-
- RB_GC_GUARD(fields_obj);
+ *new_fields_obj = fields_obj;
return existing;
too_complex:
@@ -4777,15 +4743,10 @@ too_complex:
if (fields_obj != original_fields_obj) {
RBASIC_SET_SHAPE_ID(fields_obj, next_shape_id);
- RCLASS_SET_FIELDS_OBJ(obj, fields_obj);
- // TODO: What should we set as the T_CLASS shape_id?
- // In most case we can replicate the single `fields_obj` shape
- // but in namespaced case?
- // Perhaps INVALID_SHAPE_ID?
- RBASIC_SET_SHAPE_ID(obj, next_shape_id);
}
}
- RB_GC_GUARD(fields_obj);
+
+ *new_fields_obj = fields_obj;
return existing;
}
@@ -4793,23 +4754,25 @@ int
rb_class_ivar_set(VALUE obj, ID id, VALUE val)
{
RUBY_ASSERT(RB_TYPE_P(obj, T_CLASS) || RB_TYPE_P(obj, T_MODULE));
- bool existing = false;
rb_check_frozen(obj);
rb_class_ensure_writable(obj);
- RB_VM_LOCKING() {
- existing = class_ivar_set(obj, id, val);
- }
+ const VALUE original_fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
+ VALUE new_fields_obj = 0;
- return existing;
-}
+ bool existing = class_fields_ivar_set(obj, original_fields_obj, id, val, rb_multi_ractor_p(), &new_fields_obj);
-static void
-class_field_set(VALUE obj, shape_id_t target_shape_id, VALUE val)
-{
- RUBY_ASSERT(RB_TYPE_P(obj, T_CLASS) || RB_TYPE_P(obj, T_MODULE));
- obj_field_set(RCLASS_ENSURE_FIELDS_OBJ(obj), target_shape_id, val);
+ if (new_fields_obj != original_fields_obj) {
+ RCLASS_WRITABLE_SET_FIELDS_OBJ(obj, new_fields_obj);
+
+ // TODO: What should we set as the T_CLASS shape_id?
+        // In most cases we can replicate the single `fields_obj` shape,
+        // but what about the namespaced case?
+ // Perhaps INVALID_SHAPE_ID?
+ RBASIC_SET_SHAPE_ID(obj, RBASIC_SHAPE_ID(new_fields_obj));
+ }
+ return existing;
}
static int
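class_fields_ivar_set above moves class-level ivar writes to a copy-then-publish scheme when multiple Ractors are running: the fields buffer is never grown in place; a fresh one is filled and then swapped in. The following is a minimal model of that pattern only, with illustrative names and C11 atomics standing in for CRuby's write barriers and RCLASS_WRITABLE_SET_FIELDS_OBJ; reclamation of the old buffer (GC's job in CRuby) and error handling are omitted.

/*
 * Writers fill a complete copy and publish it with a single pointer store, so
 * concurrent readers see either the old buffer or the fully initialized new one.
 */
#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

typedef struct fields {
    size_t len;
    long values[];                 /* field slots */
} fields_t;

typedef struct {
    _Atomic(fields_t *) fields;    /* atomically published fields buffer */
} klass_t;

static fields_t *
fields_dup_with_room(const fields_t *old, size_t new_len)
{
    fields_t *copy = calloc(1, sizeof(fields_t) + new_len * sizeof(long));
    copy->len = new_len;
    if (old) memcpy(copy->values, old->values, old->len * sizeof(long));
    return copy;
}

static void
klass_field_set(klass_t *k, size_t index, long value)
{
    fields_t *old = atomic_load(&k->fields);
    size_t old_len = old ? old->len : 0;
    size_t new_len = index >= old_len ? index + 1 : old_len;

    fields_t *next = fields_dup_with_room(old, new_len);
    next->values[index] = value;

    atomic_store(&k->fields, next);    /* single publication point */
}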
diff --git a/vm.c b/vm.c
index 4b254eaea1..6f20d43ee4 100644
--- a/vm.c
+++ b/vm.c
@@ -736,8 +736,8 @@ vm_stat(int argc, VALUE *argv, VALUE self)
SET(constant_cache_invalidations, ruby_vm_constant_cache_invalidations);
SET(constant_cache_misses, ruby_vm_constant_cache_misses);
SET(global_cvar_state, ruby_vm_global_cvar_state);
- SET(next_shape_id, (rb_serial_t)GET_SHAPE_TREE()->next_shape_id);
- SET(shape_cache_size, (rb_serial_t)GET_SHAPE_TREE()->cache_size);
+ SET(next_shape_id, (rb_serial_t)rb_shape_tree.next_shape_id);
+ SET(shape_cache_size, (rb_serial_t)rb_shape_tree.cache_size);
#undef SET
#if USE_DEBUG_COUNTER
diff --git a/vm_callinfo.h b/vm_callinfo.h
index d3d0555485..0ce25c2c0f 100644
--- a/vm_callinfo.h
+++ b/vm_callinfo.h
@@ -297,14 +297,13 @@ struct rb_callcache {
} aux_;
};
-#define VM_CALLCACHE_UNMARKABLE FL_FREEZE
-#define VM_CALLCACHE_ON_STACK FL_EXIVAR
-
/* VM_CALLCACHE_IVAR used for IVAR/ATTRSET/STRUCT_AREF/STRUCT_ASET methods */
#define VM_CALLCACHE_IVAR IMEMO_FL_USER0
#define VM_CALLCACHE_BF IMEMO_FL_USER1
#define VM_CALLCACHE_SUPER IMEMO_FL_USER2
#define VM_CALLCACHE_REFINEMENT IMEMO_FL_USER3
+#define VM_CALLCACHE_UNMARKABLE IMEMO_FL_USER4
+#define VM_CALLCACHE_ON_STACK IMEMO_FL_USER5
enum vm_cc_type {
cc_type_normal, // chained from ccs
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index dbccc6bdbb..7efcdba8a4 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -1248,7 +1248,7 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
}
}
- fields_obj = RCLASS_FIELDS_OBJ(obj);
+ fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj);
if (!fields_obj) {
return default_value;
}
@@ -1258,7 +1258,7 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
break;
}
default:
- if (FL_TEST_RAW(obj, FL_EXIVAR)) {
+ if (rb_obj_exivar_p(obj)) {
struct gen_fields_tbl *fields_tbl;
rb_gen_fields_tbl_get(obj, id, &fields_tbl);
ivar_list = fields_tbl->as.shape.fields;
diff --git a/yjit.c b/yjit.c
index 2c51e6bf92..ae042a62aa 100644
--- a/yjit.c
+++ b/yjit.c
@@ -778,7 +778,7 @@ VALUE
rb_object_shape_count(void)
{
// next_shape_id starts from 0, so it's the same as the count
- return ULONG2NUM((unsigned long)GET_SHAPE_TREE()->next_shape_id);
+ return ULONG2NUM((unsigned long)rb_shape_tree.next_shape_id);
}
bool
@@ -799,6 +799,12 @@ rb_yjit_shape_capacity(shape_id_t shape_id)
return RSHAPE_CAPACITY(shape_id);
}
+attr_index_t
+rb_yjit_shape_index(shape_id_t shape_id)
+{
+ return RSHAPE_INDEX(shape_id);
+}
+
// Assert that we have the VM lock. Relevant mostly for multi ractor situations.
// The GC takes the lock before calling us, and this asserts that it indeed happens.
void
diff --git a/yjit/bindgen/src/main.rs b/yjit/bindgen/src/main.rs
index a139892741..41d383f8bd 100644
--- a/yjit/bindgen/src/main.rs
+++ b/yjit/bindgen/src/main.rs
@@ -95,13 +95,13 @@ fn main() {
// From shape.h
.allowlist_function("rb_obj_shape_id")
- .allowlist_function("rb_shape_lookup")
.allowlist_function("rb_shape_id_offset")
.allowlist_function("rb_shape_get_iv_index")
.allowlist_function("rb_shape_transition_add_ivar_no_warnings")
.allowlist_function("rb_yjit_shape_obj_too_complex_p")
.allowlist_function("rb_yjit_shape_too_complex_p")
.allowlist_function("rb_yjit_shape_capacity")
+ .allowlist_function("rb_yjit_shape_index")
.allowlist_var("SHAPE_ID_NUM_BITS")
// From ruby/internal/intern/object.h
@@ -228,6 +228,7 @@ fn main() {
.allowlist_function("rb_obj_as_string_result")
.allowlist_function("rb_str_byte_substr")
.allowlist_function("rb_str_substr_two_fixnums")
+ .allowlist_function("rb_str_dup_m")
// From include/ruby/internal/intern/parse.h
.allowlist_function("rb_backref_get")
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs
index 5f7d61f8b3..3e08857295 100644
--- a/yjit/src/codegen.rs
+++ b/yjit/src/codegen.rs
@@ -3128,8 +3128,6 @@ fn gen_set_ivar(
if new_shape_too_complex {
Some((next_shape_id, None, 0_usize))
} else {
- let current_shape = unsafe { rb_shape_lookup(current_shape_id) };
-
let current_capacity = unsafe { rb_yjit_shape_capacity(current_shape_id) };
let next_capacity = unsafe { rb_yjit_shape_capacity(next_shape_id) };
@@ -3138,7 +3136,7 @@ fn gen_set_ivar(
let needs_extension = next_capacity != current_capacity;
// We can write to the object, but we need to transition the shape
- let ivar_index = unsafe { (*current_shape).next_field_index } as usize;
+ let ivar_index = unsafe { rb_yjit_shape_index(next_shape_id) } as usize;
let needs_extension = if needs_extension {
Some((current_capacity, next_capacity))
@@ -6277,16 +6275,12 @@ fn jit_rb_str_dup(
jit_prepare_call_with_gc(jit, asm);
- // Check !FL_ANY_RAW(str, FL_EXIVAR), which is part of BARE_STRING_P.
let recv_opnd = asm.stack_pop(1);
let recv_opnd = asm.load(recv_opnd);
- let flags_opnd = Opnd::mem(64, recv_opnd, RUBY_OFFSET_RBASIC_FLAGS);
- asm.test(flags_opnd, Opnd::Imm(RUBY_FL_EXIVAR as i64));
- asm.jnz(Target::side_exit(Counter::send_str_dup_exivar));
// Call rb_str_dup
let stack_ret = asm.stack_push(Type::CString);
- let ret_opnd = asm.ccall(rb_str_dup as *const u8, vec![recv_opnd]);
+ let ret_opnd = asm.ccall(rb_str_dup_m as *const u8, vec![recv_opnd]);
asm.mov(stack_ret, ret_opnd);
true
diff --git a/yjit/src/cruby.rs b/yjit/src/cruby.rs
index ecb6475319..725a29fa70 100644
--- a/yjit/src/cruby.rs
+++ b/yjit/src/cruby.rs
@@ -448,18 +448,6 @@ impl VALUE {
unsafe { rb_obj_shape_id(self) }
}
- pub fn shape_of(self) -> *mut rb_shape {
- unsafe {
- let shape = rb_shape_lookup(self.shape_id_of());
-
- if shape.is_null() {
- panic!("Shape should not be null");
- } else {
- shape
- }
- }
- }
-
pub fn embedded_p(self) -> bool {
unsafe {
FL_TEST_RAW(self, VALUE(ROBJECT_EMBED as usize)) != VALUE(0)
diff --git a/yjit/src/cruby_bindings.inc.rs b/yjit/src/cruby_bindings.inc.rs
index d92e12c38b..1d7ffca165 100644
--- a/yjit/src/cruby_bindings.inc.rs
+++ b/yjit/src/cruby_bindings.inc.rs
@@ -225,10 +225,11 @@ pub const RUBY_FL_PROMOTED: ruby_fl_type = 32;
pub const RUBY_FL_UNUSED6: ruby_fl_type = 64;
pub const RUBY_FL_FINALIZE: ruby_fl_type = 128;
pub const RUBY_FL_TAINT: ruby_fl_type = 0;
+pub const RUBY_FL_EXIVAR: ruby_fl_type = 0;
pub const RUBY_FL_SHAREABLE: ruby_fl_type = 256;
pub const RUBY_FL_UNTRUSTED: ruby_fl_type = 0;
pub const RUBY_FL_UNUSED9: ruby_fl_type = 512;
-pub const RUBY_FL_EXIVAR: ruby_fl_type = 1024;
+pub const RUBY_FL_UNUSED10: ruby_fl_type = 1024;
pub const RUBY_FL_FREEZE: ruby_fl_type = 2048;
pub const RUBY_FL_USER0: ruby_fl_type = 4096;
pub const RUBY_FL_USER1: ruby_fl_type = 8192;
@@ -688,27 +689,6 @@ pub const VM_ENV_FLAG_ISOLATED: vm_frame_env_flags = 16;
pub type vm_frame_env_flags = u32;
pub type attr_index_t = u16;
pub type shape_id_t = u32;
-pub type redblack_id_t = u32;
-pub type redblack_node_t = redblack_node;
-#[repr(C)]
-pub struct rb_shape {
- pub edges: VALUE,
- pub edge_name: ID,
- pub ancestor_index: *mut redblack_node_t,
- pub parent_id: shape_id_t,
- pub next_field_index: attr_index_t,
- pub capacity: attr_index_t,
- pub type_: u8,
-}
-pub type rb_shape_t = rb_shape;
-#[repr(C)]
-#[derive(Debug, Copy, Clone)]
-pub struct redblack_node {
- pub key: ID,
- pub value: *mut rb_shape_t,
- pub l: redblack_id_t,
- pub r: redblack_id_t,
-}
#[repr(C)]
pub struct rb_cvar_class_tbl_entry {
pub index: u32,
@@ -1133,7 +1113,6 @@ extern "C" {
pub fn rb_obj_info(obj: VALUE) -> *const ::std::os::raw::c_char;
pub fn rb_ec_stack_check(ec: *mut rb_execution_context_struct) -> ::std::os::raw::c_int;
pub fn rb_shape_id_offset() -> i32;
- pub fn rb_shape_lookup(shape_id: shape_id_t) -> *mut rb_shape_t;
pub fn rb_obj_shape_id(obj: VALUE) -> shape_id_t;
pub fn rb_shape_get_iv_index(shape_id: shape_id_t, id: ID, value: *mut attr_index_t) -> bool;
pub fn rb_shape_transition_add_ivar_no_warnings(obj: VALUE, id: ID) -> shape_id_t;
@@ -1141,6 +1120,7 @@ extern "C" {
pub fn rb_gvar_set(arg1: ID, arg2: VALUE) -> VALUE;
pub fn rb_ensure_iv_list_size(obj: VALUE, current_len: u32, newsize: u32);
pub fn rb_vm_barrier();
+ pub fn rb_str_dup_m(str_: VALUE) -> VALUE;
pub fn rb_str_byte_substr(str_: VALUE, beg: VALUE, len: VALUE) -> VALUE;
pub fn rb_str_substr_two_fixnums(
str_: VALUE,
@@ -1265,6 +1245,7 @@ extern "C" {
pub fn rb_yjit_shape_too_complex_p(shape_id: shape_id_t) -> bool;
pub fn rb_yjit_shape_obj_too_complex_p(obj: VALUE) -> bool;
pub fn rb_yjit_shape_capacity(shape_id: shape_id_t) -> attr_index_t;
+ pub fn rb_yjit_shape_index(shape_id: shape_id_t) -> attr_index_t;
pub fn rb_yjit_assert_holding_vm_lock();
pub fn rb_yjit_sendish_sp_pops(ci: *const rb_callinfo) -> usize;
pub fn rb_yjit_invokeblock_sp_pops(ci: *const rb_callinfo) -> usize;
diff --git a/zjit/bindgen/src/main.rs b/zjit/bindgen/src/main.rs
index 4aff3193f0..cf328fc68c 100644
--- a/zjit/bindgen/src/main.rs
+++ b/zjit/bindgen/src/main.rs
@@ -108,7 +108,6 @@ fn main() {
// From shape.h
.allowlist_function("rb_obj_shape_id")
- .allowlist_function("rb_shape_lookup")
.allowlist_function("rb_shape_id_offset")
.allowlist_function("rb_shape_get_iv_index")
.allowlist_function("rb_shape_transition_add_ivar_no_warnings")
diff --git a/zjit/src/backend/arm64/mod.rs b/zjit/src/backend/arm64/mod.rs
index 85f242eccc..dd1eb52d34 100644
--- a/zjit/src/backend/arm64/mod.rs
+++ b/zjit/src/backend/arm64/mod.rs
@@ -211,11 +211,6 @@ impl Assembler
vec![X1_REG, X9_REG, X10_REG, X11_REG, X12_REG, X13_REG, X14_REG, X15_REG]
}
- /// Get the address that the current frame returns to
- pub fn return_addr_opnd() -> Opnd {
- Opnd::Reg(X30_REG)
- }
-
/// Split platform-specific instructions
/// The transformations done here are meant to make our lives simpler in later
/// stages of the compilation pipeline.
diff --git a/zjit/src/backend/lir.rs b/zjit/src/backend/lir.rs
index c0d73071ea..f46b35ded5 100644
--- a/zjit/src/backend/lir.rs
+++ b/zjit/src/backend/lir.rs
@@ -1,8 +1,8 @@
use std::collections::HashMap;
use std::fmt;
use std::mem::take;
-use crate::cruby::{Qundef, RUBY_OFFSET_CFP_PC, RUBY_OFFSET_CFP_SP, SIZEOF_VALUE_I32, VM_ENV_DATA_SIZE};
-use crate::state::ZJITState;
+use crate::codegen::local_size_and_idx_to_ep_offset;
+use crate::cruby::{Qundef, RUBY_OFFSET_CFP_PC, RUBY_OFFSET_CFP_SP, SIZEOF_VALUE_I32};
use crate::{cruby::VALUE};
use crate::backend::current::*;
use crate::virtualmem::CodePtr;
@@ -1797,7 +1797,7 @@ impl Assembler
asm_comment!(self, "write locals: {locals:?}");
for (idx, &opnd) in locals.iter().enumerate() {
let opnd = split_store_source(self, opnd);
- self.store(Opnd::mem(64, SP, (-(VM_ENV_DATA_SIZE as i32) - locals.len() as i32 + idx as i32) * SIZEOF_VALUE_I32), opnd);
+ self.store(Opnd::mem(64, SP, (-local_size_and_idx_to_ep_offset(locals.len(), idx) - 1) * SIZEOF_VALUE_I32), opnd);
}
asm_comment!(self, "save cfp->pc");
@@ -1809,10 +1809,6 @@ impl Assembler
let cfp_sp = Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP);
self.store(cfp_sp, Opnd::Reg(Assembler::SCRATCH_REG));
- asm_comment!(self, "rewind caller frames");
- self.mov(C_ARG_OPNDS[0], Assembler::return_addr_opnd());
- self.ccall(Self::rewind_caller_frames as *const u8, vec![]);
-
asm_comment!(self, "exit to the interpreter");
self.frame_teardown();
self.mov(C_RET_OPND, Opnd::UImm(Qundef.as_u64()));
@@ -1823,13 +1819,6 @@ impl Assembler
}
Some(())
}
-
- #[unsafe(no_mangle)]
- extern "C" fn rewind_caller_frames(addr: *const u8) {
- if ZJITState::is_iseq_return_addr(addr) {
- unimplemented!("Can't side-exit from JIT-JIT call: rewind_caller_frames is not implemented yet");
- }
- }
}
impl fmt::Debug for Assembler {
diff --git a/zjit/src/backend/x86_64/mod.rs b/zjit/src/backend/x86_64/mod.rs
index 2cc4fde3d8..d83fc184f9 100644
--- a/zjit/src/backend/x86_64/mod.rs
+++ b/zjit/src/backend/x86_64/mod.rs
@@ -109,11 +109,6 @@ impl Assembler
vec![RAX_REG, RCX_REG, RDX_REG, RSI_REG, RDI_REG, R8_REG, R9_REG, R10_REG, R11_REG]
}
- /// Get the address that the current frame returns to
- pub fn return_addr_opnd() -> Opnd {
- Opnd::mem(64, Opnd::Reg(RSP_REG), 0)
- }
-
// These are the callee-saved registers in the x86-64 SysV ABI
// RBX, RSP, RBP, and R12–R15
diff --git a/zjit/src/codegen.rs b/zjit/src/codegen.rs
index e32534b283..f274a64ca6 100644
--- a/zjit/src/codegen.rs
+++ b/zjit/src/codegen.rs
@@ -258,7 +258,7 @@ fn gen_insn(cb: &mut CodeBlock, jit: &mut JITState, asm: &mut Assembler, functio
Insn::IfTrue { val, target } => return gen_if_true(jit, asm, opnd!(val), target),
Insn::IfFalse { val, target } => return gen_if_false(jit, asm, opnd!(val), target),
Insn::SendWithoutBlock { call_info, cd, state, self_val, args, .. } => gen_send_without_block(jit, asm, call_info, *cd, &function.frame_state(*state), self_val, args)?,
- Insn::SendWithoutBlockDirect { iseq, self_val, args, .. } => gen_send_without_block_direct(cb, jit, asm, *iseq, opnd!(self_val), args)?,
+ Insn::SendWithoutBlockDirect { cme, iseq, self_val, args, state, .. } => gen_send_without_block_direct(cb, jit, asm, *cme, *iseq, opnd!(self_val), args, &function.frame_state(*state))?,
Insn::Return { val } => return Some(gen_return(asm, opnd!(val))?),
Insn::FixnumAdd { left, right, state } => gen_fixnum_add(jit, asm, opnd!(left), opnd!(right), &function.frame_state(*state))?,
Insn::FixnumSub { left, right, state } => gen_fixnum_sub(jit, asm, opnd!(left), opnd!(right), &function.frame_state(*state))?,
@@ -277,13 +277,16 @@ fn gen_insn(cb: &mut CodeBlock, jit: &mut JITState, asm: &mut Assembler, functio
Insn::GetIvar { self_val, id, state: _ } => gen_getivar(asm, opnd!(self_val), *id),
Insn::SetGlobal { id, val, state: _ } => gen_setglobal(asm, *id, opnd!(val)),
Insn::GetGlobal { id, state: _ } => gen_getglobal(asm, *id),
- Insn::SetIvar { self_val, id, val, state: _ } => gen_setivar(asm, opnd!(self_val), *id, opnd!(val)),
+ Insn::SetIvar { self_val, id, val, state: _ } => return gen_setivar(asm, opnd!(self_val), *id, opnd!(val)),
+ Insn::SideExit { state } => return gen_side_exit(jit, asm, &function.frame_state(*state)),
_ => {
debug!("ZJIT: gen_function: unexpected insn {:?}", insn);
return None;
}
};
+ assert!(insn.has_output(), "Cannot write LIR output of HIR instruction with no output");
+
// If the instruction has an output, remember it in jit.opnds
jit.opnds[insn_id.0] = Some(out_opnd);
@@ -311,12 +314,13 @@ fn gen_getivar(asm: &mut Assembler, recv: Opnd, id: ID) -> Opnd {
}
/// Emit an uncached instance variable store
-fn gen_setivar(asm: &mut Assembler, recv: Opnd, id: ID, val: Opnd) -> Opnd {
+fn gen_setivar(asm: &mut Assembler, recv: Opnd, id: ID, val: Opnd) -> Option<()> {
asm_comment!(asm, "call rb_ivar_set");
asm.ccall(
rb_ivar_set as *const u8,
vec![recv, Opnd::UImm(id.0), val],
- )
+ );
+ Some(())
}
/// Look up global variables
@@ -337,6 +341,12 @@ fn gen_setglobal(asm: &mut Assembler, id: ID, val: Opnd) -> Opnd {
)
}
+/// Side-exit into the interpreter
+fn gen_side_exit(jit: &mut JITState, asm: &mut Assembler, state: &FrameState) -> Option<()> {
+ asm.jmp(side_exit(jit, state)?);
+ Some(())
+}
+
/// Compile an interpreter entry block to be inserted into an ISEQ
fn gen_entry_prologue(asm: &mut Assembler, iseq: IseqPtr) {
asm_comment!(asm, "ZJIT entry point: {}", iseq_get_location(iseq, 0));
@@ -484,8 +494,16 @@ fn gen_send_without_block(
self_val: &InsnId,
args: &Vec<InsnId>,
) -> Option<lir::Opnd> {
- // Spill the receiver and the arguments onto the stack. They need to be marked by GC and may be caller-saved registers.
+ // Spill locals onto the stack.
+ // TODO: Don't spill locals eagerly; lazily reify frames
+ asm_comment!(asm, "spill locals");
+ for (idx, &insn_id) in state.locals().enumerate() {
+ asm.mov(Opnd::mem(64, SP, (-local_idx_to_ep_offset(jit.iseq, idx) - 1) * SIZEOF_VALUE_I32), jit.get_opnd(insn_id)?);
+ }
+ // Spill the receiver and the arguments onto the stack.
+ // They need to be on the interpreter stack to let the interpreter access them.
// TODO: Avoid spilling operands that have been spilled before.
+ asm_comment!(asm, "spill receiver and arguments");
for (idx, &insn_id) in [*self_val].iter().chain(args.iter()).enumerate() {
// Currently, we don't move the SP register. So it's equal to the base pointer.
let stack_opnd = Opnd::mem(64, SP, idx as i32 * SIZEOF_VALUE_I32);
@@ -515,10 +533,40 @@ fn gen_send_without_block_direct(
cb: &mut CodeBlock,
jit: &mut JITState,
asm: &mut Assembler,
+ cme: *const rb_callable_method_entry_t,
iseq: IseqPtr,
recv: Opnd,
args: &Vec<InsnId>,
+ state: &FrameState,
) -> Option<lir::Opnd> {
+ // Save cfp->pc and cfp->sp for the caller frame
+ gen_save_pc(asm, state);
+ gen_save_sp(asm, state.stack().len() - args.len() - 1); // -1 for receiver
+
+ // Spill the virtual stack and the locals of the caller onto the stack
+ // TODO: Lazily materialize caller frames on side exits or when needed
+ asm_comment!(asm, "spill locals and stack");
+ for (idx, &insn_id) in state.locals().enumerate() {
+ asm.mov(Opnd::mem(64, SP, (-local_idx_to_ep_offset(jit.iseq, idx) - 1) * SIZEOF_VALUE_I32), jit.get_opnd(insn_id)?);
+ }
+ for (idx, &insn_id) in state.stack().enumerate() {
+ asm.mov(Opnd::mem(64, SP, idx as i32 * SIZEOF_VALUE_I32), jit.get_opnd(insn_id)?);
+ }
+
+ // Set up the new frame
+ // TODO: Lazily materialize caller frames on side exits or when needed
+ gen_push_frame(asm, args.len(), state, ControlFrame {
+ recv,
+ iseq,
+ cme,
+ frame_type: VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL,
+ });
+
+ asm_comment!(asm, "switch to new SP register");
+ let local_size = unsafe { get_iseq_body_local_table_size(iseq) } as usize;
+ let new_sp = asm.add(SP, ((state.stack().len() + local_size - args.len() + VM_ENV_DATA_SIZE as usize) * SIZEOF_VALUE).into());
+ asm.mov(SP, new_sp);
+
asm_comment!(asm, "switch to new CFP");
let new_cfp = asm.sub(CFP, RUBY_SIZEOF_CONTROL_FRAME.into());
asm.mov(CFP, new_cfp);
@@ -537,7 +585,15 @@ fn gen_send_without_block_direct(
jit.branch_iseqs.push((branch.clone(), iseq));
// TODO(max): Add a PatchPoint here that can side-exit the function if the callee messed with
// the frame's locals
- Some(asm.ccall_with_branch(dummy_ptr, c_args, &branch))
+ let ret = asm.ccall_with_branch(dummy_ptr, c_args, &branch);
+
+ // If a callee side-exits, i.e. returns Qundef, propagate the return value to the caller.
+ // The caller will side-exit the callee into the interpreter.
+ // TODO: Let side exit code pop all JIT frames to optimize away this cmp + je.
+ asm.cmp(ret, Qundef.into());
+ asm.je(ZJITState::get_exit_trampoline().into());
+
+ Some(ret)
}
/// Compile an array duplication instruction
@@ -749,6 +805,45 @@ fn gen_save_sp(asm: &mut Assembler, stack_size: usize) {
asm.mov(cfp_sp, sp_addr);
}
+/// Frame metadata written by gen_push_frame()
+struct ControlFrame {
+ recv: Opnd,
+ iseq: IseqPtr,
+ cme: *const rb_callable_method_entry_t,
+ frame_type: u32,
+}
+
+/// Compile an interpreter frame
+fn gen_push_frame(asm: &mut Assembler, argc: usize, state: &FrameState, frame: ControlFrame) {
+ // Locals are written by the callee frame on side-exits or non-leaf calls
+
+ // See vm_push_frame() for details
+ asm_comment!(asm, "push cme, specval, frame type");
+ // ep[-2]: cref of cme
+ let local_size = unsafe { get_iseq_body_local_table_size(frame.iseq) } as i32;
+ let ep_offset = state.stack().len() as i32 + local_size - argc as i32 + VM_ENV_DATA_SIZE as i32 - 1;
+ asm.store(Opnd::mem(64, SP, (ep_offset - 2) * SIZEOF_VALUE_I32), VALUE::from(frame.cme).into());
+ // ep[-1]: block_handler or prev EP
+ // block_handler is not supported for now
+ asm.store(Opnd::mem(64, SP, (ep_offset - 1) * SIZEOF_VALUE_I32), VM_BLOCK_HANDLER_NONE.into());
+ // ep[0]: ENV_FLAGS
+ asm.store(Opnd::mem(64, SP, ep_offset * SIZEOF_VALUE_I32), frame.frame_type.into());
+
+ // Write to the callee CFP
+ fn cfp_opnd(offset: i32) -> Opnd {
+ Opnd::mem(64, CFP, offset - (RUBY_SIZEOF_CONTROL_FRAME as i32))
+ }
+
+ asm_comment!(asm, "push callee control frame");
+ // cfp_opnd(RUBY_OFFSET_CFP_PC): written by the callee frame on side-exits or non-leaf calls
+ // cfp_opnd(RUBY_OFFSET_CFP_SP): written by the callee frame on side-exits or non-leaf calls
+ asm.mov(cfp_opnd(RUBY_OFFSET_CFP_ISEQ), VALUE::from(frame.iseq).into());
+ asm.mov(cfp_opnd(RUBY_OFFSET_CFP_SELF), frame.recv);
+ let ep = asm.lea(Opnd::mem(64, SP, ep_offset * SIZEOF_VALUE_I32));
+ asm.mov(cfp_opnd(RUBY_OFFSET_CFP_EP), ep);
+ asm.mov(cfp_opnd(RUBY_OFFSET_CFP_BLOCK_CODE), 0.into());
+}
+
/// Return a register we use for the basic block argument at a given index
fn param_reg(idx: usize) -> Reg {
// To simplify the implementation, allocate a fixed register for each basic block argument for now.
@@ -764,10 +859,13 @@ fn param_reg(idx: usize) -> Reg {
/// Inverse of ep_offset_to_local_idx(). See ep_offset_to_local_idx() for details.
fn local_idx_to_ep_offset(iseq: IseqPtr, local_idx: usize) -> i32 {
- let local_table_size: i32 = unsafe { get_iseq_body_local_table_size(iseq) }
- .try_into()
- .unwrap();
- local_table_size - local_idx as i32 - 1 + VM_ENV_DATA_SIZE as i32
+ let local_size = unsafe { get_iseq_body_local_table_size(iseq) };
+ local_size_and_idx_to_ep_offset(local_size as usize, local_idx)
+}
+
+/// Convert the number of locals and a local index to an offset in the EP
+pub fn local_size_and_idx_to_ep_offset(local_size: usize, local_idx: usize) -> i32 {
+ local_size as i32 - local_idx as i32 - 1 + VM_ENV_DATA_SIZE as i32
}
/// Convert ISEQ into High-level IR
@@ -816,9 +914,8 @@ impl Assembler {
move |code_ptr, _| {
start_branch.start_addr.set(Some(code_ptr));
},
- move |code_ptr, cb| {
+ move |code_ptr, _| {
end_branch.end_addr.set(Some(code_ptr));
- ZJITState::add_iseq_return_addr(code_ptr.raw_ptr(cb));
},
)
}
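For reference, the offsets computed by gen_send_without_block_direct() and gen_push_frame() above can be checked with a small worked example. This sketch is illustrative only and assumes VM_ENV_DATA_SIZE is 3, as in CRuby:

// Standalone sketch, not part of the patch.
const VM_ENV_DATA_SIZE: i32 = 3;

// Slot offset above the caller's SP of the callee's EP, mirroring the
// ep_offset expression in gen_push_frame(): (stack_len - argc) slots below the
// arguments, plus the callee's local table, plus the env slots, EP being last.
fn callee_ep_offset(caller_stack_len: i32, callee_local_size: i32, argc: i32) -> i32 {
    caller_stack_len + callee_local_size - argc + VM_ENV_DATA_SIZE - 1
}

fn main() {
    // Caller stack holds the receiver plus one argument (len 2); the callee's
    // local table contains only that argument (local_size 1, argc 1).
    let ep = callee_ep_offset(2, 1, 1);
    assert_eq!(ep, 4); // cme is stored at slot ep-2, specval at ep-1, flags at ep
    // The new SP is bumped one slot past the EP, matching the asm.add() above.
    assert_eq!(ep + 1, 2 + 1 - 1 + VM_ENV_DATA_SIZE);
}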
diff --git a/zjit/src/cruby.rs b/zjit/src/cruby.rs
index d5be47e026..de1c86e8d6 100644
--- a/zjit/src/cruby.rs
+++ b/zjit/src/cruby.rs
@@ -485,18 +485,6 @@ impl VALUE {
unsafe { rb_obj_shape_id(self) }
}
- pub fn shape_of(self) -> *mut rb_shape {
- unsafe {
- let shape = rb_shape_lookup(self.shape_id_of());
-
- if shape.is_null() {
- panic!("Shape should not be null");
- } else {
- shape
- }
- }
- }
-
pub fn embedded_p(self) -> bool {
unsafe {
FL_TEST_RAW(self, VALUE(ROBJECT_EMBED as usize)) != VALUE(0)
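With VALUE::shape_of() and the raw rb_shape struct gone from the Rust side, shape queries stay at the shape_id level. A minimal sketch of what that looks like, using only bindings still declared in cruby_bindings.inc.rs and assuming they are re-exported through crate::cruby as elsewhere in the crate; the helper name find_ivar_index is hypothetical:

// Hypothetical helper, not part of the patch.
use crate::cruby::{VALUE, ID, attr_index_t, rb_shape_get_iv_index};

// Look up the field index for an ivar by shape_id alone, without ever
// materializing an rb_shape pointer on the Rust side.
fn find_ivar_index(obj: VALUE, id: ID) -> Option<attr_index_t> {
    let shape_id = obj.shape_id_of(); // still backed by rb_obj_shape_id()
    let mut index: attr_index_t = 0;
    if unsafe { rb_shape_get_iv_index(shape_id, id, &mut index) } {
        Some(index)
    } else {
        None
    }
}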
diff --git a/zjit/src/cruby_bindings.inc.rs b/zjit/src/cruby_bindings.inc.rs
index 34f6ded80d..5fb5c2ec02 100644
--- a/zjit/src/cruby_bindings.inc.rs
+++ b/zjit/src/cruby_bindings.inc.rs
@@ -101,10 +101,11 @@ pub const RUBY_FL_PROMOTED: ruby_fl_type = 32;
pub const RUBY_FL_UNUSED6: ruby_fl_type = 64;
pub const RUBY_FL_FINALIZE: ruby_fl_type = 128;
pub const RUBY_FL_TAINT: ruby_fl_type = 0;
+pub const RUBY_FL_EXIVAR: ruby_fl_type = 0;
pub const RUBY_FL_SHAREABLE: ruby_fl_type = 256;
pub const RUBY_FL_UNTRUSTED: ruby_fl_type = 0;
pub const RUBY_FL_UNUSED9: ruby_fl_type = 512;
-pub const RUBY_FL_EXIVAR: ruby_fl_type = 1024;
+pub const RUBY_FL_UNUSED10: ruby_fl_type = 1024;
pub const RUBY_FL_FREEZE: ruby_fl_type = 2048;
pub const RUBY_FL_USER0: ruby_fl_type = 4096;
pub const RUBY_FL_USER1: ruby_fl_type = 8192;
@@ -396,26 +397,6 @@ pub const VM_ENV_FLAG_ISOLATED: vm_frame_env_flags = 16;
pub type vm_frame_env_flags = u32;
pub type attr_index_t = u16;
pub type shape_id_t = u32;
-pub type redblack_id_t = u32;
-pub type redblack_node_t = redblack_node;
-#[repr(C)]
-pub struct rb_shape {
- pub edges: VALUE,
- pub edge_name: ID,
- pub ancestor_index: *mut redblack_node_t,
- pub parent_id: shape_id_t,
- pub next_field_index: attr_index_t,
- pub capacity: attr_index_t,
- pub type_: u8,
-}
-pub type rb_shape_t = rb_shape;
-#[repr(C)]
-pub struct redblack_node {
- pub key: ID,
- pub value: *mut rb_shape_t,
- pub l: redblack_id_t,
- pub r: redblack_id_t,
-}
#[repr(C)]
pub struct rb_cvar_class_tbl_entry {
pub index: u32,
@@ -866,7 +847,6 @@ unsafe extern "C" {
pub fn rb_obj_info(obj: VALUE) -> *const ::std::os::raw::c_char;
pub fn rb_ec_stack_check(ec: *mut rb_execution_context_struct) -> ::std::os::raw::c_int;
pub fn rb_shape_id_offset() -> i32;
- pub fn rb_shape_lookup(shape_id: shape_id_t) -> *mut rb_shape_t;
pub fn rb_obj_shape_id(obj: VALUE) -> shape_id_t;
pub fn rb_shape_get_iv_index(shape_id: shape_id_t, id: ID, value: *mut attr_index_t) -> bool;
pub fn rb_shape_transition_add_ivar_no_warnings(obj: VALUE, id: ID) -> shape_id_t;
diff --git a/zjit/src/hir.rs b/zjit/src/hir.rs
index 47b961badf..45a9024ca9 100644
--- a/zjit/src/hir.rs
+++ b/zjit/src/hir.rs
@@ -426,7 +426,15 @@ pub enum Insn {
/// Ignoring keyword arguments etc for now
SendWithoutBlock { self_val: InsnId, call_info: CallInfo, cd: *const rb_call_data, args: Vec<InsnId>, state: InsnId },
Send { self_val: InsnId, call_info: CallInfo, cd: *const rb_call_data, blockiseq: IseqPtr, args: Vec<InsnId>, state: InsnId },
- SendWithoutBlockDirect { self_val: InsnId, call_info: CallInfo, cd: *const rb_call_data, iseq: IseqPtr, args: Vec<InsnId>, state: InsnId },
+ SendWithoutBlockDirect {
+ self_val: InsnId,
+ call_info: CallInfo,
+ cd: *const rb_call_data,
+ cme: *const rb_callable_method_entry_t,
+ iseq: IseqPtr,
+ args: Vec<InsnId>,
+ state: InsnId,
+ },
/// Control flow instructions
Return { val: InsnId },
@@ -957,10 +965,11 @@ impl Function {
args: args.iter().map(|arg| find!(*arg)).collect(),
state: *state,
},
- SendWithoutBlockDirect { self_val, call_info, cd, iseq, args, state } => SendWithoutBlockDirect {
+ SendWithoutBlockDirect { self_val, call_info, cd, cme, iseq, args, state } => SendWithoutBlockDirect {
self_val: find!(*self_val),
call_info: call_info.clone(),
cd: *cd,
+ cme: *cme,
iseq: *iseq,
args: args.iter().map(|arg| find!(*arg)).collect(),
state: *state,
@@ -1261,7 +1270,7 @@ impl Function {
if let Some(expected) = guard_equal_to {
self_val = self.push_insn(block, Insn::GuardBitEquals { val: self_val, expected, state });
}
- let send_direct = self.push_insn(block, Insn::SendWithoutBlockDirect { self_val, call_info, cd, iseq, args, state });
+ let send_direct = self.push_insn(block, Insn::SendWithoutBlockDirect { self_val, call_info, cd, cme, iseq, args, state });
self.make_equal_to(insn_id, send_direct);
}
Insn::GetConstantPath { ic } => {
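The new cme field has to be carried through every place a SendWithoutBlockDirect is built or rewritten (the union-find rewrite above is one such place), because codegen now writes it into the callee frame at ep[-2]. A condensed sketch of constructing the specialized variant, with block, cd, call_info, cme, iseq, args, and state assumed to be in scope as in the optimizer above:

// Condensed sketch of the specialization step shown above.
// Guard the receiver, then rewrite the call to the direct form carrying the
// resolved method entry alongside the target iseq.
let self_val = self.push_insn(block, Insn::GuardBitEquals { val: self_val, expected, state });
let send_direct = self.push_insn(block, Insn::SendWithoutBlockDirect {
    self_val, call_info, cd, cme, iseq, args, state,
});
self.make_equal_to(insn_id, send_direct);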
diff --git a/zjit/src/state.rs b/zjit/src/state.rs
index e8c389a5f8..acaac850c3 100644
--- a/zjit/src/state.rs
+++ b/zjit/src/state.rs
@@ -1,10 +1,10 @@
-use std::collections::HashSet;
-
use crate::cruby::{self, rb_bug_panic_hook, EcPtr, Qnil, VALUE};
use crate::cruby_methods;
use crate::invariants::Invariants;
use crate::options::Options;
use crate::asm::CodeBlock;
+use crate::backend::lir::{Assembler, C_RET_OPND};
+use crate::virtualmem::CodePtr;
#[allow(non_upper_case_globals)]
#[unsafe(no_mangle)]
@@ -32,8 +32,8 @@ pub struct ZJITState {
/// Properties of core library methods
method_annotations: cruby_methods::Annotations,
- /// The address of the instruction that JIT-to-JIT calls return to
- iseq_return_addrs: HashSet<*const u8>,
+ /// Trampoline to propagate a callee's side exit to the caller
+ exit_trampoline: Option<CodePtr>,
}
/// Private singleton instance of the codegen globals
@@ -88,9 +88,14 @@ impl ZJITState {
invariants: Invariants::default(),
assert_compiles: false,
method_annotations: cruby_methods::init(),
- iseq_return_addrs: HashSet::new(),
+ exit_trampoline: None,
};
unsafe { ZJIT_STATE = Some(zjit_state); }
+
+ // Generate trampolines after initializing ZJITState, which Assembler will use
+ let cb = ZJITState::get_code_block();
+ let exit_trampoline = Self::gen_exit_trampoline(cb).unwrap();
+ ZJITState::get_instance().exit_trampoline = Some(exit_trampoline);
}
/// Return true if zjit_state has been initialized
@@ -133,14 +138,17 @@ impl ZJITState {
instance.assert_compiles = true;
}
- /// Record an address that a JIT-to-JIT call returns to
- pub fn add_iseq_return_addr(addr: *const u8) {
- ZJITState::get_instance().iseq_return_addrs.insert(addr);
+ /// Generate a trampoline to propagate a callee's side exit to the caller
+ fn gen_exit_trampoline(cb: &mut CodeBlock) -> Option<CodePtr> {
+ let mut asm = Assembler::new();
+ asm.frame_teardown();
+ asm.cret(C_RET_OPND);
+ asm.compile(cb).map(|(start_ptr, _)| start_ptr)
}
- /// Returns true if a JIT-to-JIT call returns to a given address
- pub fn is_iseq_return_addr(addr: *const u8) -> bool {
- ZJITState::get_instance().iseq_return_addrs.contains(&addr)
+ /// Get the trampoline to propagate a callee's side exit to the caller
+ pub fn get_exit_trampoline() -> CodePtr {
+ ZJITState::get_instance().exit_trampoline.unwrap()
}
}
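Taken together with the codegen changes above, this trampoline replaces the old rewind_caller_frames bookkeeping: a callee that side-exits returns Qundef, each JIT caller compares the return value against Qundef and jumps to the trampoline, and the trampoline tears that caller's frame down and returns Qundef again, so JIT frames unwind one at a time and the interpreter resumes from the PC, SP, and locals spilled eagerly before the call. A hypothetical helper (not in the patch) bundling the caller-side check from gen_send_without_block_direct(), assuming the same imports as zjit/src/codegen.rs:

// Emit "if the callee returned Qundef, propagate the side exit to our caller too".
fn gen_propagate_side_exit(asm: &mut Assembler, ret: Opnd) {
    asm.cmp(ret, Qundef.into());
    asm.je(ZJITState::get_exit_trampoline().into());
}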