146 files changed, 2332 insertions, 1204 deletions
@@ -185,8 +185,8 @@ define rp print (struct RBasic *)($arg0) else if ($flags & RUBY_T_MASK) == RUBY_T_DATA - if ((struct RTypedData *)($arg0))->typed_flag == 1 - printf "%sT_DATA%s(%s): ", $color_type, $color_end, ((struct RTypedData *)($arg0))->type->wrap_struct_name + if ((struct RTypedData *)($arg0))->type & 1 + printf "%sT_DATA%s(%s): ", $color_type, $color_end, ((const rb_data_type_t *)(((struct RTypedData *)($arg0))->type & ~1))->wrap_struct_name print (struct RTypedData *)($arg0) else printf "%sT_DATA%s: ", $color_type, $color_end diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 6c8f09660d..e0719118b4 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -71,7 +71,7 @@ jobs: bundler: none windows-toolchain: none - - name: Install libraries with scoop + - name: Install tools with scoop run: | Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser iwr -useb get.scoop.sh | iex @@ -123,7 +123,7 @@ jobs: - name: Restore vcpkg artifact uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 with: - path: C:\Users\runneradmin\AppData\Local\vcpkg\archives + path: ${{ github.workspace }}/src/vcpkg_installed key: windows-${{ matrix.os }}-vcpkg-${{ hashFiles('src/vcpkg.json') }} - name: Install libraries with vcpkg @@ -14,6 +14,26 @@ Note that each entry is kept to a minimum, see links for details. Note: We're only listing outstanding class updates. +* Kernel + + * `Kernel#inspect` now checks for the existence of a `#instance_variables_to_inspect` method, + allowing control over which instance variables are displayed in the `#inspect` string: + + ```ruby + class DatabaseConfig + def initialize(host, user, password) + @host = host + @user = user + @password = password + end + + private def instance_variables_to_inspect = [:@host, :@user] + end + + conf = DatabaseConfig.new("localhost", "root", "hunter2") + conf.inspect #=> #<DatabaseConfig:0x0000000104def350 @host="localhost", @user="root"> + ``` + * Binding * `Binding#local_variables` does no longer include numbered parameters. 
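As a quick illustration of the `Binding#local_variables` note above, here is a minimal sketch (the method name `block_locals` is made up, and the shown output follows from the NEWS entry rather than from this changeset itself):

```ruby
def block_locals
  # Use the numbered parameter so it exists in the block's scope, then ask
  # the block's binding which locals it can see.
  [1].map { _1; binding.local_variables }.first
end

p block_locals #=> [] (numbered parameters such as :_1 are no longer listed)
```

Ordinary local variables are still reported as before; only the implicit numbered parameters disappear from the list.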
diff --git a/benchmark/nilclass.yml b/benchmark/nilclass.yml index da66e71068..66234c4cdf 100644 --- a/benchmark/nilclass.yml +++ b/benchmark/nilclass.yml @@ -1,10 +1,16 @@ prelude: | def a = nil benchmark: + rationalize: + nil.rationalize + to_c: | + nil.to_c to_i: | nil.to_i to_f: | nil.to_f + to_r: | + nil.to_r splat: | a(*nil) loop_count: 100000 @@ -297,16 +297,8 @@ rb_class_duplicate_classext(rb_classext_t *orig, VALUE klass, const rb_namespace RCLASSEXT_M_TBL(ext) = duplicate_classext_m_tbl(RCLASSEXT_M_TBL(orig), klass, dup_iclass); - // TODO: consider shapes for performance - if (RCLASSEXT_FIELDS(orig)) { - RUBY_ASSERT(!RB_TYPE_P(klass, T_ICLASS)); - RCLASSEXT_FIELDS(ext) = (VALUE *)st_copy((st_table *)RCLASSEXT_FIELDS(orig)); - rb_autoload_copy_table_for_namespace((st_table *)RCLASSEXT_FIELDS(ext), ns); - } - else { - if (!RB_TYPE_P(klass, T_ICLASS)) { - RCLASSEXT_FIELDS(ext) = (VALUE *)st_init_numtable(); - } + if (orig->fields_obj) { + RB_OBJ_WRITE(klass, &ext->fields_obj, rb_imemo_class_fields_clone(orig->fields_obj)); } if (RCLASSEXT_SHARED_CONST_TBL(orig)) { @@ -1562,18 +1562,6 @@ extract-gems$(sequential): PHONY extract-gems$(sequential): $(HAVE_GIT:yes=clone-bundled-gems-src) -clone-bundled-gems-src: PHONY - $(Q) $(BASERUBY) -C "$(srcdir)" \ - -Itool/lib -rbundled_gem -answ \ - -e 'BEGIN {git = $$git}' \ - -e 'gem, _, repo, rev = *$$F' \ - -e 'next if !rev or /^#/=~gem' \ - -e 'gemdir = "gems/src/#{gem}"' \ - -e 'BundledGem.checkout(gemdir, repo, rev, git: git)' \ - -e 'BundledGem.dummy_gemspec("#{gemdir}/#{gem}.gemspec")' \ - -- -git="$(GIT)" \ - gems/bundled_gems - outdate-bundled-gems: PHONY $(Q) $(BASERUBY) $(tooldir)/[email protected] --make="$(MAKE)" --mflags="$(MFLAGS)" \ --ruby-platform=$(arch) --ruby-version=$(ruby_version) \ @@ -1623,7 +1611,8 @@ yes-install-for-test-bundled-gems: yes-update-default-gemspecs "sinatra" "rack" "tilt" "mustermann" "base64" "compact_index" "rack-test" "logger" "kpeg" "tracer" test-bundled-gems-fetch: yes-test-bundled-gems-fetch -yes-test-bundled-gems-fetch: +yes-test-bundled-gems-fetch: clone-bundled-gems-src +clone-bundled-gems-src: PHONY $(Q) $(BASERUBY) -C $(srcdir)/gems ../tool/fetch-bundled_gems.rb BUNDLED_GEMS="$(BUNDLED_GEMS)" src bundled_gems no-test-bundled-gems-fetch: @@ -1696,8 +1685,7 @@ test-bundler: $(TEST_RUNNABLE)-test-bundler yes-test-bundler: $(PREPARE_BUNDLER) $(gnumake_recursive)$(XRUBY) \ -r./$(arch)-fake \ - -e "exec(*ARGV)" -- \ - $(XRUBY) -C $(srcdir) -Ispec/bundler -Ispec/lib .bundle/bin/rspec \ + -C $(srcdir) -Ispec/bundler -Ispec/lib spec/bin/rspec \ -r spec_helper $(RSPECOPTS) spec/bundler/$(BUNDLER_SPECS) no-test-bundler: @@ -8129,6 +8117,7 @@ imemo.$(OBJEXT): $(top_srcdir)/internal/namespace.h imemo.$(OBJEXT): $(top_srcdir)/internal/sanitizers.h imemo.$(OBJEXT): $(top_srcdir)/internal/serial.h imemo.$(OBJEXT): $(top_srcdir)/internal/set_table.h +imemo.$(OBJEXT): $(top_srcdir)/internal/st.h imemo.$(OBJEXT): $(top_srcdir)/internal/static_assert.h imemo.$(OBJEXT): $(top_srcdir)/internal/variable.h imemo.$(OBJEXT): $(top_srcdir)/internal/vm.h @@ -15130,6 +15119,8 @@ re.$(OBJEXT): {$(VPATH)}missing.h re.$(OBJEXT): {$(VPATH)}node.h re.$(OBJEXT): {$(VPATH)}onigmo.h re.$(OBJEXT): {$(VPATH)}oniguruma.h +re.$(OBJEXT): {$(VPATH)}ractor.h +re.$(OBJEXT): {$(VPATH)}ractor_core.h re.$(OBJEXT): {$(VPATH)}re.c re.$(OBJEXT): {$(VPATH)}re.h re.$(OBJEXT): {$(VPATH)}regenc.h @@ -15145,6 +15136,7 @@ re.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h re.$(OBJEXT): {$(VPATH)}thread_native.h re.$(OBJEXT): {$(VPATH)}util.h 
re.$(OBJEXT): {$(VPATH)}vm_core.h +re.$(OBJEXT): {$(VPATH)}vm_debug.h re.$(OBJEXT): {$(VPATH)}vm_opts.h regcomp.$(OBJEXT): $(hdrdir)/ruby.h regcomp.$(OBJEXT): $(hdrdir)/ruby/ruby.h @@ -1927,21 +1927,6 @@ nucomp_to_c(VALUE self) /* * call-seq: - * to_c -> (0+0i) - * - * Returns zero as a Complex: - * - * nil.to_c # => (0+0i) - * - */ -static VALUE -nilclass_to_c(VALUE self) -{ - return rb_complex_new1(INT2FIX(0)); -} - -/* - * call-seq: * to_c -> complex * * Returns +self+ as a Complex object. @@ -2693,7 +2678,6 @@ Init_Complex(void) rb_define_method(rb_cComplex, "to_r", nucomp_to_r, 0); rb_define_method(rb_cComplex, "rationalize", nucomp_rationalize, -1); rb_define_method(rb_cComplex, "to_c", nucomp_to_c, 0); - rb_define_method(rb_cNilClass, "to_c", nilclass_to_c, 0); rb_define_method(rb_cNumeric, "to_c", numeric_to_c, 0); rb_define_method(rb_cString, "to_c", string_to_c, 0); diff --git a/debug_counter.h b/debug_counter.h index c4ee26534f..3142ada0c3 100644 --- a/debug_counter.h +++ b/debug_counter.h @@ -315,6 +315,7 @@ RB_DEBUG_COUNTER(obj_imemo_parser_strterm) RB_DEBUG_COUNTER(obj_imemo_callinfo) RB_DEBUG_COUNTER(obj_imemo_callcache) RB_DEBUG_COUNTER(obj_imemo_constcache) +RB_DEBUG_COUNTER(obj_imemo_class_fields) RB_DEBUG_COUNTER(opt_new_hit) RB_DEBUG_COUNTER(opt_new_miss) diff --git a/defs/gmake.mk b/defs/gmake.mk index 87fc8021b2..a81d82eadd 100644 --- a/defs/gmake.mk +++ b/defs/gmake.mk @@ -365,7 +365,7 @@ $(srcdir)/.bundle/.timestamp: define build-gem $(srcdir)/gems/src/$(1)/.git: | $(srcdir)/gems/src $(ECHO) Cloning $(4) - $(Q) $(GIT) clone $(4) $$(@D) + $(Q) $(GIT) clone --depth=1 --no-tags $(4) $$(@D) $(bundled-gem-revision): \ $(if $(if $(wildcard $$(@)),$(filter $(3),$(shell cat $$(@)))),,PHONY) \ diff --git a/doc/globals.rdoc b/doc/globals.rdoc index 9d9fc57e6e..9466005be7 100644 --- a/doc/globals.rdoc +++ b/doc/globals.rdoc @@ -137,7 +137,7 @@ English - <tt>$DEFAULT_INPUT</tt>. An output stream, initially <tt>$stdout</tt>. 
-English - <tt>$DEFAULT_OUTPUT +English - <tt>$DEFAULT_OUTPUT</tt> === <tt>$.</tt> (Input Position) diff --git a/enumerator.c b/enumerator.c index faaa77cb49..b91b2eb940 100644 --- a/enumerator.c +++ b/enumerator.c @@ -162,10 +162,9 @@ */ VALUE rb_cEnumerator; static VALUE rb_cLazy; -static ID id_rewind, id_new, id_to_enum, id_each_entry; +static ID id_rewind, id_to_enum, id_each_entry; static ID id_next, id_result, id_receiver, id_arguments, id_memo, id_method, id_force; -static ID id_begin, id_end, id_step, id_exclude_end; -static VALUE sym_each, sym_cycle, sym_yield; +static VALUE sym_each, sym_yield; static VALUE lazy_use_super_method; @@ -3748,6 +3747,55 @@ enumerator_s_product(int argc, VALUE *argv, VALUE klass) return obj; } +struct arith_seq { + struct enumerator enumerator; + VALUE begin; + VALUE end; + VALUE step; + bool exclude_end; +}; + +RUBY_REFERENCES(arith_seq_refs) = { + RUBY_REF_EDGE(struct enumerator, obj), + RUBY_REF_EDGE(struct enumerator, args), + RUBY_REF_EDGE(struct enumerator, fib), + RUBY_REF_EDGE(struct enumerator, dst), + RUBY_REF_EDGE(struct enumerator, lookahead), + RUBY_REF_EDGE(struct enumerator, feedvalue), + RUBY_REF_EDGE(struct enumerator, stop_exc), + RUBY_REF_EDGE(struct enumerator, size), + RUBY_REF_EDGE(struct enumerator, procs), + + RUBY_REF_EDGE(struct arith_seq, begin), + RUBY_REF_EDGE(struct arith_seq, end), + RUBY_REF_EDGE(struct arith_seq, step), + RUBY_REF_END +}; + +static const rb_data_type_t arith_seq_data_type = { + "arithmetic_sequence", + { + RUBY_REFS_LIST_PTR(arith_seq_refs), + RUBY_TYPED_DEFAULT_FREE, + NULL, // Nothing allocated externally, so don't need a memsize function + NULL, + }, + .parent = &enumerator_data_type, + .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_DECL_MARKING | RUBY_TYPED_EMBEDDABLE +}; + +static VALUE +arith_seq_allocate(VALUE klass) +{ + struct arith_seq *ptr; + VALUE enum_obj; + + enum_obj = TypedData_Make_Struct(klass, struct arith_seq, &arith_seq_data_type, ptr); + ptr->enumerator.obj = Qundef; + + return enum_obj; +} + /* * Document-class: Enumerator::ArithmeticSequence * @@ -3765,12 +3813,16 @@ rb_arith_seq_new(VALUE obj, VALUE meth, int argc, VALUE const *argv, rb_enumerator_size_func *size_fn, VALUE beg, VALUE end, VALUE step, int excl) { - VALUE aseq = enumerator_init(enumerator_allocate(rb_cArithSeq), + VALUE aseq = enumerator_init(arith_seq_allocate(rb_cArithSeq), obj, meth, argc, argv, size_fn, Qnil, rb_keyword_given_p()); - rb_ivar_set(aseq, id_begin, beg); - rb_ivar_set(aseq, id_end, end); - rb_ivar_set(aseq, id_step, step); - rb_ivar_set(aseq, id_exclude_end, RBOOL(excl)); + struct arith_seq *ptr; + TypedData_Get_Struct(aseq, struct arith_seq, &enumerator_data_type, ptr); + + RB_OBJ_WRITE(aseq, &ptr->begin, beg); + RB_OBJ_WRITE(aseq, &ptr->end, end); + RB_OBJ_WRITE(aseq, &ptr->step, step); + ptr->exclude_end = excl; + return aseq; } @@ -3783,7 +3835,9 @@ rb_arith_seq_new(VALUE obj, VALUE meth, int argc, VALUE const *argv, static inline VALUE arith_seq_begin(VALUE self) { - return rb_ivar_get(self, id_begin); + struct arith_seq *ptr; + TypedData_Get_Struct(self, struct arith_seq, &enumerator_data_type, ptr); + return ptr->begin; } /* @@ -3794,7 +3848,9 @@ arith_seq_begin(VALUE self) static inline VALUE arith_seq_end(VALUE self) { - return rb_ivar_get(self, id_end); + struct arith_seq *ptr; + TypedData_Get_Struct(self, struct arith_seq, &enumerator_data_type, ptr); + return ptr->end; } /* @@ -3806,7 +3862,9 @@ arith_seq_end(VALUE self) static inline VALUE 
arith_seq_step(VALUE self) { - return rb_ivar_get(self, id_step); + struct arith_seq *ptr; + TypedData_Get_Struct(self, struct arith_seq, &enumerator_data_type, ptr); + return ptr->step; } /* @@ -3817,13 +3875,17 @@ arith_seq_step(VALUE self) static inline VALUE arith_seq_exclude_end(VALUE self) { - return rb_ivar_get(self, id_exclude_end); + struct arith_seq *ptr; + TypedData_Get_Struct(self, struct arith_seq, &enumerator_data_type, ptr); + return RBOOL(ptr->exclude_end); } static inline int arith_seq_exclude_end_p(VALUE self) { - return RTEST(arith_seq_exclude_end(self)); + struct arith_seq *ptr; + TypedData_Get_Struct(self, struct arith_seq, &enumerator_data_type, ptr); + return ptr->exclude_end; } int @@ -4664,7 +4726,6 @@ void Init_Enumerator(void) { id_rewind = rb_intern_const("rewind"); - id_new = rb_intern_const("new"); id_next = rb_intern_const("next"); id_result = rb_intern_const("result"); id_receiver = rb_intern_const("receiver"); @@ -4674,12 +4735,7 @@ Init_Enumerator(void) id_force = rb_intern_const("force"); id_to_enum = rb_intern_const("to_enum"); id_each_entry = rb_intern_const("each_entry"); - id_begin = rb_intern_const("begin"); - id_end = rb_intern_const("end"); - id_step = rb_intern_const("step"); - id_exclude_end = rb_intern_const("exclude_end"); sym_each = ID2SYM(id_each); - sym_cycle = ID2SYM(rb_intern_const("cycle")); sym_yield = ID2SYM(rb_intern_const("yield")); InitVM(Enumerator); diff --git a/ext/date/zonetab.h b/ext/date/zonetab.h index 4682c2cdbc..2a2e8910c9 100644 --- a/ext/date/zonetab.h +++ b/ext/date/zonetab.h @@ -1,4 +1,4 @@ -/* ANSI-C code produced by gperf version 3.3 */ +/* ANSI-C code produced by gperf version 3.1 */ /* Command-line: gperf --ignore-case -L ANSI-C -C -c -P -p -j1 -i 1 -g -o -t -N zonetab zonetab.list */ /* Computed positions: -k'1-4,9' */ @@ -51,7 +51,7 @@ struct zone; #ifndef GPERF_DOWNCASE #define GPERF_DOWNCASE 1 -static const unsigned char gperf_downcase[256] = +static unsigned char gperf_downcase[256] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, @@ -144,11 +144,6 @@ hash (register const char *str, register size_t len) { default: hval += asso_values[(unsigned char)str[8]]; -#if (defined __cplusplus && (__cplusplus >= 201703L || (__cplusplus >= 201103L && defined __clang__ && __clang_major__ + (__clang_minor__ >= 9) > 3))) || (defined __STDC_VERSION__ && __STDC_VERSION__ >= 202000L && ((defined __GNUC__ && __GNUC__ >= 10) || (defined __clang__ && __clang_major__ >= 9))) - [[fallthrough]]; -#elif (defined __GNUC__ && __GNUC__ >= 7) || (defined __clang__ && __clang_major__ >= 10) - __attribute__ ((__fallthrough__)); -#endif /*FALLTHROUGH*/ case 8: case 7: @@ -156,27 +151,12 @@ hash (register const char *str, register size_t len) case 5: case 4: hval += asso_values[(unsigned char)str[3]]; -#if (defined __cplusplus && (__cplusplus >= 201703L || (__cplusplus >= 201103L && defined __clang__ && __clang_major__ + (__clang_minor__ >= 9) > 3))) || (defined __STDC_VERSION__ && __STDC_VERSION__ >= 202000L && ((defined __GNUC__ && __GNUC__ >= 10) || (defined __clang__ && __clang_major__ >= 9))) - [[fallthrough]]; -#elif (defined __GNUC__ && __GNUC__ >= 7) || (defined __clang__ && __clang_major__ >= 10) - __attribute__ ((__fallthrough__)); -#endif /*FALLTHROUGH*/ case 3: hval += asso_values[(unsigned char)str[2]]; -#if (defined __cplusplus && (__cplusplus >= 201703L || (__cplusplus >= 201103L && defined __clang__ && __clang_major__ + (__clang_minor__ >= 9) > 3))) || (defined 
__STDC_VERSION__ && __STDC_VERSION__ >= 202000L && ((defined __GNUC__ && __GNUC__ >= 10) || (defined __clang__ && __clang_major__ >= 9))) - [[fallthrough]]; -#elif (defined __GNUC__ && __GNUC__ >= 7) || (defined __clang__ && __clang_major__ >= 10) - __attribute__ ((__fallthrough__)); -#endif /*FALLTHROUGH*/ case 2: hval += asso_values[(unsigned char)str[1]+6]; -#if (defined __cplusplus && (__cplusplus >= 201703L || (__cplusplus >= 201103L && defined __clang__ && __clang_major__ + (__clang_minor__ >= 9) > 3))) || (defined __STDC_VERSION__ && __STDC_VERSION__ >= 202000L && ((defined __GNUC__ && __GNUC__ >= 10) || (defined __clang__ && __clang_major__ >= 9))) - [[fallthrough]]; -#elif (defined __GNUC__ && __GNUC__ >= 7) || (defined __clang__ && __clang_major__ >= 10) - __attribute__ ((__fallthrough__)); -#endif /*FALLTHROUGH*/ case 1: hval += asso_values[(unsigned char)str[0]+52]; @@ -827,10 +807,6 @@ static const struct stringpool_t stringpool_contents = const struct zone * zonetab (register const char *str, register size_t len) { -#if (defined __GNUC__ && __GNUC__ + (__GNUC_MINOR__ >= 6) > 4) || (defined __clang__ && __clang_major__ >= 3) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wmissing-field-initializers" -#endif static const struct zone wordlist[] = { {-1}, {-1}, @@ -1565,9 +1541,6 @@ zonetab (register const char *str, register size_t len) #line 141 "zonetab.list" {(int)(size_t)&((struct stringpool_t *)0)->stringpool_str619, -10800} }; -#if (defined __GNUC__ && __GNUC__ + (__GNUC_MINOR__ >= 6) > 4) || (defined __clang__ && __clang_major__ >= 3) -#pragma GCC diagnostic pop -#endif if (len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH) { @@ -1585,7 +1558,7 @@ zonetab (register const char *str, register size_t len) } } } - return (struct zone *) 0; + return 0; } #line 330 "zonetab.list" diff --git a/ext/objspace/objspace.c b/ext/objspace/objspace.c index acd4a6864d..754c998ac6 100644 --- a/ext/objspace/objspace.c +++ b/ext/objspace/objspace.c @@ -504,6 +504,7 @@ count_imemo_objects(int argc, VALUE *argv, VALUE self) INIT_IMEMO_TYPE_ID(imemo_callinfo); INIT_IMEMO_TYPE_ID(imemo_callcache); INIT_IMEMO_TYPE_ID(imemo_constcache); + INIT_IMEMO_TYPE_ID(imemo_class_fields); #undef INIT_IMEMO_TYPE_ID } diff --git a/ext/objspace/objspace_dump.c b/ext/objspace/objspace_dump.c index ac8bafaea9..83b434c3a1 100644 --- a/ext/objspace/objspace_dump.c +++ b/ext/objspace/objspace_dump.c @@ -792,22 +792,21 @@ shape_id_i(shape_id_t shape_id, void *data) return; } - rb_shape_t *shape = RSHAPE(shape_id); dump_append(dc, "{\"address\":"); - dump_append_ref(dc, (VALUE)shape); + dump_append_ref(dc, (VALUE)RSHAPE(shape_id)); dump_append(dc, ", \"type\":\"SHAPE\", \"id\":"); dump_append_sizet(dc, shape_id); - if (shape->type != SHAPE_ROOT) { + if (RSHAPE_TYPE(shape_id) != SHAPE_ROOT) { dump_append(dc, ", \"parent_id\":"); - dump_append_lu(dc, shape->parent_id); + dump_append_lu(dc, RSHAPE_PARENT(shape_id)); } dump_append(dc, ", \"depth\":"); dump_append_sizet(dc, rb_shape_depth(shape_id)); - switch((enum shape_type)shape->type) { + switch (RSHAPE_TYPE(shape_id)) { case SHAPE_ROOT: dump_append(dc, ", \"shape_type\":\"ROOT\""); break; @@ -815,7 +814,7 @@ shape_id_i(shape_id_t shape_id, void *data) dump_append(dc, ", \"shape_type\":\"IVAR\""); dump_append(dc, ",\"edge_name\":"); - dump_append_id(dc, shape->edge_name); + dump_append_id(dc, RSHAPE_EDGE_NAME(shape_id)); break; case SHAPE_OBJ_ID: diff --git a/ext/strscan/extconf.rb b/ext/strscan/extconf.rb index bd65606a4e..abcbdb3ad2 100644 --- 
a/ext/strscan/extconf.rb +++ b/ext/strscan/extconf.rb @@ -2,8 +2,8 @@ require 'mkmf' if RUBY_ENGINE == 'ruby' $INCFLAGS << " -I$(top_srcdir)" if $extmk - have_func("onig_region_memsize", "ruby.h") - have_func("rb_reg_onig_match", "ruby.h") + have_func("onig_region_memsize") + have_func("rb_reg_onig_match", "ruby/re.h") create_makefile 'strscan' else File.write('Makefile', dummy_makefile("").join) @@ -543,7 +543,7 @@ rb_stat_new(const struct stat *st) if (st) { #if RUBY_USE_STATX # define CP(m) .stx_ ## m = st->st_ ## m -# define CP_32(m) .stx_ ## m = (__u32)st->st_ ## m +# define CP_32(m) .stx_ ## m = (uint32_t)st->st_ ## m # define CP_TS(m) .stx_ ## m = stat_ ## m ## spec(st) rb_st->stat = (struct statx){ .stx_mask = STATX_BASIC_STATS, @@ -131,45 +131,45 @@ #include "shape.h" unsigned int -rb_gc_vm_lock(void) +rb_gc_vm_lock(const char *file, int line) { unsigned int lev = 0; - RB_VM_LOCK_ENTER_LEV(&lev); + rb_vm_lock_enter(&lev, file, line); return lev; } void -rb_gc_vm_unlock(unsigned int lev) +rb_gc_vm_unlock(unsigned int lev, const char *file, int line) { - RB_VM_LOCK_LEAVE_LEV(&lev); + rb_vm_lock_leave(&lev, file, line); } unsigned int -rb_gc_cr_lock(void) +rb_gc_cr_lock(const char *file, int line) { unsigned int lev; - RB_VM_LOCK_ENTER_CR_LEV(GET_RACTOR(), &lev); + rb_vm_lock_enter_cr(GET_RACTOR(), &lev, file, line); return lev; } void -rb_gc_cr_unlock(unsigned int lev) +rb_gc_cr_unlock(unsigned int lev, const char *file, int line) { - RB_VM_LOCK_LEAVE_CR_LEV(GET_RACTOR(), &lev); + rb_vm_lock_leave_cr(GET_RACTOR(), &lev, file, line); } unsigned int -rb_gc_vm_lock_no_barrier(void) +rb_gc_vm_lock_no_barrier(const char *file, int line) { unsigned int lev = 0; - RB_VM_LOCK_ENTER_LEV_NB(&lev); + rb_vm_lock_enter_nb(&lev, file, line); return lev; } void -rb_gc_vm_unlock_no_barrier(unsigned int lev) +rb_gc_vm_unlock_no_barrier(unsigned int lev, const char *file, int line) { - RB_VM_LOCK_LEAVE_LEV_NB(&lev); + rb_vm_lock_leave_nb(&lev, file, line); } void @@ -1201,7 +1201,6 @@ rb_data_free(void *objspace, VALUE obj) struct classext_foreach_args { VALUE klass; - bool obj_too_complex; rb_objspace_t *objspace; // used for update_* }; @@ -1213,12 +1212,6 @@ classext_free(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg) rb_id_table_free(RCLASSEXT_M_TBL(ext)); rb_cc_tbl_free(RCLASSEXT_CC_TBL(ext), args->klass); - if (args->obj_too_complex) { - st_free_table((st_table *)RCLASSEXT_FIELDS(ext)); - } - else { - xfree(RCLASSEXT_FIELDS(ext)); - } if (!RCLASSEXT_SHARED_CONST_TBL(ext) && (tbl = RCLASSEXT_CONST_TBL(ext)) != NULL) { rb_free_const_table(tbl); } @@ -1292,8 +1285,6 @@ rb_gc_obj_free(void *objspace, VALUE obj) case T_MODULE: case T_CLASS: args.klass = obj; - args.obj_too_complex = rb_shape_obj_too_complex_p(obj) ? 
true : false; - rb_class_classext_foreach(obj, classext_free, (void *)&args); if (RCLASS(obj)->ns_classext_tbl) { st_free_table(RCLASS(obj)->ns_classext_tbl); @@ -1783,9 +1774,9 @@ generate_next_object_id(void) // 64bit atomics are available return SIZET2NUM(RUBY_ATOMIC_SIZE_FETCH_ADD(object_id_counter, 1) * OBJ_ID_INCREMENT); #else - unsigned int lock_lev = rb_gc_vm_lock(); + unsigned int lock_lev = RB_GC_VM_LOCK(); VALUE id = ULL2NUM(++object_id_counter * OBJ_ID_INCREMENT); - rb_gc_vm_unlock(lock_lev); + RB_GC_VM_UNLOCK(lock_lev); return id; #endif } @@ -1867,7 +1858,7 @@ class_object_id(VALUE klass) { VALUE id = RUBY_ATOMIC_VALUE_LOAD(RCLASS(klass)->object_id); if (!id) { - unsigned int lock_lev = rb_gc_vm_lock(); + unsigned int lock_lev = RB_GC_VM_LOCK(); id = generate_next_object_id(); VALUE existing_id = RUBY_ATOMIC_VALUE_CAS(RCLASS(klass)->object_id, 0, id); if (existing_id) { @@ -1876,7 +1867,7 @@ class_object_id(VALUE klass) else if (RB_UNLIKELY(id2ref_tbl)) { st_insert(id2ref_tbl, id, klass); } - rb_gc_vm_unlock(lock_lev); + RB_GC_VM_UNLOCK(lock_lev); } return id; } @@ -1946,9 +1937,9 @@ object_id(VALUE obj) } if (UNLIKELY(rb_gc_multi_ractor_p() && rb_ractor_shareable_p(obj))) { - unsigned int lock_lev = rb_gc_vm_lock(); + unsigned int lock_lev = RB_GC_VM_LOCK(); VALUE id = object_id0(obj); - rb_gc_vm_unlock(lock_lev); + RB_GC_VM_UNLOCK(lock_lev); return id; } @@ -1983,7 +1974,7 @@ object_id_to_ref(void *objspace_ptr, VALUE object_id) { rb_objspace_t *objspace = objspace_ptr; - unsigned int lev = rb_gc_vm_lock(); + unsigned int lev = RB_GC_VM_LOCK(); if (!id2ref_tbl) { rb_gc_vm_barrier(); // stop other ractors @@ -2007,7 +1998,7 @@ object_id_to_ref(void *objspace_ptr, VALUE object_id) VALUE obj; bool found = st_lookup(id2ref_tbl, object_id, &obj) && !rb_gc_impl_garbage_object_p(objspace, obj); - rb_gc_vm_unlock(lev); + RB_GC_VM_UNLOCK(lev); if (found) { return obj; @@ -2306,18 +2297,6 @@ classext_memsize(rb_classext_t *ext, bool prime, VALUE namespace, void *arg) } static void -classext_fields_hash_memsize(rb_classext_t *ext, bool prime, VALUE namespace, void *arg) -{ - size_t *size = (size_t *)arg; - size_t count; - RB_VM_LOCKING() { - count = rb_st_table_size((st_table *)RCLASSEXT_FIELDS(ext)); - } - // class IV sizes are allocated as powers of two - *size += SIZEOF_VALUE << bit_length(count); -} - -static void classext_superclasses_memsize(rb_classext_t *ext, bool prime, VALUE namespace, void *arg) { size_t *size = (size_t *)arg; @@ -2354,15 +2333,6 @@ rb_obj_memsize_of(VALUE obj) case T_MODULE: case T_CLASS: rb_class_classext_foreach(obj, classext_memsize, (void *)&size); - - if (rb_shape_obj_too_complex_p(obj)) { - rb_class_classext_foreach(obj, classext_fields_hash_memsize, (void *)&size); - } - else { - // class IV sizes are allocated as powers of two - size += SIZEOF_VALUE << bit_length(RCLASS_FIELDS_COUNT(obj)); - } - rb_class_classext_foreach(obj, classext_superclasses_memsize, (void *)&size); break; case T_ICLASS: @@ -3135,10 +3105,7 @@ gc_mark_classext_module(rb_classext_t *ext, bool prime, VALUE namespace, void *a gc_mark_internal(RCLASSEXT_SUPER(ext)); } mark_m_tbl(objspace, RCLASSEXT_M_TBL(ext)); - if (rb_shape_obj_too_complex_p(obj)) { - gc_mark_tbl_no_pin((st_table *)RCLASSEXT_FIELDS(ext)); - // for the case ELSE is written in rb_gc_mark_children() because it's per RClass, not classext - } + gc_mark_internal(RCLASSEXT_FIELDS_OBJ(ext)); if (!RCLASSEXT_SHARED_CONST_TBL(ext) && RCLASSEXT_CONST_TBL(ext)) { mark_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext)); } @@ 
-3218,12 +3185,6 @@ rb_gc_mark_children(void *objspace, VALUE obj) foreach_args.objspace = objspace; foreach_args.obj = obj; rb_class_classext_foreach(obj, gc_mark_classext_module, (void *)&foreach_args); - - if (!rb_shape_obj_too_complex_p(obj)) { - for (attr_index_t i = 0; i < RCLASS_FIELDS_COUNT(obj); i++) { - gc_mark_internal(RCLASS_PRIME_FIELDS(obj)[i]); - } - } break; case T_ICLASS: @@ -3849,7 +3810,6 @@ static void update_classext(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg) { struct classext_foreach_args *args = (struct classext_foreach_args *)arg; - VALUE klass = args->klass; rb_objspace_t *objspace = args->objspace; if (RCLASSEXT_SUPER(ext)) { @@ -3858,16 +3818,7 @@ update_classext(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg) update_m_tbl(objspace, RCLASSEXT_M_TBL(ext)); - if (args->obj_too_complex) { - gc_ref_update_table_values_only((st_table *)RCLASSEXT_FIELDS(ext)); - } - else { - // Classext is not copied in this case - for (attr_index_t i = 0; i < RCLASS_FIELDS_COUNT(klass); i++) { - UPDATE_IF_MOVED(objspace, RCLASSEXT_FIELDS(RCLASS_EXT_PRIME(klass))[i]); - } - } - + UPDATE_IF_MOVED(objspace, ext->fields_obj); if (!RCLASSEXT_SHARED_CONST_TBL(ext)) { update_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext)); } @@ -4093,7 +4044,8 @@ vm_weak_table_gen_fields_foreach(st_data_t key, st_data_t value, st_data_t data) ); } else { - for (uint32_t i = 0; i < fields_tbl->as.shape.fields_count; i++) { + uint32_t fields_count = RSHAPE_LEN(RBASIC_SHAPE_ID((VALUE)key)); + for (uint32_t i = 0; i < fields_count; i++) { if (SPECIAL_CONST_P(fields_tbl->as.shape.fields[i])) continue; int ivar_ret = iter_data->callback(fields_tbl->as.shape.fields[i], iter_data->data); @@ -4254,7 +4206,6 @@ rb_gc_update_object_references(void *objspace, VALUE obj) // Continue to the shared T_CLASS/T_MODULE case T_MODULE: args.klass = obj; - args.obj_too_complex = rb_shape_obj_too_complex_p(obj); args.objspace = objspace; rb_class_classext_foreach(obj, update_classext, (void *)&args); break; diff --git a/gc/README.md b/gc/README.md index 102b24e24e..cb71357973 100644 --- a/gc/README.md +++ b/gc/README.md @@ -15,12 +15,17 @@ Two GC implementations are included in Ruby: > [!IMPORTANT] > Ruby's modular GC feature is experimental and subject to change. There may be bugs or performance impacts. Use at your own risk. +### Building Ruby with Modular GC + 1. Configure Ruby with the `--with-modular-gc=<dir>` option, where `dir` is the directory you want to place the built GC libraries into. 2. Build Ruby as usual. -3. Build your desired GC implementation with `make install-modular-gc MODULAR_GC=<impl>`. This will build the GC implementation and place the built library into the `dir` specified in step 1. `impl` can be one of: + +### Building GC implementations shipped with Ruby + +1. Build your desired GC implementation with `make install-modular-gc MODULAR_GC=<impl>`. This will build the GC implementation and place the built library into the `dir` specified in step 1. `impl` can be one of: - `default`: The default GC that Ruby ships with. - `mmtk`: The GC that uses [MMTk](https://www.mmtk.io/) as the back-end. See Ruby-specific details in the [ruby/mmtk](https://github.com/ruby/mmtk) repository. -4. Run your desired GC implementation by setting the `RUBY_GC_LIBRARY=<lib>` environment variable, where `lib` could be `default`, `mmtk`, or your own implementation (as long as you place it in the `dir` specified in step 1). +2. 
Run your desired GC implementation by setting the `RUBY_GC_LIBRARY=<lib>` environment variable, where `lib` could be `default`, `mmtk`, or your own implementation (as long as you place it in the `dir` specified in step 1). ## Modular GC API diff --git a/gc/default/default.c b/gc/default/default.c index 5664b3dd90..40d39d6f17 100644 --- a/gc/default/default.c +++ b/gc/default/default.c @@ -1229,7 +1229,7 @@ check_rvalue_consistency_force(rb_objspace_t *objspace, const VALUE obj, int ter { int err = 0; - int lev = rb_gc_vm_lock_no_barrier(); + int lev = RB_GC_VM_LOCK_NO_BARRIER(); { if (SPECIAL_CONST_P(obj)) { fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj); @@ -1319,7 +1319,7 @@ check_rvalue_consistency_force(rb_objspace_t *objspace, const VALUE obj, int ter } } } - rb_gc_vm_unlock_no_barrier(lev); + RB_GC_VM_UNLOCK_NO_BARRIER(lev); if (err > 0 && terminate) { rb_bug("check_rvalue_consistency_force: there is %d errors.", err); @@ -2140,7 +2140,7 @@ newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace, #if RGENGC_CHECK_MODE newobj_fill(obj, 0, 0, 0); - int lev = rb_gc_vm_lock_no_barrier(); + int lev = RB_GC_VM_LOCK_NO_BARRIER(); { check_rvalue_consistency(objspace, obj); @@ -2151,7 +2151,7 @@ newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace, if (RVALUE_REMEMBERED(objspace, obj)) rb_bug("newobj: %s is remembered.", rb_obj_info(obj)); } - rb_gc_vm_unlock_no_barrier(lev); + RB_GC_VM_UNLOCK_NO_BARRIER(lev); #endif if (RB_UNLIKELY(wb_protected == FALSE)) { @@ -2363,7 +2363,7 @@ newobj_cache_miss(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size bool unlock_vm = false; if (!vm_locked) { - lev = rb_gc_cr_lock(); + lev = RB_GC_CR_LOCK(); unlock_vm = true; } @@ -2387,7 +2387,7 @@ newobj_cache_miss(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size } if (unlock_vm) { - rb_gc_cr_unlock(lev); + RB_GC_CR_UNLOCK(lev); } if (RB_UNLIKELY(obj == Qfalse)) { @@ -2416,7 +2416,7 @@ newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_new VALUE obj; unsigned int lev; - lev = rb_gc_cr_lock(); + lev = RB_GC_CR_LOCK(); { if (RB_UNLIKELY(during_gc || ruby_gc_stressful)) { if (during_gc) { @@ -2438,7 +2438,7 @@ newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_new obj = newobj_alloc(objspace, cache, heap_idx, true); newobj_init(klass, flags, wb_protected, objspace, obj); } - rb_gc_cr_unlock(lev); + RB_GC_CR_UNLOCK(lev); return obj; } @@ -2753,7 +2753,7 @@ rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block) RBASIC(obj)->flags |= FL_FINALIZE; - int lev = rb_gc_vm_lock(); + int lev = RB_GC_VM_LOCK(); if (st_lookup(finalizer_table, obj, &data)) { table = (VALUE)data; @@ -2766,7 +2766,7 @@ rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block) for (i = 0; i < len; i++) { VALUE recv = RARRAY_AREF(table, i); if (rb_equal(recv, block)) { - rb_gc_vm_unlock(lev); + RB_GC_VM_UNLOCK(lev); return recv; } } @@ -2780,7 +2780,7 @@ rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block) st_add_direct(finalizer_table, obj, table); } - rb_gc_vm_unlock(lev); + RB_GC_VM_UNLOCK(lev); return block; } @@ -2794,9 +2794,9 @@ rb_gc_impl_undefine_finalizer(void *objspace_ptr, VALUE obj) st_data_t data = obj; - int lev = rb_gc_vm_lock(); + int lev = RB_GC_VM_LOCK(); st_delete(finalizer_table, &data, 0); - rb_gc_vm_unlock(lev); + RB_GC_VM_UNLOCK(lev); FL_UNSET(obj, FL_FINALIZE); } @@ -2810,7 +2810,7 @@ 
rb_gc_impl_copy_finalizer(void *objspace_ptr, VALUE dest, VALUE obj) if (!FL_TEST(obj, FL_FINALIZE)) return; - int lev = rb_gc_vm_lock(); + int lev = RB_GC_VM_LOCK(); if (RB_LIKELY(st_lookup(finalizer_table, obj, &data))) { table = rb_ary_dup((VALUE)data); RARRAY_ASET(table, 0, rb_obj_id(dest)); @@ -2820,7 +2820,7 @@ rb_gc_impl_copy_finalizer(void *objspace_ptr, VALUE dest, VALUE obj) else { rb_bug("rb_gc_copy_finalizer: FL_FINALIZE set but not found in finalizer_table: %s", rb_obj_info(obj)); } - rb_gc_vm_unlock(lev); + RB_GC_VM_UNLOCK(lev); } static VALUE @@ -2864,7 +2864,7 @@ finalize_list(rb_objspace_t *objspace, VALUE zombie) next_zombie = RZOMBIE(zombie)->next; page = GET_HEAP_PAGE(zombie); - int lev = rb_gc_vm_lock(); + int lev = RB_GC_VM_LOCK(); run_final(objspace, zombie); { @@ -2878,7 +2878,7 @@ finalize_list(rb_objspace_t *objspace, VALUE zombie) heap_page_add_freeobj(objspace, page, zombie); page->heap->total_freed_objects++; } - rb_gc_vm_unlock(lev); + RB_GC_VM_UNLOCK(lev); zombie = next_zombie; } @@ -3247,7 +3247,7 @@ read_barrier_handler(uintptr_t address) rb_bug("read_barrier_handler: segmentation fault at %p", (void *)address); } - int lev = rb_gc_vm_lock(); + int lev = RB_GC_VM_LOCK(); { unlock_page_body(objspace, page_body); @@ -3255,7 +3255,7 @@ read_barrier_handler(uintptr_t address) invalidate_moved_page(objspace, GET_HEAP_PAGE(address)); } - rb_gc_vm_unlock(lev); + RB_GC_VM_UNLOCK(lev); } #endif @@ -5180,7 +5180,7 @@ gc_verify_internal_consistency(void *objspace_ptr) { rb_objspace_t *objspace = objspace_ptr; - unsigned int lev = rb_gc_vm_lock(); + unsigned int lev = RB_GC_VM_LOCK(); { rb_gc_vm_barrier(); // stop other ractors @@ -5191,7 +5191,7 @@ gc_verify_internal_consistency(void *objspace_ptr) } during_gc = prev_during_gc; } - rb_gc_vm_unlock(lev); + RB_GC_VM_UNLOCK(lev); } static void @@ -5952,11 +5952,11 @@ gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace) /* mark `a' and remember (default behavior) */ if (!RVALUE_REMEMBERED(objspace, a)) { - int lev = rb_gc_vm_lock_no_barrier(); + int lev = RB_GC_VM_LOCK_NO_BARRIER(); { rgengc_remember(objspace, a); } - rb_gc_vm_unlock_no_barrier(lev); + RB_GC_VM_UNLOCK_NO_BARRIER(lev); gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", rb_obj_info(a), rb_obj_info(b)); } @@ -6029,7 +6029,7 @@ rb_gc_impl_writebarrier(void *objspace_ptr, VALUE a, VALUE b) else { bool retry = false; /* slow path */ - int lev = rb_gc_vm_lock_no_barrier(); + int lev = RB_GC_VM_LOCK_NO_BARRIER(); { if (is_incremental_marking(objspace)) { gc_writebarrier_incremental(a, b, objspace); @@ -6038,7 +6038,7 @@ rb_gc_impl_writebarrier(void *objspace_ptr, VALUE a, VALUE b) retry = true; } } - rb_gc_vm_unlock_no_barrier(lev); + RB_GC_VM_UNLOCK_NO_BARRIER(lev); if (retry) goto retry; } @@ -6057,7 +6057,7 @@ rb_gc_impl_writebarrier_unprotect(void *objspace_ptr, VALUE obj) gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", rb_obj_info(obj), RVALUE_REMEMBERED(objspace, obj) ? 
" (already remembered)" : ""); - unsigned int lev = rb_gc_vm_lock_no_barrier(); + unsigned int lev = RB_GC_VM_LOCK_NO_BARRIER(); { if (RVALUE_OLD_P(objspace, obj)) { gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", rb_obj_info(obj)); @@ -6079,7 +6079,7 @@ rb_gc_impl_writebarrier_unprotect(void *objspace_ptr, VALUE obj) RB_DEBUG_COUNTER_INC(obj_wb_unprotect); MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj); } - rb_gc_vm_unlock_no_barrier(lev); + RB_GC_VM_UNLOCK_NO_BARRIER(lev); } } @@ -6292,7 +6292,7 @@ garbage_collect(rb_objspace_t *objspace, unsigned int reason) { int ret; - int lev = rb_gc_vm_lock(); + int lev = RB_GC_VM_LOCK(); { #if GC_PROFILE_MORE_DETAIL objspace->profile.prepare_time = getrusage_time(); @@ -6306,7 +6306,7 @@ garbage_collect(rb_objspace_t *objspace, unsigned int reason) ret = gc_start(objspace, reason); } - rb_gc_vm_unlock(lev); + RB_GC_VM_UNLOCK(lev); return ret; } @@ -6590,7 +6590,7 @@ gc_clock_end(struct timespec *ts) static inline void gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev) { - *lock_lev = rb_gc_vm_lock(); + *lock_lev = RB_GC_VM_LOCK(); switch (event) { case gc_enter_event_rest: @@ -6629,7 +6629,7 @@ gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_l gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace)); during_gc = FALSE; - rb_gc_vm_unlock(*lock_lev); + RB_GC_VM_UNLOCK(*lock_lev); } #ifndef MEASURE_GC @@ -9106,7 +9106,7 @@ gc_verify_compaction_references(int argc, VALUE* argv, VALUE self) /* Clear the heap. */ rb_gc_impl_start(objspace, true, true, true, false); - unsigned int lev = rb_gc_vm_lock(); + unsigned int lev = RB_GC_VM_LOCK(); { gc_rest(objspace); @@ -9162,7 +9162,7 @@ gc_verify_compaction_references(int argc, VALUE* argv, VALUE self) objspace->rcompactor.compare_func = compare_free_slots; } } - rb_gc_vm_unlock(lev); + RB_GC_VM_UNLOCK(lev); rb_gc_impl_start(rb_gc_get_objspace(), true, true, true, true); @@ -35,6 +35,13 @@ enum rb_gc_vm_weak_tables { RB_GC_VM_WEAK_TABLE_COUNT }; +#define RB_GC_VM_LOCK() rb_gc_vm_lock(__FILE__, __LINE__) +#define RB_GC_VM_UNLOCK(lev) rb_gc_vm_unlock(lev, __FILE__, __LINE__) +#define RB_GC_CR_LOCK() rb_gc_cr_lock(__FILE__, __LINE__) +#define RB_GC_CR_UNLOCK(lev) rb_gc_cr_unlock(lev, __FILE__, __LINE__) +#define RB_GC_VM_LOCK_NO_BARRIER() rb_gc_vm_lock_no_barrier(__FILE__, __LINE__) +#define RB_GC_VM_UNLOCK_NO_BARRIER(lev) rb_gc_vm_unlock_no_barrier(lev, __FILE__, __LINE__) + #if USE_MODULAR_GC # define MODULAR_GC_FN #else @@ -57,12 +64,12 @@ size_t rb_obj_memsize_of(VALUE obj); bool ruby_free_at_exit_p(void); void rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data); -MODULAR_GC_FN unsigned int rb_gc_vm_lock(void); -MODULAR_GC_FN void rb_gc_vm_unlock(unsigned int lev); -MODULAR_GC_FN unsigned int rb_gc_cr_lock(void); -MODULAR_GC_FN void rb_gc_cr_unlock(unsigned int lev); -MODULAR_GC_FN unsigned int rb_gc_vm_lock_no_barrier(void); -MODULAR_GC_FN void rb_gc_vm_unlock_no_barrier(unsigned int lev); +MODULAR_GC_FN unsigned int rb_gc_vm_lock(const char *file, int line); +MODULAR_GC_FN void rb_gc_vm_unlock(unsigned int lev, const char *file, int line); +MODULAR_GC_FN unsigned int rb_gc_cr_lock(const char *file, int line); +MODULAR_GC_FN void rb_gc_cr_unlock(unsigned int lev, const char *file, int line); +MODULAR_GC_FN unsigned int rb_gc_vm_lock_no_barrier(const char *file, int line); +MODULAR_GC_FN void 
rb_gc_vm_unlock_no_barrier(unsigned int lev, const char *file, int line); MODULAR_GC_FN void rb_gc_vm_barrier(void); MODULAR_GC_FN size_t rb_gc_obj_optimal_size(VALUE obj); MODULAR_GC_FN void rb_gc_mark_children(void *objspace, VALUE obj); diff --git a/gc/mmtk/mmtk.c b/gc/mmtk/mmtk.c index 9e4ee9f3de..c318c6fe48 100644 --- a/gc/mmtk/mmtk.c +++ b/gc/mmtk/mmtk.c @@ -129,7 +129,7 @@ rb_mmtk_block_for_gc(MMTk_VMMutatorThread mutator) struct objspace *objspace = rb_gc_get_objspace(); size_t starting_gc_count = objspace->gc_count; - int lock_lev = rb_gc_vm_lock(); + int lock_lev = RB_GC_VM_LOCK(); int err; if ((err = pthread_mutex_lock(&objspace->mutex)) != 0) { rb_bug("ERROR: cannot lock objspace->mutex: %s", strerror(err)); @@ -173,7 +173,7 @@ rb_mmtk_block_for_gc(MMTk_VMMutatorThread mutator) if ((err = pthread_mutex_unlock(&objspace->mutex)) != 0) { rb_bug("ERROR: cannot release objspace->mutex: %s", strerror(err)); } - rb_gc_vm_unlock(lock_lev); + RB_GC_VM_UNLOCK(lock_lev); } static size_t @@ -927,7 +927,7 @@ rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block) RBASIC(obj)->flags |= FL_FINALIZE; - int lev = rb_gc_vm_lock(); + int lev = RB_GC_VM_LOCK(); if (st_lookup(objspace->finalizer_table, obj, &data)) { table = (VALUE)data; @@ -940,7 +940,7 @@ rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block) for (i = 0; i < len; i++) { VALUE recv = RARRAY_AREF(table, i); if (rb_equal(recv, block)) { - rb_gc_vm_unlock(lev); + RB_GC_VM_UNLOCK(lev); return recv; } } @@ -954,7 +954,7 @@ rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block) st_add_direct(objspace->finalizer_table, obj, table); } - rb_gc_vm_unlock(lev); + RB_GC_VM_UNLOCK(lev); return block; } @@ -966,9 +966,9 @@ rb_gc_impl_undefine_finalizer(void *objspace_ptr, VALUE obj) st_data_t data = obj; - int lev = rb_gc_vm_lock(); + int lev = RB_GC_VM_LOCK(); st_delete(objspace->finalizer_table, &data, 0); - rb_gc_vm_unlock(lev); + RB_GC_VM_UNLOCK(lev); FL_UNSET(obj, FL_FINALIZE); } @@ -982,7 +982,7 @@ rb_gc_impl_copy_finalizer(void *objspace_ptr, VALUE dest, VALUE obj) if (!FL_TEST(obj, FL_FINALIZE)) return; - int lev = rb_gc_vm_lock(); + int lev = RB_GC_VM_LOCK(); if (RB_LIKELY(st_lookup(objspace->finalizer_table, obj, &data))) { table = rb_ary_dup((VALUE)data); RARRAY_ASET(table, 0, rb_obj_id(dest)); @@ -992,7 +992,7 @@ rb_gc_impl_copy_finalizer(void *objspace_ptr, VALUE dest, VALUE obj) else { rb_bug("rb_gc_copy_finalizer: FL_FINALIZE set but not found in finalizer_table: %s", rb_obj_info(obj)); } - rb_gc_vm_unlock(lev); + RB_GC_VM_UNLOCK(lev); } static int @@ -3,6 +3,7 @@ #include "id_table.h" #include "internal.h" #include "internal/imemo.h" +#include "internal/st.h" #include "vm_callinfo.h" size_t rb_iseq_memsize(const rb_iseq_t *iseq); @@ -29,10 +30,10 @@ rb_imemo_name(enum imemo_type type) IMEMO_NAME(svar); IMEMO_NAME(throw_data); IMEMO_NAME(tmpbuf); + IMEMO_NAME(class_fields); #undef IMEMO_NAME - default: - rb_bug("unreachable"); } + rb_bug("unreachable"); } /* ========================================================================= @@ -109,6 +110,64 @@ rb_imemo_tmpbuf_parser_heap(void *buf, rb_imemo_tmpbuf_t *old_heap, size_t cnt) return tmpbuf; } +static VALUE +imemo_class_fields_new(VALUE klass, size_t capa) +{ + size_t embedded_size = offsetof(struct rb_class_fields, as.embed) + capa * sizeof(VALUE); + if (rb_gc_size_allocatable_p(embedded_size)) { + VALUE fields = rb_imemo_new(imemo_class_fields, klass, embedded_size); + RUBY_ASSERT(IMEMO_TYPE_P(fields, imemo_class_fields)); + 
return fields; + } + else { + VALUE fields = rb_imemo_new(imemo_class_fields, klass, sizeof(struct rb_class_fields)); + FL_SET_RAW(fields, OBJ_FIELD_EXTERNAL); + IMEMO_OBJ_FIELDS(fields)->as.external.ptr = ALLOC_N(VALUE, capa); + return fields; + } +} + +VALUE +rb_imemo_class_fields_new(VALUE klass, size_t capa) +{ + return imemo_class_fields_new(rb_singleton_class(klass), capa); +} + +static VALUE +imemo_class_fields_new_complex(VALUE klass, size_t capa) +{ + VALUE fields = imemo_class_fields_new(klass, sizeof(struct rb_class_fields)); + IMEMO_OBJ_FIELDS(fields)->as.complex.table = st_init_numtable_with_size(capa); + return fields; +} + +VALUE +rb_imemo_class_fields_new_complex(VALUE klass, size_t capa) +{ + return imemo_class_fields_new_complex(rb_singleton_class(klass), capa); +} + +VALUE +rb_imemo_class_fields_clone(VALUE fields_obj) +{ + shape_id_t shape_id = RBASIC_SHAPE_ID(fields_obj); + VALUE clone; + + if (rb_shape_too_complex_p(shape_id)) { + clone = rb_imemo_class_fields_new_complex(CLASS_OF(fields_obj), 0); + RBASIC_SET_SHAPE_ID(clone, shape_id); + st_table *src_table = rb_imemo_class_fields_complex_tbl(fields_obj); + st_replace(rb_imemo_class_fields_complex_tbl(clone), src_table); + } + else { + clone = imemo_class_fields_new(CLASS_OF(fields_obj), RSHAPE_CAPACITY(shape_id)); + RBASIC_SET_SHAPE_ID(clone, shape_id); + MEMCPY(rb_imemo_class_fields_ptr(clone), rb_imemo_class_fields_ptr(fields_obj), VALUE, RSHAPE_LEN(shape_id)); + } + + return clone; +} + /* ========================================================================= * memsize * ========================================================================= */ @@ -156,6 +215,14 @@ rb_imemo_memsize(VALUE obj) size += ((rb_imemo_tmpbuf_t *)obj)->cnt * sizeof(VALUE); break; + case imemo_class_fields: + if (rb_shape_obj_too_complex_p(obj)) { + size += st_memsize(IMEMO_OBJ_FIELDS(obj)->as.complex.table); + } + else if (FL_TEST_RAW(obj, OBJ_FIELD_EXTERNAL)) { + size += RSHAPE_CAPACITY(RBASIC_SHAPE_ID(obj)) * sizeof(VALUE); + } + break; default: rb_bug("unreachable"); } @@ -420,6 +487,27 @@ rb_imemo_mark_and_move(VALUE obj, bool reference_updating) break; } + case imemo_class_fields: { + rb_gc_mark_and_move((VALUE *)&RBASIC(obj)->klass); + + if (rb_shape_obj_too_complex_p(obj)) { + st_table *tbl = rb_imemo_class_fields_complex_tbl(obj); + if (reference_updating) { + rb_gc_ref_update_table_values_only(tbl); + } + else { + rb_mark_tbl_no_pin(tbl); + } + } + else { + VALUE *fields = rb_imemo_class_fields_ptr(obj); + attr_index_t len = RSHAPE_LEN(RBASIC_SHAPE_ID(obj)); + for (attr_index_t i = 0; i < len; i++) { + rb_gc_mark_and_move(&fields[i]); + } + } + break; + } default: rb_bug("unreachable"); } @@ -513,6 +601,17 @@ rb_cc_tbl_free(struct rb_id_table *cc_tbl, VALUE klass) rb_id_table_free(cc_tbl); } +static inline void +imemo_class_fields_free(struct rb_class_fields *fields) +{ + if (rb_shape_obj_too_complex_p((VALUE)fields)) { + st_free_table(fields->as.complex.table); + } + else if (FL_TEST_RAW((VALUE)fields, OBJ_FIELD_EXTERNAL)) { + xfree(fields->as.external.ptr); + } +} + void rb_imemo_free(VALUE obj) { @@ -576,6 +675,7 @@ rb_imemo_free(VALUE obj) break; case imemo_svar: RB_DEBUG_COUNTER_INC(obj_imemo_svar); + break; case imemo_throw_data: RB_DEBUG_COUNTER_INC(obj_imemo_throw_data); @@ -586,6 +686,10 @@ rb_imemo_free(VALUE obj) RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf); break; + case imemo_class_fields: + imemo_class_fields_free(IMEMO_OBJ_FIELDS(obj)); + RB_DEBUG_COUNTER_INC(obj_imemo_class_fields); + break; default: 
rb_bug("unreachable"); } diff --git a/internal/class.h b/internal/class.h index 620c7e9c53..2250d3f343 100644 --- a/internal/class.h +++ b/internal/class.h @@ -79,7 +79,7 @@ struct rb_cvar_class_tbl_entry { struct rb_classext_struct { const rb_namespace_t *ns; VALUE super; - VALUE *fields; // Fields are either ivar or other internal properties stored inline + VALUE fields_obj; // Fields are either ivar or other internal properties stored inline struct rb_id_table *m_tbl; struct rb_id_table *const_tbl; struct rb_id_table *callable_m_tbl; @@ -175,7 +175,8 @@ static inline rb_classext_t * RCLASS_EXT_WRITABLE(VALUE obj); #define RCLASSEXT_NS(ext) (ext->ns) #define RCLASSEXT_SUPER(ext) (ext->super) -#define RCLASSEXT_FIELDS(ext) (ext->fields) +#define RCLASSEXT_FIELDS(ext) (ext->fields_obj ? ROBJECT_FIELDS(ext->fields_obj) : NULL) +#define RCLASSEXT_FIELDS_OBJ(ext) (ext->fields_obj) #define RCLASSEXT_M_TBL(ext) (ext->m_tbl) #define RCLASSEXT_CONST_TBL(ext) (ext->const_tbl) #define RCLASSEXT_CALLABLE_M_TBL(ext) (ext->callable_m_tbl) @@ -205,7 +206,7 @@ static inline void RCLASSEXT_SET_INCLUDER(rb_classext_t *ext, VALUE klass, VALUE #define RCLASS_PRIME_NS(c) (RCLASS_EXT_PRIME(c)->ns) // To invalidate CC by inserting&invalidating method entry into tables containing the target cme // See clear_method_cache_by_id_in_class() -#define RCLASS_PRIME_FIELDS(c) (RCLASS_EXT_PRIME(c)->fields) +#define RCLASS_PRIME_FIELDS_OBJ(c) (RCLASS_EXT_PRIME(c)->fields_obj) #define RCLASS_PRIME_M_TBL(c) (RCLASS_EXT_PRIME(c)->m_tbl) #define RCLASS_PRIME_CONST_TBL(c) (RCLASS_EXT_PRIME(c)->const_tbl) #define RCLASS_PRIME_CALLABLE_M_TBL(c) (RCLASS_EXT_PRIME(c)->callable_m_tbl) @@ -255,11 +256,6 @@ static inline void RCLASSEXT_SET_INCLUDER(rb_classext_t *ext, VALUE klass, VALUE static inline void RCLASS_SET_SUPER(VALUE klass, VALUE super); static inline void RCLASS_WRITE_SUPER(VALUE klass, VALUE super); -static inline st_table * RCLASS_FIELDS_HASH(VALUE obj); -static inline st_table * RCLASS_WRITABLE_FIELDS_HASH(VALUE obj); -static inline uint32_t RCLASS_FIELDS_COUNT(VALUE obj); -static inline void RCLASS_SET_FIELDS_HASH(VALUE obj, const st_table *table); -static inline void RCLASS_WRITE_FIELDS_HASH(VALUE obj, const st_table *table); // TODO: rename RCLASS_SET_M_TBL_WORKAROUND (and _WRITE_) to RCLASS_SET_M_TBL with write barrier static inline void RCLASS_SET_M_TBL_WORKAROUND(VALUE klass, struct rb_id_table *table, bool check_promoted); static inline void RCLASS_WRITE_M_TBL_WORKAROUND(VALUE klass, struct rb_id_table *table, bool check_promoted); @@ -407,10 +403,6 @@ RCLASS_EXT_WRITABLE_LOOKUP(VALUE obj, const rb_namespace_t *ns) if (ext) return ext; - if (!rb_shape_obj_too_complex_p(obj)) { - rb_evict_ivars_to_hash(obj); // fallback to ivptr for ivars from shapes - } - RB_VM_LOCKING() { // re-check the classext is not created to avoid the multi-thread race ext = RCLASS_EXT_TABLE_LOOKUP_INTERNAL(obj, ns); @@ -528,56 +520,57 @@ RCLASS_WRITE_SUPER(VALUE klass, VALUE super) RB_OBJ_WRITE(klass, &RCLASSEXT_SUPER(RCLASS_EXT_WRITABLE(klass)), super); } -static inline st_table * -RCLASS_FIELDS_HASH(VALUE obj) +static inline VALUE +RCLASS_WRITABLE_ENSURE_FIELDS_OBJ(VALUE obj) { RUBY_ASSERT(RB_TYPE_P(obj, RUBY_T_CLASS) || RB_TYPE_P(obj, RUBY_T_MODULE)); - RUBY_ASSERT(rb_shape_obj_too_complex_p(obj)); - return (st_table *)RCLASSEXT_FIELDS(RCLASS_EXT_READABLE(obj)); + rb_classext_t *ext = RCLASS_EXT_WRITABLE(obj); + if (!ext->fields_obj) { + RB_OBJ_WRITE(obj, &ext->fields_obj, rb_imemo_class_fields_new(obj, 1)); + } + return 
ext->fields_obj; } -static inline st_table * -RCLASS_WRITABLE_FIELDS_HASH(VALUE obj) +static inline VALUE +RCLASS_WRITABLE_FIELDS_OBJ(VALUE obj) { RUBY_ASSERT(RB_TYPE_P(obj, RUBY_T_CLASS) || RB_TYPE_P(obj, RUBY_T_MODULE)); - RUBY_ASSERT(rb_shape_obj_too_complex_p(obj)); - return (st_table *)RCLASSEXT_FIELDS(RCLASS_EXT_WRITABLE(obj)); + return RCLASSEXT_FIELDS_OBJ(RCLASS_EXT_WRITABLE(obj)); } static inline void -RCLASS_SET_FIELDS_HASH(VALUE obj, const st_table *tbl) +RCLASSEXT_SET_FIELDS_OBJ(VALUE obj, rb_classext_t *ext, VALUE fields_obj) { RUBY_ASSERT(RB_TYPE_P(obj, RUBY_T_CLASS) || RB_TYPE_P(obj, RUBY_T_MODULE)); - RUBY_ASSERT(rb_shape_obj_too_complex_p(obj)); - RCLASSEXT_FIELDS(RCLASS_EXT_PRIME(obj)) = (VALUE *)tbl; + + VALUE old_fields_obj = ext->fields_obj; + RUBY_ATOMIC_VALUE_SET(ext->fields_obj, fields_obj); + RB_OBJ_WRITTEN(obj, old_fields_obj, fields_obj); } static inline void -RCLASS_WRITE_FIELDS_HASH(VALUE obj, const st_table *tbl) +RCLASS_WRITABLE_SET_FIELDS_OBJ(VALUE obj, VALUE fields_obj) { RUBY_ASSERT(RB_TYPE_P(obj, RUBY_T_CLASS) || RB_TYPE_P(obj, RUBY_T_MODULE)); - RUBY_ASSERT(rb_shape_obj_too_complex_p(obj)); - RCLASSEXT_FIELDS(RCLASS_EXT_WRITABLE(obj)) = (VALUE *)tbl; + + RCLASSEXT_SET_FIELDS_OBJ(obj, RCLASS_EXT_WRITABLE(obj), fields_obj); } static inline uint32_t RCLASS_FIELDS_COUNT(VALUE obj) { RUBY_ASSERT(RB_TYPE_P(obj, RUBY_T_CLASS) || RB_TYPE_P(obj, RUBY_T_MODULE)); - if (rb_shape_obj_too_complex_p(obj)) { - uint32_t count; - // "Too complex" classes could have their IV hash mutated in - // parallel, so lets lock around getting the hash size. - RB_VM_LOCKING() { - count = (uint32_t)rb_st_table_size(RCLASS_FIELDS_HASH(obj)); + VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj); + if (fields_obj) { + if (rb_shape_obj_too_complex_p(fields_obj)) { + return (uint32_t)rb_st_table_size(rb_imemo_class_fields_complex_tbl(fields_obj)); + } + else { + return RSHAPE_LEN(RBASIC_SHAPE_ID(fields_obj)); } - - return count; - } - else { - return RSHAPE(RBASIC_SHAPE_ID(obj))->next_field_index; } + return 0; } #define RCLASS_SET_M_TBL_EVEN_WHEN_PROMOTED(klass, table) RCLASS_SET_M_TBL_WORKAROUND(klass, table, false) diff --git a/internal/imemo.h b/internal/imemo.h index 305d12d240..0806baa9a6 100644 --- a/internal/imemo.h +++ b/internal/imemo.h @@ -42,6 +42,7 @@ enum imemo_type { imemo_callinfo = 11, imemo_callcache = 12, imemo_constcache = 13, + imemo_class_fields = 14, }; /* CREF (Class REFerence) is defined in method.h */ @@ -257,4 +258,57 @@ MEMO_V2_SET(struct MEMO *m, VALUE v) RB_OBJ_WRITE(m, &m->v2, v); } +struct rb_class_fields { + struct RBasic basic; + union { + struct { + VALUE fields[1]; + } embed; + struct { + VALUE *ptr; + } external; + struct { + // Note: the st_table could be embedded, but complex T_CLASS should be rare to + // non-existent, so not really worth the trouble. 
+ st_table *table; + } complex; + } as; +}; + +#define OBJ_FIELD_EXTERNAL IMEMO_FL_USER0 +#define IMEMO_OBJ_FIELDS(fields) ((struct rb_class_fields *)fields) + +VALUE rb_imemo_class_fields_new(VALUE klass, size_t capa); +VALUE rb_imemo_class_fields_new_complex(VALUE klass, size_t capa); +VALUE rb_imemo_class_fields_clone(VALUE fields_obj); + +static inline VALUE * +rb_imemo_class_fields_ptr(VALUE obj_fields) +{ + if (!obj_fields) { + return NULL; + } + + RUBY_ASSERT(IMEMO_TYPE_P(obj_fields, imemo_class_fields)); + + if (RB_UNLIKELY(FL_TEST_RAW(obj_fields, OBJ_FIELD_EXTERNAL))) { + return IMEMO_OBJ_FIELDS(obj_fields)->as.external.ptr; + } + else { + return IMEMO_OBJ_FIELDS(obj_fields)->as.embed.fields; + } +} + +static inline st_table * +rb_imemo_class_fields_complex_tbl(VALUE obj_fields) +{ + if (!obj_fields) { + return NULL; + } + + RUBY_ASSERT(IMEMO_TYPE_P(obj_fields, imemo_class_fields)); + + return IMEMO_OBJ_FIELDS(obj_fields)->as.complex.table; +} + #endif /* INTERNAL_IMEMO_H */ diff --git a/internal/thread.h b/internal/thread.h index 8403ac2663..00fcbfc560 100644 --- a/internal/thread.h +++ b/internal/thread.h @@ -90,6 +90,7 @@ typedef VALUE (rb_interrupt_exec_func_t)(void *data); enum rb_interrupt_exec_flag { rb_interrupt_exec_flag_none = 0x00, rb_interrupt_exec_flag_value_data = 0x01, + rb_interrupt_exec_flag_new_thread = 0x02, }; // interrupt the target_th and run func. diff --git a/internal/variable.h b/internal/variable.h index a0608b22d1..8da6c678a5 100644 --- a/internal/variable.h +++ b/internal/variable.h @@ -13,7 +13,7 @@ #include "constant.h" /* for rb_const_entry_t */ #include "ruby/internal/stdbool.h" /* for bool */ #include "ruby/ruby.h" /* for VALUE */ -#include "shape.h" /* for rb_shape_t */ +#include "shape.h" /* for shape_id_t */ /* variable.c */ void rb_gc_mark_global_tbl(void); diff --git a/lib/bundler/cli.rb b/lib/bundler/cli.rb index 51f71af501..c0c7d9f899 100644 --- a/lib/bundler/cli.rb +++ b/lib/bundler/cli.rb @@ -130,7 +130,7 @@ module Bundler if man_pages.include?(command) man_page = man_pages[command] - if Bundler.which("man") && !man_path.match?(%r{^file:/.+!/META-INF/jruby.home/.+}) + if Bundler.which("man") && !man_path.match?(%r{^(?:file:/.+!|uri:classloader:)/META-INF/jruby.home/.+}) Kernel.exec("man", man_page) else puts File.read("#{man_path}/#{File.basename(man_page)}.ronn") diff --git a/lib/bundler/cli/install.rb b/lib/bundler/cli/install.rb index b0b354cf10..94d485682d 100644 --- a/lib/bundler/cli/install.rb +++ b/lib/bundler/cli/install.rb @@ -66,7 +66,9 @@ module Bundler Plugin.gemfile_install(Bundler.default_gemfile) if Bundler.feature_flag.plugins? - definition = Bundler.definition + # For install we want to enable strict validation + # (rather than some optimizations we perform at app runtime). + definition = Bundler.definition(strict: true) definition.validate_runtime! installer = Installer.install(Bundler.root, definition, options) diff --git a/lib/bundler/definition.rb b/lib/bundler/definition.rb index 564589ebfa..32006af109 100644 --- a/lib/bundler/definition.rb +++ b/lib/bundler/definition.rb @@ -60,6 +60,7 @@ module Bundler if unlock == true @unlocking_all = true + strict = false @unlocking_bundler = false @unlocking = unlock @sources_to_unlock = [] @@ -68,6 +69,7 @@ module Bundler conservative = false else @unlocking_all = false + strict = unlock.delete(:strict) @unlocking_bundler = unlock.delete(:bundler) @unlocking = unlock.any? {|_k, v| !Array(v).empty? 
} @sources_to_unlock = unlock.delete(:sources) || [] @@ -97,7 +99,7 @@ module Bundler if lockfile_exists? @lockfile_contents = Bundler.read_file(lockfile) - @locked_gems = LockfileParser.new(@lockfile_contents) + @locked_gems = LockfileParser.new(@lockfile_contents, strict: strict) @locked_platforms = @locked_gems.platforms @most_specific_locked_platform = @locked_gems.most_specific_locked_platform @platforms = @locked_platforms.dup diff --git a/lib/bundler/lazy_specification.rb b/lib/bundler/lazy_specification.rb index 081cac48d2..81ded54797 100644 --- a/lib/bundler/lazy_specification.rb +++ b/lib/bundler/lazy_specification.rb @@ -33,7 +33,7 @@ module Bundler lazy_spec end - def initialize(name, version, platform, source = nil) + def initialize(name, version, platform, source = nil, **materialization_options) @name = name @version = version @dependencies = [] @@ -43,6 +43,7 @@ module Bundler @original_source = source @source = source + @materialization_options = materialization_options @force_ruby_platform = default_force_ruby_platform @most_specific_locked_platform = nil @@ -226,12 +227,13 @@ module Bundler # Validate dependencies of this locked spec are consistent with dependencies # of the actual spec that was materialized. # - # Note that we don't validate dependencies of locally installed gems but + # Note that unless we are in strict mode (which we set during installation) + # we don't validate dependencies of locally installed gems but # accept what's in the lockfile instead for performance, since loading # dependencies of locally installed gems would mean evaluating all gemspecs, # which would affect `bundler/setup` performance. def validate_dependencies(spec) - if spec.is_a?(StubSpecification) + if !@materialization_options[:strict] && spec.is_a?(StubSpecification) spec.dependencies = dependencies else if !source.is_a?(Source::Path) && spec.runtime_dependencies.sort != dependencies.sort diff --git a/lib/bundler/lockfile_parser.rb b/lib/bundler/lockfile_parser.rb index 94fe90eb2e..d00ba4cc10 100644 --- a/lib/bundler/lockfile_parser.rb +++ b/lib/bundler/lockfile_parser.rb @@ -94,7 +94,7 @@ module Bundler lockfile_contents.split(BUNDLED).last.strip end - def initialize(lockfile) + def initialize(lockfile, strict: false) @platforms = [] @sources = [] @dependencies = {} @@ -106,6 +106,7 @@ module Bundler "Gemfile.lock" end @pos = Position.new(1, 1) + @strict = strict if lockfile.match?(/<<<<<<<|=======|>>>>>>>|\|\|\|\|\|\|\|/) raise LockfileError, "Your #{@lockfile_path} contains merge conflicts.\n" \ @@ -286,7 +287,7 @@ module Bundler version = Gem::Version.new(version) platform = platform ? 
Gem::Platform.new(platform) : Gem::Platform::RUBY - @current_spec = LazySpecification.new(name, version, platform, @current_source) + @current_spec = LazySpecification.new(name, version, platform, @current_source, strict: @strict) @current_source.add_dependency_names(name) @specs[@current_spec.full_name] = @current_spec diff --git a/lib/bundler/man/bundle-add.1 b/lib/bundler/man/bundle-add.1 index d0c32fcb2a..5a27a70173 100644 --- a/lib/bundler/man/bundle-add.1 +++ b/lib/bundler/man/bundle-add.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-ADD" "1" "May 2025" "" +.TH "BUNDLE\-ADD" "1" "June 2025" "" .SH "NAME" \fBbundle\-add\fR \- Add gem to the Gemfile and run bundle install .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-binstubs.1 b/lib/bundler/man/bundle-binstubs.1 index 5e8cf0753a..3ab9584653 100644 --- a/lib/bundler/man/bundle-binstubs.1 +++ b/lib/bundler/man/bundle-binstubs.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-BINSTUBS" "1" "May 2025" "" +.TH "BUNDLE\-BINSTUBS" "1" "June 2025" "" .SH "NAME" \fBbundle\-binstubs\fR \- Install the binstubs of the listed gems .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-cache.1 b/lib/bundler/man/bundle-cache.1 index 44d5040f91..54cbd8ebc6 100644 --- a/lib/bundler/man/bundle-cache.1 +++ b/lib/bundler/man/bundle-cache.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-CACHE" "1" "May 2025" "" +.TH "BUNDLE\-CACHE" "1" "June 2025" "" .SH "NAME" \fBbundle\-cache\fR \- Package your needed \fB\.gem\fR files into your application .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-check.1 b/lib/bundler/man/bundle-check.1 index 3a5c02f702..122299a99b 100644 --- a/lib/bundler/man/bundle-check.1 +++ b/lib/bundler/man/bundle-check.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-CHECK" "1" "May 2025" "" +.TH "BUNDLE\-CHECK" "1" "June 2025" "" .SH "NAME" \fBbundle\-check\fR \- Verifies if dependencies are satisfied by installed gems .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-clean.1 b/lib/bundler/man/bundle-clean.1 index c23a3939b8..52e1096c18 100644 --- a/lib/bundler/man/bundle-clean.1 +++ b/lib/bundler/man/bundle-clean.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-CLEAN" "1" "May 2025" "" +.TH "BUNDLE\-CLEAN" "1" "June 2025" "" .SH "NAME" \fBbundle\-clean\fR \- Cleans up unused gems in your bundler directory .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-config.1 b/lib/bundler/man/bundle-config.1 index 5ce284113f..0c1a8a7609 100644 --- a/lib/bundler/man/bundle-config.1 +++ b/lib/bundler/man/bundle-config.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-CONFIG" "1" "May 2025" "" +.TH "BUNDLE\-CONFIG" "1" "June 2025" "" .SH "NAME" \fBbundle\-config\fR \- Set bundler configuration options .SH "SYNOPSIS" @@ -52,113 +52,165 @@ The canonical form of this configuration is \fB"without"\fR\. To convert the can Any periods in the configuration keys must be replaced with two underscores when setting it via environment variables\. The configuration key \fBlocal\.rack\fR becomes the environment variable \fBBUNDLE_LOCAL__RACK\fR\. .SH "LIST OF AVAILABLE KEYS" The following is a list of all configuration keys and their purpose\. 
You can learn more about their operation in bundle install(1) \fIbundle\-install\.1\.html\fR\. -.IP "\(bu" 4 -\fBallow_offline_install\fR (\fBBUNDLE_ALLOW_OFFLINE_INSTALL\fR): Allow Bundler to use cached data when installing without network access\. -.IP "\(bu" 4 -\fBauto_clean_without_path\fR (\fBBUNDLE_AUTO_CLEAN_WITHOUT_PATH\fR): Automatically run \fBbundle clean\fR after installing when an explicit \fBpath\fR has not been set and Bundler is not installing into the system gems\. -.IP "\(bu" 4 -\fBauto_install\fR (\fBBUNDLE_AUTO_INSTALL\fR): Automatically run \fBbundle install\fR when gems are missing\. -.IP "\(bu" 4 -\fBbin\fR (\fBBUNDLE_BIN\fR): Install executables from gems in the bundle to the specified directory\. Defaults to \fBfalse\fR\. -.IP "\(bu" 4 -\fBcache_all\fR (\fBBUNDLE_CACHE_ALL\fR): Cache all gems, including path and git gems\. This needs to be explicitly configured on bundler 1 and bundler 2, but will be the default on bundler 3\. -.IP "\(bu" 4 -\fBcache_all_platforms\fR (\fBBUNDLE_CACHE_ALL_PLATFORMS\fR): Cache gems for all platforms\. -.IP "\(bu" 4 -\fBcache_path\fR (\fBBUNDLE_CACHE_PATH\fR): The directory that bundler will place cached gems in when running \fBbundle package\fR, and that bundler will look in when installing gems\. Defaults to \fBvendor/cache\fR\. -.IP "\(bu" 4 -\fBclean\fR (\fBBUNDLE_CLEAN\fR): Whether Bundler should run \fBbundle clean\fR automatically after \fBbundle install\fR\. -.IP "\(bu" 4 -\fBconsole\fR (\fBBUNDLE_CONSOLE\fR): The console that \fBbundle console\fR starts\. Defaults to \fBirb\fR\. -.IP "\(bu" 4 -\fBdefault_install_uses_path\fR (\fBBUNDLE_DEFAULT_INSTALL_USES_PATH\fR): Whether a \fBbundle install\fR without an explicit \fB\-\-path\fR argument defaults to installing gems in \fB\.bundle\fR\. -.IP "\(bu" 4 -\fBdeployment\fR (\fBBUNDLE_DEPLOYMENT\fR): Equivalent to setting \fBfrozen\fR to \fBtrue\fR and \fBpath\fR to \fBvendor/bundle\fR\. -.IP "\(bu" 4 -\fBdisable_checksum_validation\fR (\fBBUNDLE_DISABLE_CHECKSUM_VALIDATION\fR): Allow installing gems even if they do not match the checksum provided by RubyGems\. -.IP "\(bu" 4 -\fBdisable_exec_load\fR (\fBBUNDLE_DISABLE_EXEC_LOAD\fR): Stop Bundler from using \fBload\fR to launch an executable in\-process in \fBbundle exec\fR\. -.IP "\(bu" 4 -\fBdisable_local_branch_check\fR (\fBBUNDLE_DISABLE_LOCAL_BRANCH_CHECK\fR): Allow Bundler to use a local git override without a branch specified in the Gemfile\. -.IP "\(bu" 4 -\fBdisable_local_revision_check\fR (\fBBUNDLE_DISABLE_LOCAL_REVISION_CHECK\fR): Allow Bundler to use a local git override without checking if the revision present in the lockfile is present in the repository\. -.IP "\(bu" 4 -\fBdisable_shared_gems\fR (\fBBUNDLE_DISABLE_SHARED_GEMS\fR): Stop Bundler from accessing gems installed to RubyGems' normal location\. -.IP "\(bu" 4 -\fBdisable_version_check\fR (\fBBUNDLE_DISABLE_VERSION_CHECK\fR): Stop Bundler from checking if a newer Bundler version is available on rubygems\.org\. -.IP "\(bu" 4 -\fBforce_ruby_platform\fR (\fBBUNDLE_FORCE_RUBY_PLATFORM\fR): Ignore the current machine's platform and install only \fBruby\fR platform gems\. As a result, gems with native extensions will be compiled from source\. -.IP "\(bu" 4 -\fBfrozen\fR (\fBBUNDLE_FROZEN\fR): Disallow any automatic changes to \fBGemfile\.lock\fR\. Bundler commands will be blocked unless the lockfile can be installed exactly as written\. 
Usually this will happen when changing the \fBGemfile\fR manually and forgetting to update the lockfile through \fBbundle lock\fR or \fBbundle install\fR\. -.IP "\(bu" 4 -\fBgem\.github_username\fR (\fBBUNDLE_GEM__GITHUB_USERNAME\fR): Sets a GitHub username or organization to be used in \fBREADME\fR file when you create a new gem via \fBbundle gem\fR command\. It can be overridden by passing an explicit \fB\-\-github\-username\fR flag to \fBbundle gem\fR\. -.IP "\(bu" 4 -\fBgem\.push_key\fR (\fBBUNDLE_GEM__PUSH_KEY\fR): Sets the \fB\-\-key\fR parameter for \fBgem push\fR when using the \fBrake release\fR command with a private gemstash server\. -.IP "\(bu" 4 -\fBgemfile\fR (\fBBUNDLE_GEMFILE\fR): The name of the file that bundler should use as the \fBGemfile\fR\. This location of this file also sets the root of the project, which is used to resolve relative paths in the \fBGemfile\fR, among other things\. By default, bundler will search up from the current working directory until it finds a \fBGemfile\fR\. -.IP "\(bu" 4 -\fBglobal_gem_cache\fR (\fBBUNDLE_GLOBAL_GEM_CACHE\fR): Whether Bundler should cache all gems globally, rather than locally to the installing Ruby installation\. -.IP "\(bu" 4 -\fBignore_funding_requests\fR (\fBBUNDLE_IGNORE_FUNDING_REQUESTS\fR): When set, no funding requests will be printed\. -.IP "\(bu" 4 -\fBignore_messages\fR (\fBBUNDLE_IGNORE_MESSAGES\fR): When set, no post install messages will be printed\. To silence a single gem, use dot notation like \fBignore_messages\.httparty true\fR\. -.IP "\(bu" 4 -\fBinit_gems_rb\fR (\fBBUNDLE_INIT_GEMS_RB\fR): Generate a \fBgems\.rb\fR instead of a \fBGemfile\fR when running \fBbundle init\fR\. -.IP "\(bu" 4 -\fBjobs\fR (\fBBUNDLE_JOBS\fR): The number of gems Bundler can install in parallel\. Defaults to the number of available processors\. -.IP "\(bu" 4 -\fBlockfile_checksums\fR (\fBBUNDLE_LOCKFILE_CHECKSUMS\fR): Whether Bundler should include a checksums section in new lockfiles, to protect from compromised gem sources\. -.IP "\(bu" 4 -\fBno_install\fR (\fBBUNDLE_NO_INSTALL\fR): Whether \fBbundle package\fR should skip installing gems\. -.IP "\(bu" 4 -\fBno_prune\fR (\fBBUNDLE_NO_PRUNE\fR): Whether Bundler should leave outdated gems unpruned when caching\. -.IP "\(bu" 4 -\fBonly\fR (\fBBUNDLE_ONLY\fR): A space\-separated list of groups to install only gems of the specified groups\. -.IP "\(bu" 4 -\fBpath\fR (\fBBUNDLE_PATH\fR): The location on disk where all gems in your bundle will be located regardless of \fB$GEM_HOME\fR or \fB$GEM_PATH\fR values\. Bundle gems not found in this location will be installed by \fBbundle install\fR\. Defaults to \fBGem\.dir\fR\. -.IP "\(bu" 4 -\fBpath\.system\fR (\fBBUNDLE_PATH__SYSTEM\fR): Whether Bundler will install gems into the default system path (\fBGem\.dir\fR)\. -.IP "\(bu" 4 -\fBpath_relative_to_cwd\fR (\fBBUNDLE_PATH_RELATIVE_TO_CWD\fR) Makes \fB\-\-path\fR relative to the CWD instead of the \fBGemfile\fR\. -.IP "\(bu" 4 -\fBplugins\fR (\fBBUNDLE_PLUGINS\fR): Enable Bundler's experimental plugin system\. -.IP "\(bu" 4 -\fBprefer_patch\fR (BUNDLE_PREFER_PATCH): Prefer updating only to next patch version during updates\. Makes \fBbundle update\fR calls equivalent to \fBbundler update \-\-patch\fR\. -.IP "\(bu" 4 -\fBprint_only_version_number\fR (\fBBUNDLE_PRINT_ONLY_VERSION_NUMBER\fR): Print only version number from \fBbundler \-\-version\fR\. -.IP "\(bu" 4 -\fBredirect\fR (\fBBUNDLE_REDIRECT\fR): The number of redirects allowed for network requests\. Defaults to \fB5\fR\. 
-.IP "\(bu" 4 -\fBretry\fR (\fBBUNDLE_RETRY\fR): The number of times to retry failed network requests\. Defaults to \fB3\fR\. -.IP "\(bu" 4 -\fBsetup_makes_kernel_gem_public\fR (\fBBUNDLE_SETUP_MAKES_KERNEL_GEM_PUBLIC\fR): Have \fBBundler\.setup\fR make the \fBKernel#gem\fR method public, even though RubyGems declares it as private\. -.IP "\(bu" 4 -\fBshebang\fR (\fBBUNDLE_SHEBANG\fR): The program name that should be invoked for generated binstubs\. Defaults to the ruby install name used to generate the binstub\. -.IP "\(bu" 4 -\fBsilence_deprecations\fR (\fBBUNDLE_SILENCE_DEPRECATIONS\fR): Whether Bundler should silence deprecation warnings for behavior that will be changed in the next major version\. -.IP "\(bu" 4 -\fBsilence_root_warning\fR (\fBBUNDLE_SILENCE_ROOT_WARNING\fR): Silence the warning Bundler prints when installing gems as root\. -.IP "\(bu" 4 -\fBssl_ca_cert\fR (\fBBUNDLE_SSL_CA_CERT\fR): Path to a designated CA certificate file or folder containing multiple certificates for trusted CAs in PEM format\. -.IP "\(bu" 4 -\fBssl_client_cert\fR (\fBBUNDLE_SSL_CLIENT_CERT\fR): Path to a designated file containing a X\.509 client certificate and key in PEM format\. -.IP "\(bu" 4 -\fBssl_verify_mode\fR (\fBBUNDLE_SSL_VERIFY_MODE\fR): The SSL verification mode Bundler uses when making HTTPS requests\. Defaults to verify peer\. -.IP "\(bu" 4 -\fBsystem_bindir\fR (\fBBUNDLE_SYSTEM_BINDIR\fR): The location where RubyGems installs binstubs\. Defaults to \fBGem\.bindir\fR\. -.IP "\(bu" 4 -\fBtimeout\fR (\fBBUNDLE_TIMEOUT\fR): The seconds allowed before timing out for network requests\. Defaults to \fB10\fR\. -.IP "\(bu" 4 -\fBupdate_requires_all_flag\fR (\fBBUNDLE_UPDATE_REQUIRES_ALL_FLAG\fR): Require passing \fB\-\-all\fR to \fBbundle update\fR when everything should be updated, and disallow passing no options to \fBbundle update\fR\. -.IP "\(bu" 4 -\fBuser_agent\fR (\fBBUNDLE_USER_AGENT\fR): The custom user agent fragment Bundler includes in API requests\. -.IP "\(bu" 4 -\fBversion\fR (\fBBUNDLE_VERSION\fR): The version of Bundler to use when running under Bundler environment\. Defaults to \fBlockfile\fR\. You can also specify \fBsystem\fR or \fBx\.y\.z\fR\. \fBlockfile\fR will use the Bundler version specified in the \fBGemfile\.lock\fR, \fBsystem\fR will use the system version of Bundler, and \fBx\.y\.z\fR will use the specified version of Bundler\. -.IP "\(bu" 4 -\fBwith\fR (\fBBUNDLE_WITH\fR): A space\-separated or \fB:\fR\-separated list of groups whose gems bundler should install\. -.IP "\(bu" 4 -\fBwithout\fR (\fBBUNDLE_WITHOUT\fR): A space\-separated or \fB:\fR\-separated list of groups whose gems bundler should not install\. -.IP "" 0 +.TP +\fBallow_offline_install\fR (\fBBUNDLE_ALLOW_OFFLINE_INSTALL\fR) +Allow Bundler to use cached data when installing without network access\. +.TP +\fBauto_clean_without_path\fR (\fBBUNDLE_AUTO_CLEAN_WITHOUT_PATH\fR) +Automatically run \fBbundle clean\fR after installing when an explicit \fBpath\fR has not been set and Bundler is not installing into the system gems\. +.TP +\fBauto_install\fR (\fBBUNDLE_AUTO_INSTALL\fR) +Automatically run \fBbundle install\fR when gems are missing\. +.TP +\fBbin\fR (\fBBUNDLE_BIN\fR) +Install executables from gems in the bundle to the specified directory\. Defaults to \fBfalse\fR\. +.TP +\fBcache_all\fR (\fBBUNDLE_CACHE_ALL\fR) +Cache all gems, including path and git gems\. This needs to be explicitly configured on bundler 1 and bundler 2, but will be the default on bundler 3\. 
+.TP +\fBcache_all_platforms\fR (\fBBUNDLE_CACHE_ALL_PLATFORMS\fR) +Cache gems for all platforms\. +.TP +\fBcache_path\fR (\fBBUNDLE_CACHE_PATH\fR) +The directory that bundler will place cached gems in when running \fBbundle package\fR, and that bundler will look in when installing gems\. Defaults to \fBvendor/cache\fR\. +.TP +\fBclean\fR (\fBBUNDLE_CLEAN\fR) +Whether Bundler should run \fBbundle clean\fR automatically after \fBbundle install\fR\. +.TP +\fBconsole\fR (\fBBUNDLE_CONSOLE\fR) +The console that \fBbundle console\fR starts\. Defaults to \fBirb\fR\. +.TP +\fBdefault_install_uses_path\fR (\fBBUNDLE_DEFAULT_INSTALL_USES_PATH\fR) +Whether a \fBbundle install\fR without an explicit \fB\-\-path\fR argument defaults to installing gems in \fB\.bundle\fR\. +.TP +\fBdeployment\fR (\fBBUNDLE_DEPLOYMENT\fR) +Equivalent to setting \fBfrozen\fR to \fBtrue\fR and \fBpath\fR to \fBvendor/bundle\fR\. +.TP +\fBdisable_checksum_validation\fR (\fBBUNDLE_DISABLE_CHECKSUM_VALIDATION\fR) +Allow installing gems even if they do not match the checksum provided by RubyGems\. +.TP +\fBdisable_exec_load\fR (\fBBUNDLE_DISABLE_EXEC_LOAD\fR) +Stop Bundler from using \fBload\fR to launch an executable in\-process in \fBbundle exec\fR\. +.TP +\fBdisable_local_branch_check\fR (\fBBUNDLE_DISABLE_LOCAL_BRANCH_CHECK\fR) +Allow Bundler to use a local git override without a branch specified in the Gemfile\. +.TP +\fBdisable_local_revision_check\fR (\fBBUNDLE_DISABLE_LOCAL_REVISION_CHECK\fR) +Allow Bundler to use a local git override without checking if the revision present in the lockfile is present in the repository\. +.TP +\fBdisable_shared_gems\fR (\fBBUNDLE_DISABLE_SHARED_GEMS\fR) +Stop Bundler from accessing gems installed to RubyGems' normal location\. +.TP +\fBdisable_version_check\fR (\fBBUNDLE_DISABLE_VERSION_CHECK\fR) +Stop Bundler from checking if a newer Bundler version is available on rubygems\.org\. +.TP +\fBforce_ruby_platform\fR (\fBBUNDLE_FORCE_RUBY_PLATFORM\fR) +Ignore the current machine's platform and install only \fBruby\fR platform gems\. As a result, gems with native extensions will be compiled from source\. +.TP +\fBfrozen\fR (\fBBUNDLE_FROZEN\fR) +Disallow any automatic changes to \fBGemfile\.lock\fR\. Bundler commands will be blocked unless the lockfile can be installed exactly as written\. Usually this will happen when changing the \fBGemfile\fR manually and forgetting to update the lockfile through \fBbundle lock\fR or \fBbundle install\fR\. +.TP +\fBgem\.github_username\fR (\fBBUNDLE_GEM__GITHUB_USERNAME\fR) +Sets a GitHub username or organization to be used in \fBREADME\fR file when you create a new gem via \fBbundle gem\fR command\. It can be overridden by passing an explicit \fB\-\-github\-username\fR flag to \fBbundle gem\fR\. +.TP +\fBgem\.push_key\fR (\fBBUNDLE_GEM__PUSH_KEY\fR) +Sets the \fB\-\-key\fR parameter for \fBgem push\fR when using the \fBrake release\fR command with a private gemstash server\. +.TP +\fBgemfile\fR (\fBBUNDLE_GEMFILE\fR) +The name of the file that bundler should use as the \fBGemfile\fR\. This location of this file also sets the root of the project, which is used to resolve relative paths in the \fBGemfile\fR, among other things\. By default, bundler will search up from the current working directory until it finds a \fBGemfile\fR\. +.TP +\fBglobal_gem_cache\fR (\fBBUNDLE_GLOBAL_GEM_CACHE\fR) +Whether Bundler should cache all gems globally, rather than locally to the installing Ruby installation\. 
+.TP +\fBignore_funding_requests\fR (\fBBUNDLE_IGNORE_FUNDING_REQUESTS\fR) +When set, no funding requests will be printed\. +.TP +\fBignore_messages\fR (\fBBUNDLE_IGNORE_MESSAGES\fR) +When set, no post install messages will be printed\. To silence a single gem, use dot notation like \fBignore_messages\.httparty true\fR\. +.TP +\fBinit_gems_rb\fR (\fBBUNDLE_INIT_GEMS_RB\fR) +Generate a \fBgems\.rb\fR instead of a \fBGemfile\fR when running \fBbundle init\fR\. +.TP +\fBjobs\fR (\fBBUNDLE_JOBS\fR) +The number of gems Bundler can install in parallel\. Defaults to the number of available processors\. +.TP +\fBlockfile_checksums\fR (\fBBUNDLE_LOCKFILE_CHECKSUMS\fR) +Whether Bundler should include a checksums section in new lockfiles, to protect from compromised gem sources\. +.TP +\fBno_install\fR (\fBBUNDLE_NO_INSTALL\fR) +Whether \fBbundle package\fR should skip installing gems\. +.TP +\fBno_prune\fR (\fBBUNDLE_NO_PRUNE\fR) +Whether Bundler should leave outdated gems unpruned when caching\. +.TP +\fBonly\fR (\fBBUNDLE_ONLY\fR) +A space\-separated list of groups to install only gems of the specified groups\. +.TP +\fBpath\fR (\fBBUNDLE_PATH\fR) +The location on disk where all gems in your bundle will be located regardless of \fB$GEM_HOME\fR or \fB$GEM_PATH\fR values\. Bundle gems not found in this location will be installed by \fBbundle install\fR\. Defaults to \fBGem\.dir\fR\. +.TP +\fBpath\.system\fR (\fBBUNDLE_PATH__SYSTEM\fR) +Whether Bundler will install gems into the default system path (\fBGem\.dir\fR)\. +.TP +\fBpath_relative_to_cwd\fR (\fBBUNDLE_PATH_RELATIVE_TO_CWD\fR) +Makes \fB\-\-path\fR relative to the CWD instead of the \fBGemfile\fR\. +.TP +\fBplugins\fR (\fBBUNDLE_PLUGINS\fR) +Enable Bundler's experimental plugin system\. +.TP +\fBprefer_patch\fR (BUNDLE_PREFER_PATCH) +Prefer updating only to next patch version during updates\. Makes \fBbundle update\fR calls equivalent to \fBbundler update \-\-patch\fR\. +.TP +\fBprint_only_version_number\fR (\fBBUNDLE_PRINT_ONLY_VERSION_NUMBER\fR) +Print only version number from \fBbundler \-\-version\fR\. +.TP +\fBredirect\fR (\fBBUNDLE_REDIRECT\fR) +The number of redirects allowed for network requests\. Defaults to \fB5\fR\. +.TP +\fBretry\fR (\fBBUNDLE_RETRY\fR) +The number of times to retry failed network requests\. Defaults to \fB3\fR\. +.TP +\fBsetup_makes_kernel_gem_public\fR (\fBBUNDLE_SETUP_MAKES_KERNEL_GEM_PUBLIC\fR) +Have \fBBundler\.setup\fR make the \fBKernel#gem\fR method public, even though RubyGems declares it as private\. +.TP +\fBshebang\fR (\fBBUNDLE_SHEBANG\fR) +The program name that should be invoked for generated binstubs\. Defaults to the ruby install name used to generate the binstub\. +.TP +\fBsilence_deprecations\fR (\fBBUNDLE_SILENCE_DEPRECATIONS\fR) +Whether Bundler should silence deprecation warnings for behavior that will be changed in the next major version\. +.TP +\fBsilence_root_warning\fR (\fBBUNDLE_SILENCE_ROOT_WARNING\fR) +Silence the warning Bundler prints when installing gems as root\. +.TP +\fBssl_ca_cert\fR (\fBBUNDLE_SSL_CA_CERT\fR) +Path to a designated CA certificate file or folder containing multiple certificates for trusted CAs in PEM format\. +.TP +\fBssl_client_cert\fR (\fBBUNDLE_SSL_CLIENT_CERT\fR) +Path to a designated file containing a X\.509 client certificate and key in PEM format\. +.TP +\fBssl_verify_mode\fR (\fBBUNDLE_SSL_VERIFY_MODE\fR) +The SSL verification mode Bundler uses when making HTTPS requests\. Defaults to verify peer\. 
+.TP +\fBsystem_bindir\fR (\fBBUNDLE_SYSTEM_BINDIR\fR) +The location where RubyGems installs binstubs\. Defaults to \fBGem\.bindir\fR\. +.TP +\fBtimeout\fR (\fBBUNDLE_TIMEOUT\fR) +The seconds allowed before timing out for network requests\. Defaults to \fB10\fR\. +.TP +\fBupdate_requires_all_flag\fR (\fBBUNDLE_UPDATE_REQUIRES_ALL_FLAG\fR) +Require passing \fB\-\-all\fR to \fBbundle update\fR when everything should be updated, and disallow passing no options to \fBbundle update\fR\. +.TP +\fBuser_agent\fR (\fBBUNDLE_USER_AGENT\fR) +The custom user agent fragment Bundler includes in API requests\. +.TP +\fBversion\fR (\fBBUNDLE_VERSION\fR) +The version of Bundler to use when running under Bundler environment\. Defaults to \fBlockfile\fR\. You can also specify \fBsystem\fR or \fBx\.y\.z\fR\. \fBlockfile\fR will use the Bundler version specified in the \fBGemfile\.lock\fR, \fBsystem\fR will use the system version of Bundler, and \fBx\.y\.z\fR will use the specified version of Bundler\. +.TP +\fBwith\fR (\fBBUNDLE_WITH\fR) +A space\-separated or \fB:\fR\-separated list of groups whose gems bundler should install\. +.TP +\fBwithout\fR (\fBBUNDLE_WITHOUT\fR) +A space\-separated or \fB:\fR\-separated list of groups whose gems bundler should not install\. .SH "REMEMBERING OPTIONS" Flags passed to \fBbundle install\fR or the Bundler runtime, such as \fB\-\-path foo\fR or \fB\-\-without production\fR, are remembered between commands and saved to your local application's configuration (normally, \fB\./\.bundle/config\fR)\. .P diff --git a/lib/bundler/man/bundle-config.1.ronn b/lib/bundler/man/bundle-config.1.ronn index fef8f2d26b..9750a5ae4c 100644 --- a/lib/bundler/man/bundle-config.1.ronn +++ b/lib/bundler/man/bundle-config.1.ronn @@ -171,7 +171,7 @@ learn more about their operation in [bundle install(1)](bundle-install.1.html). will be installed by `bundle install`. Defaults to `Gem.dir`. * `path.system` (`BUNDLE_PATH__SYSTEM`): Whether Bundler will install gems into the default system path (`Gem.dir`). -* `path_relative_to_cwd` (`BUNDLE_PATH_RELATIVE_TO_CWD`) +* `path_relative_to_cwd` (`BUNDLE_PATH_RELATIVE_TO_CWD`): Makes `--path` relative to the CWD instead of the `Gemfile`. * `plugins` (`BUNDLE_PLUGINS`): Enable Bundler's experimental plugin system. 
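The bundle-config(1) pages above describe how configuration keys map to `BUNDLE_*` environment variables: the key is upcased, prefixed with `BUNDLE_`, and any periods become two underscores. A minimal Ruby sketch of that naming convention (illustrative only; `bundle_env_var_name` is a hypothetical helper, not part of Bundler's API):

```ruby
# Illustrative sketch of the key-to-environment-variable convention described
# in bundle-config(1): prefix with BUNDLE_, replace periods with two
# underscores, and upcase the result.
def bundle_env_var_name(key)
  "BUNDLE_#{key.gsub(".", "__").upcase}"
end

bundle_env_var_name("local.rack")  # => "BUNDLE_LOCAL__RACK"
bundle_env_var_name("path.system") # => "BUNDLE_PATH__SYSTEM"
bundle_env_var_name("without")     # => "BUNDLE_WITHOUT"
```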
diff --git a/lib/bundler/man/bundle-console.1 b/lib/bundler/man/bundle-console.1 index b83d1c4dad..1dd6b278de 100644 --- a/lib/bundler/man/bundle-console.1 +++ b/lib/bundler/man/bundle-console.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-CONSOLE" "1" "May 2025" "" +.TH "BUNDLE\-CONSOLE" "1" "June 2025" "" .SH "NAME" \fBbundle\-console\fR \- Open an IRB session with the bundle pre\-loaded .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-doctor.1 b/lib/bundler/man/bundle-doctor.1 index fed818cfaf..0cf01e02e9 100644 --- a/lib/bundler/man/bundle-doctor.1 +++ b/lib/bundler/man/bundle-doctor.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-DOCTOR" "1" "May 2025" "" +.TH "BUNDLE\-DOCTOR" "1" "June 2025" "" .SH "NAME" \fBbundle\-doctor\fR \- Checks the bundle for common problems .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-env.1 b/lib/bundler/man/bundle-env.1 index 34631206ed..167d902c99 100644 --- a/lib/bundler/man/bundle-env.1 +++ b/lib/bundler/man/bundle-env.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-ENV" "1" "May 2025" "" +.TH "BUNDLE\-ENV" "1" "June 2025" "" .SH "NAME" \fBbundle\-env\fR \- Print information about the environment Bundler is running under .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-exec.1 b/lib/bundler/man/bundle-exec.1 index abce4f0112..062944b3ca 100644 --- a/lib/bundler/man/bundle-exec.1 +++ b/lib/bundler/man/bundle-exec.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-EXEC" "1" "May 2025" "" +.TH "BUNDLE\-EXEC" "1" "June 2025" "" .SH "NAME" \fBbundle\-exec\fR \- Execute a command in the context of the bundle .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-fund.1 b/lib/bundler/man/bundle-fund.1 index e79d38a2af..131b0e9d2d 100644 --- a/lib/bundler/man/bundle-fund.1 +++ b/lib/bundler/man/bundle-fund.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-FUND" "1" "May 2025" "" +.TH "BUNDLE\-FUND" "1" "June 2025" "" .SH "NAME" \fBbundle\-fund\fR \- Lists information about gems seeking funding assistance .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-gem.1 b/lib/bundler/man/bundle-gem.1 index ae6f9f7f8a..d4aacfe4fb 100644 --- a/lib/bundler/man/bundle-gem.1 +++ b/lib/bundler/man/bundle-gem.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-GEM" "1" "May 2025" "" +.TH "BUNDLE\-GEM" "1" "June 2025" "" .SH "NAME" \fBbundle\-gem\fR \- Generate a project skeleton for creating a rubygem .SH "SYNOPSIS" @@ -19,67 +19,84 @@ The generated project skeleton can be customized with OPTIONS, as explained belo \fBgem\.test\fR .IP "" 0 .SH "OPTIONS" -.IP "\(bu" 4 -\fB\-\-exe\fR, \fB\-\-bin\fR, \fB\-b\fR: Specify that Bundler should create a binary executable (as \fBexe/GEM_NAME\fR) in the generated rubygem project\. This binary will also be added to the \fBGEM_NAME\.gemspec\fR manifest\. This behavior is disabled by default\. -.IP "\(bu" 4 -\fB\-\-no\-exe\fR: Do not create a binary (overrides \fB\-\-exe\fR specified in the global config)\. -.IP "\(bu" 4 -\fB\-\-coc\fR: Add a \fBCODE_OF_CONDUCT\.md\fR file to the root of the generated project\. 
If this option is unspecified, an interactive prompt will be displayed and the answer will be saved in Bundler's global config for future \fBbundle gem\fR use\. -.IP "\(bu" 4 -\fB\-\-no\-coc\fR: Do not create a \fBCODE_OF_CONDUCT\.md\fR (overrides \fB\-\-coc\fR specified in the global config)\. -.IP "\(bu" 4 -\fB\-\-changelog\fR Add a \fBCHANGELOG\.md\fR file to the root of the generated project\. If this option is unspecified, an interactive prompt will be displayed and the answer will be saved in Bundler's global config for future \fBbundle gem\fR use\. -.IP "\(bu" 4 -\fB\-\-no\-changelog\fR: Do not create a \fBCHANGELOG\.md\fR (overrides \fB\-\-changelog\fR specified in the global config)\. -.IP "\(bu" 4 -\fB\-\-ext=c\fR, \fB\-\-ext=rust\fR: Add boilerplate for C or Rust (currently magnus \fIhttps://docs\.rs/magnus\fR based) extension code to the generated project\. This behavior is disabled by default\. -.IP "\(bu" 4 -\fB\-\-no\-ext\fR: Do not add extension code (overrides \fB\-\-ext\fR specified in the global config)\. -.IP "\(bu" 4 -\fB\-\-git\fR: Initialize a git repo inside your library\. -.IP "\(bu" 4 -\fB\-\-github\-username=GITHUB_USERNAME\fR: Fill in GitHub username on README so that you don't have to do it manually\. Set a default with \fBbundle config set \-\-global gem\.github_username <your_username>\fR\. -.IP "\(bu" 4 -\fB\-\-mit\fR: Add an MIT license to a \fBLICENSE\.txt\fR file in the root of the generated project\. Your name from the global git config is used for the copyright statement\. If this option is unspecified, an interactive prompt will be displayed and the answer will be saved in Bundler's global config for future \fBbundle gem\fR use\. -.IP "\(bu" 4 -\fB\-\-no\-mit\fR: Do not create a \fBLICENSE\.txt\fR (overrides \fB\-\-mit\fR specified in the global config)\. -.IP "\(bu" 4 -\fB\-t\fR, \fB\-\-test=minitest\fR, \fB\-\-test=rspec\fR, \fB\-\-test=test\-unit\fR: Specify the test framework that Bundler should use when generating the project\. Acceptable values are \fBminitest\fR, \fBrspec\fR and \fBtest\-unit\fR\. The \fBGEM_NAME\.gemspec\fR will be configured and a skeleton test/spec directory will be created based on this option\. Given no option is specified: +.TP +\fB\-\-exe\fR, \fB\-\-bin\fR, \fB\-b\fR +Specify that Bundler should create a binary executable (as \fBexe/GEM_NAME\fR) in the generated rubygem project\. This binary will also be added to the \fBGEM_NAME\.gemspec\fR manifest\. This behavior is disabled by default\. +.TP +\fB\-\-no\-exe\fR +Do not create a binary (overrides \fB\-\-exe\fR specified in the global config)\. +.TP +\fB\-\-coc\fR +Add a \fBCODE_OF_CONDUCT\.md\fR file to the root of the generated project\. If this option is unspecified, an interactive prompt will be displayed and the answer will be saved in Bundler's global config for future \fBbundle gem\fR use\. +.TP +\fB\-\-no\-coc\fR +Do not create a \fBCODE_OF_CONDUCT\.md\fR (overrides \fB\-\-coc\fR specified in the global config)\. +.TP +\fB\-\-changelog\fR +Add a \fBCHANGELOG\.md\fR file to the root of the generated project\. If this option is unspecified, an interactive prompt will be displayed and the answer will be saved in Bundler's global config for future \fBbundle gem\fR use\. Update the default with \fBbundle config set \-\-global gem\.changelog <true|false>\fR\. +.TP +\fB\-\-no\-changelog\fR +Do not create a \fBCHANGELOG\.md\fR (overrides \fB\-\-changelog\fR specified in the global config)\. 
+.TP +\fB\-\-ext=c\fR, \fB\-\-ext=rust\fR +Add boilerplate for C or Rust (currently magnus \fIhttps://docs\.rs/magnus\fR based) extension code to the generated project\. This behavior is disabled by default\. +.TP +\fB\-\-no\-ext\fR +Do not add extension code (overrides \fB\-\-ext\fR specified in the global config)\. +.TP +\fB\-\-git\fR +Initialize a git repo inside your library\. +.TP +\fB\-\-github\-username=GITHUB_USERNAME\fR +Fill in GitHub username on README so that you don't have to do it manually\. Set a default with \fBbundle config set \-\-global gem\.github_username <your_username>\fR\. +.TP +\fB\-\-mit\fR +Add an MIT license to a \fBLICENSE\.txt\fR file in the root of the generated project\. Your name from the global git config is used for the copyright statement\. If this option is unspecified, an interactive prompt will be displayed and the answer will be saved in Bundler's global config for future \fBbundle gem\fR use\. +.TP +\fB\-\-no\-mit\fR +Do not create a \fBLICENSE\.txt\fR (overrides \fB\-\-mit\fR specified in the global config)\. +.TP +\fB\-t\fR, \fB\-\-test=minitest\fR, \fB\-\-test=rspec\fR, \fB\-\-test=test\-unit\fR +Specify the test framework that Bundler should use when generating the project\. Acceptable values are \fBminitest\fR, \fBrspec\fR and \fBtest\-unit\fR\. The \fBGEM_NAME\.gemspec\fR will be configured and a skeleton test/spec directory will be created based on this option\. Given no option is specified: .IP When Bundler is configured to generate tests, this defaults to Bundler's global config setting \fBgem\.test\fR\. .IP When Bundler is configured to not generate tests, an interactive prompt will be displayed and the answer will be used for the current rubygem project\. .IP When Bundler is unconfigured, an interactive prompt will be displayed and the answer will be saved in Bundler's global config for future \fBbundle gem\fR use\. -.IP "\(bu" 4 -\fB\-\-no\-test\fR: Do not use a test framework (overrides \fB\-\-test\fR specified in the global config)\. -.IP "\(bu" 4 -\fB\-\-changelog\fR: Generate changelog file\. Set a default with \fBbundle config set \-\-global gem\.changelog true\fR\. -.IP "\(bu" 4 -\fB\-\-ci\fR, \fB\-\-ci=circle\fR, \fB\-\-ci=github\fR, \fB\-\-ci=gitlab\fR: Specify the continuous integration service that Bundler should use when generating the project\. Acceptable values are \fBgithub\fR, \fBgitlab\fR and \fBcircle\fR\. A configuration file will be generated in the project directory\. Given no option is specified: +.TP +\fB\-\-no\-test\fR +Do not use a test framework (overrides \fB\-\-test\fR specified in the global config)\. +.TP +\fB\-\-ci\fR, \fB\-\-ci=circle\fR, \fB\-\-ci=github\fR, \fB\-\-ci=gitlab\fR +Specify the continuous integration service that Bundler should use when generating the project\. Acceptable values are \fBgithub\fR, \fBgitlab\fR and \fBcircle\fR\. A configuration file will be generated in the project directory\. Given no option is specified: .IP When Bundler is configured to generate CI files, this defaults to Bundler's global config setting \fBgem\.ci\fR\. .IP When Bundler is configured to not generate CI files, an interactive prompt will be displayed and the answer will be used for the current rubygem project\. .IP When Bundler is unconfigured, an interactive prompt will be displayed and the answer will be saved in Bundler's global config for future \fBbundle gem\fR use\. -.IP "\(bu" 4 -\fB\-\-no\-ci\fR: Do not use a continuous integration service (overrides \fB\-\-ci\fR specified in the global config)\. 
-.IP "\(bu" 4 -\fB\-\-linter\fR, \fB\-\-linter=rubocop\fR, \fB\-\-linter=standard\fR: Specify the linter and code formatter that Bundler should add to the project's development dependencies\. Acceptable values are \fBrubocop\fR and \fBstandard\fR\. A configuration file will be generated in the project directory\. Given no option is specified: +.TP +\fB\-\-no\-ci\fR +Do not use a continuous integration service (overrides \fB\-\-ci\fR specified in the global config)\. +.TP +\fB\-\-linter\fR, \fB\-\-linter=rubocop\fR, \fB\-\-linter=standard\fR +Specify the linter and code formatter that Bundler should add to the project's development dependencies\. Acceptable values are \fBrubocop\fR and \fBstandard\fR\. A configuration file will be generated in the project directory\. Given no option is specified: .IP When Bundler is configured to add a linter, this defaults to Bundler's global config setting \fBgem\.linter\fR\. .IP When Bundler is configured not to add a linter, an interactive prompt will be displayed and the answer will be used for the current rubygem project\. .IP When Bundler is unconfigured, an interactive prompt will be displayed and the answer will be saved in Bundler's global config for future \fBbundle gem\fR use\. -.IP "\(bu" 4 -\fB\-\-no\-linter\fR: Do not add a linter (overrides \fB\-\-linter\fR specified in the global config)\. -.IP "\(bu" 4 -\fB\-\-rubocop\fR: Add rubocop to the generated Rakefile and gemspec\. Set a default with \fBbundle config set \-\-global gem\.rubocop true\fR\. -.IP "\(bu" 4 -\fB\-\-edit=EDIT\fR, \fB\-e=EDIT\fR: Open the resulting GEM_NAME\.gemspec in EDIT, or the default editor if not specified\. The default is \fB$BUNDLER_EDITOR\fR, \fB$VISUAL\fR, or \fB$EDITOR\fR\. -.IP "" 0 +.TP +\fB\-\-no\-linter\fR +Do not add a linter (overrides \fB\-\-linter\fR specified in the global config)\. +.TP +\fB\-\-rubocop\fR +Add rubocop to the generated Rakefile and gemspec\. Set a default with \fBbundle config set \-\-global gem\.rubocop true\fR\. +.TP +\fB\-\-edit=EDIT\fR, \fB\-e=EDIT\fR +Open the resulting GEM_NAME\.gemspec in EDIT, or the default editor if not specified\. The default is \fB$BUNDLER_EDITOR\fR, \fB$VISUAL\fR, or \fB$EDITOR\fR\. .SH "SEE ALSO" .IP "\(bu" 4 bundle config(1) \fIbundle\-config\.1\.html\fR diff --git a/lib/bundler/man/bundle-gem.1.ronn b/lib/bundler/man/bundle-gem.1.ronn index 13dc55c310..049e0072aa 100644 --- a/lib/bundler/man/bundle-gem.1.ronn +++ b/lib/bundler/man/bundle-gem.1.ronn @@ -41,10 +41,11 @@ configuration file using the following names: Do not create a `CODE_OF_CONDUCT.md` (overrides `--coc` specified in the global config). -* `--changelog` +* `--changelog`: Add a `CHANGELOG.md` file to the root of the generated project. If this option is unspecified, an interactive prompt will be displayed and the answer will be saved in Bundler's global config for future `bundle gem` use. + Update the default with `bundle config set --global gem.changelog <true|false>`. * `--no-changelog`: Do not create a `CHANGELOG.md` (overrides `--changelog` specified in the @@ -95,9 +96,6 @@ configuration file using the following names: Do not use a test framework (overrides `--test` specified in the global config). -* `--changelog`: - Generate changelog file. Set a default with `bundle config set --global gem.changelog true`. - * `--ci`, `--ci=circle`, `--ci=github`, `--ci=gitlab`: Specify the continuous integration service that Bundler should use when generating the project. 
Acceptable values are `github`, `gitlab` diff --git a/lib/bundler/man/bundle-help.1 b/lib/bundler/man/bundle-help.1 index 1af5a663d8..f24b050d37 100644 --- a/lib/bundler/man/bundle-help.1 +++ b/lib/bundler/man/bundle-help.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-HELP" "1" "May 2025" "" +.TH "BUNDLE\-HELP" "1" "June 2025" "" .SH "NAME" \fBbundle\-help\fR \- Displays detailed help for each subcommand .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-info.1 b/lib/bundler/man/bundle-info.1 index 30ab4cbeb4..82f39ebd0c 100644 --- a/lib/bundler/man/bundle-info.1 +++ b/lib/bundler/man/bundle-info.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-INFO" "1" "May 2025" "" +.TH "BUNDLE\-INFO" "1" "June 2025" "" .SH "NAME" \fBbundle\-info\fR \- Show information for the given gem in your bundle .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-init.1 b/lib/bundler/man/bundle-init.1 index 876c1f65a2..4571e09718 100644 --- a/lib/bundler/man/bundle-init.1 +++ b/lib/bundler/man/bundle-init.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-INIT" "1" "May 2025" "" +.TH "BUNDLE\-INIT" "1" "June 2025" "" .SH "NAME" \fBbundle\-init\fR \- Generates a Gemfile into the current working directory .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-inject.1 b/lib/bundler/man/bundle-inject.1 index 1433e7105d..acdf22a909 100644 --- a/lib/bundler/man/bundle-inject.1 +++ b/lib/bundler/man/bundle-inject.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-INJECT" "1" "May 2025" "" +.TH "BUNDLE\-INJECT" "1" "June 2025" "" .SH "NAME" \fBbundle\-inject\fR \- Add named gem(s) with version requirements to Gemfile .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-install.1 b/lib/bundler/man/bundle-install.1 index 4cd21c34cb..67a8df96fe 100644 --- a/lib/bundler/man/bundle-install.1 +++ b/lib/bundler/man/bundle-install.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-INSTALL" "1" "May 2025" "" +.TH "BUNDLE\-INSTALL" "1" "June 2025" "" .SH "NAME" \fBbundle\-install\fR \- Install the dependencies specified in your Gemfile .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-issue.1 b/lib/bundler/man/bundle-issue.1 index ee8bcc2749..62973e9892 100644 --- a/lib/bundler/man/bundle-issue.1 +++ b/lib/bundler/man/bundle-issue.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-ISSUE" "1" "May 2025" "" +.TH "BUNDLE\-ISSUE" "1" "June 2025" "" .SH "NAME" \fBbundle\-issue\fR \- Get help reporting Bundler issues .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-licenses.1 b/lib/bundler/man/bundle-licenses.1 index 4fd952e887..75e2b93d35 100644 --- a/lib/bundler/man/bundle-licenses.1 +++ b/lib/bundler/man/bundle-licenses.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-LICENSES" "1" "May 2025" "" +.TH "BUNDLE\-LICENSES" "1" "June 2025" "" .SH "NAME" \fBbundle\-licenses\fR \- Print the license of all gems in the bundle .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-list.1 b/lib/bundler/man/bundle-list.1 index cd6234797c..ed4e09e48e 100644 --- a/lib/bundler/man/bundle-list.1 +++ b/lib/bundler/man/bundle-list.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" 
http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-LIST" "1" "May 2025" "" +.TH "BUNDLE\-LIST" "1" "June 2025" "" .SH "NAME" \fBbundle\-list\fR \- List all the gems in the bundle .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-lock.1 b/lib/bundler/man/bundle-lock.1 index c76c3e4233..0d78414aa4 100644 --- a/lib/bundler/man/bundle-lock.1 +++ b/lib/bundler/man/bundle-lock.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-LOCK" "1" "May 2025" "" +.TH "BUNDLE\-LOCK" "1" "June 2025" "" .SH "NAME" \fBbundle\-lock\fR \- Creates / Updates a lockfile without installing .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-open.1 b/lib/bundler/man/bundle-open.1 index 0e283e577f..b3016a5bbd 100644 --- a/lib/bundler/man/bundle-open.1 +++ b/lib/bundler/man/bundle-open.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-OPEN" "1" "May 2025" "" +.TH "BUNDLE\-OPEN" "1" "June 2025" "" .SH "NAME" \fBbundle\-open\fR \- Opens the source directory for a gem in your bundle .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-outdated.1 b/lib/bundler/man/bundle-outdated.1 index 616c1201ef..f98038ce69 100644 --- a/lib/bundler/man/bundle-outdated.1 +++ b/lib/bundler/man/bundle-outdated.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-OUTDATED" "1" "May 2025" "" +.TH "BUNDLE\-OUTDATED" "1" "June 2025" "" .SH "NAME" \fBbundle\-outdated\fR \- List installed gems with newer versions available .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-platform.1 b/lib/bundler/man/bundle-platform.1 index 47fdbf89d9..e9c40b8556 100644 --- a/lib/bundler/man/bundle-platform.1 +++ b/lib/bundler/man/bundle-platform.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-PLATFORM" "1" "May 2025" "" +.TH "BUNDLE\-PLATFORM" "1" "June 2025" "" .SH "NAME" \fBbundle\-platform\fR \- Displays platform compatibility information .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-plugin.1 b/lib/bundler/man/bundle-plugin.1 index e7650760f4..c1f95b05c6 100644 --- a/lib/bundler/man/bundle-plugin.1 +++ b/lib/bundler/man/bundle-plugin.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-PLUGIN" "1" "May 2025" "" +.TH "BUNDLE\-PLUGIN" "1" "June 2025" "" .SH "NAME" \fBbundle\-plugin\fR \- Manage Bundler plugins .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-pristine.1 b/lib/bundler/man/bundle-pristine.1 index e9df372482..84a02dfd47 100644 --- a/lib/bundler/man/bundle-pristine.1 +++ b/lib/bundler/man/bundle-pristine.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-PRISTINE" "1" "May 2025" "" +.TH "BUNDLE\-PRISTINE" "1" "June 2025" "" .SH "NAME" \fBbundle\-pristine\fR \- Restores installed gems to their pristine condition .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-remove.1 b/lib/bundler/man/bundle-remove.1 index c57aeb5898..00d9cf4319 100644 --- a/lib/bundler/man/bundle-remove.1 +++ b/lib/bundler/man/bundle-remove.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-REMOVE" "1" "May 2025" "" +.TH "BUNDLE\-REMOVE" "1" "June 2025" "" .SH "NAME" \fBbundle\-remove\fR \- Removes gems from the Gemfile .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-show.1 b/lib/bundler/man/bundle-show.1 
index bba79d064e..d556c738f6 100644 --- a/lib/bundler/man/bundle-show.1 +++ b/lib/bundler/man/bundle-show.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-SHOW" "1" "May 2025" "" +.TH "BUNDLE\-SHOW" "1" "June 2025" "" .SH "NAME" \fBbundle\-show\fR \- Shows all the gems in your bundle, or the path to a gem .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-update.1 b/lib/bundler/man/bundle-update.1 index c76ed74d57..080d9b889f 100644 --- a/lib/bundler/man/bundle-update.1 +++ b/lib/bundler/man/bundle-update.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-UPDATE" "1" "May 2025" "" +.TH "BUNDLE\-UPDATE" "1" "June 2025" "" .SH "NAME" \fBbundle\-update\fR \- Update your gems to the latest available versions .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-version.1 b/lib/bundler/man/bundle-version.1 index 522a87383d..e3ccd023b6 100644 --- a/lib/bundler/man/bundle-version.1 +++ b/lib/bundler/man/bundle-version.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-VERSION" "1" "May 2025" "" +.TH "BUNDLE\-VERSION" "1" "June 2025" "" .SH "NAME" \fBbundle\-version\fR \- Prints Bundler version information .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle-viz.1 b/lib/bundler/man/bundle-viz.1 index 5bb8c336a1..34a2cf1fff 100644 --- a/lib/bundler/man/bundle-viz.1 +++ b/lib/bundler/man/bundle-viz.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE\-VIZ" "1" "May 2025" "" +.TH "BUNDLE\-VIZ" "1" "June 2025" "" .SH "NAME" \fBbundle\-viz\fR \- Generates a visual dependency graph for your Gemfile .SH "SYNOPSIS" diff --git a/lib/bundler/man/bundle.1 b/lib/bundler/man/bundle.1 index f87886cfcb..5c42b06547 100644 --- a/lib/bundler/man/bundle.1 +++ b/lib/bundler/man/bundle.1 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "BUNDLE" "1" "May 2025" "" +.TH "BUNDLE" "1" "June 2025" "" .SH "NAME" \fBbundle\fR \- Ruby Dependency Management .SH "SYNOPSIS" diff --git a/lib/bundler/man/gemfile.5 b/lib/bundler/man/gemfile.5 index e1d433e924..8262ee0afc 100644 --- a/lib/bundler/man/gemfile.5 +++ b/lib/bundler/man/gemfile.5 @@ -1,6 +1,6 @@ .\" generated with Ronn-NG/v0.10.1 .\" http://github.com/apjanke/ronn-ng/tree/0.10.1 -.TH "GEMFILE" "5" "May 2025" "" +.TH "GEMFILE" "5" "June 2025" "" .SH "NAME" \fBGemfile\fR \- A format for describing gem dependencies for Ruby programs .SH "SYNOPSIS" diff --git a/lib/net/http/generic_request.rb b/lib/net/http/generic_request.rb index 44e329a0c8..c92004e557 100644 --- a/lib/net/http/generic_request.rb +++ b/lib/net/http/generic_request.rb @@ -102,6 +102,31 @@ class Net::HTTPGenericRequest "\#<#{self.class} #{@method}>" end + # Returns a string representation of the request with the details for pp: + # + # require 'pp' + # post = Net::HTTP::Post.new(uri) + # post.inspect # => "#<Net::HTTP::Post POST>" + # post.pretty_inspect + # # => #<Net::HTTP::Post + # POST + # path="/" + # headers={"accept-encoding" => ["gzip;q=1.0,deflate;q=0.6,identity;q=0.3"], + # "accept" => ["*/*"], + # "user-agent" => ["Ruby"], + # "host" => ["www.ruby-lang.org"]}> + # + def pretty_print(q) + q.object_group(self) { + q.breakable + q.text @method + q.breakable + q.text "path="; q.pp @path + q.breakable + q.text "headers="; q.pp to_hash + } + end + ## # Don't automatically decode response 
content-encoding if the user indicates # they want to handle it. @@ -260,7 +285,6 @@ class Net::HTTPGenericRequest def send_request_with_body(sock, ver, path, body) self.content_length = body.bytesize delete 'Transfer-Encoding' - supply_default_content_type write_header sock, ver, path wait_for_continue sock, ver if sock.continue_timeout sock.write body @@ -271,7 +295,6 @@ class Net::HTTPGenericRequest raise ArgumentError, "Content-Length not given and Transfer-Encoding is not `chunked'" end - supply_default_content_type write_header sock, ver, path wait_for_continue sock, ver if sock.continue_timeout if chunked? @@ -373,12 +396,6 @@ class Net::HTTPGenericRequest buf.clear end - def supply_default_content_type - return if content_type() - warn 'net/http: Content-Type did not set; using application/x-www-form-urlencoded', uplevel: 1 if $VERBOSE - set_content_type 'application/x-www-form-urlencoded' - end - ## # Waits up to the continue timeout for a response from the server provided # we're speaking HTTP 1.1 and are expecting a 100-continue response. @@ -411,4 +428,3 @@ class Net::HTTPGenericRequest end end - diff --git a/lib/prism/polyfill/scan_byte.rb b/lib/prism/polyfill/scan_byte.rb new file mode 100644 index 0000000000..2def4572c4 --- /dev/null +++ b/lib/prism/polyfill/scan_byte.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +require "strscan" + +# Polyfill for StringScanner#scan_byte, which didn't exist until Ruby 3.4. +if !(StringScanner.instance_methods.include?(:scan_byte)) + StringScanner.include( + Module.new { + def scan_byte # :nodoc: + get_byte&.b&.ord + end + } + ) +end diff --git a/lib/prism/prism.gemspec b/lib/prism/prism.gemspec index 5cb5a98057..4daa511300 100644 --- a/lib/prism/prism.gemspec +++ b/lib/prism/prism.gemspec @@ -88,6 +88,7 @@ Gem::Specification.new do |spec| "lib/prism/pattern.rb", "lib/prism/polyfill/append_as_bytes.rb", "lib/prism/polyfill/byteindex.rb", + "lib/prism/polyfill/scan_byte.rb", "lib/prism/polyfill/unpack1.rb", "lib/prism/polyfill/warn.rb", "lib/prism/reflection.rb", diff --git a/lib/prism/translation/parser/lexer.rb b/lib/prism/translation/parser/lexer.rb index 349a0b257f..22ca3b6321 100644 --- a/lib/prism/translation/parser/lexer.rb +++ b/lib/prism/translation/parser/lexer.rb @@ -3,6 +3,7 @@ require "strscan" require_relative "../../polyfill/append_as_bytes" +require_relative "../../polyfill/scan_byte" module Prism module Translation @@ -762,12 +763,12 @@ module Prism elsif (value = scanner.scan(/M-\\?(?=[[:print:]])/)) # \M-x where x is an ASCII printable character escape_read(result, scanner, control, true) - elsif (byte = scanner.get_byte) + elsif (byte = scanner.scan_byte) # Something else after an escape. - if control && byte == "?" + if control && byte == 0x3f # ASCII '?' result.append_as_bytes(escape_build(0x7f, false, meta)) else - result.append_as_bytes(escape_build(byte.ord, control, meta)) + result.append_as_bytes(escape_build(byte, control, meta)) end end end diff --git a/lib/rubygems/commands/pristine_command.rb b/lib/rubygems/commands/pristine_command.rb index 97f1646ba0..93503d2b69 100644 --- a/lib/rubygems/commands/pristine_command.rb +++ b/lib/rubygems/commands/pristine_command.rb @@ -137,11 +137,14 @@ extensions will be restored. specs.group_by(&:full_name_with_location).values.each do |grouped_specs| spec = grouped_specs.find {|s| !s.default_gem? } || grouped_specs.first - unless only_executables_or_plugins? 
+ only_executables = options[:only_executables] + only_plugins = options[:only_plugins] + + unless only_executables || only_plugins # Default gemspecs include changes provided by ruby-core installer that # can't currently be pristined (inclusion of compiled extension targets in # the file list). So stick to resetting executables if it's a default gem. - options[:only_executables] = true if spec.default_gem? + only_executables = true if spec.default_gem? end if options.key? :skip @@ -151,14 +154,14 @@ extensions will be restored. end end - unless spec.extensions.empty? || options[:extensions] || only_executables_or_plugins? + unless spec.extensions.empty? || options[:extensions] || only_executables || only_plugins say "Skipped #{spec.full_name_with_location}, it needs to compile an extension" next end gem = spec.cache_file - unless File.exist?(gem) || only_executables_or_plugins? + unless File.exist?(gem) || only_executables || only_plugins require_relative "../remote_fetcher" say "Cached gem for #{spec.full_name_with_location} not found, attempting to fetch..." @@ -194,10 +197,10 @@ extensions will be restored. bin_dir: bin_dir, } - if options[:only_executables] + if only_executables installer = Gem::Installer.for_spec(spec, installer_options) installer.generate_bin - elsif options[:only_plugins] + elsif only_plugins installer = Gem::Installer.for_spec(spec, installer_options) installer.generate_plugins else @@ -208,10 +211,4 @@ extensions will be restored. say "Restored #{spec.full_name_with_location}" end end - - private - - def only_executables_or_plugins? - options[:only_executables] || options[:only_plugins] - end end diff --git a/lib/rubygems/ext/cargo_builder.rb b/lib/rubygems/ext/cargo_builder.rb index 03024a640e..21b50f394d 100644 --- a/lib/rubygems/ext/cargo_builder.rb +++ b/lib/rubygems/ext/cargo_builder.rb @@ -158,6 +158,10 @@ class Gem::Ext::CargoBuilder < Gem::Ext::Builder # mkmf work properly. def linker_args cc_flag = self.class.shellsplit(makefile_config("CC")) + # Avoid to ccache like tool from Rust build + # see https://github.com/rubygems/rubygems/pull/8521#issuecomment-2689854359 + # ex. CC="ccache gcc" or CC="sccache clang --any --args" + cc_flag.shift if cc_flag.size >= 2 && !cc_flag[1].start_with?("-") linker = cc_flag.shift link_args = cc_flag.flat_map {|a| ["-C", "link-arg=#{a}"] } diff --git a/lib/rubygems/resolver.rb b/lib/rubygems/resolver.rb index 35d83abd2d..9bf5f80930 100644 --- a/lib/rubygems/resolver.rb +++ b/lib/rubygems/resolver.rb @@ -241,7 +241,7 @@ class Gem::Resolver sources.each do |source| groups[source]. - sort_by {|spec| [spec.version, spec.platform =~ Gem::Platform.local ? 1 : 0] }. # rubocop:disable Performance/RegexpMatch + sort_by {|spec| [spec.version, -Gem::Platform.platform_specificity_match(spec.platform, Gem::Platform.local)] }. map {|spec| ActivationRequest.new spec, dependency }. 
each {|activation_request| activation_requests << activation_request } end diff --git a/lib/rubygems/ssl_certs/rubygems.org/GlobalSignRootCA_R3.pem b/lib/rubygems/ssl_certs/rubygems.org/GlobalSign.pem index 8afb219058..8afb219058 100644 --- a/lib/rubygems/ssl_certs/rubygems.org/GlobalSignRootCA_R3.pem +++ b/lib/rubygems/ssl_certs/rubygems.org/GlobalSign.pem diff --git a/lib/rubygems/ssl_certs/rubygems.org/GlobalSignRootCA.pem b/lib/rubygems/ssl_certs/rubygems.org/GlobalSignRootCA.pem deleted file mode 100644 index f4ce4ca43d..0000000000 --- a/lib/rubygems/ssl_certs/rubygems.org/GlobalSignRootCA.pem +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG -A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv -b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw -MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i -YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT -aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ -jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp -xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp -1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG -snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ -U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8 -9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E -BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B -AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz -yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE -38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP -AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad -DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME -HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== ------END CERTIFICATE----- diff --git a/misc/tsan_suppressions.txt b/misc/tsan_suppressions.txt index 18abf90571..e46f133a9e 100644 --- a/misc/tsan_suppressions.txt +++ b/misc/tsan_suppressions.txt @@ -65,6 +65,14 @@ race_top:rb_ractor_set_current_ec_ # Possible deadlock between Ractor lock and UBF lock deadlock:ractor_sleep_interrupt +# TSan reports a lock-order-inversion between thread_sched_lock_ and this lock. +# It's unclear if that can cause a deadlock since the lock is on self +deadlock:ractor_lock_self + +# TSan reports a deadlock when reacquiring the this lock after a barrier, but +# we know the other threads have been stopped +deadlock:rb_ractor_sched_barrier_start + # RVALUE_AGE_SET manipulates flag bits on objects which may be accessed in Ractors race_top:RVALUE_AGE_SET @@ -87,6 +95,10 @@ race:gccct_method_search race:rb_ec_finalize race:rb_ec_cleanup +# TSan doesn't work well post-fork, this raises errors when creating the new +# timer thread +race:after_fork_ruby + # object_id races race:object_id diff --git a/namespace.c b/namespace.c index 44afdd8f21..af7fb4459c 100644 --- a/namespace.c +++ b/namespace.c @@ -450,9 +450,6 @@ namespace_initialize(VALUE namespace) // If a code in the namespace adds a constant, the constant will be visible even from root/main. RCLASS_SET_PRIME_CLASSEXT_WRITABLE(namespace, true); - // fallback to ivptr for ivars from shapes to manipulate the constant table - rb_evict_ivars_to_hash(namespace); - // Get a clean constant table of Object even by writable one // because ns was just created, so it has not touched any constants yet. 
object_classext = RCLASS_EXT_WRITABLE_IN_NS(rb_cObject, ns); diff --git a/nilclass.rb b/nilclass.rb index 5a2e19680d..acd5666c71 100644 --- a/nilclass.rb +++ b/nilclass.rb @@ -1,6 +1,32 @@ class NilClass # # call-seq: + # rationalize(eps = nil) -> (0/1) + # + # Returns zero as a Rational: + # + # nil.rationalize # => (0/1) + # + # Argument +eps+ is ignored. + # + def rationalize(eps = nil) + 0r + end + + # + # call-seq: + # to_c -> (0+0i) + # + # Returns zero as a Complex: + # + # nil.to_c # => (0+0i) + # + def to_c + 0i + end + + # + # call-seq: # nil.to_i -> 0 # # Always returns zero. @@ -22,4 +48,16 @@ class NilClass def to_f return 0.0 end + + # + # call-seq: + # to_r -> (0/1) + # + # Returns zero as a Rational: + # + # nil.to_r # => (0/1) + # + def to_r + 0r + end end @@ -83,6 +83,7 @@ static VALUE rb_cFalseClass_to_s; #define id_init_dup idInitialize_dup #define id_const_missing idConst_missing #define id_to_f idTo_f +static ID id_instance_variables_to_inspect; #define CLASS_OR_MODULE_P(obj) \ (!SPECIAL_CONST_P(obj) && \ @@ -339,7 +340,7 @@ rb_obj_copy_ivar(VALUE dest, VALUE obj) shape_id_t dest_shape_id = src_shape_id; shape_id_t initial_shape_id = RBASIC_SHAPE_ID(dest); - RUBY_ASSERT(RSHAPE(initial_shape_id)->type == SHAPE_ROOT); + RUBY_ASSERT(RSHAPE_TYPE_P(initial_shape_id, SHAPE_ROOT)); dest_shape_id = rb_shape_rebuild(initial_shape_id, src_shape_id); if (UNLIKELY(rb_shape_too_complex_p(dest_shape_id))) { @@ -733,11 +734,17 @@ rb_inspect(VALUE obj) static int inspect_i(ID id, VALUE value, st_data_t a) { - VALUE str = (VALUE)a; + VALUE *args = (VALUE *)a, str = args[0], ivars = args[1]; /* need not to show internal data */ if (CLASS_OF(value) == 0) return ST_CONTINUE; if (!rb_is_instance_id(id)) return ST_CONTINUE; + if (!NIL_P(ivars)) { + VALUE name = ID2SYM(id); + for (long i = 0; RARRAY_AREF(ivars, i) != name; ) { + if (++i >= RARRAY_LEN(ivars)) return ST_CONTINUE; + } + } if (RSTRING_PTR(str)[0] == '-') { /* first element */ RSTRING_PTR(str)[0] = '#'; rb_str_cat2(str, " "); @@ -752,13 +759,15 @@ inspect_i(ID id, VALUE value, st_data_t a) } static VALUE -inspect_obj(VALUE obj, VALUE str, int recur) +inspect_obj(VALUE obj, VALUE a, int recur) { + VALUE *args = (VALUE *)a, str = args[0]; + if (recur) { rb_str_cat2(str, " ..."); } else { - rb_ivar_foreach(obj, inspect_i, str); + rb_ivar_foreach(obj, inspect_i, a); } rb_str_cat2(str, ">"); RSTRING_PTR(str)[0] = '#'; @@ -791,17 +800,47 @@ inspect_obj(VALUE obj, VALUE str, int recur) * end * end * Bar.new.inspect #=> "#<Bar:0x0300c868 @bar=1>" + * + * If _obj_ responds to +instance_variables_to_inspect+, then only + * the instance variables listed in the returned array will be included + * in the inspect string. 
+ * + * + * class DatabaseConfig + * def initialize(host, user, password) + * @host = host + * @user = user + * @password = password + * end + * + * private + * def instance_variables_to_inspect = [:@host, :@user] + * end + * + * conf = DatabaseConfig.new("localhost", "root", "hunter2") + * conf.inspect #=> #<DatabaseConfig:0x0000000104def350 @host="localhost", @user="root"> */ static VALUE rb_obj_inspect(VALUE obj) { - if (rb_ivar_count(obj) > 0) { - VALUE str; + VALUE ivars = rb_check_funcall(obj, id_instance_variables_to_inspect, 0, 0); + st_index_t n = 0; + if (UNDEF_P(ivars)) { + n = rb_ivar_count(obj); + ivars = Qnil; + } + else if (!NIL_P(ivars)) { + Check_Type(ivars, T_ARRAY); + n = RARRAY_LEN(ivars); + } + if (n > 0) { VALUE c = rb_class_name(CLASS_OF(obj)); - - str = rb_sprintf("-<%"PRIsVALUE":%p", c, (void*)obj); - return rb_exec_recursive(inspect_obj, obj, str); + VALUE args[2] = { + rb_sprintf("-<%"PRIsVALUE":%p", c, (void*)obj), + ivars + }; + return rb_exec_recursive(inspect_obj, obj, (VALUE)args); } else { return rb_any_to_s(obj); @@ -4600,6 +4639,7 @@ void Init_Object(void) { id_dig = rb_intern_const("dig"); + id_instance_variables_to_inspect = rb_intern_const("instance_variables_to_inspect"); InitVM(Object); } diff --git a/prism/templates/lib/prism/visitor.rb.erb b/prism/templates/lib/prism/visitor.rb.erb index a1eac38dc4..b1a03c3f1a 100644 --- a/prism/templates/lib/prism/visitor.rb.erb +++ b/prism/templates/lib/prism/visitor.rb.erb @@ -34,7 +34,7 @@ module Prism # # class FooCalls < Prism::Visitor # def visit_call_node(node) - # if node.name == "foo" + # if node.name == :foo # # Do something with the node # end # diff --git a/prism_compile.c b/prism_compile.c index c71c1429b2..2ae6c1db9e 100644 --- a/prism_compile.c +++ b/prism_compile.c @@ -5164,6 +5164,20 @@ pm_compile_target_node(rb_iseq_t *iseq, const pm_node_t *node, LINK_ANCHOR *cons break; } + case PM_SPLAT_NODE: { + // Splat nodes capture all values into an array. They can be used + // as targets in assignments or for loops. + // + // for *x in []; end + // + const pm_splat_node_t *cast = (const pm_splat_node_t *) node; + + if (cast->expression != NULL) { + pm_compile_target_node(iseq, cast->expression, parents, writes, cleanup, scope_node, state); + } + + break; + } default: rb_bug("Unexpected node type: %s", pm_node_type_to_str(PM_NODE_TYPE(node))); break; @@ -5277,7 +5291,8 @@ pm_compile_for_node_index(rb_iseq_t *iseq, const pm_node_t *node, LINK_ANCHOR *c case PM_INSTANCE_VARIABLE_TARGET_NODE: case PM_CONSTANT_PATH_TARGET_NODE: case PM_CALL_TARGET_NODE: - case PM_INDEX_TARGET_NODE: { + case PM_INDEX_TARGET_NODE: + case PM_SPLAT_NODE: { // For other targets, we need to potentially compile the parent or // owning expression of this target, then retrieve the value, expand it, // and then compile the necessary writes. 
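The new PM_SPLAT_NODE target case above lets Prism compile a splat as the target of a `for` loop. A minimal Ruby sketch of the behavior being compiled (each element is assigned with multiple-assignment rules, so the variable always ends up as an Array):

```ruby
rows = []
for *x in [[1, 2], [3, 4], 5]
  rows << x   # x is [1, 2], then [3, 4], then [5]
end
rows # => [[1, 2], [3, 4], [5]]
```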
@@ -1658,10 +1658,9 @@ obj_traverse_replace_i(VALUE obj, struct obj_traverse_replace_data *data) if (d.stop) return 1; } else { - for (uint32_t i = 0; i < fields_tbl->as.shape.fields_count; i++) { - if (!UNDEF_P(fields_tbl->as.shape.fields[i])) { - CHECK_AND_REPLACE(fields_tbl->as.shape.fields[i]); - } + uint32_t fields_count = RSHAPE_LEN(RBASIC_SHAPE_ID(obj)); + for (uint32_t i = 0; i < fields_count; i++) { + CHECK_AND_REPLACE(fields_tbl->as.shape.fields[i]); } } } @@ -2242,6 +2241,28 @@ struct cross_ractor_require { ID name; }; +static void +cross_ractor_require_mark(void *ptr) +{ + struct cross_ractor_require *crr = (struct cross_ractor_require *)ptr; + rb_gc_mark(crr->port); + rb_gc_mark(crr->result); + rb_gc_mark(crr->exception); + rb_gc_mark(crr->feature); + rb_gc_mark(crr->module); +} + +static const rb_data_type_t cross_ractor_require_data_type = { + "ractor/cross_ractor_require", + { + cross_ractor_require_mark, + RUBY_DEFAULT_FREE, + NULL, // memsize + NULL, // compact + }, + 0, 0, RUBY_TYPED_FREE_IMMEDIATELY +}; + static VALUE require_body(VALUE data) { @@ -2288,8 +2309,11 @@ require_result_copy_resuce(VALUE data, VALUE errinfo) } static VALUE -ractor_require_protect(struct cross_ractor_require *crr, VALUE (*func)(VALUE)) +ractor_require_protect(VALUE crr_obj, VALUE (*func)(VALUE)) { + struct cross_ractor_require *crr; + TypedData_Get_Struct(crr_obj, struct cross_ractor_require, &cross_ractor_require_data_type, crr); + // catch any error rb_rescue2(func, (VALUE)crr, require_rescue, (VALUE)crr, rb_eException, 0); @@ -2298,43 +2322,49 @@ ractor_require_protect(struct cross_ractor_require *crr, VALUE (*func)(VALUE)) require_result_copy_resuce, (VALUE)crr, rb_eException, 0); ractor_port_send(GET_EC(), crr->port, Qtrue, Qfalse); + RB_GC_GUARD(crr_obj); return Qnil; } static VALUE -ractor_require_func(void *data) +ractor_require_func(void *crr_obj) { - struct cross_ractor_require *crr = (struct cross_ractor_require *)data; - return ractor_require_protect(crr, require_body); + return ractor_require_protect((VALUE)crr_obj, require_body); } VALUE rb_ractor_require(VALUE feature) { - // TODO: make feature shareable - struct cross_ractor_require crr = { - .feature = feature, // TODO: ractor - .port = ractor_port_new(GET_RACTOR()), - .result = Qundef, - .exception = Qundef, - }; + struct cross_ractor_require *crr; + VALUE crr_obj = TypedData_Make_Struct(0, struct cross_ractor_require, &cross_ractor_require_data_type, crr); + FL_SET_RAW(crr_obj, RUBY_FL_SHAREABLE); + + // Convert feature to proper file path and make it shareable as fstring + crr->feature = rb_fstring(FilePathValue(feature)); + crr->port = ractor_port_new(GET_RACTOR()); + crr->result = Qundef; + crr->exception = Qundef; rb_execution_context_t *ec = GET_EC(); rb_ractor_t *main_r = GET_VM()->ractor.main_ractor; - rb_ractor_interrupt_exec(main_r, ractor_require_func, &crr, 0); + rb_ractor_interrupt_exec(main_r, ractor_require_func, (void *)crr_obj, rb_interrupt_exec_flag_value_data); // wait for require done - ractor_port_receive(ec, crr.port); - ractor_port_close(ec, crr.port); + ractor_port_receive(ec, crr->port); + ractor_port_close(ec, crr->port); - if (crr.exception != Qundef) { - ractor_reset_belonging(crr.exception); - rb_exc_raise(crr.exception); + VALUE exc = crr->exception; + VALUE result = crr->result; + RB_GC_GUARD(crr_obj); + + if (exc != Qundef) { + ractor_reset_belonging(exc); + rb_exc_raise(exc); } else { - RUBY_ASSERT(crr.result != Qundef); - ractor_reset_belonging(crr.result); - return crr.result; + 
RUBY_ASSERT(result != Qundef); + ractor_reset_belonging(result); + return result; } } @@ -2353,36 +2383,40 @@ autoload_load_body(VALUE data) } static VALUE -ractor_autoload_load_func(void *data) +ractor_autoload_load_func(void *crr_obj) { - struct cross_ractor_require *crr = (struct cross_ractor_require *)data; - return ractor_require_protect(crr, autoload_load_body); + return ractor_require_protect((VALUE)crr_obj, autoload_load_body); } VALUE rb_ractor_autoload_load(VALUE module, ID name) { - struct cross_ractor_require crr = { - .module = module, - .name = name, - .port = ractor_port_new(GET_RACTOR()), - .result = Qundef, - .exception = Qundef, - }; + struct cross_ractor_require *crr; + VALUE crr_obj = TypedData_Make_Struct(0, struct cross_ractor_require, &cross_ractor_require_data_type, crr); + FL_SET_RAW(crr_obj, RUBY_FL_SHAREABLE); + crr->module = module; + crr->name = name; + crr->port = ractor_port_new(GET_RACTOR()); + crr->result = Qundef; + crr->exception = Qundef; rb_execution_context_t *ec = GET_EC(); rb_ractor_t *main_r = GET_VM()->ractor.main_ractor; - rb_ractor_interrupt_exec(main_r, ractor_autoload_load_func, &crr, 0); + rb_ractor_interrupt_exec(main_r, ractor_autoload_load_func, (void *)crr_obj, rb_interrupt_exec_flag_value_data); // wait for require done - ractor_port_receive(ec, crr.port); - ractor_port_close(ec, crr.port); + ractor_port_receive(ec, crr->port); + ractor_port_close(ec, crr->port); + + VALUE exc = crr->exception; + VALUE result = crr->result; + RB_GC_GUARD(crr_obj); - if (crr.exception != Qundef) { - rb_exc_raise(crr.exception); + if (exc != Qundef) { + rb_exc_raise(exc); } else { - return crr.result; + return result; } } diff --git a/ractor_sync.c b/ractor_sync.c index 0fcc293504..204c800a06 100644 --- a/ractor_sync.c +++ b/ractor_sync.c @@ -1197,6 +1197,7 @@ ractor_send_basket(rb_execution_context_t *ec, const struct ractor_port *rp, str RUBY_DEBUG_LOG("closed:%u@r%u", (unsigned int)ractor_port_id(rp), rb_ractor_id(rp->r)); if (raise_on_error) { + ractor_basket_free(b); rb_raise(rb_eRactorClosedError, "The port was already closed"); } } diff --git a/rational.c b/rational.c index f1547856b4..89e74c328d 100644 --- a/rational.c +++ b/rational.c @@ -2109,39 +2109,6 @@ rb_float_denominator(VALUE self) /* * call-seq: - * to_r -> (0/1) - * - * Returns zero as a Rational: - * - * nil.to_r # => (0/1) - * - */ -static VALUE -nilclass_to_r(VALUE self) -{ - return rb_rational_new1(INT2FIX(0)); -} - -/* - * call-seq: - * rationalize(eps = nil) -> (0/1) - * - * Returns zero as a Rational: - * - * nil.rationalize # => (0/1) - * - * Argument +eps+ is ignored. - * - */ -static VALUE -nilclass_rationalize(int argc, VALUE *argv, VALUE self) -{ - rb_check_arity(argc, 0, 1); - return nilclass_to_r(self); -} - -/* - * call-seq: * int.to_r -> rational * * Returns the value as a rational. 
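The C implementations of `NilClass#to_r` and `NilClass#rationalize` removed in this hunk are replaced by Ruby definitions in nilclass.rb earlier in this diff, which also adds `NilClass#to_c`. The observable behavior, as documented there:

```ruby
nil.to_r             # => (0/1)
nil.rationalize      # => (0/1)
nil.rationalize(0.1) # => (0/1), eps is ignored
nil.to_c             # => (0+0i)
```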
@@ -2823,8 +2790,6 @@ Init_Rational(void) rb_define_method(rb_cFloat, "numerator", rb_float_numerator, 0); rb_define_method(rb_cFloat, "denominator", rb_float_denominator, 0); - rb_define_method(rb_cNilClass, "to_r", nilclass_to_r, 0); - rb_define_method(rb_cNilClass, "rationalize", nilclass_rationalize, -1); rb_define_method(rb_cInteger, "to_r", integer_to_r, 0); rb_define_method(rb_cInteger, "rationalize", integer_rationalize, -1); rb_define_method(rb_cFloat, "to_r", float_to_r, 0); @@ -28,6 +28,7 @@ #include "ruby/encoding.h" #include "ruby/re.h" #include "ruby/util.h" +#include "ractor_core.h" VALUE rb_eRegexpError, rb_eRegexpTimeoutError; @@ -1666,7 +1667,7 @@ rb_reg_prepare_re(VALUE re, VALUE str) RSTRING_GETMEM(unescaped, ptr, len); /* If there are no other users of this regex, then we can directly overwrite it. */ - if (RREGEXP(re)->usecnt == 0) { + if (ruby_single_main_ractor && RREGEXP(re)->usecnt == 0) { regex_t tmp_reg; r = onig_new_without_alloc(&tmp_reg, (UChar *)ptr, (UChar *)(ptr + len), reg->options, enc, @@ -3499,12 +3500,16 @@ static VALUE reg_cache; VALUE rb_reg_regcomp(VALUE str) { - if (reg_cache && RREGEXP_SRC_LEN(reg_cache) == RSTRING_LEN(str) - && ENCODING_GET(reg_cache) == ENCODING_GET(str) - && memcmp(RREGEXP_SRC_PTR(reg_cache), RSTRING_PTR(str), RSTRING_LEN(str)) == 0) - return reg_cache; + if (rb_ractor_main_p()) { + if (reg_cache && RREGEXP_SRC_LEN(reg_cache) == RSTRING_LEN(str) + && ENCODING_GET(reg_cache) == ENCODING_GET(str) + && memcmp(RREGEXP_SRC_PTR(reg_cache), RSTRING_PTR(str), RSTRING_LEN(str)) == 0) + return reg_cache; - return reg_cache = rb_reg_new_str(str, 0); + return reg_cache = rb_reg_new_str(str, 0); + } else { + return rb_reg_new_str(str, 0); + } } static st_index_t reg_hash(VALUE re); diff --git a/ruby_atomic.h b/ruby_atomic.h index f5f32191af..04c5d6d9f8 100644 --- a/ruby_atomic.h +++ b/ruby_atomic.h @@ -36,8 +36,10 @@ rbimpl_atomic_load_relaxed(volatile rb_atomic_t *ptr) } #define ATOMIC_LOAD_RELAXED(var) rbimpl_atomic_load_relaxed(&(var)) +typedef RBIMPL_ALIGNAS(8) uint64_t rbimpl_atomic_uint64_t; + static inline uint64_t -rbimpl_atomic_u64_load_relaxed(const volatile uint64_t *value) +rbimpl_atomic_u64_load_relaxed(const volatile rbimpl_atomic_uint64_t *value) { #if defined(HAVE_GCC_ATOMIC_BUILTINS_64) return __atomic_load_n(value, __ATOMIC_RELAXED); @@ -54,7 +56,7 @@ rbimpl_atomic_u64_load_relaxed(const volatile uint64_t *value) #define ATOMIC_U64_LOAD_RELAXED(var) rbimpl_atomic_u64_load_relaxed(&(var)) static inline void -rbimpl_atomic_u64_set_relaxed(volatile uint64_t *address, uint64_t value) +rbimpl_atomic_u64_set_relaxed(volatile rbimpl_atomic_uint64_t *address, uint64_t value) { #if defined(HAVE_GCC_ATOMIC_BUILTINS_64) __atomic_store_n(address, value, __ATOMIC_RELAXED); diff --git a/scheduler.c b/scheduler.c index 80c0278933..11faca01d3 100644 --- a/scheduler.c +++ b/scheduler.c @@ -1061,9 +1061,8 @@ VALUE rb_fiber_scheduler_blocking_operation_wait(VALUE scheduler, void* (*functi operation->data2 = NULL; operation->unblock_function = NULL; - // If the blocking operation was never executed, return Qundef to signal - // the caller to use rb_nogvl instead - if (current_status != RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_COMPLETED) { + // If the blocking operation was never executed, return Qundef to signal the caller to use rb_nogvl instead + if (current_status == RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_QUEUED) { return Qundef; } @@ -528,6 +528,7 @@ set_i_initialize_copy(VALUE set, VALUE other) set_free_embedded(sobj); 
set_copy(&sobj->table, RSET_TABLE(other)); + rb_gc_writebarrier_remember(set); return set; } @@ -48,8 +48,8 @@ redblack_left(redblack_node_t *node) return LEAF; } else { - RUBY_ASSERT(node->l < GET_SHAPE_TREE()->cache_size); - redblack_node_t *left = &GET_SHAPE_TREE()->shape_cache[node->l - 1]; + RUBY_ASSERT(node->l < rb_shape_tree.cache_size); + redblack_node_t *left = &rb_shape_tree.shape_cache[node->l - 1]; return left; } } @@ -61,8 +61,8 @@ redblack_right(redblack_node_t *node) return LEAF; } else { - RUBY_ASSERT(node->r < GET_SHAPE_TREE()->cache_size); - redblack_node_t *right = &GET_SHAPE_TREE()->shape_cache[node->r - 1]; + RUBY_ASSERT(node->r < rb_shape_tree.cache_size); + redblack_node_t *right = &rb_shape_tree.shape_cache[node->r - 1]; return right; } } @@ -120,7 +120,7 @@ redblack_id_for(redblack_node_t *node) return 0; } else { - redblack_node_t *redblack_nodes = GET_SHAPE_TREE()->shape_cache; + redblack_node_t *redblack_nodes = rb_shape_tree.shape_cache; redblack_id_t id = (redblack_id_t)(node - redblack_nodes); return id + 1; } @@ -129,7 +129,7 @@ redblack_id_for(redblack_node_t *node) static redblack_node_t * redblack_new(char color, ID key, rb_shape_t *value, redblack_node_t *left, redblack_node_t *right) { - if (GET_SHAPE_TREE()->cache_size + 1 >= REDBLACK_CACHE_SIZE) { + if (rb_shape_tree.cache_size + 1 >= REDBLACK_CACHE_SIZE) { // We're out of cache, just quit return LEAF; } @@ -137,8 +137,8 @@ redblack_new(char color, ID key, rb_shape_t *value, redblack_node_t *left, redbl RUBY_ASSERT(left == LEAF || left->key < key); RUBY_ASSERT(right == LEAF || right->key > key); - redblack_node_t *redblack_nodes = GET_SHAPE_TREE()->shape_cache; - redblack_node_t *node = &redblack_nodes[(GET_SHAPE_TREE()->cache_size)++]; + redblack_node_t *redblack_nodes = rb_shape_tree.shape_cache; + redblack_node_t *node = &redblack_nodes[(rb_shape_tree.cache_size)++]; node->key = key; node->value = (rb_shape_t *)((uintptr_t)value | color); node->l = redblack_id_for(left); @@ -288,20 +288,20 @@ redblack_insert(redblack_node_t *tree, ID key, rb_shape_t *value) } #endif -rb_shape_tree_t *rb_shape_tree_ptr = NULL; +rb_shape_tree_t rb_shape_tree = { 0 }; static VALUE shape_tree_obj = Qfalse; rb_shape_t * rb_shape_get_root_shape(void) { - return GET_SHAPE_TREE()->root_shape; + return rb_shape_tree.root_shape; } static void shape_tree_mark(void *data) { rb_shape_t *cursor = rb_shape_get_root_shape(); - rb_shape_t *end = RSHAPE(GET_SHAPE_TREE()->next_shape_id - 1); + rb_shape_t *end = RSHAPE(rb_shape_tree.next_shape_id - 1); while (cursor < end) { if (cursor->edges && !SINGLE_CHILD_P(cursor->edges)) { rb_gc_mark_movable(cursor->edges); @@ -314,7 +314,7 @@ static void shape_tree_compact(void *data) { rb_shape_t *cursor = rb_shape_get_root_shape(); - rb_shape_t *end = RSHAPE(GET_SHAPE_TREE()->next_shape_id - 1); + rb_shape_t *end = RSHAPE(rb_shape_tree.next_shape_id - 1); while (cursor < end) { if (cursor->edges && !SINGLE_CHILD_P(cursor->edges)) { cursor->edges = rb_gc_location(cursor->edges); @@ -326,7 +326,7 @@ shape_tree_compact(void *data) static size_t shape_tree_memsize(const void *data) { - return GET_SHAPE_TREE()->cache_size * sizeof(redblack_node_t); + return rb_shape_tree.cache_size * sizeof(redblack_node_t); } static const rb_data_type_t shape_tree_type = { @@ -349,14 +349,14 @@ static inline shape_id_t raw_shape_id(rb_shape_t *shape) { RUBY_ASSERT(shape); - return (shape_id_t)(shape - GET_SHAPE_TREE()->shape_list); + return (shape_id_t)(shape - rb_shape_tree.shape_list); } static inline 
shape_id_t shape_id(rb_shape_t *shape, shape_id_t previous_shape_id) { RUBY_ASSERT(shape); - shape_id_t raw_id = (shape_id_t)(shape - GET_SHAPE_TREE()->shape_list); + shape_id_t raw_id = (shape_id_t)(shape - rb_shape_tree.shape_list); return raw_id | (previous_shape_id & SHAPE_ID_FLAGS_MASK); } @@ -373,22 +373,13 @@ rb_shape_each_shape_id(each_shape_callback callback, void *data) { rb_shape_t *start = rb_shape_get_root_shape(); rb_shape_t *cursor = start; - rb_shape_t *end = RSHAPE(GET_SHAPE_TREE()->next_shape_id); + rb_shape_t *end = RSHAPE(rb_shape_tree.next_shape_id); while (cursor < end) { callback((shape_id_t)(cursor - start), data); cursor += 1; } } -RUBY_FUNC_EXPORTED rb_shape_t * -rb_shape_lookup(shape_id_t shape_id) -{ - uint32_t offset = (shape_id & SHAPE_ID_OFFSET_MASK); - RUBY_ASSERT(offset != INVALID_SHAPE_ID); - - return &GET_SHAPE_TREE()->shape_list[offset]; -} - RUBY_FUNC_EXPORTED shape_id_t rb_obj_shape_id(VALUE obj) { @@ -396,6 +387,13 @@ rb_obj_shape_id(VALUE obj) return SPECIAL_CONST_SHAPE_ID; } + if (BUILTIN_TYPE(obj) == T_CLASS || BUILTIN_TYPE(obj) == T_MODULE) { + VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj); + if (fields_obj) { + return RBASIC_SHAPE_ID(fields_obj); + } + return ROOT_SHAPE_ID; + } return RBASIC_SHAPE_ID(obj); } @@ -416,14 +414,14 @@ rb_shape_depth(shape_id_t shape_id) static rb_shape_t * shape_alloc(void) { - shape_id_t shape_id = (shape_id_t)RUBY_ATOMIC_FETCH_ADD(GET_SHAPE_TREE()->next_shape_id, 1); + shape_id_t shape_id = (shape_id_t)RUBY_ATOMIC_FETCH_ADD(rb_shape_tree.next_shape_id, 1); if (shape_id == (MAX_SHAPE_ID + 1)) { // TODO: Make an OutOfShapesError ?? rb_bug("Out of shapes"); } - return &GET_SHAPE_TREE()->shape_list[shape_id]; + return &rb_shape_tree.shape_list[shape_id]; } static rb_shape_t * @@ -487,7 +485,7 @@ redblack_cache_ancestors(rb_shape_t *shape) static attr_index_t shape_grow_capa(attr_index_t current_capa) { - const attr_index_t *capacities = GET_SHAPE_TREE()->capacities; + const attr_index_t *capacities = rb_shape_tree.capacities; // First try to use the next size that will be embeddable in a larger object slot. attr_index_t capa; @@ -566,7 +564,7 @@ retry: if (!res) { // If we're not allowed to create a new variation, of if we're out of shapes // we return TOO_COMPLEX_SHAPE. - if (!new_variations_allowed || GET_SHAPE_TREE()->next_shape_id > MAX_SHAPE_ID) { + if (!new_variations_allowed || rb_shape_tree.next_shape_id > MAX_SHAPE_ID) { res = NULL; } else { @@ -642,7 +640,7 @@ get_next_shape_internal(rb_shape_t *shape, ID id, enum shape_type shape_type, bo if (!res) { // If we're not allowed to create a new variation, of if we're out of shapes // we return TOO_COMPLEX_SHAPE. 
- if (!new_variations_allowed || GET_SHAPE_TREE()->next_shape_id > MAX_SHAPE_ID) { + if (!new_variations_allowed || rb_shape_tree.next_shape_id > MAX_SHAPE_ID) { res = NULL; } else { @@ -881,14 +879,11 @@ shape_get_next(rb_shape_t *shape, VALUE obj, ID id, bool emit_warnings) #endif VALUE klass; - switch (BUILTIN_TYPE(obj)) { - case T_CLASS: - case T_MODULE: - klass = rb_singleton_class(obj); - break; - default: + if (IMEMO_TYPE_P(obj, imemo_class_fields)) { // HACK + klass = CLASS_OF(obj); + } + else { klass = rb_obj_class(obj); - break; } bool allow_new_shape = RCLASS_VARIATION_COUNT(klass) < SHAPE_MAX_VARIATIONS; @@ -1184,6 +1179,31 @@ rb_shape_memsize(shape_id_t shape_id) return memsize; } +bool +rb_shape_foreach_field(shape_id_t initial_shape_id, rb_shape_foreach_transition_callback func, void *data) +{ + RUBY_ASSERT(!rb_shape_too_complex_p(initial_shape_id)); + + rb_shape_t *shape = RSHAPE(initial_shape_id); + if (shape->type == SHAPE_ROOT) { + return true; + } + + shape_id_t parent_id = shape_id(RSHAPE(shape->parent_id), initial_shape_id); + if (rb_shape_foreach_field(parent_id, func, data)) { + switch (func(shape_id(shape, initial_shape_id), data)) { + case ST_STOP: + return false; + case ST_CHECK: + case ST_CONTINUE: + break; + default: + rb_bug("unreachable"); + } + } + return true; +} + #if RUBY_DEBUG bool rb_shape_verify_consistency(VALUE obj, shape_id_t shape_id) @@ -1218,11 +1238,11 @@ rb_shape_verify_consistency(VALUE obj, shape_id_t shape_id) uint8_t flags_heap_index = rb_shape_heap_index(shape_id); if (RB_TYPE_P(obj, T_OBJECT)) { - size_t shape_id_slot_size = GET_SHAPE_TREE()->capacities[flags_heap_index - 1] * sizeof(VALUE) + sizeof(struct RBasic); + size_t shape_id_slot_size = rb_shape_tree.capacities[flags_heap_index - 1] * sizeof(VALUE) + sizeof(struct RBasic); size_t actual_slot_size = rb_gc_obj_slot_size(obj); if (shape_id_slot_size != actual_slot_size) { - rb_bug("shape_id heap_index flags mismatch: shape_id_slot_size=%lu, gc_slot_size=%lu\n", shape_id_slot_size, actual_slot_size); + rb_bug("shape_id heap_index flags mismatch: shape_id_slot_size=%zu, gc_slot_size=%zu\n", shape_id_slot_size, actual_slot_size); } } else { @@ -1368,7 +1388,7 @@ rb_shape_root_shape(VALUE self) static VALUE rb_shape_shapes_available(VALUE self) { - return INT2NUM(MAX_SHAPE_ID - (GET_SHAPE_TREE()->next_shape_id - 1)); + return INT2NUM(MAX_SHAPE_ID - (rb_shape_tree.next_shape_id - 1)); } static VALUE @@ -1376,7 +1396,7 @@ rb_shape_exhaust(int argc, VALUE *argv, VALUE self) { rb_check_arity(argc, 0, 1); int offset = argc == 1 ? 
NUM2INT(argv[0]) : 0; - GET_SHAPE_TREE()->next_shape_id = MAX_SHAPE_ID - offset + 1; + rb_shape_tree.next_shape_id = MAX_SHAPE_ID - offset + 1; return Qnil; } @@ -1391,12 +1411,14 @@ static enum rb_id_table_iterator_result collect_keys_and_values(ID key, VALUE va static VALUE edges(VALUE edges) { VALUE hash = rb_hash_new(); - if (SINGLE_CHILD_P(edges)) { - rb_shape_t *child = SINGLE_CHILD(edges); - collect_keys_and_values(child->edge_name, (VALUE)child, &hash); - } - else { - rb_managed_id_table_foreach(edges, collect_keys_and_values, &hash); + if (edges) { + if (SINGLE_CHILD_P(edges)) { + rb_shape_t *child = SINGLE_CHILD(edges); + collect_keys_and_values(child->edge_name, (VALUE)child, &hash); + } + else { + rb_managed_id_table_foreach(edges, collect_keys_and_values, &hash); + } } return hash; } @@ -1430,7 +1452,7 @@ static VALUE rb_shape_find_by_id(VALUE mod, VALUE id) { shape_id_t shape_id = NUM2UINT(id); - if (shape_id >= GET_SHAPE_TREE()->next_shape_id) { + if (shape_id >= rb_shape_tree.next_shape_id) { rb_raise(rb_eArgError, "Shape ID %d is out of bounds\n", shape_id); } return shape_id_t_to_rb_cShape(shape_id); @@ -1444,8 +1466,6 @@ rb_shape_find_by_id(VALUE mod, VALUE id) void Init_default_shapes(void) { - rb_shape_tree_ptr = xcalloc(1, sizeof(rb_shape_tree_t)); - size_t *heap_sizes = rb_gc_heap_sizes(); size_t heaps_count = 0; while (heap_sizes[heaps_count]) { @@ -1457,23 +1477,23 @@ Init_default_shapes(void) for (index = 0; index < heaps_count; index++) { capacities[index] = (heap_sizes[index] - sizeof(struct RBasic)) / sizeof(VALUE); } - GET_SHAPE_TREE()->capacities = capacities; + rb_shape_tree.capacities = capacities; #ifdef HAVE_MMAP size_t shape_list_mmap_size = rb_size_mul_or_raise(SHAPE_BUFFER_SIZE, sizeof(rb_shape_t), rb_eRuntimeError); - rb_shape_tree_ptr->shape_list = (rb_shape_t *)mmap(NULL, shape_list_mmap_size, + rb_shape_tree.shape_list = (rb_shape_t *)mmap(NULL, shape_list_mmap_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (GET_SHAPE_TREE()->shape_list == MAP_FAILED) { - GET_SHAPE_TREE()->shape_list = 0; + if (rb_shape_tree.shape_list == MAP_FAILED) { + rb_shape_tree.shape_list = 0; } else { - ruby_annotate_mmap(rb_shape_tree_ptr->shape_list, shape_list_mmap_size, "Ruby:Init_default_shapes:shape_list"); + ruby_annotate_mmap(rb_shape_tree.shape_list, shape_list_mmap_size, "Ruby:Init_default_shapes:shape_list"); } #else - GET_SHAPE_TREE()->shape_list = xcalloc(SHAPE_BUFFER_SIZE, sizeof(rb_shape_t)); + rb_shape_tree.shape_list = xcalloc(SHAPE_BUFFER_SIZE, sizeof(rb_shape_t)); #endif - if (!GET_SHAPE_TREE()->shape_list) { + if (!rb_shape_tree.shape_list) { rb_memerror(); } @@ -1483,19 +1503,19 @@ Init_default_shapes(void) #ifdef HAVE_MMAP size_t shape_cache_mmap_size = rb_size_mul_or_raise(REDBLACK_CACHE_SIZE, sizeof(redblack_node_t), rb_eRuntimeError); - rb_shape_tree_ptr->shape_cache = (redblack_node_t *)mmap(NULL, shape_cache_mmap_size, + rb_shape_tree.shape_cache = (redblack_node_t *)mmap(NULL, shape_cache_mmap_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - rb_shape_tree_ptr->cache_size = 0; + rb_shape_tree.cache_size = 0; // If mmap fails, then give up on the redblack tree cache. // We set the cache size such that the redblack node allocators think // the cache is full. 
- if (GET_SHAPE_TREE()->shape_cache == MAP_FAILED) { - GET_SHAPE_TREE()->shape_cache = 0; - GET_SHAPE_TREE()->cache_size = REDBLACK_CACHE_SIZE; + if (rb_shape_tree.shape_cache == MAP_FAILED) { + rb_shape_tree.shape_cache = 0; + rb_shape_tree.cache_size = REDBLACK_CACHE_SIZE; } else { - ruby_annotate_mmap(rb_shape_tree_ptr->shape_cache, shape_cache_mmap_size, "Ruby:Init_default_shapes:shape_cache"); + ruby_annotate_mmap(rb_shape_tree.shape_cache, shape_cache_mmap_size, "Ruby:Init_default_shapes:shape_cache"); } #endif @@ -1506,8 +1526,8 @@ Init_default_shapes(void) rb_shape_t *root = rb_shape_alloc_with_parent_id(0, INVALID_SHAPE_ID); root->capacity = 0; root->type = SHAPE_ROOT; - GET_SHAPE_TREE()->root_shape = root; - RUBY_ASSERT(raw_shape_id(GET_SHAPE_TREE()->root_shape) == ROOT_SHAPE_ID); + rb_shape_tree.root_shape = root; + RUBY_ASSERT(raw_shape_id(rb_shape_tree.root_shape) == ROOT_SHAPE_ID); rb_shape_t *root_with_obj_id = rb_shape_alloc_with_parent_id(0, ROOT_SHAPE_ID); root_with_obj_id->type = SHAPE_OBJ_ID; @@ -1519,8 +1539,7 @@ Init_default_shapes(void) void rb_shape_free_all(void) { - xfree((void *)GET_SHAPE_TREE()->capacities); - xfree(GET_SHAPE_TREE()); + xfree((void *)rb_shape_tree.capacities); } void @@ -19,7 +19,7 @@ STATIC_ASSERT(shape_id_num_bits, SHAPE_ID_NUM_BITS == sizeof(shape_id_t) * CHAR_ #define SHAPE_ID_FL_NON_CANONICAL_MASK (SHAPE_FL_NON_CANONICAL_MASK << SHAPE_ID_OFFSET_NUM_BITS) #define SHAPE_ID_HEAP_INDEX_BITS 3 -#define SHAPE_ID_HEAP_INDEX_OFFSET (SHAPE_ID_NUM_BITS - SHAPE_ID_HEAP_INDEX_BITS - 1) // FIXME: -1 to avoid crashing YJIT +#define SHAPE_ID_HEAP_INDEX_OFFSET (SHAPE_ID_NUM_BITS - SHAPE_ID_HEAP_INDEX_BITS) #define SHAPE_ID_HEAP_INDEX_MAX ((1 << SHAPE_ID_HEAP_INDEX_BITS) - 1) #define SHAPE_ID_HEAP_INDEX_MASK (SHAPE_ID_HEAP_INDEX_MAX << SHAPE_ID_HEAP_INDEX_OFFSET) @@ -92,7 +92,10 @@ typedef struct { redblack_node_t *shape_cache; unsigned int cache_size; } rb_shape_tree_t; -RUBY_EXTERN rb_shape_tree_t *rb_shape_tree_ptr; + +RUBY_SYMBOL_EXPORT_BEGIN +RUBY_EXTERN rb_shape_tree_t rb_shape_tree; +RUBY_SYMBOL_EXPORT_END union rb_attr_index_cache { uint64_t pack; @@ -102,18 +105,11 @@ union rb_attr_index_cache { } unpack; }; -static inline rb_shape_tree_t * -rb_current_shape_tree(void) -{ - return rb_shape_tree_ptr; -} -#define GET_SHAPE_TREE() rb_current_shape_tree() - static inline shape_id_t RBASIC_SHAPE_ID(VALUE obj) { RUBY_ASSERT(!RB_SPECIAL_CONST_P(obj)); - RUBY_ASSERT(!RB_TYPE_P(obj, T_IMEMO)); + RUBY_ASSERT(!RB_TYPE_P(obj, T_IMEMO) || IMEMO_TYPE_P(obj, imemo_class_fields)); #if RBASIC_SHAPE_ID_FIELD return (shape_id_t)((RBASIC(obj)->shape_id)); #else @@ -137,8 +133,9 @@ static inline void RBASIC_SET_SHAPE_ID(VALUE obj, shape_id_t shape_id) { RUBY_ASSERT(!RB_SPECIAL_CONST_P(obj)); - RUBY_ASSERT(!RB_TYPE_P(obj, T_IMEMO)); + RUBY_ASSERT(!RB_TYPE_P(obj, T_IMEMO) || IMEMO_TYPE_P(obj, imemo_class_fields)); RUBY_ASSERT(rb_shape_verify_consistency(obj, shape_id)); + #if RBASIC_SHAPE_ID_FIELD RBASIC(obj)->shape_id = (VALUE)shape_id; #else @@ -148,16 +145,25 @@ RBASIC_SET_SHAPE_ID(VALUE obj, shape_id_t shape_id) #endif } -#define RSHAPE rb_shape_lookup +static inline rb_shape_t * +RSHAPE(shape_id_t shape_id) +{ + uint32_t offset = (shape_id & SHAPE_ID_OFFSET_MASK); + RUBY_ASSERT(offset != INVALID_SHAPE_ID); + + return &rb_shape_tree.shape_list[offset]; +} int32_t rb_shape_id_offset(void); -RUBY_FUNC_EXPORTED rb_shape_t *rb_shape_lookup(shape_id_t shape_id); RUBY_FUNC_EXPORTED shape_id_t rb_obj_shape_id(VALUE obj); shape_id_t rb_shape_get_next_iv_shape(shape_id_t 
shape_id, ID id); bool rb_shape_get_iv_index(shape_id_t shape_id, ID id, attr_index_t *value); bool rb_shape_get_iv_index_with_hint(shape_id_t shape_id, ID id, attr_index_t *value, shape_id_t *shape_id_hint); +typedef int rb_shape_foreach_transition_callback(shape_id_t shape_id, void *data); +bool rb_shape_foreach_field(shape_id_t shape_id, rb_shape_foreach_transition_callback func, void *data); + shape_id_t rb_shape_transition_frozen(VALUE obj); shape_id_t rb_shape_transition_complex(VALUE obj); shape_id_t rb_shape_transition_remove_ivar(VALUE obj, ID id, shape_id_t *removed_shape_id); @@ -211,10 +217,22 @@ rb_shape_root(size_t heap_id) return ROOT_SHAPE_ID | ((heap_index + 1) << SHAPE_ID_HEAP_INDEX_OFFSET); } +static inline shape_id_t +RSHAPE_PARENT(shape_id_t shape_id) +{ + return RSHAPE(shape_id)->parent_id; +} + +static inline enum shape_type +RSHAPE_TYPE(shape_id_t shape_id) +{ + return RSHAPE(shape_id)->type; +} + static inline bool RSHAPE_TYPE_P(shape_id_t shape_id, enum shape_type type) { - return RSHAPE(shape_id)->type == type; + return RSHAPE_TYPE(shape_id) == type; } static inline attr_index_t @@ -222,7 +240,7 @@ RSHAPE_EMBEDDED_CAPACITY(shape_id_t shape_id) { uint8_t heap_index = rb_shape_heap_index(shape_id); if (heap_index) { - return GET_SHAPE_TREE()->capacities[heap_index - 1]; + return rb_shape_tree.capacities[heap_index - 1]; } return 0; } diff --git a/spec/bin/rspec b/spec/bin/rspec new file mode 100755 index 0000000000..1f61e3c64c --- /dev/null +++ b/spec/bin/rspec @@ -0,0 +1,6 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +require_relative "../bundler/support/rubygems_ext" + +Spec::Rubygems.gem_load("rspec-core", "rspec") diff --git a/spec/bundler/bundler/friendly_errors_spec.rb b/spec/bundler/bundler/friendly_errors_spec.rb index e0310344fd..d6a9d4813d 100644 --- a/spec/bundler/bundler/friendly_errors_spec.rb +++ b/spec/bundler/bundler/friendly_errors_spec.rb @@ -2,7 +2,8 @@ require "bundler" require "bundler/friendly_errors" -require "cgi" +require "cgi/escape" +require "cgi/util" unless defined?(CGI::EscapeExt) RSpec.describe Bundler, "friendly errors" do context "with invalid YAML in .gemrc" do diff --git a/spec/bundler/commands/install_spec.rb b/spec/bundler/commands/install_spec.rb index 41aa903f27..98883b1e72 100644 --- a/spec/bundler/commands/install_spec.rb +++ b/spec/bundler/commands/install_spec.rb @@ -1500,6 +1500,55 @@ RSpec.describe "bundle install with gem sources" do end end + context "when lockfile has incorrect dependencies" do + before do + build_repo2 + + gemfile <<-G + source "https://gem.repo2" + gem "myrack_middleware" + G + + system_gems "myrack_middleware-1.0", path: default_bundle_path + + # we want to raise when the 1.0 line should be followed by " myrack (= 0.9.1)" but isn't + lockfile <<-L + GEM + remote: https://gem.repo2/ + specs: + myrack_middleware (1.0) + + PLATFORMS + #{lockfile_platforms} + + DEPENDENCIES + myrack_middleware + + BUNDLED WITH + #{Bundler::VERSION} + L + end + + it "raises a clear error message when frozen" do + bundle "config set frozen true" + bundle "install", raise_on_error: false + + expect(exitstatus).to eq(41) + expect(err).to eq("Bundler found incorrect dependencies in the lockfile for myrack_middleware-1.0") + end + + it "updates the lockfile when not frozen" do + missing_dep = "myrack (0.9.1)" + expect(lockfile).not_to include(missing_dep) + + bundle "config set frozen false" + bundle :install + + expect(lockfile).to include(missing_dep) + expect(out).to include("now installed") + end + end + 
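For readers less familiar with the lockfile format used above: the fixture deliberately omits both the resolved `myrack (0.9.1)` spec and the `myrack (= 0.9.1)` dependency line under `myrack_middleware (1.0)`. A hedged sketch of the corrected lockfile the non-frozen example expects (abridged; the real file also carries `BUNDLED WITH` and the host's actual platform list):

```ruby
corrected = <<~LOCKFILE
  GEM
    remote: https://gem.repo2/
    specs:
      myrack (0.9.1)
      myrack_middleware (1.0)
        myrack (= 0.9.1)

  PLATFORMS
    ruby

  DEPENDENCIES
    myrack_middleware
LOCKFILE

corrected.include?("myrack (0.9.1)") # => true, the assertion made once `bundle install` rewrites the file
```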
context "with --local flag" do before do system_gems "myrack-1.0.0", path: default_bundle_path diff --git a/spec/ruby/core/file/birthtime_spec.rb b/spec/ruby/core/file/birthtime_spec.rb index ff43aa7cef..f82eaf7cca 100644 --- a/spec/ruby/core/file/birthtime_spec.rb +++ b/spec/ruby/core/file/birthtime_spec.rb @@ -1,6 +1,11 @@ require_relative '../../spec_helper' platform_is :windows, :darwin, :freebsd, :netbsd, :linux do + not_implemented_messages = [ + "birthtime() function is unimplemented", # unsupported OS/version + "birthtime is unimplemented", # unsupported filesystem + ] + describe "File.birthtime" do before :each do @file = __FILE__ @@ -14,20 +19,20 @@ platform_is :windows, :darwin, :freebsd, :netbsd, :linux do File.birthtime(@file) File.birthtime(@file).should be_kind_of(Time) rescue NotImplementedError => e - skip e.message if e.message.start_with?("birthtime() function") + e.message.should.start_with?(*not_implemented_messages) end it "accepts an object that has a #to_path method" do File.birthtime(@file) # Avoid to failure of mock object with old Kernel and glibc File.birthtime(mock_to_path(@file)) rescue NotImplementedError => e - e.message.should.start_with?("birthtime() function") + e.message.should.start_with?(*not_implemented_messages) end it "raises an Errno::ENOENT exception if the file is not found" do -> { File.birthtime('bogus') }.should raise_error(Errno::ENOENT) rescue NotImplementedError => e - e.message.should.start_with?("birthtime() function") + e.message.should.start_with?(*not_implemented_messages) end end @@ -45,7 +50,7 @@ platform_is :windows, :darwin, :freebsd, :netbsd, :linux do @file.birthtime @file.birthtime.should be_kind_of(Time) rescue NotImplementedError => e - e.message.should.start_with?("birthtime() function") + e.message.should.start_with?(*not_implemented_messages) end end end diff --git a/spec/ruby/core/file/stat/birthtime_spec.rb b/spec/ruby/core/file/stat/birthtime_spec.rb index 5350a571aa..adecee15b0 100644 --- a/spec/ruby/core/file/stat/birthtime_spec.rb +++ b/spec/ruby/core/file/stat/birthtime_spec.rb @@ -3,6 +3,11 @@ require_relative '../../../spec_helper' platform_is(:windows, :darwin, :freebsd, :netbsd, *ruby_version_is("3.5") { :linux }, ) do + not_implemented_messages = [ + "birthtime() function is unimplemented", # unsupported OS/version + "birthtime is unimplemented", # unsupported filesystem + ] + describe "File::Stat#birthtime" do before :each do @file = tmp('i_exist') @@ -18,7 +23,7 @@ platform_is(:windows, :darwin, :freebsd, :netbsd, st.birthtime.should be_kind_of(Time) st.birthtime.should <= Time.now rescue NotImplementedError => e - e.message.should.start_with?("birthtime() function") + e.message.should.start_with?(*not_implemented_messages) end end end diff --git a/spec/ruby/core/kernel/inspect_spec.rb b/spec/ruby/core/kernel/inspect_spec.rb index 1f9ce834ab..e60f7576c5 100644 --- a/spec/ruby/core/kernel/inspect_spec.rb +++ b/spec/ruby/core/kernel/inspect_spec.rb @@ -28,4 +28,34 @@ describe "Kernel#inspect" do end obj.inspect.should be_kind_of(String) end + + ruby_version_is "3.5" do + it "calls #instance_variables_to_inspect private method to know which variables to display" do + obj = Object.new + obj.instance_eval do + @host = "localhost" + @user = "root" + @password = "hunter2" + end + obj.singleton_class.class_eval do + private def instance_variables_to_inspect = %i[@host @user @does_not_exist] + end + + inspected = obj.inspect.sub(/^#<Object:0x[0-9a-f]+/, '#<Object:0x00') + inspected.should == '#<Object:0x00 
@host="localhost", @user="root">' + + obj = Object.new + obj.instance_eval do + @host = "localhost" + @user = "root" + @password = "hunter2" + end + obj.singleton_class.class_eval do + private def instance_variables_to_inspect = [] + end + + inspected = obj.inspect.sub(/^#<Object:0x[0-9a-f]+/, '#<Object:0x00') + inspected.should == "#<Object:0x00>" + end + end end diff --git a/spec/ruby/library/net-http/http/post_spec.rb b/spec/ruby/library/net-http/http/post_spec.rb index ac020bd6be..cebbee4ff3 100644 --- a/spec/ruby/library/net-http/http/post_spec.rb +++ b/spec/ruby/library/net-http/http/post_spec.rb @@ -25,9 +25,11 @@ describe "Net::HTTP.post" do response.should be_kind_of(Net::HTTPResponse) end - it "sends Content-Type: application/x-www-form-urlencoded by default" do - response = Net::HTTP.post(URI("http://localhost:#{NetHTTPSpecs.port}/request/header"), "test=test") - response.body.should include({ "Content-Type" => "application/x-www-form-urlencoded" }.inspect.delete("{}")) + ruby_version_is ""..."3.5" do + it "sends Content-Type: application/x-www-form-urlencoded by default" do + response = Net::HTTP.post(URI("http://localhost:#{NetHTTPSpecs.port}/request/header"), "test=test") + response.body.should include({ "Content-Type" => "application/x-www-form-urlencoded" }.inspect.delete("{}")) + end end it "does not support HTTP Basic Auth" do diff --git a/spec/ruby/library/net-http/httpgenericrequest/exec_spec.rb b/spec/ruby/library/net-http/httpgenericrequest/exec_spec.rb index 7de03d7da0..0912e5a71f 100644 --- a/spec/ruby/library/net-http/httpgenericrequest/exec_spec.rb +++ b/spec/ruby/library/net-http/httpgenericrequest/exec_spec.rb @@ -31,18 +31,20 @@ describe "Net::HTTPGenericRequest#exec when passed socket, version, path" do end describe "when a request body is set" do - it "sets the 'Content-Type' header to 'application/x-www-form-urlencoded' unless the 'Content-Type' header is supplied" do - request = Net::HTTPGenericRequest.new("POST", true, true, "/some/path") - request.body = "Some Content" - - request.exec(@buffered_socket, "1.1", "/some/other/path") - str = @socket.string - - str.should =~ %r[POST /some/other/path HTTP/1.1\r\n] - str.should =~ %r[Accept: \*/\*\r\n] - str.should =~ %r[Content-Type: application/x-www-form-urlencoded\r\n] - str.should =~ %r[Content-Length: 12\r\n] - str[-16..-1].should == "\r\n\r\nSome Content" + ruby_version_is ""..."3.5" do + it "sets the 'Content-Type' header to 'application/x-www-form-urlencoded' unless the 'Content-Type' header is supplied" do + request = Net::HTTPGenericRequest.new("POST", true, true, "/some/path") + request.body = "Some Content" + + request.exec(@buffered_socket, "1.1", "/some/other/path") + str = @socket.string + + str.should =~ %r[POST /some/other/path HTTP/1.1\r\n] + str.should =~ %r[Accept: \*/\*\r\n] + str.should =~ %r[Content-Type: application/x-www-form-urlencoded\r\n] + str.should =~ %r[Content-Length: 12\r\n] + str[-16..-1].should == "\r\n\r\nSome Content" + end end it "correctly sets the 'Content-Length' header and includes the body" do @@ -62,19 +64,21 @@ describe "Net::HTTPGenericRequest#exec when passed socket, version, path" do end describe "when a body stream is set" do - it "sets the 'Content-Type' header to 'application/x-www-form-urlencoded' unless the 'Content-Type' header is supplied" do - request = Net::HTTPGenericRequest.new("POST", true, true, "/some/path", - "Content-Length" => "10") - request.body_stream = StringIO.new("a" * 20) - - request.exec(@buffered_socket, "1.1", "/some/other/path") - str 
= @socket.string - - str.should =~ %r[POST /some/other/path HTTP/1.1\r\n] - str.should =~ %r[Accept: \*/\*\r\n] - str.should =~ %r[Content-Type: application/x-www-form-urlencoded\r\n] - str.should =~ %r[Content-Length: 10\r\n] - str[-24..-1].should == "\r\n\r\naaaaaaaaaaaaaaaaaaaa" + ruby_version_is ""..."3.5" do + it "sets the 'Content-Type' header to 'application/x-www-form-urlencoded' unless the 'Content-Type' header is supplied" do + request = Net::HTTPGenericRequest.new("POST", true, true, "/some/path", + "Content-Length" => "10") + request.body_stream = StringIO.new("a" * 20) + + request.exec(@buffered_socket, "1.1", "/some/other/path") + str = @socket.string + + str.should =~ %r[POST /some/other/path HTTP/1.1\r\n] + str.should =~ %r[Accept: \*/\*\r\n] + str.should =~ %r[Content-Type: application/x-www-form-urlencoded\r\n] + str.should =~ %r[Content-Length: 10\r\n] + str[-24..-1].should == "\r\n\r\naaaaaaaaaaaaaaaaaaaa" + end end it "sends the whole stream, regardless of the 'Content-Length' header" do diff --git a/test/-ext-/bug_reporter/test_bug_reporter.rb b/test/-ext-/bug_reporter/test_bug_reporter.rb index 8293408518..d402ab1382 100644 --- a/test/-ext-/bug_reporter/test_bug_reporter.rb +++ b/test/-ext-/bug_reporter/test_bug_reporter.rb @@ -6,8 +6,6 @@ require_relative '../../lib/parser_support' class TestBugReporter < Test::Unit::TestCase def test_bug_reporter_add - pend "macOS 15 is not working with this test" if macos?(15) - description = RUBY_DESCRIPTION description = description.sub(/\+PRISM /, '') unless ParserSupport.prism_enabled_in_subprocess? expected_stderr = [ diff --git a/test/net/http/test_http.rb b/test/net/http/test_http.rb index c9a27d87cb..366b4cd12c 100644 --- a/test/net/http/test_http.rb +++ b/test/net/http/test_http.rb @@ -494,12 +494,10 @@ module TestNetHTTP_version_1_1_methods def test_s_post url = "http://#{config('host')}:#{config('port')}/?q=a" - res = assert_warning(/Content-Type did not set/) do - Net::HTTP.post( - URI.parse(url), - "a=x") - end - assert_equal "application/x-www-form-urlencoded", res["Content-Type"] + res = Net::HTTP.post( + URI.parse(url), + "a=x") + assert_equal "application/octet-stream", res["Content-Type"] assert_equal "a=x", res.body assert_equal url, res["X-request-uri"] @@ -570,9 +568,7 @@ module TestNetHTTP_version_1_1_methods th = Thread.new do err = !windows? ? 
Net::WriteTimeout : Net::ReadTimeout assert_raise(err) do - assert_warning(/Content-Type did not set/) do - conn.post('/', "a"*50_000_000) - end + conn.post('/', "a"*50_000_000) end end assert th.join(EnvUtil.apply_timeout_scale(10)) diff --git a/test/net/http/utils.rb b/test/net/http/utils.rb index b41341d0a0..067cca02e3 100644 --- a/test/net/http/utils.rb +++ b/test/net/http/utils.rb @@ -71,6 +71,11 @@ module TestNetHTTPUtils socket.write "HTTP/1.1 100 Continue\r\n\r\n" end + # Set default Content-Type if not provided + if !headers['Content-Type'] && (method == 'POST' || method == 'PUT' || method == 'PATCH') + headers['Content-Type'] = 'application/octet-stream' + end + req = Request.new(method, path, headers, socket) if @procs.key?(req.path) || @procs.key?("#{req.path}/") proc = @procs[req.path] || @procs["#{req.path}/"] @@ -306,16 +311,18 @@ module TestNetHTTPUtils scheme = headers['X-Request-Scheme'] || 'http' host = @config['host'] port = socket.addr[1] - charset = parse_content_type(headers['Content-Type'])[1] + content_type = headers['Content-Type'] || 'application/octet-stream' + charset = parse_content_type(content_type)[1] path = "#{scheme}://#{host}:#{port}#{path}" path = path.encode(charset) if charset - response = "HTTP/1.1 200 OK\r\nContent-Type: #{headers['Content-Type']}\r\nContent-Length: #{body.bytesize}\r\nX-request-uri: #{path}\r\n\r\n#{body}" + response = "HTTP/1.1 200 OK\r\nContent-Type: #{content_type}\r\nContent-Length: #{body.bytesize}\r\nX-request-uri: #{path}\r\n\r\n#{body}" socket.print(response) end def handle_patch(path, headers, socket) body = socket.read(headers['Content-Length'].to_i) - response = "HTTP/1.1 200 OK\r\nContent-Type: #{headers['Content-Type']}\r\nContent-Length: #{body.bytesize}\r\n\r\n#{body}" + content_type = headers['Content-Type'] || 'application/octet-stream' + response = "HTTP/1.1 200 OK\r\nContent-Type: #{content_type}\r\nContent-Length: #{body.bytesize}\r\n\r\n#{body}" socket.print(response) end diff --git a/test/prism/fixtures/strings.txt b/test/prism/fixtures/strings.txt index 0787152786..77e1e4acff 100644 --- a/test/prism/fixtures/strings.txt +++ b/test/prism/fixtures/strings.txt @@ -146,6 +146,10 @@ baz %Q{abc} +%Q(\«) + +%q(\«) + %^#$^# %@#@# diff --git a/test/ruby/namespace/instance_variables.rb b/test/ruby/namespace/instance_variables.rb new file mode 100644 index 0000000000..1562ad5d45 --- /dev/null +++ b/test/ruby/namespace/instance_variables.rb @@ -0,0 +1,21 @@ +class String + class << self + attr_reader :str_ivar1 + + def str_ivar2 + @str_ivar2 + end + end + + @str_ivar1 = 111 + @str_ivar2 = 222 +end + +class StringDelegator < BasicObject +private + def method_missing(...) + ::String.public_send(...) 
+ end +end + +StringDelegatorObj = StringDelegator.new diff --git a/test/ruby/test_compile_prism.rb b/test/ruby/test_compile_prism.rb index 819d0d35aa..86f7f0b14f 100644 --- a/test/ruby/test_compile_prism.rb +++ b/test/ruby/test_compile_prism.rb @@ -1053,6 +1053,9 @@ module Prism assert_prism_eval("for foo, in [1,2,3] do end") assert_prism_eval("for i, j in {a: 'b'} do; i; j; end") + + # Test splat node as index in for loop + assert_prism_eval("for *x in [[1,2], [3,4]] do; x; end") end ############################################################################ diff --git a/test/ruby/test_namespace.rb b/test/ruby/test_namespace.rb index 395f244c8e..f13063be48 100644 --- a/test/ruby/test_namespace.rb +++ b/test/ruby/test_namespace.rb @@ -222,6 +222,26 @@ class TestNamespace < Test::Unit::TestCase end; end + def test_instance_variable + pend unless Namespace.enabled? + + @n.require_relative('namespace/instance_variables') + + assert_equal [], String.instance_variables + assert_equal [:@str_ivar1, :@str_ivar2], @n::StringDelegatorObj.instance_variables + assert_equal 111, @n::StringDelegatorObj.str_ivar1 + assert_equal 222, @n::StringDelegatorObj.str_ivar2 + assert_equal 222, @n::StringDelegatorObj.instance_variable_get(:@str_ivar2) + + @n::StringDelegatorObj.instance_variable_set(:@str_ivar3, 333) + assert_equal 333, @n::StringDelegatorObj.instance_variable_get(:@str_ivar3) + @n::StringDelegatorObj.remove_instance_variable(:@str_ivar1) + assert_nil @n::StringDelegatorObj.str_ivar1 + assert_equal [:@str_ivar2, :@str_ivar3], @n::StringDelegatorObj.instance_variables + + assert_equal [], String.instance_variables + end + def test_methods_added_in_namespace_are_invisible_globally pend unless Namespace.enabled? diff --git a/test/ruby/test_object.rb b/test/ruby/test_object.rb index 7d00422629..9074e54df5 100644 --- a/test/ruby/test_object.rb +++ b/test/ruby/test_object.rb @@ -950,6 +950,19 @@ class TestObject < Test::Unit::TestCase assert_match(/\bInspect\u{3042}:.* @\u{3044}=42\b/, x.inspect) x.instance_variable_set("@\u{3046}".encode(Encoding::EUC_JP), 6) assert_match(/@\u{3046}=6\b/, x.inspect) + + x = Object.new + x.singleton_class.class_eval do + private def instance_variables_to_inspect = [:@host, :@user] + end + + x.instance_variable_set(:@host, "localhost") + x.instance_variable_set(:@user, "root") + x.instance_variable_set(:@password, "hunter2") + s = x.inspect + assert_include(s, "@host=\"localhost\"") + assert_include(s, "@user=\"root\"") + assert_not_include(s, "@password=") end def test_singleton_methods diff --git a/test/ruby/test_ractor.rb b/test/ruby/test_ractor.rb index b423993df1..e463b504d1 100644 --- a/test/ruby/test_ractor.rb +++ b/test/ruby/test_ractor.rb @@ -79,6 +79,26 @@ class TestRactor < Test::Unit::TestCase end; end + def test_class_instance_variables + assert_ractor(<<~'RUBY') + # Once we're in multi-ractor mode, the codepaths + # for class instance variables are a bit different. 
+ Ractor.new {}.value + + class TestClass + @a = 1 + @b = 2 + @c = 3 + @d = 4 + end + + assert_equal 4, TestClass.remove_instance_variable(:@d) + assert_nil TestClass.instance_variable_get(:@d) + assert_equal 4, TestClass.instance_variable_set(:@d, 4) + assert_equal 4, TestClass.instance_variable_get(:@d) + RUBY + end + def test_require_raises_and_no_ractor_belonging_issue assert_ractor(<<~'RUBY') require "tempfile" @@ -98,6 +118,21 @@ class TestRactor < Test::Unit::TestCase RUBY end + def test_require_non_string + assert_ractor(<<~'RUBY') + require "tempfile" + require "pathname" + f = Tempfile.new(["file_to_require_from_ractor", ".rb"]) + f.write("puts 'success'") + f.flush + result = Ractor.new(f.path) do |path| + require Pathname.new(path) + "success" + end.value + assert_equal "success", result + RUBY + end + def assert_make_shareable(obj) refute Ractor.shareable?(obj), "object was already shareable" Ractor.make_shareable(obj) diff --git a/test/ruby/test_rubyoptions.rb b/test/ruby/test_rubyoptions.rb index 833b6a3b7d..3f79c2afd7 100644 --- a/test/ruby/test_rubyoptions.rb +++ b/test/ruby/test_rubyoptions.rb @@ -836,8 +836,6 @@ class TestRubyOptions < Test::Unit::TestCase end def assert_segv(args, message=nil, list: SEGVTest::ExpectedStderrList, **opt, &block) - pend "macOS 15 is not working with this assertion" if macos?(15) - # We want YJIT to be enabled in the subprocess if it's enabled for us # so that the Ruby description matches. env = Hash === args.first ? args.shift : {} @@ -881,8 +879,6 @@ class TestRubyOptions < Test::Unit::TestCase end def assert_crash_report(path, cmd = nil, &block) - pend "macOS 15 is not working with this assertion" if macos?(15) - Dir.mktmpdir("ruby_crash_report") do |dir| list = SEGVTest::ExpectedStderrList if cmd diff --git a/test/ruby/test_set.rb b/test/ruby/test_set.rb index 2bb7858eb2..3a8568762a 100644 --- a/test/ruby/test_set.rb +++ b/test/ruby/test_set.rb @@ -130,6 +130,12 @@ class TC_Set < Test::Unit::TestCase assert_equal(Set['a','b','c'], set) set = Set[1,2] + ret = set.replace(Set.new('a'..'c')) + + assert_same(set, ret) + assert_equal(Set['a','b','c'], set) + + set = Set[1,2] assert_raise(ArgumentError) { set.replace(3) } diff --git a/test/ruby/test_vm_dump.rb b/test/ruby/test_vm_dump.rb index 709fd5eadf..a3e7b69913 100644 --- a/test/ruby/test_vm_dump.rb +++ b/test/ruby/test_vm_dump.rb @@ -5,8 +5,6 @@ return unless /darwin/ =~ RUBY_PLATFORM class TestVMDump < Test::Unit::TestCase def assert_darwin_vm_dump_works(args, timeout=nil) - pend "macOS 15 is not working with this assertion" if macos?(15) - assert_in_out_err(args, "", [], /^\[IMPORTANT\]/, timeout: timeout || 300) end diff --git a/test/ruby/test_zjit.rb b/test/ruby/test_zjit.rb index 3b64887f92..6095b0b734 100644 --- a/test/ruby/test_zjit.rb +++ b/test/ruby/test_zjit.rb @@ -102,12 +102,39 @@ class TestZJIT < Test::Unit::TestCase }, call_threshold: 2 end + def test_opt_plus_type_guard_exit_with_locals + assert_compiles '[6, 6.0]', %q{ + def test(a) + local = 3 + 1 + a + local + end + test(1) # profile opt_plus + [test(2), test(2.0)] + }, call_threshold: 2 + end + def test_opt_plus_type_guard_nested_exit - omit 'rewind_caller_frames is not implemented yet' - assert_compiles '[3, 3.0]', %q{ + assert_compiles '[4, 4.0]', %q{ def side_exit(n) = 1 + n def jit_frame(n) = 1 + side_exit(n) def entry(n) = jit_frame(n) + entry(2) # profile send + [entry(2), entry(2.0)] + }, call_threshold: 2 + end + + def test_opt_plus_type_guard_nested_exit_with_locals + assert_compiles '[9, 9.0]', %q{ + def 
side_exit(n) + local = 2 + 1 + n + local + end + def jit_frame(n) + local = 3 + 1 + side_exit(n) + local + end + def entry(n) = jit_frame(n) + entry(2) # profile send [entry(2), entry(2.0)] }, call_threshold: 2 end @@ -130,7 +157,6 @@ class TestZJIT < Test::Unit::TestCase end def test_opt_mult_overflow - omit 'side exits are not implemented yet' assert_compiles '[6, -6, 9671406556917033397649408, -9671406556917033397649408, 21267647932558653966460912964485513216]', %q{ def test(a, b) a * b @@ -205,6 +231,48 @@ class TestZJIT < Test::Unit::TestCase }, insns: [:opt_gt], call_threshold: 2 end + def test_opt_empty_p + assert_compiles('[false, false, true]', <<~RUBY, insns: [:opt_empty_p]) + def test(x) = x.empty? + return test([1]), test("1"), test({}) + RUBY + end + + def test_opt_succ + assert_compiles('[0, "B"]', <<~RUBY, insns: [:opt_succ]) + def test(obj) = obj.succ + return test(-1), test("A") + RUBY + end + + def test_opt_and + assert_compiles('[1, [3, 2, 1]]', <<~RUBY, insns: [:opt_and]) + def test(x, y) = x & y + return test(0b1101, 3), test([3, 2, 1, 4], [8, 1, 2, 3]) + RUBY + end + + def test_opt_or + assert_compiles('[11, [3, 2, 1]]', <<~RUBY, insns: [:opt_or]) + def test(x, y) = x | y + return test(0b1000, 3), test([3, 2, 1], [1, 2, 3]) + RUBY + end + + def test_opt_not + assert_compiles('[true, true, false]', <<~RUBY, insns: [:opt_not]) + def test(obj) = !obj + return test(nil), test(false), test(0) + RUBY + end + + def test_opt_regexpmatch2 + assert_compiles('[1, nil]', <<~RUBY, insns: [:opt_regexpmatch2]) + def test(haystack) = /needle/ =~ haystack + return test("kneedle"), test("") + RUBY + end + def test_opt_ge assert_compiles '[false, true, true]', %q{ def test(a, b) = a >= b @@ -568,6 +636,22 @@ class TestZJIT < Test::Unit::TestCase } end + def test_send_backtrace + backtrace = [ + "-e:2:in 'Object#jit_frame1'", + "-e:3:in 'Object#entry'", + "-e:5:in 'block in <main>'", + "-e:6:in '<main>'", + ] + assert_compiles backtrace.inspect, %q{ + def jit_frame2 = caller # 1 + def jit_frame1 = jit_frame2 # 2 + def entry = jit_frame1 # 3 + entry # profile send # 4 + entry # 5 + }, call_threshold: 2 + end + # tool/ruby_vm/views/*.erb relies on the zjit instructions a) being contiguous and # b) being reliably ordered after all the other instructions. def test_instruction_order @@ -589,11 +673,7 @@ class TestZJIT < Test::Unit::TestCase pipe_fd = 3 script = <<~RUBY - _test_proc = -> { - RubyVM::ZJIT.assert_compiles - #{test_script} - } - ret_val = _test_proc.call + ret_val = (_test_proc = -> { RubyVM::ZJIT.assert_compiles; #{test_script.lstrip} }).call result = { ret_val:, #{ unless insns.empty? 
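The rubygems installer tests below pin down the resolver change from lib/rubygems/resolver.rb earlier in this diff: candidate specs are now ordered by how specifically their platform matches the local platform, not merely by whether they match. A hedged sketch of the intended outcome; the ranking lambda here is illustrative only, the real ordering comes from `Gem::Platform.platform_specificity_match`:

```ruby
local      = "arm64-darwin-20"
candidates = %w[ruby universal-darwin universal-darwin-20 x64-darwin-20 arm64-darwin-20]

# Illustrative ranking: the exact platform beats universal variants, which beat
# "ruby"; a different CPU (x64-darwin-20) is least preferred on this host.
rank = lambda do |platform|
  case platform
  when local                 then 0
  when "universal-darwin-20" then 1
  when "universal-darwin"    then 2
  when "ruby"                then 3
  else                            4
  end
end

candidates.min_by(&rank) # => "arm64-darwin-20", matching a-3-arm64-darwin-20 in both tests
```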
diff --git a/test/rubygems/test_gem_commands_install_command.rb b/test/rubygems/test_gem_commands_install_command.rb index 468aecde56..77525aed2c 100644 --- a/test/rubygems/test_gem_commands_install_command.rb +++ b/test/rubygems/test_gem_commands_install_command.rb @@ -1005,6 +1005,38 @@ ERROR: Possible alternatives: non_existent_with_hint assert_equal %W[a-3-#{local}], @cmd.installed_specs.map(&:full_name) end + def test_install_gem_platform_specificity_match + util_set_arch "arm64-darwin-20" + + spec_fetcher do |fetcher| + %w[ruby universal-darwin universal-darwin-20 x64-darwin-20 arm64-darwin-20].each do |platform| + fetcher.download "a", 3 do |s| + s.platform = platform + end + end + end + + @cmd.install_gem "a", ">= 0" + + assert_equal %w[a-3-arm64-darwin-20], @cmd.installed_specs.map(&:full_name) + end + + def test_install_gem_platform_specificity_match_reverse_order + util_set_arch "arm64-darwin-20" + + spec_fetcher do |fetcher| + %w[ruby universal-darwin universal-darwin-20 x64-darwin-20 arm64-darwin-20].reverse_each do |platform| + fetcher.download "a", 3 do |s| + s.platform = platform + end + end + end + + @cmd.install_gem "a", ">= 0" + + assert_equal %w[a-3-arm64-darwin-20], @cmd.installed_specs.map(&:full_name) + end + def test_install_gem_ignore_dependencies_specific_file spec = util_spec "a", 2 diff --git a/test/rubygems/test_gem_commands_pristine_command.rb b/test/rubygems/test_gem_commands_pristine_command.rb index 46c06db014..e9c4d32945 100644 --- a/test/rubygems/test_gem_commands_pristine_command.rb +++ b/test/rubygems/test_gem_commands_pristine_command.rb @@ -125,8 +125,8 @@ class TestGemCommandsPristineCommand < Gem::TestCase @cmd.execute end - assert File.exist?(gem_bin) - assert File.exist?(gem_stub) + assert_path_exist gem_bin + assert_path_exist gem_stub out = @ui.output.split "\n" @@ -537,8 +537,8 @@ class TestGemCommandsPristineCommand < Gem::TestCase @cmd.execute end - assert File.exist? gem_exec - refute File.exist? gem_lib + assert_path_exist gem_exec + assert_path_not_exist gem_lib end def test_execute_only_plugins @@ -572,9 +572,9 @@ class TestGemCommandsPristineCommand < Gem::TestCase @cmd.execute end - refute File.exist? gem_exec - assert File.exist? gem_plugin - refute File.exist? gem_lib + assert_path_not_exist gem_exec + assert_path_exist gem_plugin + assert_path_not_exist gem_lib end def test_execute_bindir @@ -606,8 +606,8 @@ class TestGemCommandsPristineCommand < Gem::TestCase @cmd.execute end - refute File.exist? gem_exec - assert File.exist? 
gem_bindir + assert_path_not_exist gem_exec + assert_path_exist gem_bindir end def test_execute_unknown_gem_at_remote_source @@ -659,6 +659,42 @@ class TestGemCommandsPristineCommand < Gem::TestCase refute_includes "ruby_executable_hooks", File.read(exe) end + def test_execute_default_gem_and_regular_gem + a_default = new_default_spec("a", "1.2.0") + + a = util_spec "a" do |s| + s.extensions << "ext/a/extconf.rb" + end + + ext_path = File.join @tempdir, "ext", "a", "extconf.rb" + write_file ext_path do |io| + io.write <<-'RUBY' + File.open "Makefile", "w" do |f| + f.puts "clean:\n\techo cleaned\n" + f.puts "all:\n\techo built\n" + f.puts "install:\n\techo installed\n" + end + RUBY + end + + install_default_gems a_default + install_gem a + + # Remove the extension files for a + FileUtils.rm_rf a.gem_build_complete_path + + @cmd.options[:args] = %w[a] + + use_ui @ui do + @cmd.execute + end + + assert_includes @ui.output, "Restored #{a.full_name}" + + # Check extension files for a were restored + assert_path_exist a.gem_build_complete_path + end + def test_execute_multi_platform a = util_spec "a" do |s| s.extensions << "ext/a/extconf.rb" diff --git a/test/rubygems/test_gem_ext_cargo_builder.rb b/test/rubygems/test_gem_ext_cargo_builder.rb index 5035937544..b970e442c2 100644 --- a/test/rubygems/test_gem_ext_cargo_builder.rb +++ b/test/rubygems/test_gem_ext_cargo_builder.rb @@ -141,6 +141,58 @@ class TestGemExtCargoBuilder < Gem::TestCase end end + def test_linker_args + orig_cc = RbConfig::MAKEFILE_CONFIG["CC"] + RbConfig::MAKEFILE_CONFIG["CC"] = "clang" + + builder = Gem::Ext::CargoBuilder.new + args = builder.send(:linker_args) + + assert args[1], "linker=clang" + assert_nil args[2] + ensure + RbConfig::MAKEFILE_CONFIG["CC"] = orig_cc + end + + def test_linker_args_with_options + orig_cc = RbConfig::MAKEFILE_CONFIG["CC"] + RbConfig::MAKEFILE_CONFIG["CC"] = "gcc -Wl,--no-undefined" + + builder = Gem::Ext::CargoBuilder.new + args = builder.send(:linker_args) + + assert args[1], "linker=clang" + assert args[3], "link-args=-Wl,--no-undefined" + ensure + RbConfig::MAKEFILE_CONFIG["CC"] = orig_cc + end + + def test_linker_args_with_cachetools + orig_cc = RbConfig::MAKEFILE_CONFIG["CC"] + RbConfig::MAKEFILE_CONFIG["CC"] = "sccache clang" + + builder = Gem::Ext::CargoBuilder.new + args = builder.send(:linker_args) + + assert args[1], "linker=clang" + assert_nil args[2] + ensure + RbConfig::MAKEFILE_CONFIG["CC"] = orig_cc + end + + def test_linker_args_with_cachetools_and_options + orig_cc = RbConfig::MAKEFILE_CONFIG["CC"] + RbConfig::MAKEFILE_CONFIG["CC"] = "ccache gcc -Wl,--no-undefined" + + builder = Gem::Ext::CargoBuilder.new + args = builder.send(:linker_args) + + assert args[1], "linker=clang" + assert args[3], "link-args=-Wl,--no-undefined" + ensure + RbConfig::MAKEFILE_CONFIG["CC"] = orig_cc + end + private def skip_unsupported_platforms! @@ -519,12 +519,8 @@ thread_cleanup_func(void *th_ptr, int atfork) th->locking_mutex = Qfalse; thread_cleanup_func_before_exec(th_ptr); - /* - * Unfortunately, we can't release native threading resource at fork - * because libc may have unstable locking state therefore touching - * a threading resource may cause a deadlock. 
- */ if (atfork) { + native_thread_destroy_atfork(th->nt); th->nt = NULL; return; } @@ -6210,7 +6206,11 @@ threadptr_interrupt_exec_exec(rb_thread_t *th) RUBY_DEBUG_LOG("task:%p", task); if (task) { - (*task->func)(task->data); + if (task->flags & rb_interrupt_exec_flag_new_thread) { + rb_thread_create(task->func, task->data); + } else { + (*task->func)(task->data); + } ruby_xfree(task); } else { @@ -6233,43 +6233,15 @@ threadptr_interrupt_exec_cleanup(rb_thread_t *th) rb_native_mutex_unlock(&th->interrupt_lock); } -struct interrupt_ractor_new_thread_data { - rb_interrupt_exec_func_t *func; - void *data; -}; - -static VALUE -interrupt_ractor_new_thread_func(void *data) -{ - struct interrupt_ractor_new_thread_data d = *(struct interrupt_ractor_new_thread_data *)data; - ruby_xfree(data); - - d.func(d.data); - return Qnil; -} - -static VALUE -interrupt_ractor_func(void *data) -{ - rb_thread_create(interrupt_ractor_new_thread_func, data); - return Qnil; -} - // native thread safe // func/data should be native thread safe void rb_ractor_interrupt_exec(struct rb_ractor_struct *target_r, rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags) { - struct interrupt_ractor_new_thread_data *d = ALLOC(struct interrupt_ractor_new_thread_data); - RUBY_DEBUG_LOG("flags:%d", (int)flags); - d->func = func; - d->data = data; rb_thread_t *main_th = target_r->threads.main; - rb_threadptr_interrupt_exec(main_th, interrupt_ractor_func, d, flags); - - // TODO MEMO: we can create a new thread in a ractor, but not sure how to do that now. + rb_threadptr_interrupt_exec(main_th, func, data, flags | rb_interrupt_exec_flag_new_thread); } diff --git a/thread_none.c b/thread_none.c index d535d9af4c..38686e17c1 100644 --- a/thread_none.c +++ b/thread_none.c @@ -137,6 +137,12 @@ ruby_mn_threads_params(void) { } +static void +native_thread_destroy_atfork(struct rb_native_thread *nt) +{ + /* no-op */ +} + static int native_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame) { diff --git a/thread_pthread.c b/thread_pthread.c index f9352bbb56..377e1d9f64 100644 --- a/thread_pthread.c +++ b/thread_pthread.c @@ -1817,6 +1817,27 @@ native_thread_assign(struct rb_native_thread *nt, rb_thread_t *th) } static void +native_thread_destroy_atfork(struct rb_native_thread *nt) +{ + if (nt) { + /* We can't call rb_native_cond_destroy here because according to the + * specs of pthread_cond_destroy: + * + * Attempting to destroy a condition variable upon which other threads + * are currently blocked results in undefined behavior. + * + * Specifically, glibc's pthread_cond_destroy waits on all the other + * listeners. Since after forking all the threads are dead, the condition + * variable's listeners will never wake up, so it will hang forever. 
+ */ + + RB_ALTSTACK_FREE(nt->altstack); + ruby_xfree(nt->nt_context); + ruby_xfree(nt); + } +} + +static void native_thread_destroy(struct rb_native_thread *nt) { if (nt) { @@ -1826,9 +1847,7 @@ native_thread_destroy(struct rb_native_thread *nt) rb_native_cond_destroy(&nt->cond.intr); } - RB_ALTSTACK_FREE(nt->altstack); - ruby_xfree(nt->nt_context); - ruby_xfree(nt); + native_thread_destroy_atfork(nt); } } diff --git a/thread_win32.c b/thread_win32.c index ed8a99dd88..576f617e8d 100644 --- a/thread_win32.c +++ b/thread_win32.c @@ -617,6 +617,12 @@ native_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame) th->ec->machine.stack_maxsize = size - space; } +static void +native_thread_destroy_atfork(struct rb_native_thread *nt) +{ + /* no-op */ +} + #ifndef InterlockedExchangePointer #define InterlockedExchangePointer(t, v) \ (void *)InterlockedExchange((long *)(t), (long)(v)) diff --git a/tool/fetch-bundled_gems.rb b/tool/fetch-bundled_gems.rb index f50bda360a..b76feefd94 100755 --- a/tool/fetch-bundled_gems.rb +++ b/tool/fetch-bundled_gems.rb @@ -24,20 +24,22 @@ next unless n next if n =~ /^#/ next if bundled_gems&.all? {|pat| !File.fnmatch?(pat, n)} -if File.directory?(n) - puts "updating #{color.notice(n)} ..." - system("git", "fetch", "--all", chdir: n) or abort -else +unless File.exist?("#{n}/.git") puts "retrieving #{color.notice(n)} ..." - system(*%W"git clone #{u} #{n}") or abort + system(*%W"git clone --depth=1 --no-tags #{u} #{n}") or abort end if r puts "fetching #{color.notice(r)} ..." system("git", "fetch", "origin", r, chdir: n) or abort + c = r +else + c = ["v#{v}", v].find do |c| + puts "fetching #{color.notice(c)} ..." + system("git", "fetch", "origin", "refs/tags/#{c}:refs/tags/#{c}", chdir: n) + end or abort end -c = r || "v#{v}" checkout = %w"git -c advice.detachedHead=false checkout" print %[checking out #{color.notice(c)} (v=#{color.info(v)}] print %[, r=#{color.info(r)}] if r diff --git a/tool/sync_default_gems.rb b/tool/sync_default_gems.rb index 932f37b77c..ca0b15dd19 100755 --- a/tool/sync_default_gems.rb +++ b/tool/sync_default_gems.rb @@ -136,9 +136,11 @@ module SyncDefaultGems cp_r("#{upstream}/bundler/spec", "spec/bundler") rm_rf("spec/bundler/bin") - parallel_tests_content = File.read("#{upstream}/bundler/bin/parallel_rspec").gsub("../spec", "../bundler") - File.write("spec/bin/parallel_rspec", parallel_tests_content) - chmod("+x", "spec/bin/parallel_rspec") + ["parallel_rspec", "rspec"].each do |binstub| + content = File.read("#{upstream}/bundler/bin/#{binstub}").gsub("../spec", "../bundler") + File.write("spec/bin/#{binstub}", content) + chmod("+x", "spec/bin/#{binstub}") + end %w[dev_gems test_gems rubocop_gems standard_gems].each do |gemfile| ["rb.lock", "rb"].each do |ext| diff --git a/variable.c b/variable.c index 6d0e9832e7..93ae6bb8b2 100644 --- a/variable.c +++ b/variable.c @@ -1228,19 +1228,10 @@ gen_fields_tbl_bytes(size_t n) } static struct gen_fields_tbl * -gen_fields_tbl_resize(struct gen_fields_tbl *old, uint32_t n) +gen_fields_tbl_resize(struct gen_fields_tbl *old, uint32_t new_capa) { - RUBY_ASSERT(n > 0); - - uint32_t len = old ? 
old->as.shape.fields_count : 0; - struct gen_fields_tbl *fields_tbl = xrealloc(old, gen_fields_tbl_bytes(n)); - - fields_tbl->as.shape.fields_count = n; - for (; len < n; len++) { - fields_tbl->as.shape.fields[len] = Qundef; - } - - return fields_tbl; + RUBY_ASSERT(new_capa > 0); + return xrealloc(old, gen_fields_tbl_bytes(new_capa)); } void @@ -1253,7 +1244,8 @@ rb_mark_generic_ivar(VALUE obj) rb_mark_tbl_no_pin(fields_tbl->as.complex.table); } else { - for (uint32_t i = 0; i < fields_tbl->as.shape.fields_count; i++) { + uint32_t fields_count = RSHAPE_LEN(RBASIC_SHAPE_ID(obj)); + for (uint32_t i = 0; i < fields_count; i++) { rb_gc_mark_movable(fields_tbl->as.shape.fields[i]); } } @@ -1290,7 +1282,7 @@ rb_generic_ivar_memsize(VALUE obj) return sizeof(struct gen_fields_tbl) + st_memsize(fields_tbl->as.complex.table); } else { - return gen_fields_tbl_bytes(fields_tbl->as.shape.fields_count); + return gen_fields_tbl_bytes(RSHAPE_CAPACITY(RBASIC_SHAPE_ID(obj))); } } return 0; @@ -1299,36 +1291,35 @@ rb_generic_ivar_memsize(VALUE obj) static size_t gen_fields_tbl_count(VALUE obj, const struct gen_fields_tbl *fields_tbl) { - uint32_t i; - size_t n = 0; - if (rb_shape_obj_too_complex_p(obj)) { - n = st_table_size(fields_tbl->as.complex.table); + return st_table_size(fields_tbl->as.complex.table); } else { - for (i = 0; i < fields_tbl->as.shape.fields_count; i++) { - if (!UNDEF_P(fields_tbl->as.shape.fields[i])) { - n++; - } - } + return RSHAPE_LEN(RBASIC_SHAPE_ID(obj)); } - - return n; } VALUE rb_obj_field_get(VALUE obj, shape_id_t target_shape_id) { RUBY_ASSERT(!SPECIAL_CONST_P(obj)); - RUBY_ASSERT(RSHAPE(target_shape_id)->type == SHAPE_IVAR || RSHAPE(target_shape_id)->type == SHAPE_OBJ_ID); + RUBY_ASSERT(RSHAPE_TYPE_P(target_shape_id, SHAPE_IVAR) || RSHAPE_TYPE_P(target_shape_id, SHAPE_OBJ_ID)); + + if (BUILTIN_TYPE(obj) == T_CLASS || BUILTIN_TYPE(obj) == T_MODULE) { + ASSERT_vm_locking(); + VALUE field_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj); + if (field_obj) { + return rb_obj_field_get(field_obj, target_shape_id); + } + return Qundef; + } if (rb_shape_too_complex_p(target_shape_id)) { st_table *fields_hash; switch (BUILTIN_TYPE(obj)) { case T_CLASS: case T_MODULE: - ASSERT_vm_locking(); - fields_hash = RCLASS_FIELDS_HASH(obj); + rb_bug("Unreachable"); break; case T_OBJECT: fields_hash = ROBJECT_FIELDS_HASH(obj); @@ -1342,7 +1333,7 @@ rb_obj_field_get(VALUE obj, shape_id_t target_shape_id) break; } VALUE value = Qundef; - st_lookup(fields_hash, RSHAPE(target_shape_id)->edge_name, &value); + st_lookup(fields_hash, RSHAPE_EDGE_NAME(target_shape_id), &value); #if RUBY_DEBUG if (UNDEF_P(value)) { @@ -1354,13 +1345,12 @@ rb_obj_field_get(VALUE obj, shape_id_t target_shape_id) return value; } - attr_index_t attr_index = RSHAPE(target_shape_id)->next_field_index - 1; + attr_index_t attr_index = RSHAPE_INDEX(target_shape_id); VALUE *fields; switch (BUILTIN_TYPE(obj)) { case T_CLASS: case T_MODULE: - ASSERT_vm_locking(); - fields = RCLASS_PRIME_FIELDS(obj); + rb_bug("Unreachable"); break; case T_OBJECT: fields = ROBJECT_FIELDS(obj); @@ -1382,43 +1372,19 @@ rb_ivar_lookup(VALUE obj, ID id, VALUE undef) if (SPECIAL_CONST_P(obj)) return undef; shape_id_t shape_id; - VALUE * ivar_list; - shape_id = RBASIC_SHAPE_ID(obj); + VALUE *ivar_list; switch (BUILTIN_TYPE(obj)) { case T_CLASS: case T_MODULE: { - bool found = false; - VALUE val; - - RB_VM_LOCKING() { - if (rb_shape_too_complex_p(shape_id)) { - st_table * iv_table = RCLASS_FIELDS_HASH(obj); - if (rb_st_lookup(iv_table, (st_data_t)id, (st_data_t *)&val)) { 
- found = true; - } - else { - val = undef; - } - } - else { - attr_index_t index = 0; - found = rb_shape_get_iv_index(shape_id, id, &index); - - if (found) { - ivar_list = RCLASS_PRIME_FIELDS(obj); - RUBY_ASSERT(ivar_list); - - val = ivar_list[index]; - } - else { - val = undef; - } - } + VALUE val = undef; + VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj); + if (fields_obj) { + val = rb_ivar_lookup(fields_obj, id, undef); } - if (found && + if (val != undef && rb_is_instance_id(id) && UNLIKELY(!rb_ractor_main_p()) && !rb_ractor_shareable_p(val)) { @@ -1427,10 +1393,32 @@ rb_ivar_lookup(VALUE obj, ID id, VALUE undef) } return val; } + case T_IMEMO: + // Handled like T_OBJECT + { + RUBY_ASSERT(IMEMO_TYPE_P(obj, imemo_class_fields)); + shape_id = RBASIC_SHAPE_ID(obj); + + if (rb_shape_too_complex_p(shape_id)) { + st_table *iv_table = rb_imemo_class_fields_complex_tbl(obj); + VALUE val; + if (rb_st_lookup(iv_table, (st_data_t)id, (st_data_t *)&val)) { + return val; + } + else { + return undef; + } + } + + RUBY_ASSERT(!rb_shape_obj_too_complex_p(obj)); + ivar_list = rb_imemo_class_fields_ptr(obj); + break; + } case T_OBJECT: { + shape_id = RBASIC_SHAPE_ID(obj); if (rb_shape_too_complex_p(shape_id)) { - st_table * iv_table = ROBJECT_FIELDS_HASH(obj); + st_table *iv_table = ROBJECT_FIELDS_HASH(obj); VALUE val; if (rb_st_lookup(iv_table, (st_data_t)id, (st_data_t *)&val)) { return val; @@ -1445,6 +1433,7 @@ rb_ivar_lookup(VALUE obj, ID id, VALUE undef) break; } default: + shape_id = RBASIC_SHAPE_ID(obj); if (FL_TEST_RAW(obj, FL_EXIVAR)) { struct gen_fields_tbl *fields_tbl; rb_gen_fields_tbl_get(obj, id, &fields_tbl); @@ -1493,13 +1482,22 @@ rb_ivar_delete(VALUE obj, ID id, VALUE undef) { rb_check_frozen(obj); - bool locked = false; - unsigned int lev = 0; VALUE val = undef; if (BUILTIN_TYPE(obj) == T_CLASS || BUILTIN_TYPE(obj) == T_MODULE) { IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(id); - RB_VM_LOCK_ENTER_LEV(&lev); - locked = true; + + VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj); + if (fields_obj) { + if (rb_multi_ractor_p()) { + fields_obj = rb_imemo_class_fields_clone(fields_obj); + val = rb_ivar_delete(fields_obj, id, undef); + RCLASS_WRITABLE_SET_FIELDS_OBJ(obj, fields_obj); + } + else { + val = rb_ivar_delete(fields_obj, id, undef); + } + } + return val; } shape_id_t old_shape_id = rb_obj_shape_id(obj); @@ -1511,9 +1509,6 @@ rb_ivar_delete(VALUE obj, ID id, VALUE undef) shape_id_t next_shape_id = rb_shape_transition_remove_ivar(obj, id, &removed_shape_id); if (next_shape_id == old_shape_id) { - if (locked) { - RB_VM_LOCK_LEAVE_LEV(&lev); - } return undef; } @@ -1522,13 +1517,17 @@ rb_ivar_delete(VALUE obj, ID id, VALUE undef) goto too_complex; } - RUBY_ASSERT(RSHAPE(next_shape_id)->next_field_index == RSHAPE(old_shape_id)->next_field_index - 1); + RUBY_ASSERT(RSHAPE_LEN(next_shape_id) == RSHAPE_LEN(old_shape_id) - 1); VALUE *fields; switch(BUILTIN_TYPE(obj)) { case T_CLASS: case T_MODULE: - fields = RCLASS_PRIME_FIELDS(obj); + rb_bug("Unreachable"); + break; + case T_IMEMO: + RUBY_ASSERT(IMEMO_TYPE_P(obj, imemo_class_fields)); + fields = rb_imemo_class_fields_ptr(obj); break; case T_OBJECT: fields = ROBJECT_FIELDS(obj); @@ -1543,9 +1542,9 @@ rb_ivar_delete(VALUE obj, ID id, VALUE undef) RUBY_ASSERT(removed_shape_id != INVALID_SHAPE_ID); - attr_index_t new_fields_count = RSHAPE(next_shape_id)->next_field_index; + attr_index_t new_fields_count = RSHAPE_LEN(next_shape_id); - attr_index_t removed_index = RSHAPE(removed_shape_id)->next_field_index - 1; + attr_index_t removed_index = 
RSHAPE_INDEX(removed_shape_id); val = fields[removed_index]; size_t trailing_fields = new_fields_count - removed_index; @@ -1563,10 +1562,6 @@ rb_ivar_delete(VALUE obj, ID id, VALUE undef) } rb_obj_set_shape_id(obj, next_shape_id); - if (locked) { - RB_VM_LOCK_LEAVE_LEV(&lev); - } - return val; too_complex: @@ -1575,7 +1570,12 @@ too_complex: switch (BUILTIN_TYPE(obj)) { case T_CLASS: case T_MODULE: - table = RCLASS_WRITABLE_FIELDS_HASH(obj); + rb_bug("Unreachable"); + break; + + case T_IMEMO: + RUBY_ASSERT(IMEMO_TYPE_P(obj, imemo_class_fields)); + table = rb_imemo_class_fields_complex_tbl(obj); break; case T_OBJECT: @@ -1598,10 +1598,6 @@ too_complex: } } - if (locked) { - RB_VM_LOCK_LEAVE_LEV(&lev); - } - return val; } @@ -1614,6 +1610,10 @@ rb_attr_delete(VALUE obj, ID id) static shape_id_t obj_transition_too_complex(VALUE obj, st_table *table) { + if (BUILTIN_TYPE(obj) == T_CLASS || BUILTIN_TYPE(obj) == T_MODULE) { + return obj_transition_too_complex(RCLASS_WRITABLE_ENSURE_FIELDS_OBJ(obj), table); + } + RUBY_ASSERT(!rb_shape_obj_too_complex_p(obj)); shape_id_t shape_id = rb_shape_transition_complex(obj); @@ -1629,9 +1629,7 @@ obj_transition_too_complex(VALUE obj, st_table *table) break; case T_CLASS: case T_MODULE: - old_fields = RCLASS_PRIME_FIELDS(obj); - RBASIC_SET_SHAPE_ID(obj, shape_id); - RCLASS_SET_FIELDS_HASH(obj, table); + rb_bug("Unreachable"); break; default: RB_VM_LOCKING() { @@ -1827,8 +1825,8 @@ generic_fields_lookup_ensure_size(st_data_t *k, st_data_t *v, st_data_t u, int e if (!existing || fields_lookup->resize) { if (existing) { - RUBY_ASSERT(RSHAPE(fields_lookup->shape_id)->type == SHAPE_IVAR || RSHAPE(fields_lookup->shape_id)->type == SHAPE_OBJ_ID); - RUBY_ASSERT(RSHAPE_CAPACITY(RSHAPE(fields_lookup->shape_id)->parent_id) < RSHAPE_CAPACITY(fields_lookup->shape_id)); + RUBY_ASSERT(RSHAPE_TYPE_P(fields_lookup->shape_id, SHAPE_IVAR) || RSHAPE_TYPE_P(fields_lookup->shape_id, SHAPE_OBJ_ID)); + RUBY_ASSERT(RSHAPE_CAPACITY(RSHAPE_PARENT(fields_lookup->shape_id)) < RSHAPE_CAPACITY(fields_lookup->shape_id)); } else { FL_SET_RAW((VALUE)*k, FL_EXIVAR); @@ -2052,11 +2050,20 @@ rb_vm_set_ivar_id(VALUE obj, ID id, VALUE val) bool rb_obj_set_shape_id(VALUE obj, shape_id_t shape_id) { - if (rb_obj_shape_id(obj) == shape_id) { + shape_id_t old_shape_id = rb_obj_shape_id(obj); + if (old_shape_id == shape_id) { return false; } + if (BUILTIN_TYPE(obj) == T_CLASS || BUILTIN_TYPE(obj) == T_MODULE) { + // Avoid creating the fields_obj just to freeze the class + if (!(shape_id == SPECIAL_CONST_SHAPE_ID && old_shape_id == ROOT_SHAPE_ID)) { + RBASIC_SET_SHAPE_ID(RCLASS_WRITABLE_ENSURE_FIELDS_OBJ(obj), shape_id); + } + } + // FIXME: How to do multi-shape? RBASIC_SET_SHAPE_ID(obj, shape_id); + return true; } @@ -2117,8 +2124,6 @@ rb_ivar_set_internal(VALUE obj, ID id, VALUE val) ivar_set(obj, id, val); } -static void class_field_set(VALUE obj, shape_id_t target_shape_id, VALUE val); - void rb_obj_field_set(VALUE obj, shape_id_t target_shape_id, VALUE val) { @@ -2128,8 +2133,8 @@ rb_obj_field_set(VALUE obj, shape_id_t target_shape_id, VALUE val) break; case T_CLASS: case T_MODULE: - ASSERT_vm_locking(); - class_field_set(obj, target_shape_id, val); + // The only field is object_id and T_CLASS handle it differently. 
+ rb_bug("Unreachable"); break; default: generic_field_set(obj, target_shape_id, val); @@ -2148,7 +2153,12 @@ ivar_defined0(VALUE obj, ID id) switch (BUILTIN_TYPE(obj)) { case T_CLASS: case T_MODULE: - table = (st_table *)RCLASS_FIELDS_HASH(obj); + rb_bug("Unreachable"); + break; + + case T_IMEMO: + RUBY_ASSERT(IMEMO_TYPE_P(obj, imemo_class_fields)); + table = rb_imemo_class_fields_complex_tbl(obj); break; case T_OBJECT: @@ -2180,12 +2190,15 @@ rb_ivar_defined(VALUE obj, ID id) { if (SPECIAL_CONST_P(obj)) return Qfalse; - VALUE defined; + VALUE defined = Qfalse; switch (BUILTIN_TYPE(obj)) { case T_CLASS: case T_MODULE: - RB_VM_LOCKING() { - defined = ivar_defined0(obj, id); + { + VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj); + if (fields_obj) { + defined = ivar_defined0(fields_obj, id); + } } break; default: @@ -2200,60 +2213,50 @@ struct iv_itr_data { struct gen_fields_tbl *fields_tbl; st_data_t arg; rb_ivar_foreach_callback_func *func; + VALUE *fields; bool ivar_only; }; -/* - * Returns a flag to stop iterating depending on the result of +callback+. - */ -static bool -iterate_over_shapes_with_callback(rb_shape_t *shape, rb_ivar_foreach_callback_func *callback, struct iv_itr_data *itr_data) +static int +iterate_over_shapes_callback(shape_id_t shape_id, void *data) { - switch ((enum shape_type)shape->type) { - case SHAPE_ROOT: - return false; - case SHAPE_OBJ_ID: - if (itr_data->ivar_only) { - return iterate_over_shapes_with_callback(RSHAPE(shape->parent_id), callback, itr_data); - } - // fallthrough - case SHAPE_IVAR: - ASSUME(callback); - if (iterate_over_shapes_with_callback(RSHAPE(shape->parent_id), callback, itr_data)) { - return true; - } + struct iv_itr_data *itr_data = data; - VALUE * iv_list; - switch (BUILTIN_TYPE(itr_data->obj)) { - case T_OBJECT: - RUBY_ASSERT(!rb_shape_obj_too_complex_p(itr_data->obj)); - iv_list = ROBJECT_FIELDS(itr_data->obj); - break; - case T_CLASS: - case T_MODULE: - RUBY_ASSERT(!rb_shape_obj_too_complex_p(itr_data->obj)); - iv_list = RCLASS_PRIME_FIELDS(itr_data->obj); - break; - default: - iv_list = itr_data->fields_tbl->as.shape.fields; - break; - } - VALUE val = iv_list[shape->next_field_index - 1]; - if (!UNDEF_P(val)) { - switch (callback(shape->edge_name, val, itr_data->arg)) { - case ST_CHECK: - case ST_CONTINUE: - break; - case ST_STOP: - return true; - default: - rb_bug("unreachable"); - } - } - return false; + if (itr_data->ivar_only && !RSHAPE_TYPE_P(shape_id, SHAPE_IVAR)) { + return ST_CONTINUE; + } + + VALUE *iv_list; + switch (BUILTIN_TYPE(itr_data->obj)) { + case T_OBJECT: + RUBY_ASSERT(!rb_shape_obj_too_complex_p(itr_data->obj)); + iv_list = ROBJECT_FIELDS(itr_data->obj); + break; + case T_CLASS: + case T_MODULE: + rb_bug("Unreachable"); + case T_IMEMO: + RUBY_ASSERT(IMEMO_TYPE_P(itr_data->obj, imemo_class_fields)); + RUBY_ASSERT(!rb_shape_obj_too_complex_p(itr_data->obj)); + + iv_list = rb_imemo_class_fields_ptr(itr_data->obj); + break; default: - UNREACHABLE_RETURN(false); + iv_list = itr_data->fields_tbl->as.shape.fields; + break; } + + VALUE val = iv_list[RSHAPE_INDEX(shape_id)]; + return itr_data->func(RSHAPE_EDGE_NAME(shape_id), val, itr_data->arg); +} + +/* + * Returns a flag to stop iterating depending on the result of +callback+. 
+ */ +static void +iterate_over_shapes(shape_id_t shape_id, rb_ivar_foreach_callback_func *callback, struct iv_itr_data *itr_data) +{ + rb_shape_foreach_field(shape_id, iterate_over_shapes_callback, itr_data); } static int @@ -2279,7 +2282,8 @@ obj_fields_each(VALUE obj, rb_ivar_foreach_callback_func *func, st_data_t arg, b rb_st_foreach(ROBJECT_FIELDS_HASH(obj), each_hash_iv, (st_data_t)&itr_data); } else { - iterate_over_shapes_with_callback(RSHAPE(shape_id), func, &itr_data); + itr_data.fields = ROBJECT_FIELDS(obj); + iterate_over_shapes(shape_id, func, &itr_data); } } @@ -2302,28 +2306,30 @@ gen_fields_each(VALUE obj, rb_ivar_foreach_callback_func *func, st_data_t arg, b rb_st_foreach(fields_tbl->as.complex.table, each_hash_iv, (st_data_t)&itr_data); } else { - iterate_over_shapes_with_callback(RSHAPE(shape_id), func, &itr_data); + itr_data.fields = fields_tbl->as.shape.fields; + iterate_over_shapes(shape_id, func, &itr_data); } } static void -class_fields_each(VALUE obj, rb_ivar_foreach_callback_func *func, st_data_t arg, bool ivar_only) +class_fields_each(VALUE fields_obj, rb_ivar_foreach_callback_func *func, st_data_t arg, bool ivar_only) { - RUBY_ASSERT(RB_TYPE_P(obj, T_CLASS) || RB_TYPE_P(obj, T_MODULE)); + IMEMO_TYPE_P(fields_obj, imemo_class_fields); struct iv_itr_data itr_data = { - .obj = obj, + .obj = fields_obj, .arg = arg, .func = func, .ivar_only = ivar_only, }; - shape_id_t shape_id = RBASIC_SHAPE_ID(obj); + shape_id_t shape_id = RBASIC_SHAPE_ID(fields_obj); if (rb_shape_too_complex_p(shape_id)) { - rb_st_foreach(RCLASS_WRITABLE_FIELDS_HASH(obj), each_hash_iv, (st_data_t)&itr_data); + rb_st_foreach(rb_imemo_class_fields_complex_tbl(fields_obj), each_hash_iv, (st_data_t)&itr_data); } else { - iterate_over_shapes_with_callback(RSHAPE(shape_id), func, &itr_data); + itr_data.fields = rb_imemo_class_fields_ptr(fields_obj); + iterate_over_shapes(shape_id, func, &itr_data); } } @@ -2361,7 +2367,7 @@ rb_copy_generic_ivar(VALUE dest, VALUE obj) shape_id_t initial_shape_id = rb_obj_shape_id(dest); if (!rb_shape_canonical_p(src_shape_id)) { - RUBY_ASSERT(RSHAPE(initial_shape_id)->type == SHAPE_ROOT); + RUBY_ASSERT(RSHAPE_TYPE_P(initial_shape_id, SHAPE_ROOT)); dest_shape_id = rb_shape_rebuild(initial_shape_id, src_shape_id); if (UNLIKELY(rb_shape_too_complex_p(dest_shape_id))) { @@ -2431,18 +2437,26 @@ rb_field_foreach(VALUE obj, rb_ivar_foreach_callback_func *func, st_data_t arg, { if (SPECIAL_CONST_P(obj)) return; switch (BUILTIN_TYPE(obj)) { + case T_IMEMO: + if (IMEMO_TYPE_P(obj, imemo_class_fields)) { + class_fields_each(obj, func, arg, ivar_only); + } + break; case T_OBJECT: obj_fields_each(obj, func, arg, ivar_only); break; case T_CLASS: case T_MODULE: - IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(0); - RB_VM_LOCKING() { - class_fields_each(obj, func, arg, ivar_only); + { + IVAR_ACCESSOR_SHOULD_BE_MAIN_RACTOR(0); + VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj); + if (fields_obj) { + class_fields_each(fields_obj, func, arg, ivar_only); + } } break; default: - if (FL_TEST(obj, FL_EXIVAR)) { + if (FL_TEST_RAW(obj, FL_EXIVAR)) { gen_fields_each(obj, func, arg, ivar_only); } break; @@ -2467,8 +2481,16 @@ rb_ivar_count(VALUE obj) break; case T_CLASS: case T_MODULE: - iv_count = RCLASS_FIELDS_COUNT(obj); - break; + { + VALUE fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj); + if (!fields_obj) { + return 0; + } + if (rb_shape_obj_too_complex_p(fields_obj)) { + return rb_st_table_size(rb_imemo_class_fields_complex_tbl(fields_obj)); + } + return RBASIC_FIELDS_COUNT(fields_obj); + } default: if 
(FL_TEST(obj, FL_EXIVAR)) { struct gen_fields_tbl *fields_tbl; @@ -4674,71 +4696,106 @@ rb_iv_set(VALUE obj, const char *name, VALUE val) return rb_ivar_set(obj, id, val); } -static VALUE * -class_ivar_set_shape_fields(VALUE obj, void *_data) +static bool +class_fields_ivar_set(VALUE klass, VALUE fields_obj, ID id, VALUE val, bool concurrent, VALUE *new_fields_obj) { - RUBY_ASSERT(!rb_shape_obj_too_complex_p(obj)); + bool existing = true; + const VALUE original_fields_obj = fields_obj; + fields_obj = original_fields_obj ? original_fields_obj : rb_imemo_class_fields_new(klass, 1); - return RCLASS_PRIME_FIELDS(obj); -} + shape_id_t current_shape_id = RBASIC_SHAPE_ID(fields_obj); + shape_id_t next_shape_id = current_shape_id; -static void -class_ivar_set_shape_resize_fields(VALUE obj, attr_index_t _old_capa, attr_index_t new_capa, void *_data) -{ - REALLOC_N(RCLASS_PRIME_FIELDS(obj), VALUE, new_capa); -} + if (UNLIKELY(rb_shape_too_complex_p(current_shape_id))) { + goto too_complex; + } -static void -class_ivar_set_set_shape_id(VALUE obj, shape_id_t shape_id, void *_data) -{ - rb_obj_set_shape_id(obj, shape_id); -} + attr_index_t index; + if (!rb_shape_get_iv_index(current_shape_id, id, &index)) { + existing = false; -static shape_id_t -class_ivar_set_transition_too_complex(VALUE obj, void *_data) -{ - return rb_evict_fields_to_hash(obj); -} + index = RSHAPE_LEN(current_shape_id); + if (index >= SHAPE_MAX_FIELDS) { + rb_raise(rb_eArgError, "too many instance variables"); + } -static st_table * -class_ivar_set_too_complex_table(VALUE obj, void *_data) -{ - RUBY_ASSERT(rb_shape_obj_too_complex_p(obj)); + next_shape_id = rb_shape_transition_add_ivar(fields_obj, id); + if (UNLIKELY(rb_shape_too_complex_p(next_shape_id))) { + attr_index_t current_len = RSHAPE_LEN(current_shape_id); + fields_obj = rb_imemo_class_fields_new_complex(klass, current_len + 1); + if (current_len) { + rb_obj_copy_fields_to_hash_table(original_fields_obj, rb_imemo_class_fields_complex_tbl(fields_obj)); + RBASIC_SET_SHAPE_ID(fields_obj, next_shape_id); + } + goto too_complex; + } + + attr_index_t next_capacity = RSHAPE_CAPACITY(next_shape_id); + attr_index_t current_capacity = RSHAPE_CAPACITY(current_shape_id); - return RCLASS_WRITABLE_FIELDS_HASH(obj); + if (concurrent || next_capacity != current_capacity) { + RUBY_ASSERT(concurrent || next_capacity > current_capacity); + + // We allocate a new fields_obj even when concurrency isn't a concern + // so that we're embedded as long as possible. 
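+        // When `concurrent` is set (the caller passes rb_multi_ractor_p()), this also acts as a
+        // copy-on-write step: other ractors keep reading the original fields_obj untouched, and
+        // the caller only publishes the copy via RCLASS_WRITABLE_SET_FIELDS_OBJ once it is filled in.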
+ fields_obj = rb_imemo_class_fields_new(klass, next_capacity); + if (original_fields_obj) { + MEMCPY(rb_imemo_class_fields_ptr(fields_obj), rb_imemo_class_fields_ptr(original_fields_obj), VALUE, RSHAPE_LEN(current_shape_id)); + } + } + + RUBY_ASSERT(RSHAPE(next_shape_id)->type == SHAPE_IVAR); + RUBY_ASSERT(index == (RSHAPE_LEN(next_shape_id) - 1)); + } + + VALUE *fields = rb_imemo_class_fields_ptr(fields_obj); + RB_OBJ_WRITE(fields_obj, &fields[index], val); + + if (!existing) { + RBASIC_SET_SHAPE_ID(fields_obj, next_shape_id); + } + + *new_fields_obj = fields_obj; + return existing; + +too_complex: + { + st_table *table = rb_imemo_class_fields_complex_tbl(fields_obj); + existing = st_insert(table, (st_data_t)id, (st_data_t)val); + RB_OBJ_WRITTEN(fields_obj, Qundef, val); + + if (fields_obj != original_fields_obj) { + RBASIC_SET_SHAPE_ID(fields_obj, next_shape_id); + } + } + + *new_fields_obj = fields_obj; + return existing; } int rb_class_ivar_set(VALUE obj, ID id, VALUE val) { RUBY_ASSERT(RB_TYPE_P(obj, T_CLASS) || RB_TYPE_P(obj, T_MODULE)); - bool existing = false; rb_check_frozen(obj); rb_class_ensure_writable(obj); - RB_VM_LOCKING() { - existing = general_ivar_set(obj, id, val, NULL, - class_ivar_set_shape_fields, - class_ivar_set_shape_resize_fields, - class_ivar_set_set_shape_id, - class_ivar_set_transition_too_complex, - class_ivar_set_too_complex_table).existing; - } + const VALUE original_fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj); + VALUE new_fields_obj = 0; - return existing; -} + bool existing = class_fields_ivar_set(obj, original_fields_obj, id, val, rb_multi_ractor_p(), &new_fields_obj); -static void -class_field_set(VALUE obj, shape_id_t target_shape_id, VALUE val) -{ - RUBY_ASSERT(RB_TYPE_P(obj, T_CLASS) || RB_TYPE_P(obj, T_MODULE)); - general_field_set(obj, target_shape_id, val, NULL, - class_ivar_set_shape_fields, - class_ivar_set_shape_resize_fields, - class_ivar_set_set_shape_id, - class_ivar_set_transition_too_complex, - class_ivar_set_too_complex_table); + if (new_fields_obj != original_fields_obj) { + RCLASS_WRITABLE_SET_FIELDS_OBJ(obj, new_fields_obj); + + // TODO: What should we set as the T_CLASS shape_id? + // In most case we can replicate the single `fields_obj` shape + // but in namespaced case? + // Perhaps INVALID_SHAPE_ID? 
+ RBASIC_SET_SHAPE_ID(obj, RBASIC_SHAPE_ID(new_fields_obj)); + } + return existing; } static int @@ -4754,9 +4811,7 @@ rb_fields_tbl_copy(VALUE dst, VALUE src) { RUBY_ASSERT(rb_type(dst) == rb_type(src)); RUBY_ASSERT(RB_TYPE_P(dst, T_CLASS) || RB_TYPE_P(dst, T_MODULE)); - RUBY_ASSERT(RSHAPE_TYPE_P(RBASIC_SHAPE_ID(dst), SHAPE_ROOT)); - RUBY_ASSERT(!RCLASS_PRIME_FIELDS(dst)); rb_ivar_foreach(src, tbl_copy_i, dst); } diff --git a/variable.h b/variable.h index a95fcc563d..54b7fc5461 100644 --- a/variable.h +++ b/variable.h @@ -15,7 +15,6 @@ struct gen_fields_tbl { union { struct { - uint32_t fields_count; VALUE fields[1]; } shape; struct { @@ -736,8 +736,8 @@ vm_stat(int argc, VALUE *argv, VALUE self) SET(constant_cache_invalidations, ruby_vm_constant_cache_invalidations); SET(constant_cache_misses, ruby_vm_constant_cache_misses); SET(global_cvar_state, ruby_vm_global_cvar_state); - SET(next_shape_id, (rb_serial_t)GET_SHAPE_TREE()->next_shape_id); - SET(shape_cache_size, (rb_serial_t)GET_SHAPE_TREE()->cache_size); + SET(next_shape_id, (rb_serial_t)rb_shape_tree.next_shape_id); + SET(shape_cache_size, (rb_serial_t)rb_shape_tree.cache_size); #undef SET #if USE_DEBUG_COUNTER diff --git a/vm_insnhelper.c b/vm_insnhelper.c index 24709eee2e..5192ee2d82 100644 --- a/vm_insnhelper.c +++ b/vm_insnhelper.c @@ -1213,9 +1213,10 @@ ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, const rb_iseq_t *, IVC, const s static inline VALUE vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, VALUE default_value) { + VALUE fields_obj; #if OPT_IC_FOR_IVAR VALUE val = Qundef; - VALUE * ivar_list; + VALUE *ivar_list; if (SPECIAL_CONST_P(obj)) { return default_value; @@ -1247,7 +1248,13 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call } } - ivar_list = RCLASS_PRIME_FIELDS(obj); + fields_obj = RCLASS_WRITABLE_FIELDS_OBJ(obj); + if (!fields_obj) { + return default_value; + } + ivar_list = rb_imemo_class_fields_ptr(fields_obj); + shape_id = rb_obj_shape_id(fields_obj); + break; } default: @@ -1318,7 +1325,7 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call switch (BUILTIN_TYPE(obj)) { case T_CLASS: case T_MODULE: - table = (st_table *)RCLASS_FIELDS_HASH(obj); + table = rb_imemo_class_fields_complex_tbl(fields_obj); break; case T_OBJECT: @@ -1374,6 +1381,7 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call RUBY_ASSERT(!UNDEF_P(val)); } + RB_GC_GUARD(fields_obj); return val; general_path: @@ -1455,9 +1463,7 @@ vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_i RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID); } else if (dest_shape_id != INVALID_SHAPE_ID) { - rb_shape_t *dest_shape = RSHAPE(dest_shape_id); - - if (shape_id == dest_shape->parent_id && dest_shape->edge_name == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) { + if (shape_id == RSHAPE_PARENT(dest_shape_id) && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) { RUBY_ASSERT(index < RSHAPE_CAPACITY(dest_shape_id)); } else { @@ -1498,10 +1504,9 @@ vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t dest_shape_id, attr_index_t i VM_ASSERT(!rb_ractor_shareable_p(obj)); } else if (dest_shape_id != INVALID_SHAPE_ID) { - rb_shape_t *dest_shape = RSHAPE(dest_shape_id); - shape_id_t source_shape_id = dest_shape->parent_id; + shape_id_t source_shape_id = RSHAPE_PARENT(dest_shape_id); - if 
(shape_id == source_shape_id && dest_shape->edge_name == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) { + if (shape_id == source_shape_id && RSHAPE_EDGE_NAME(dest_shape_id) == id && RSHAPE_CAPACITY(shape_id) == RSHAPE_CAPACITY(dest_shape_id)) { RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID); RBASIC_SET_SHAPE_ID(obj, dest_shape_id); @@ -778,7 +778,7 @@ VALUE rb_object_shape_count(void) { // next_shape_id starts from 0, so it's the same as the count - return ULONG2NUM((unsigned long)GET_SHAPE_TREE()->next_shape_id); + return ULONG2NUM((unsigned long)rb_shape_tree.next_shape_id); } bool @@ -799,6 +799,12 @@ rb_yjit_shape_capacity(shape_id_t shape_id) return RSHAPE_CAPACITY(shape_id); } +attr_index_t +rb_yjit_shape_index(shape_id_t shape_id) +{ + return RSHAPE_INDEX(shape_id); +} + // Assert that we have the VM lock. Relevant mostly for multi ractor situations. // The GC takes the lock before calling us, and this asserts that it indeed happens. void diff --git a/yjit/bindgen/src/main.rs b/yjit/bindgen/src/main.rs index a139892741..e65f001145 100644 --- a/yjit/bindgen/src/main.rs +++ b/yjit/bindgen/src/main.rs @@ -95,13 +95,13 @@ fn main() { // From shape.h .allowlist_function("rb_obj_shape_id") - .allowlist_function("rb_shape_lookup") .allowlist_function("rb_shape_id_offset") .allowlist_function("rb_shape_get_iv_index") .allowlist_function("rb_shape_transition_add_ivar_no_warnings") .allowlist_function("rb_yjit_shape_obj_too_complex_p") .allowlist_function("rb_yjit_shape_too_complex_p") .allowlist_function("rb_yjit_shape_capacity") + .allowlist_function("rb_yjit_shape_index") .allowlist_var("SHAPE_ID_NUM_BITS") // From ruby/internal/intern/object.h diff --git a/yjit/src/asm/x86_64/mod.rs b/yjit/src/asm/x86_64/mod.rs index fbbfa714d8..0ef5e92117 100644 --- a/yjit/src/asm/x86_64/mod.rs +++ b/yjit/src/asm/x86_64/mod.rs @@ -1027,7 +1027,10 @@ pub fn mov(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { } let output_num_bits:u32 = if mem.num_bits > 32 { 32 } else { mem.num_bits.into() }; - assert!(imm_num_bits(imm.value) <= (output_num_bits as u8)); + assert!( + mem.num_bits < 64 || imm_num_bits(imm.value) <= (output_num_bits as u8), + "immediate value should be small enough to survive sign extension" + ); cb.write_int(imm.value as u64, output_num_bits); }, // M + UImm @@ -1042,7 +1045,10 @@ pub fn mov(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { } let output_num_bits = if mem.num_bits > 32 { 32 } else { mem.num_bits.into() }; - assert!(imm_num_bits(uimm.value as i64) <= (output_num_bits as u8)); + assert!( + mem.num_bits < 64 || imm_num_bits(uimm.value as i64) <= (output_num_bits as u8), + "immediate value should be small enough to survive sign extension" + ); cb.write_int(uimm.value, output_num_bits); }, // * + Imm/UImm diff --git a/yjit/src/asm/x86_64/tests.rs b/yjit/src/asm/x86_64/tests.rs index 5ae983270f..eefcbfd52e 100644 --- a/yjit/src/asm/x86_64/tests.rs +++ b/yjit/src/asm/x86_64/tests.rs @@ -193,6 +193,7 @@ fn test_mov() { check_bytes("48c7470801000000", |cb| mov(cb, mem_opnd(64, RDI, 8), imm_opnd(1))); //check_bytes("67c7400411000000", |cb| mov(cb, mem_opnd(32, EAX, 4), imm_opnd(0x34))); // We don't distinguish between EAX and RAX here - that's probably fine? 
check_bytes("c7400411000000", |cb| mov(cb, mem_opnd(32, RAX, 4), imm_opnd(17))); + check_bytes("c7400401000080", |cb| mov(cb, mem_opnd(32, RAX, 4), uimm_opnd(0x80000001))); check_bytes("41895814", |cb| mov(cb, mem_opnd(32, R8, 20), EBX)); check_bytes("4d8913", |cb| mov(cb, mem_opnd(64, R11, 0), R10)); check_bytes("48c742f8f4ffffff", |cb| mov(cb, mem_opnd(64, RDX, -8), imm_opnd(-12))); diff --git a/yjit/src/backend/x86_64/mod.rs b/yjit/src/backend/x86_64/mod.rs index c0d42e79e6..ef435bca7e 100644 --- a/yjit/src/backend/x86_64/mod.rs +++ b/yjit/src/backend/x86_64/mod.rs @@ -315,19 +315,24 @@ impl Assembler let opnd1 = asm.load(*src); asm.mov(*dest, opnd1); }, - (Opnd::Mem(_), Opnd::UImm(value)) => { - // 32-bit values will be sign-extended - if imm_num_bits(*value as i64) > 32 { + (Opnd::Mem(Mem { num_bits, .. }), Opnd::UImm(value)) => { + // For 64 bit destinations, 32-bit values will be sign-extended + if *num_bits == 64 && imm_num_bits(*value as i64) > 32 { let opnd1 = asm.load(*src); asm.mov(*dest, opnd1); } else { asm.mov(*dest, *src); } }, - (Opnd::Mem(_), Opnd::Imm(value)) => { - if imm_num_bits(*value) > 32 { + (Opnd::Mem(Mem { num_bits, .. }), Opnd::Imm(value)) => { + // For 64 bit destinations, 32-bit values will be sign-extended + if *num_bits == 64 && imm_num_bits(*value) > 32 { let opnd1 = asm.load(*src); asm.mov(*dest, opnd1); + } else if uimm_num_bits(*value as u64) <= *num_bits { + // If the bit string is short enough for the destination, use the unsigned representation. + // Note that 64-bit and negative values are ruled out. + asm.mov(*dest, Opnd::UImm(*value as u64)); } else { asm.mov(*dest, *src); } @@ -1317,4 +1322,19 @@ mod tests { 0x13: mov qword ptr [rbx], rax "}); } + + #[test] + fn test_mov_m32_imm32() { + let (mut asm, mut cb) = setup_asm(); + + let shape_opnd = Opnd::mem(32, C_RET_OPND, 0); + asm.mov(shape_opnd, Opnd::UImm(0x8000_0001)); + asm.mov(shape_opnd, Opnd::Imm(0x8000_0001)); + + asm.compile_with_num_regs(&mut cb, 0); + assert_disasm!(cb, "c70001000080c70001000080", {" + 0x0: mov dword ptr [rax], 0x80000001 + 0x6: mov dword ptr [rax], 0x80000001 + "}); + } } diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs index 5f7d61f8b3..2e2ca51b17 100644 --- a/yjit/src/codegen.rs +++ b/yjit/src/codegen.rs @@ -3128,8 +3128,6 @@ fn gen_set_ivar( if new_shape_too_complex { Some((next_shape_id, None, 0_usize)) } else { - let current_shape = unsafe { rb_shape_lookup(current_shape_id) }; - let current_capacity = unsafe { rb_yjit_shape_capacity(current_shape_id) }; let next_capacity = unsafe { rb_yjit_shape_capacity(next_shape_id) }; @@ -3138,7 +3136,7 @@ fn gen_set_ivar( let needs_extension = next_capacity != current_capacity; // We can write to the object, but we need to transition the shape - let ivar_index = unsafe { (*current_shape).next_field_index } as usize; + let ivar_index = unsafe { rb_yjit_shape_index(next_shape_id) } as usize; let needs_extension = if needs_extension { Some((current_capacity, next_capacity)) diff --git a/yjit/src/cruby.rs b/yjit/src/cruby.rs index ecb6475319..725a29fa70 100644 --- a/yjit/src/cruby.rs +++ b/yjit/src/cruby.rs @@ -448,18 +448,6 @@ impl VALUE { unsafe { rb_obj_shape_id(self) } } - pub fn shape_of(self) -> *mut rb_shape { - unsafe { - let shape = rb_shape_lookup(self.shape_id_of()); - - if shape.is_null() { - panic!("Shape should not be null"); - } else { - shape - } - } - } - pub fn embedded_p(self) -> bool { unsafe { FL_TEST_RAW(self, VALUE(ROBJECT_EMBED as usize)) != VALUE(0) diff --git a/yjit/src/cruby_bindings.inc.rs 
b/yjit/src/cruby_bindings.inc.rs index 23682ac63c..d42df7b267 100644 --- a/yjit/src/cruby_bindings.inc.rs +++ b/yjit/src/cruby_bindings.inc.rs @@ -409,6 +409,7 @@ pub const imemo_parser_strterm: imemo_type = 10; pub const imemo_callinfo: imemo_type = 11; pub const imemo_callcache: imemo_type = 12; pub const imemo_constcache: imemo_type = 13; +pub const imemo_class_fields: imemo_type = 14; pub type imemo_type = u32; #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -687,27 +688,6 @@ pub const VM_ENV_FLAG_ISOLATED: vm_frame_env_flags = 16; pub type vm_frame_env_flags = u32; pub type attr_index_t = u16; pub type shape_id_t = u32; -pub type redblack_id_t = u32; -pub type redblack_node_t = redblack_node; -#[repr(C)] -pub struct rb_shape { - pub edges: VALUE, - pub edge_name: ID, - pub ancestor_index: *mut redblack_node_t, - pub parent_id: shape_id_t, - pub next_field_index: attr_index_t, - pub capacity: attr_index_t, - pub type_: u8, -} -pub type rb_shape_t = rb_shape; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct redblack_node { - pub key: ID, - pub value: *mut rb_shape_t, - pub l: redblack_id_t, - pub r: redblack_id_t, -} #[repr(C)] pub struct rb_cvar_class_tbl_entry { pub index: u32, @@ -1132,7 +1112,6 @@ extern "C" { pub fn rb_obj_info(obj: VALUE) -> *const ::std::os::raw::c_char; pub fn rb_ec_stack_check(ec: *mut rb_execution_context_struct) -> ::std::os::raw::c_int; pub fn rb_shape_id_offset() -> i32; - pub fn rb_shape_lookup(shape_id: shape_id_t) -> *mut rb_shape_t; pub fn rb_obj_shape_id(obj: VALUE) -> shape_id_t; pub fn rb_shape_get_iv_index(shape_id: shape_id_t, id: ID, value: *mut attr_index_t) -> bool; pub fn rb_shape_transition_add_ivar_no_warnings(obj: VALUE, id: ID) -> shape_id_t; @@ -1264,6 +1243,7 @@ extern "C" { pub fn rb_yjit_shape_too_complex_p(shape_id: shape_id_t) -> bool; pub fn rb_yjit_shape_obj_too_complex_p(obj: VALUE) -> bool; pub fn rb_yjit_shape_capacity(shape_id: shape_id_t) -> attr_index_t; + pub fn rb_yjit_shape_index(shape_id: shape_id_t) -> attr_index_t; pub fn rb_yjit_assert_holding_vm_lock(); pub fn rb_yjit_sendish_sp_pops(ci: *const rb_callinfo) -> usize; pub fn rb_yjit_invokeblock_sp_pops(ci: *const rb_callinfo) -> usize; diff --git a/zjit/bindgen/src/main.rs b/zjit/bindgen/src/main.rs index 4aff3193f0..cf328fc68c 100644 --- a/zjit/bindgen/src/main.rs +++ b/zjit/bindgen/src/main.rs @@ -108,7 +108,6 @@ fn main() { // From shape.h .allowlist_function("rb_obj_shape_id") - .allowlist_function("rb_shape_lookup") .allowlist_function("rb_shape_id_offset") .allowlist_function("rb_shape_get_iv_index") .allowlist_function("rb_shape_transition_add_ivar_no_warnings") diff --git a/zjit/src/asm/mod.rs b/zjit/src/asm/mod.rs index a7f2705af1..0b571f9aff 100644 --- a/zjit/src/asm/mod.rs +++ b/zjit/src/asm/mod.rs @@ -1,5 +1,5 @@ use std::collections::BTreeMap; -//use std::fmt; +use std::fmt; use std::rc::Rc; use std::cell::RefCell; use std::mem; @@ -260,6 +260,18 @@ impl CodeBlock { } } +/// Produce hex string output from the bytes in a code block +impl fmt::LowerHex for CodeBlock { + fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result { + for pos in 0..self.write_pos { + let mem_block = &*self.mem_block.borrow(); + let byte = unsafe { mem_block.start_ptr().raw_ptr(mem_block).add(pos).read() }; + fmtr.write_fmt(format_args!("{:02x}", byte))?; + } + Ok(()) + } +} + #[cfg(test)] impl CodeBlock { /// Stubbed CodeBlock for testing. Can't execute generated code. 
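The x86_64 mov() changes (above for YJIT, and in the next hunk for ZJIT) relax the immediate-width assertion for destinations narrower than 64 bits. The motivating value in the new tests is 0x8000_0001: too wide for a signed 32-bit immediate, yet it fits a 32-bit store exactly. A sketch of that boundary in plain Ruby:

    value = 0x8000_0001
    (-2**31...2**31).cover?(value)    #=> false  needs 33 bits as a signed immediate
    value < 2**32                     #=> true   fits an unsigned 32-bit slot
    [value].pack("L<").unpack1("l<")  #=> -2147483647  the bit pattern a 32-bit store ends up holding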
diff --git a/zjit/src/asm/x86_64/mod.rs b/zjit/src/asm/x86_64/mod.rs index efc58dfdb8..fea66c8a3b 100644 --- a/zjit/src/asm/x86_64/mod.rs +++ b/zjit/src/asm/x86_64/mod.rs @@ -1024,7 +1024,10 @@ pub fn mov(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { } let output_num_bits:u32 = if mem.num_bits > 32 { 32 } else { mem.num_bits.into() }; - assert!(imm_num_bits(imm.value) <= (output_num_bits as u8)); + assert!( + mem.num_bits < 64 || imm_num_bits(imm.value) <= (output_num_bits as u8), + "immediate value should be small enough to survive sign extension" + ); cb.write_int(imm.value as u64, output_num_bits); }, // M + UImm @@ -1039,7 +1042,10 @@ pub fn mov(cb: &mut CodeBlock, dst: X86Opnd, src: X86Opnd) { } let output_num_bits = if mem.num_bits > 32 { 32 } else { mem.num_bits.into() }; - assert!(imm_num_bits(uimm.value as i64) <= (output_num_bits as u8)); + assert!( + mem.num_bits < 64 || imm_num_bits(uimm.value as i64) <= (output_num_bits as u8), + "immediate value should be small enough to survive sign extension" + ); cb.write_int(uimm.value, output_num_bits); }, // * + Imm/UImm diff --git a/zjit/src/asm/x86_64/tests.rs b/zjit/src/asm/x86_64/tests.rs index f2b949b7f7..ec490fd330 100644 --- a/zjit/src/asm/x86_64/tests.rs +++ b/zjit/src/asm/x86_64/tests.rs @@ -1,11 +1,10 @@ #![cfg(test)] -//use crate::asm::x86_64::*; +use crate::asm::x86_64::*; -/* /// Check that the bytes for an instruction sequence match a hex string fn check_bytes<R>(bytes: &str, run: R) where R: FnOnce(&mut super::CodeBlock) { - let mut cb = super::CodeBlock::new_dummy(4096); + let mut cb = super::CodeBlock::new_dummy(); run(&mut cb); assert_eq!(format!("{:x}", cb), bytes); } @@ -194,6 +193,7 @@ fn test_mov() { check_bytes("48c7470801000000", |cb| mov(cb, mem_opnd(64, RDI, 8), imm_opnd(1))); //check_bytes("67c7400411000000", |cb| mov(cb, mem_opnd(32, EAX, 4), imm_opnd(0x34))); // We don't distinguish between EAX and RAX here - that's probably fine? check_bytes("c7400411000000", |cb| mov(cb, mem_opnd(32, RAX, 4), imm_opnd(17))); + check_bytes("c7400401000080", |cb| mov(cb, mem_opnd(32, RAX, 4), uimm_opnd(0x80000001))); check_bytes("41895814", |cb| mov(cb, mem_opnd(32, R8, 20), EBX)); check_bytes("4d8913", |cb| mov(cb, mem_opnd(64, R11, 0), R10)); check_bytes("48c742f8f4ffffff", |cb| mov(cb, mem_opnd(64, RDX, -8), imm_opnd(-12))); @@ -439,9 +439,10 @@ fn basic_capstone_usage() -> std::result::Result<(), capstone::Error> { } #[test] +#[ignore] #[cfg(feature = "disasm")] fn block_comments() { - let mut cb = super::CodeBlock::new_dummy(4096); + let mut cb = super::CodeBlock::new_dummy(); let first_write_ptr = cb.get_write_ptr().raw_addr(&cb); cb.add_comment("Beginning"); @@ -458,4 +459,3 @@ fn block_comments() { assert_eq!(&vec!( "Two bytes in".to_string(), "Still two bytes in".to_string() ), cb.comments_at(second_write_ptr).unwrap()); assert_eq!(&vec!( "Ten bytes in".to_string() ), cb.comments_at(third_write_ptr).unwrap()); } -*/ diff --git a/zjit/src/assertions.rs b/zjit/src/assertions.rs new file mode 100644 index 0000000000..0dacc938fc --- /dev/null +++ b/zjit/src/assertions.rs @@ -0,0 +1,21 @@ +/// Assert that CodeBlock has the code specified with hex. In addition, if tested with +/// `cargo test --all-features`, it also checks it generates the specified disasm. +#[cfg(test)] +macro_rules! 
assert_disasm { + ($cb:expr, $hex:expr, $disasm:expr) => { + #[cfg(feature = "disasm")] + { + use $crate::disasm::disasm_addr_range; + use $crate::cruby::unindent; + let disasm = disasm_addr_range( + &$cb, + $cb.get_ptr(0).raw_addr(&$cb), + $cb.get_write_ptr().raw_addr(&$cb), + ); + assert_eq!(unindent(&disasm, false), unindent(&$disasm, true)); + } + assert_eq!(format!("{:x}", $cb), $hex); + }; +} +#[cfg(test)] +pub(crate) use assert_disasm; diff --git a/zjit/src/backend/arm64/mod.rs b/zjit/src/backend/arm64/mod.rs index f7e871523e..dd1eb52d34 100644 --- a/zjit/src/backend/arm64/mod.rs +++ b/zjit/src/backend/arm64/mod.rs @@ -211,11 +211,6 @@ impl Assembler vec![X1_REG, X9_REG, X10_REG, X11_REG, X12_REG, X13_REG, X14_REG, X15_REG] } - /// Get the address that the current frame returns to - pub fn return_addr_opnd() -> Opnd { - Opnd::Reg(X30_REG) - } - /// Split platform-specific instructions /// The transformations done here are meant to make our lives simpler in later /// stages of the compilation pipeline. @@ -1345,14 +1340,30 @@ impl Assembler } } -/* #[cfg(test)] mod tests { use super::*; - use crate::disasm::*; + use crate::assertions::assert_disasm; + + static TEMP_REGS: [Reg; 5] = [X1_REG, X9_REG, X10_REG, X14_REG, X15_REG]; fn setup_asm() -> (Assembler, CodeBlock) { - (Assembler::new(0), CodeBlock::new_dummy(1024)) + (Assembler::new(), CodeBlock::new_dummy()) + } + + #[test] + fn test_mul_with_immediate() { + let (mut asm, mut cb) = setup_asm(); + + let out = asm.mul(Opnd::Reg(TEMP_REGS[1]), 3.into()); + asm.mov(Opnd::Reg(TEMP_REGS[0]), out); + asm.compile_with_num_regs(&mut cb, 2); + + assert_disasm!(cb, "600080d2207d009be10300aa", {" + 0x0: mov x0, #3 + 0x4: mul x0, x9, x0 + 0x8: mov x1, x0 + "}); } #[test] @@ -1361,7 +1372,7 @@ mod tests { let opnd = asm.add(Opnd::Reg(X0_REG), Opnd::Reg(X1_REG)); asm.store(Opnd::mem(64, Opnd::Reg(X2_REG), 0), opnd); - asm.compile_with_regs(&mut cb, None, vec![X3_REG]); + asm.compile_with_regs(&mut cb, vec![X3_REG]); // Assert that only 2 instructions were written. 
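        // (A64 instructions are a fixed 4 bytes each, so a write position of 8 bytes means exactly two of them.)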
assert_eq!(8, cb.get_write_pos()); @@ -1425,6 +1436,7 @@ mod tests { asm.compile_with_num_regs(&mut cb, 0); } + /* #[test] fn test_emit_lea_label() { let (mut asm, mut cb) = setup_asm(); @@ -1438,6 +1450,7 @@ mod tests { asm.compile_with_num_regs(&mut cb, 1); } + */ #[test] fn test_emit_load_mem_disp_fits_into_load() { @@ -1648,6 +1661,7 @@ mod tests { asm.compile_with_num_regs(&mut cb, 2); } + /* #[test] fn test_bcond_straddling_code_pages() { const LANDING_PAGE: usize = 65; @@ -1784,20 +1798,5 @@ mod tests { 0x8: mov x1, x11 "}); } - - #[test] - fn test_mul_with_immediate() { - let (mut asm, mut cb) = setup_asm(); - - let out = asm.mul(Opnd::Reg(TEMP_REGS[1]), 3.into()); - asm.mov(Opnd::Reg(TEMP_REGS[0]), out); - asm.compile_with_num_regs(&mut cb, 2); - - assert_disasm!(cb, "6b0080d22b7d0b9be1030baa", {" - 0x0: mov x11, #3 - 0x4: mul x11, x9, x11 - 0x8: mov x1, x11 - "}); - } + */ } -*/ diff --git a/zjit/src/backend/lir.rs b/zjit/src/backend/lir.rs index e9ae8730f6..f46b35ded5 100644 --- a/zjit/src/backend/lir.rs +++ b/zjit/src/backend/lir.rs @@ -1,8 +1,8 @@ use std::collections::HashMap; use std::fmt; use std::mem::take; -use crate::cruby::{Qundef, RUBY_OFFSET_CFP_PC, RUBY_OFFSET_CFP_SP, SIZEOF_VALUE_I32, VM_ENV_DATA_SIZE}; -use crate::state::ZJITState; +use crate::codegen::local_size_and_idx_to_ep_offset; +use crate::cruby::{Qundef, RUBY_OFFSET_CFP_PC, RUBY_OFFSET_CFP_SP, SIZEOF_VALUE_I32}; use crate::{cruby::VALUE}; use crate::backend::current::*; use crate::virtualmem::CodePtr; @@ -1751,6 +1751,15 @@ impl Assembler ret } + /// Compile with a limited number of registers. Used only for unit tests. + #[cfg(test)] + pub fn compile_with_num_regs(self, cb: &mut CodeBlock, num_regs: usize) -> (CodePtr, Vec<u32>) + { + let mut alloc_regs = Self::get_alloc_regs(); + let alloc_regs = alloc_regs.drain(0..num_regs).collect(); + self.compile_with_regs(cb, alloc_regs).unwrap() + } + /// Compile Target::SideExit and convert it into Target::CodePtr for all instructions #[must_use] pub fn compile_side_exits(&mut self) -> Option<()> { @@ -1788,7 +1797,7 @@ impl Assembler asm_comment!(self, "write locals: {locals:?}"); for (idx, &opnd) in locals.iter().enumerate() { let opnd = split_store_source(self, opnd); - self.store(Opnd::mem(64, SP, (-(VM_ENV_DATA_SIZE as i32) - locals.len() as i32 + idx as i32) * SIZEOF_VALUE_I32), opnd); + self.store(Opnd::mem(64, SP, (-local_size_and_idx_to_ep_offset(locals.len(), idx) - 1) * SIZEOF_VALUE_I32), opnd); } asm_comment!(self, "save cfp->pc"); @@ -1800,10 +1809,6 @@ impl Assembler let cfp_sp = Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP); self.store(cfp_sp, Opnd::Reg(Assembler::SCRATCH_REG)); - asm_comment!(self, "rewind caller frames"); - self.mov(C_ARG_OPNDS[0], Assembler::return_addr_opnd()); - self.ccall(Self::rewind_caller_frames as *const u8, vec![]); - asm_comment!(self, "exit to the interpreter"); self.frame_teardown(); self.mov(C_RET_OPND, Opnd::UImm(Qundef.as_u64())); @@ -1814,13 +1819,6 @@ impl Assembler } Some(()) } - - #[unsafe(no_mangle)] - extern "C" fn rewind_caller_frames(addr: *const u8) { - if ZJITState::is_iseq_return_addr(addr) { - unimplemented!("Can't side-exit from JIT-JIT call: rewind_caller_frames is not implemented yet"); - } - } } impl fmt::Debug for Assembler { diff --git a/zjit/src/backend/x86_64/mod.rs b/zjit/src/backend/x86_64/mod.rs index cf62cdd7f5..d83fc184f9 100644 --- a/zjit/src/backend/x86_64/mod.rs +++ b/zjit/src/backend/x86_64/mod.rs @@ -109,11 +109,6 @@ impl Assembler vec![RAX_REG, RCX_REG, RDX_REG, RSI_REG, RDI_REG, R8_REG, 
R9_REG, R10_REG, R11_REG] } - /// Get the address that the current frame returns to - pub fn return_addr_opnd() -> Opnd { - Opnd::mem(64, Opnd::Reg(RSP_REG), 0) - } - // These are the callee-saved registers in the x86-64 SysV ABI // RBX, RSP, RBP, and R12–R15 @@ -298,19 +293,24 @@ impl Assembler let opnd1 = asm.load(*src); asm.mov(*dest, opnd1); }, - (Opnd::Mem(_), Opnd::UImm(value)) => { - // 32-bit values will be sign-extended - if imm_num_bits(*value as i64) > 32 { + (Opnd::Mem(Mem { num_bits, .. }), Opnd::UImm(value)) => { + // For 64 bit destinations, 32-bit values will be sign-extended + if *num_bits == 64 && imm_num_bits(*value as i64) > 32 { let opnd1 = asm.load(*src); asm.mov(*dest, opnd1); } else { asm.mov(*dest, *src); } }, - (Opnd::Mem(_), Opnd::Imm(value)) => { - if imm_num_bits(*value) > 32 { + (Opnd::Mem(Mem { num_bits, .. }), Opnd::Imm(value)) => { + // For 64 bit destinations, 32-bit values will be sign-extended + if *num_bits == 64 && imm_num_bits(*value) > 32 { let opnd1 = asm.load(*src); asm.mov(*dest, opnd1); + } else if uimm_num_bits(*value as u64) <= *num_bits { + // If the bit string is short enough for the destination, use the unsigned representation. + // Note that 64-bit and negative values are ruled out. + asm.mov(*dest, Opnd::UImm(*value as u64)); } else { asm.mov(*dest, *src); } @@ -859,20 +859,17 @@ impl Assembler } } -/* #[cfg(test)] mod tests { - use crate::disasm::assert_disasm; - #[cfg(feature = "disasm")] - use crate::disasm::{unindent, disasm_addr_range}; - + use crate::assertions::assert_disasm; use super::*; fn setup_asm() -> (Assembler, CodeBlock) { - (Assembler::new(0), CodeBlock::new_dummy(1024)) + (Assembler::new(), CodeBlock::new_dummy()) } #[test] + #[ignore] fn test_emit_add_lt_32_bits() { let (mut asm, mut cb) = setup_asm(); @@ -883,6 +880,7 @@ mod tests { } #[test] + #[ignore] fn test_emit_add_gt_32_bits() { let (mut asm, mut cb) = setup_asm(); @@ -893,6 +891,7 @@ mod tests { } #[test] + #[ignore] fn test_emit_and_lt_32_bits() { let (mut asm, mut cb) = setup_asm(); @@ -903,6 +902,7 @@ mod tests { } #[test] + #[ignore] fn test_emit_and_gt_32_bits() { let (mut asm, mut cb) = setup_asm(); @@ -957,6 +957,7 @@ mod tests { } #[test] + #[ignore] fn test_emit_or_lt_32_bits() { let (mut asm, mut cb) = setup_asm(); @@ -967,6 +968,7 @@ mod tests { } #[test] + #[ignore] fn test_emit_or_gt_32_bits() { let (mut asm, mut cb) = setup_asm(); @@ -977,6 +979,7 @@ mod tests { } #[test] + #[ignore] fn test_emit_sub_lt_32_bits() { let (mut asm, mut cb) = setup_asm(); @@ -987,6 +990,7 @@ mod tests { } #[test] + #[ignore] fn test_emit_sub_gt_32_bits() { let (mut asm, mut cb) = setup_asm(); @@ -1017,6 +1021,7 @@ mod tests { } #[test] + #[ignore] fn test_emit_xor_lt_32_bits() { let (mut asm, mut cb) = setup_asm(); @@ -1027,6 +1032,7 @@ mod tests { } #[test] + #[ignore] fn test_emit_xor_gt_32_bits() { let (mut asm, mut cb) = setup_asm(); @@ -1050,6 +1056,7 @@ mod tests { } #[test] + #[ignore] fn test_merge_lea_mem() { let (mut asm, mut cb) = setup_asm(); @@ -1064,6 +1071,7 @@ mod tests { } #[test] + #[ignore] fn test_replace_cmp_0() { let (mut asm, mut cb) = setup_asm(); @@ -1216,6 +1224,7 @@ mod tests { } #[test] + #[ignore] fn test_reorder_c_args_with_insn_out() { let (mut asm, mut cb) = setup_asm(); @@ -1259,15 +1268,16 @@ mod tests { asm.compile_with_num_regs(&mut cb, 1); - assert_disasm!(cb, "48837b1001b804000000480f4f03488903", {" + assert_disasm!(cb, "48837b1001bf04000000480f4f3b48893b", {" 0x0: cmp qword ptr [rbx + 0x10], 1 - 0x5: mov eax, 4 - 0xa: cmovg rax, 
qword ptr [rbx] - 0xe: mov qword ptr [rbx], rax + 0x5: mov edi, 4 + 0xa: cmovg rdi, qword ptr [rbx] + 0xe: mov qword ptr [rbx], rdi "}); } #[test] + #[ignore] fn test_csel_split() { let (mut asm, mut cb) = setup_asm(); @@ -1284,6 +1294,19 @@ mod tests { 0x13: mov qword ptr [rbx], rax "}); } -} -*/ + #[test] + fn test_mov_m32_imm32() { + let (mut asm, mut cb) = setup_asm(); + + let shape_opnd = Opnd::mem(32, C_RET_OPND, 0); + asm.mov(shape_opnd, Opnd::UImm(0x8000_0001)); + asm.mov(shape_opnd, Opnd::Imm(0x8000_0001)); + + asm.compile_with_num_regs(&mut cb, 0); + assert_disasm!(cb, "c70001000080c70001000080", {" + 0x0: mov dword ptr [rax], 0x80000001 + 0x6: mov dword ptr [rax], 0x80000001 + "}); + } +} diff --git a/zjit/src/codegen.rs b/zjit/src/codegen.rs index 0dbe815c71..8ced09d40a 100644 --- a/zjit/src/codegen.rs +++ b/zjit/src/codegen.rs @@ -258,7 +258,7 @@ fn gen_insn(cb: &mut CodeBlock, jit: &mut JITState, asm: &mut Assembler, functio Insn::IfTrue { val, target } => return gen_if_true(jit, asm, opnd!(val), target), Insn::IfFalse { val, target } => return gen_if_false(jit, asm, opnd!(val), target), Insn::SendWithoutBlock { call_info, cd, state, self_val, args, .. } => gen_send_without_block(jit, asm, call_info, *cd, &function.frame_state(*state), self_val, args)?, - Insn::SendWithoutBlockDirect { iseq, self_val, args, .. } => gen_send_without_block_direct(cb, jit, asm, *iseq, opnd!(self_val), args)?, + Insn::SendWithoutBlockDirect { cme, iseq, self_val, args, state, .. } => gen_send_without_block_direct(cb, jit, asm, *cme, *iseq, opnd!(self_val), args, &function.frame_state(*state))?, Insn::Return { val } => return Some(gen_return(asm, opnd!(val))?), Insn::FixnumAdd { left, right, state } => gen_fixnum_add(jit, asm, opnd!(left), opnd!(right), &function.frame_state(*state))?, Insn::FixnumSub { left, right, state } => gen_fixnum_sub(jit, asm, opnd!(left), opnd!(right), &function.frame_state(*state))?, @@ -275,6 +275,8 @@ fn gen_insn(cb: &mut CodeBlock, jit: &mut JITState, asm: &mut Assembler, functio Insn::PatchPoint(_) => return Some(()), // For now, rb_zjit_bop_redefined() panics. 
TODO: leave a patch point and fix rb_zjit_bop_redefined() Insn::CCall { cfun, args, name: _, return_type: _, elidable: _ } => gen_ccall(jit, asm, *cfun, args)?, Insn::GetIvar { self_val, id, state: _ } => gen_getivar(asm, opnd!(self_val), *id), + Insn::SetGlobal { id, val, state: _ } => gen_setglobal(asm, *id, opnd!(val)), + Insn::GetGlobal { id, state: _ } => gen_getglobal(asm, *id), Insn::SetIvar { self_val, id, val, state: _ } => gen_setivar(asm, opnd!(self_val), *id, opnd!(val)), _ => { debug!("ZJIT: gen_function: unexpected insn {:?}", insn); @@ -317,6 +319,24 @@ fn gen_setivar(asm: &mut Assembler, recv: Opnd, id: ID, val: Opnd) -> Opnd { ) } +/// Look up global variables +fn gen_getglobal(asm: &mut Assembler, id: ID) -> Opnd { + asm_comment!(asm, "call rb_gvar_get"); + asm.ccall( + rb_gvar_get as *const u8, + vec![Opnd::UImm(id.0)], + ) +} + +/// Set global variables +fn gen_setglobal(asm: &mut Assembler, id: ID, val: Opnd) -> Opnd { + asm_comment!(asm, "call rb_gvar_set"); + asm.ccall( + rb_gvar_set as *const u8, + vec![Opnd::UImm(id.0), val], + ) +} + /// Compile an interpreter entry block to be inserted into an ISEQ fn gen_entry_prologue(asm: &mut Assembler, iseq: IseqPtr) { asm_comment!(asm, "ZJIT entry point: {}", iseq_get_location(iseq, 0)); @@ -464,8 +484,16 @@ fn gen_send_without_block( self_val: &InsnId, args: &Vec<InsnId>, ) -> Option<lir::Opnd> { - // Spill the receiver and the arguments onto the stack. They need to be marked by GC and may be caller-saved registers. + // Spill locals onto the stack. + // TODO: Don't spill locals eagerly; lazily reify frames + asm_comment!(asm, "spill locals"); + for (idx, &insn_id) in state.locals().enumerate() { + asm.mov(Opnd::mem(64, SP, (-local_idx_to_ep_offset(jit.iseq, idx) - 1) * SIZEOF_VALUE_I32), jit.get_opnd(insn_id)?); + } + // Spill the receiver and the arguments onto the stack. + // They need to be on the interpreter stack to let the interpreter access them. // TODO: Avoid spilling operands that have been spilled before. + asm_comment!(asm, "spill receiver and arguments"); for (idx, &insn_id) in [*self_val].iter().chain(args.iter()).enumerate() { // Currently, we don't move the SP register. So it's equal to the base pointer. 
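// (Rough illustration, assuming 64-bit VALUEs of 8 bytes: with SP doubling as the
// base pointer here, the receiver lands at [SP + 0] and argument i at [SP + 8 * (i + 1)],
// i.e. idx * SIZEOF_VALUE_I32 bytes above the frame's stack base.)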
let stack_opnd = Opnd::mem(64, SP, idx as i32 * SIZEOF_VALUE_I32); @@ -495,10 +523,40 @@ fn gen_send_without_block_direct( cb: &mut CodeBlock, jit: &mut JITState, asm: &mut Assembler, + cme: *const rb_callable_method_entry_t, iseq: IseqPtr, recv: Opnd, args: &Vec<InsnId>, + state: &FrameState, ) -> Option<lir::Opnd> { + // Save cfp->pc and cfp->sp for the caller frame + gen_save_pc(asm, state); + gen_save_sp(asm, state.stack().len() - args.len() - 1); // -1 for receiver + + // Spill the virtual stack and the locals of the caller onto the stack + // TODO: Lazily materialize caller frames on side exits or when needed + asm_comment!(asm, "spill locals and stack"); + for (idx, &insn_id) in state.locals().enumerate() { + asm.mov(Opnd::mem(64, SP, (-local_idx_to_ep_offset(jit.iseq, idx) - 1) * SIZEOF_VALUE_I32), jit.get_opnd(insn_id)?); + } + for (idx, &insn_id) in state.stack().enumerate() { + asm.mov(Opnd::mem(64, SP, idx as i32 * SIZEOF_VALUE_I32), jit.get_opnd(insn_id)?); + } + + // Set up the new frame + // TODO: Lazily materialize caller frames on side exits or when needed + gen_push_frame(asm, args.len(), state, ControlFrame { + recv, + iseq, + cme, + frame_type: VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, + }); + + asm_comment!(asm, "switch to new SP register"); + let local_size = unsafe { get_iseq_body_local_table_size(iseq) } as usize; + let new_sp = asm.add(SP, ((state.stack().len() + local_size - args.len() + VM_ENV_DATA_SIZE as usize) * SIZEOF_VALUE).into()); + asm.mov(SP, new_sp); + asm_comment!(asm, "switch to new CFP"); let new_cfp = asm.sub(CFP, RUBY_SIZEOF_CONTROL_FRAME.into()); asm.mov(CFP, new_cfp); @@ -517,7 +575,15 @@ fn gen_send_without_block_direct( jit.branch_iseqs.push((branch.clone(), iseq)); // TODO(max): Add a PatchPoint here that can side-exit the function if the callee messed with // the frame's locals - Some(asm.ccall_with_branch(dummy_ptr, c_args, &branch)) + let ret = asm.ccall_with_branch(dummy_ptr, c_args, &branch); + + // If a callee side-exits, i.e. returns Qundef, propagate the return value to the caller. + // The caller will side-exit the callee into the interpreter. + // TODO: Let side exit code pop all JIT frames to optimize away this cmp + je. 
+ asm.cmp(ret, Qundef.into()); + asm.je(ZJITState::get_exit_trampoline().into()); + + Some(ret) } /// Compile an array duplication instruction @@ -729,6 +795,45 @@ fn gen_save_sp(asm: &mut Assembler, stack_size: usize) { asm.mov(cfp_sp, sp_addr); } +/// Frame metadata written by gen_push_frame() +struct ControlFrame { + recv: Opnd, + iseq: IseqPtr, + cme: *const rb_callable_method_entry_t, + frame_type: u32, +} + +/// Compile an interpreter frame +fn gen_push_frame(asm: &mut Assembler, argc: usize, state: &FrameState, frame: ControlFrame) { + // Locals are written by the callee frame on side-exits or non-leaf calls + + // See vm_push_frame() for details + asm_comment!(asm, "push cme, specval, frame type"); + // ep[-2]: cref of cme + let local_size = unsafe { get_iseq_body_local_table_size(frame.iseq) } as i32; + let ep_offset = state.stack().len() as i32 + local_size - argc as i32 + VM_ENV_DATA_SIZE as i32 - 1; + asm.store(Opnd::mem(64, SP, (ep_offset - 2) * SIZEOF_VALUE_I32), VALUE::from(frame.cme).into()); + // ep[-1]: block_handler or prev EP + // block_handler is not supported for now + asm.store(Opnd::mem(64, SP, (ep_offset - 1) * SIZEOF_VALUE_I32), VM_BLOCK_HANDLER_NONE.into()); + // ep[0]: ENV_FLAGS + asm.store(Opnd::mem(64, SP, ep_offset * SIZEOF_VALUE_I32), frame.frame_type.into()); + + // Write to the callee CFP + fn cfp_opnd(offset: i32) -> Opnd { + Opnd::mem(64, CFP, offset - (RUBY_SIZEOF_CONTROL_FRAME as i32)) + } + + asm_comment!(asm, "push callee control frame"); + // cfp_opnd(RUBY_OFFSET_CFP_PC): written by the callee frame on side-exits or non-leaf calls + // cfp_opnd(RUBY_OFFSET_CFP_SP): written by the callee frame on side-exits or non-leaf calls + asm.mov(cfp_opnd(RUBY_OFFSET_CFP_ISEQ), VALUE::from(frame.iseq).into()); + asm.mov(cfp_opnd(RUBY_OFFSET_CFP_SELF), frame.recv); + let ep = asm.lea(Opnd::mem(64, SP, ep_offset * SIZEOF_VALUE_I32)); + asm.mov(cfp_opnd(RUBY_OFFSET_CFP_EP), ep); + asm.mov(cfp_opnd(RUBY_OFFSET_CFP_BLOCK_CODE), 0.into()); +} + /// Return a register we use for the basic block argument at a given index fn param_reg(idx: usize) -> Reg { // To simplify the implementation, allocate a fixed register for each basic block argument for now. @@ -744,10 +849,13 @@ fn param_reg(idx: usize) -> Reg { /// Inverse of ep_offset_to_local_idx(). See ep_offset_to_local_idx() for details. 
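/// (Worked example, assuming VM_ENV_DATA_SIZE == 3: an ISEQ with 3 locals maps
/// local 0 to EP offset 3 - 0 - 1 + 3 = 5 and local 2 to 3 - 2 - 1 + 3 = 3,
/// per local_size_and_idx_to_ep_offset() below.)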
fn local_idx_to_ep_offset(iseq: IseqPtr, local_idx: usize) -> i32 { - let local_table_size: i32 = unsafe { get_iseq_body_local_table_size(iseq) } - .try_into() - .unwrap(); - local_table_size - local_idx as i32 - 1 + VM_ENV_DATA_SIZE as i32 + let local_size = unsafe { get_iseq_body_local_table_size(iseq) }; + local_size_and_idx_to_ep_offset(local_size as usize, local_idx) +} + +/// Convert the number of locals and a local index to an offset in the EP +pub fn local_size_and_idx_to_ep_offset(local_size: usize, local_idx: usize) -> i32 { + local_size as i32 - local_idx as i32 - 1 + VM_ENV_DATA_SIZE as i32 } /// Convert ISEQ into High-level IR @@ -796,9 +904,8 @@ impl Assembler { move |code_ptr, _| { start_branch.start_addr.set(Some(code_ptr)); }, - move |code_ptr, cb| { + move |code_ptr, _| { end_branch.end_addr.set(Some(code_ptr)); - ZJITState::add_iseq_return_addr(code_ptr.raw_ptr(cb)); }, ) } diff --git a/zjit/src/cruby.rs b/zjit/src/cruby.rs index d5be47e026..de1c86e8d6 100644 --- a/zjit/src/cruby.rs +++ b/zjit/src/cruby.rs @@ -485,18 +485,6 @@ impl VALUE { unsafe { rb_obj_shape_id(self) } } - pub fn shape_of(self) -> *mut rb_shape { - unsafe { - let shape = rb_shape_lookup(self.shape_id_of()); - - if shape.is_null() { - panic!("Shape should not be null"); - } else { - shape - } - } - } - pub fn embedded_p(self) -> bool { unsafe { FL_TEST_RAW(self, VALUE(ROBJECT_EMBED as usize)) != VALUE(0) diff --git a/zjit/src/cruby_bindings.inc.rs b/zjit/src/cruby_bindings.inc.rs index 0447f46fd0..bcc8f48c37 100644 --- a/zjit/src/cruby_bindings.inc.rs +++ b/zjit/src/cruby_bindings.inc.rs @@ -226,6 +226,7 @@ pub const imemo_parser_strterm: imemo_type = 10; pub const imemo_callinfo: imemo_type = 11; pub const imemo_callcache: imemo_type = 12; pub const imemo_constcache: imemo_type = 13; +pub const imemo_class_fields: imemo_type = 14; pub type imemo_type = u32; pub const METHOD_VISI_UNDEF: rb_method_visibility_t = 0; pub const METHOD_VISI_PUBLIC: rb_method_visibility_t = 1; @@ -395,26 +396,6 @@ pub const VM_ENV_FLAG_ISOLATED: vm_frame_env_flags = 16; pub type vm_frame_env_flags = u32; pub type attr_index_t = u16; pub type shape_id_t = u32; -pub type redblack_id_t = u32; -pub type redblack_node_t = redblack_node; -#[repr(C)] -pub struct rb_shape { - pub edges: VALUE, - pub edge_name: ID, - pub ancestor_index: *mut redblack_node_t, - pub parent_id: shape_id_t, - pub next_field_index: attr_index_t, - pub capacity: attr_index_t, - pub type_: u8, -} -pub type rb_shape_t = rb_shape; -#[repr(C)] -pub struct redblack_node { - pub key: ID, - pub value: *mut rb_shape_t, - pub l: redblack_id_t, - pub r: redblack_id_t, -} #[repr(C)] pub struct rb_cvar_class_tbl_entry { pub index: u32, @@ -865,7 +846,6 @@ unsafe extern "C" { pub fn rb_obj_info(obj: VALUE) -> *const ::std::os::raw::c_char; pub fn rb_ec_stack_check(ec: *mut rb_execution_context_struct) -> ::std::os::raw::c_int; pub fn rb_shape_id_offset() -> i32; - pub fn rb_shape_lookup(shape_id: shape_id_t) -> *mut rb_shape_t; pub fn rb_obj_shape_id(obj: VALUE) -> shape_id_t; pub fn rb_shape_get_iv_index(shape_id: shape_id_t, id: ID, value: *mut attr_index_t) -> bool; pub fn rb_shape_transition_add_ivar_no_warnings(obj: VALUE, id: ID) -> shape_id_t; diff --git a/zjit/src/hir.rs b/zjit/src/hir.rs index 86e87d72ac..45a9024ca9 100644 --- a/zjit/src/hir.rs +++ b/zjit/src/hir.rs @@ -392,6 +392,11 @@ pub enum Insn { Defined { op_type: usize, obj: VALUE, pushval: VALUE, v: InsnId }, GetConstantPath { ic: *const iseq_inline_constant_cache }, + /// Get a global variable named 
`id` + GetGlobal { id: ID, state: InsnId }, + /// Set a global variable named `id` to `val` + SetGlobal { id: ID, val: InsnId, state: InsnId }, + //NewObject? /// Get an instance variable `id` from `self_val` GetIvar { self_val: InsnId, id: ID, state: InsnId }, @@ -421,7 +426,15 @@ pub enum Insn { /// Ignoring keyword arguments etc for now SendWithoutBlock { self_val: InsnId, call_info: CallInfo, cd: *const rb_call_data, args: Vec<InsnId>, state: InsnId }, Send { self_val: InsnId, call_info: CallInfo, cd: *const rb_call_data, blockiseq: IseqPtr, args: Vec<InsnId>, state: InsnId }, - SendWithoutBlockDirect { self_val: InsnId, call_info: CallInfo, cd: *const rb_call_data, iseq: IseqPtr, args: Vec<InsnId>, state: InsnId }, + SendWithoutBlockDirect { + self_val: InsnId, + call_info: CallInfo, + cd: *const rb_call_data, + cme: *const rb_callable_method_entry_t, + iseq: IseqPtr, + args: Vec<InsnId>, + state: InsnId, + }, /// Control flow instructions Return { val: InsnId }, @@ -459,7 +472,7 @@ impl Insn { Insn::ArraySet { .. } | Insn::Snapshot { .. } | Insn::Jump(_) | Insn::IfTrue { .. } | Insn::IfFalse { .. } | Insn::Return { .. } | Insn::PatchPoint { .. } | Insn::SetIvar { .. } | Insn::ArrayExtend { .. } - | Insn::ArrayPush { .. } | Insn::SideExit { .. } => false, + | Insn::ArrayPush { .. } | Insn::SideExit { .. } | Insn::SetGlobal { .. } => false, _ => true, } } @@ -625,6 +638,8 @@ impl<'a> std::fmt::Display for InsnPrinter<'a> { Insn::DefinedIvar { self_val, id, .. } => write!(f, "DefinedIvar {self_val}, :{}", id.contents_lossy().into_owned()), Insn::GetIvar { self_val, id, .. } => write!(f, "GetIvar {self_val}, :{}", id.contents_lossy().into_owned()), Insn::SetIvar { self_val, id, val, .. } => write!(f, "SetIvar {self_val}, :{}, {val}", id.contents_lossy().into_owned()), + Insn::GetGlobal { id, .. } => write!(f, "GetGlobal :{}", id.contents_lossy().into_owned()), + Insn::SetGlobal { id, val, .. } => write!(f, "SetGlobal :{}, {val}", id.contents_lossy().into_owned()), Insn::ToArray { val, .. } => write!(f, "ToArray {val}"), Insn::ToNewArray { val, .. } => write!(f, "ToNewArray {val}"), Insn::ArrayExtend { left, right, .. } => write!(f, "ArrayExtend {left}, {right}"), @@ -950,10 +965,11 @@ impl Function { args: args.iter().map(|arg| find!(*arg)).collect(), state: *state, }, - SendWithoutBlockDirect { self_val, call_info, cd, iseq, args, state } => SendWithoutBlockDirect { + SendWithoutBlockDirect { self_val, call_info, cd, cme, iseq, args, state } => SendWithoutBlockDirect { self_val: find!(*self_val), call_info: call_info.clone(), cd: *cd, + cme: *cme, iseq: *iseq, args: args.iter().map(|arg| find!(*arg)).collect(), state: *state, @@ -982,6 +998,8 @@ impl Function { } &NewRange { low, high, flag, state } => NewRange { low: find!(low), high: find!(high), flag, state: find!(state) }, ArrayMax { elements, state } => ArrayMax { elements: find_vec!(*elements), state: find!(*state) }, + &GetGlobal { id, state } => GetGlobal { id, state }, + &SetGlobal { id, val, state } => SetGlobal { id, val: find!(val), state }, &GetIvar { self_val, id, state } => GetIvar { self_val: find!(self_val), id, state }, &SetIvar { self_val, id, val, state } => SetIvar { self_val: find!(self_val), id, val, state }, &ToArray { val, state } => ToArray { val: find!(val), state }, @@ -1012,7 +1030,7 @@ impl Function { assert!(self.insns[insn.0].has_output()); match &self.insns[insn.0] { Insn::Param { .. } => unimplemented!("params should not be present in block.insns"), - Insn::ArraySet { .. } | Insn::Snapshot { .. 
} | Insn::Jump(_) + Insn::SetGlobal { .. } | Insn::ArraySet { .. } | Insn::Snapshot { .. } | Insn::Jump(_) | Insn::IfTrue { .. } | Insn::IfFalse { .. } | Insn::Return { .. } | Insn::PatchPoint { .. } | Insn::SetIvar { .. } | Insn::ArrayExtend { .. } | Insn::ArrayPush { .. } | Insn::SideExit { .. } => @@ -1063,6 +1081,7 @@ impl Function { Insn::DefinedIvar { .. } => types::BasicObject, Insn::GetConstantPath { .. } => types::BasicObject, Insn::ArrayMax { .. } => types::BasicObject, + Insn::GetGlobal { .. } => types::BasicObject, Insn::GetIvar { .. } => types::BasicObject, Insn::ToNewArray { .. } => types::ArrayExact, Insn::ToArray { .. } => types::ArrayExact, @@ -1251,7 +1270,7 @@ impl Function { if let Some(expected) = guard_equal_to { self_val = self.push_insn(block, Insn::GuardBitEquals { val: self_val, expected, state }); } - let send_direct = self.push_insn(block, Insn::SendWithoutBlockDirect { self_val, call_info, cd, iseq, args, state }); + let send_direct = self.push_insn(block, Insn::SendWithoutBlockDirect { self_val, call_info, cd, cme, iseq, args, state }); self.make_equal_to(insn_id, send_direct); } Insn::GetConstantPath { ic } => { @@ -1568,7 +1587,8 @@ impl Function { | Insn::Test { val } | Insn::IsNil { val } => worklist.push_back(val), - Insn::GuardType { val, state, .. } + Insn::SetGlobal { val, state, .. } + | Insn::GuardType { val, state, .. } | Insn::GuardBitEquals { val, state, .. } | Insn::ToArray { val, state } | Insn::ToNewArray { val, state } => { @@ -1635,6 +1655,7 @@ impl Function { worklist.push_back(val); worklist.push_back(state); } + Insn::GetGlobal { state, .. } | Insn::SideExit { state } => worklist.push_back(state), } } @@ -2355,6 +2376,10 @@ pub fn iseq_to_hir(iseq: *const rb_iseq_t) -> Result<Function, ParseError> { break; // Don't enqueue the next block as a successor } + // These are opt_send_without_block and all the opt_* instructions + // specialized to a certain method that could also be serviced + // using the general send implementation. The optimizer starts from + // a general send for all of these later in the pipeline.
YARVINSN_opt_nil_p | YARVINSN_opt_plus | YARVINSN_opt_minus | @@ -2371,6 +2396,12 @@ pub fn iseq_to_hir(iseq: *const rb_iseq_t) -> Result<Function, ParseError> { YARVINSN_opt_length | YARVINSN_opt_size | YARVINSN_opt_aref | + YARVINSN_opt_empty_p | + YARVINSN_opt_succ | + YARVINSN_opt_and | + YARVINSN_opt_or | + YARVINSN_opt_not | + YARVINSN_opt_regexpmatch2 | YARVINSN_opt_send_without_block => { let cd: *const rb_call_data = get_arg(pc, 0).as_ptr(); let call_info = unsafe { rb_get_call_data_ci(cd) }; @@ -2425,6 +2456,18 @@ pub fn iseq_to_hir(iseq: *const rb_iseq_t) -> Result<Function, ParseError> { let send = fun.push_insn(block, Insn::Send { self_val: recv, call_info: CallInfo { method_name }, cd, blockiseq, args, state: exit_id }); state.stack_push(send); } + YARVINSN_getglobal => { + let id = ID(get_arg(pc, 0).as_u64()); + let exit_id = fun.push_insn(block, Insn::Snapshot { state: exit_state }); + let result = fun.push_insn(block, Insn::GetGlobal { id, state: exit_id }); + state.stack_push(result); + } + YARVINSN_setglobal => { + let id = ID(get_arg(pc, 0).as_u64()); + let exit_id = fun.push_insn(block, Insn::Snapshot { state: exit_state }); + let val = state.stack_pop()?; + fun.push_insn(block, Insn::SetGlobal { id, val, state: exit_id }); + } YARVINSN_getinstancevariable => { let id = ID(get_arg(pc, 0).as_u64()); // ic is in arg 1 @@ -3702,6 +3745,35 @@ mod tests { } #[test] + fn test_setglobal() { + eval(" + def test = $foo = 1 + test + "); + assert_method_hir_with_opcode("test", YARVINSN_setglobal, expect![[r#" + fn test: + bb0(v0:BasicObject): + v2:Fixnum[1] = Const Value(1) + SetGlobal :$foo, v2 + Return v2 + "#]]); + } + + #[test] + fn test_getglobal() { + eval(" + def test = $foo + test + "); + assert_method_hir_with_opcode("test", YARVINSN_getglobal, expect![[r#" + fn test: + bb0(v0:BasicObject): + v3:BasicObject = GetGlobal :$foo + Return v3 + "#]]); + } + + #[test] fn test_splatarray_mut() { eval(" def test(a) = [*a] @@ -3807,6 +3879,84 @@ mod tests { } #[test] + fn opt_empty_p() { + eval(" + def test(x) = x.empty? + "); + assert_method_hir_with_opcode("test", YARVINSN_opt_empty_p, expect![[r#" + fn test: + bb0(v0:BasicObject, v1:BasicObject): + v4:BasicObject = SendWithoutBlock v1, :empty? + Return v4 + "#]]); + } + + #[test] + fn opt_succ() { + eval(" + def test(x) = x.succ + "); + assert_method_hir_with_opcode("test", YARVINSN_opt_succ, expect![[r#" + fn test: + bb0(v0:BasicObject, v1:BasicObject): + v4:BasicObject = SendWithoutBlock v1, :succ + Return v4 + "#]]); + } + + #[test] + fn opt_and() { + eval(" + def test(x, y) = x & y + "); + assert_method_hir_with_opcode("test", YARVINSN_opt_and, expect![[r#" + fn test: + bb0(v0:BasicObject, v1:BasicObject, v2:BasicObject): + v5:BasicObject = SendWithoutBlock v1, :&, v2 + Return v5 + "#]]); + } + + #[test] + fn opt_or() { + eval(" + def test(x, y) = x | y + "); + assert_method_hir_with_opcode("test", YARVINSN_opt_or, expect![[r#" + fn test: + bb0(v0:BasicObject, v1:BasicObject, v2:BasicObject): + v5:BasicObject = SendWithoutBlock v1, :|, v2 + Return v5 + "#]]); + } + + #[test] + fn opt_not() { + eval(" + def test(x) = !x + "); + assert_method_hir_with_opcode("test", YARVINSN_opt_not, expect![[r#" + fn test: + bb0(v0:BasicObject, v1:BasicObject): + v4:BasicObject = SendWithoutBlock v1, :! 
+ Return v4 + "#]]); + } + + #[test] + fn opt_regexpmatch2() { + eval(" + def test(regexp, matchee) = regexp =~ matchee + "); + assert_method_hir_with_opcode("test", YARVINSN_opt_regexpmatch2, expect![[r#" + fn test: + bb0(v0:BasicObject, v1:BasicObject, v2:BasicObject): + v5:BasicObject = SendWithoutBlock v1, :=~, v2 + Return v5 + "#]]); + } + + #[test] fn test_branchnil() { eval(" def test(x) = x&.itself diff --git a/zjit/src/lib.rs b/zjit/src/lib.rs index 8ccb6ae4c1..9d139b9801 100644 --- a/zjit/src/lib.rs +++ b/zjit/src/lib.rs @@ -21,3 +21,5 @@ mod disasm; mod options; mod profile; mod invariants; +#[cfg(test)] +mod assertions; diff --git a/zjit/src/state.rs b/zjit/src/state.rs index e8c389a5f8..acaac850c3 100644 --- a/zjit/src/state.rs +++ b/zjit/src/state.rs @@ -1,10 +1,10 @@ -use std::collections::HashSet; - use crate::cruby::{self, rb_bug_panic_hook, EcPtr, Qnil, VALUE}; use crate::cruby_methods; use crate::invariants::Invariants; use crate::options::Options; use crate::asm::CodeBlock; +use crate::backend::lir::{Assembler, C_RET_OPND}; +use crate::virtualmem::CodePtr; #[allow(non_upper_case_globals)] #[unsafe(no_mangle)] @@ -32,8 +32,8 @@ pub struct ZJITState { /// Properties of core library methods method_annotations: cruby_methods::Annotations, - /// The address of the instruction that JIT-to-JIT calls return to - iseq_return_addrs: HashSet<*const u8>, + /// Trampoline to propagate a callee's side exit to the caller + exit_trampoline: Option<CodePtr>, } /// Private singleton instance of the codegen globals @@ -88,9 +88,14 @@ impl ZJITState { invariants: Invariants::default(), assert_compiles: false, method_annotations: cruby_methods::init(), - iseq_return_addrs: HashSet::new(), + exit_trampoline: None, }; unsafe { ZJIT_STATE = Some(zjit_state); } + + // Generate trampolines after initializing ZJITState, which Assembler will use + let cb = ZJITState::get_code_block(); + let exit_trampoline = Self::gen_exit_trampoline(cb).unwrap(); + ZJITState::get_instance().exit_trampoline = Some(exit_trampoline); } /// Return true if zjit_state has been initialized @@ -133,14 +138,17 @@ impl ZJITState { instance.assert_compiles = true; } - /// Record an address that a JIT-to-JIT call returns to - pub fn add_iseq_return_addr(addr: *const u8) { - ZJITState::get_instance().iseq_return_addrs.insert(addr); + /// Generate a trampoline to propagate a callee's side exit to the caller + fn gen_exit_trampoline(cb: &mut CodeBlock) -> Option<CodePtr> { + let mut asm = Assembler::new(); + asm.frame_teardown(); + asm.cret(C_RET_OPND); + asm.compile(cb).map(|(start_ptr, _)| start_ptr) } - /// Returns true if a JIT-to-JIT call returns to a given address - pub fn is_iseq_return_addr(addr: *const u8) -> bool { - ZJITState::get_instance().iseq_return_addrs.contains(&addr) + /// Get the trampoline to propagate a callee's side exit to the caller + pub fn get_exit_trampoline() -> CodePtr { + ZJITState::get_instance().exit_trampoline.unwrap() } } |
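
The state.rs hunk above replaces the per-return-address bookkeeping with a single exit trampoline: a JIT-compiled callee reports a side exit by returning Qundef, the caller's generated `cmp`/`je` (see the codegen.rs hunk) forwards control to the trampoline, and the trampoline tears down the frame and returns Qundef in turn, so the exit propagates up the JIT-to-JIT call chain until the interpreter takes over. Below is a minimal sketch of that propagation in plain Rust; `QUNDEF`, `callee`, and `caller` are illustrative stand-ins, not ZJIT APIs.

```rust
/// Qundef stand-in: a reserved sentinel meaning "this JIT frame side-exited".
const QUNDEF: u64 = u64::MAX;

/// Toy callee frame: either produces a value or reports a side exit.
fn callee(side_exit: bool) -> u64 {
    if side_exit {
        // A real callee jumps to the exit trampoline, which tears the frame
        // down and hands Qundef back to its caller.
        return QUNDEF;
    }
    42
}

/// Toy caller frame mirroring the generated `cmp ret, Qundef; je trampoline`
/// sequence: a side exit in the callee is forwarded instead of being consumed.
fn caller(side_exit: bool) -> u64 {
    let ret = callee(side_exit);
    if ret == QUNDEF {
        return QUNDEF; // propagate the exit up the JIT-to-JIT call chain
    }
    ret + 1
}

fn main() {
    assert_eq!(caller(false), 43);    // ordinary JIT-to-JIT return
    assert_eq!(caller(true), QUNDEF); // side exit bubbles up to the top
}
```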