Sink CodeInfo transformation into transform_result_for_cache
Several downstream consumers want to cache IRCode rather than CodeInfo.
Right now they need to override both `finish!` and `transform_result_for_cache`.
With this change, only overriding the latter should be sufficient.
As a nice bonus, we can avoid doing the work of converting to CodeInfo
for interpreters that don't cache at all, such as the REPL interpreter.
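
For illustration (editor's sketch, not part of the commit): with the new three-argument hook, an interpreter that wants to keep optimized IR in its cache could get by with a single override along these lines. `IRCachingInterp` is a hypothetical interpreter type, and storing `opt.ir` directly is an assumption of this sketch, not something the commit prescribes.

function Compiler.transform_result_for_cache(interp::IRCachingInterp,
                                             result::Compiler.InferenceResult,
                                             edges::Core.SimpleVector)
    opt = result.src
    # Keep the optimizer's IRCode instead of lowering it to a CodeInfo.
    if opt isa Compiler.OptimizationState && opt.ir !== nothing
        return opt.ir
    end
    # Everything else takes the default path (CodeInfo with `edges` attached).
    return @invoke Compiler.transform_result_for_cache(
        interp::Compiler.AbstractInterpreter, result::Compiler.InferenceResult,
        edges::Core.SimpleVector)
end

No matching `finish!` override is needed anymore; `finish!` now only collects the edges and hands the result to this hook.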
Keno committed Dec 23, 2024
1 parent dde5028 commit c02a83b
Showing 6 changed files with 34 additions and 20 deletions.
9 changes: 6 additions & 3 deletions Compiler/src/optimize.jl
@@ -226,10 +226,13 @@ include("ssair/passes.jl")
 include("ssair/irinterp.jl")
 
 function ir_to_codeinf!(opt::OptimizationState)
-    (; linfo, src) = opt
-    src = ir_to_codeinf!(src, opt.ir::IRCode)
-    src.edges = Core.svec(opt.inlining.edges...)
+    (; linfo, src, ir) = opt
+    if ir === nothing
+        return src
+    end
+    src = ir_to_codeinf!(src, ir::IRCode)
     opt.ir = nothing
+    opt.src = src
     maybe_validate_code(linfo, src, "optimized")
     return src
 end
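
A side effect of the early return above (editor's reading of the new code, not stated in the commit): converting the same OptimizationState twice is now harmless, because the second call finds `ir === nothing` and simply returns the stored `src`. Roughly:

# Assuming `opt::OptimizationState` whose optimization already produced `opt.ir`:
src1 = Compiler.ir_to_codeinf!(opt)  # converts opt.ir, clears it, stores the CodeInfo in opt.src
src2 = Compiler.ir_to_codeinf!(opt)  # ir === nothing, so the stored opt.src comes back
@assert src1 === src2

so more than one code path can request the conversion without coordinating who runs first.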
31 changes: 21 additions & 10 deletions Compiler/src/typeinfer.jl
@@ -92,20 +92,22 @@ If set to `true`, record per-method-instance timings within type inference in th
 __set_measure_typeinf(onoff::Bool) = __measure_typeinf__[] = onoff
 const __measure_typeinf__ = RefValue{Bool}(false)
 
-function finish!(interp::AbstractInterpreter, caller::InferenceState;
-                 can_discard_trees::Bool=may_discard_trees(interp))
+function result_edges(interp::AbstractInterpreter, caller::InferenceState)
     result = caller.result
     opt = result.src
-    if opt isa OptimizationState
-        src = ir_to_codeinf!(opt)
-        edges = src.edges::SimpleVector
-        caller.src = result.src = src
+    if isa(opt, OptimizationState)
+        return Core.svec(opt.inlining.edges...)
     else
-        edges = Core.svec(caller.edges...)
-        caller.src.edges = edges
+        return Core.svec(caller.edges...)
     end
+end
+
+function finish!(interp::AbstractInterpreter, caller::InferenceState;
+                 can_discard_trees::Bool=may_discard_trees(interp))
+    result = caller.result
     #@assert last(result.valid_worlds) <= get_world_counter() || isempty(caller.edges)
     if isdefined(result, :ci)
+        edges = result_edges(interp, caller)
         ci = result.ci
         # if we aren't cached, we don't need this edge
         # but our caller might, so let's just make it anyways
@@ -123,7 +125,7 @@ function finish!(interp::AbstractInterpreter, caller::InferenceState;
         relocatability = 0x1
         const_flag = is_result_constabi_eligible(result)
         if !can_discard_trees || (is_cached(caller) && !const_flag)
-            inferred_result = transform_result_for_cache(interp, result)
+            inferred_result = transform_result_for_cache(interp, result, edges)
             # TODO: do we want to augment edges here with any :invoke targets that we got from inlining (such that we didn't have a direct edge to it already)?
             relocatability = 0x0
             if inferred_result isa CodeInfo
@@ -221,7 +223,16 @@ function is_result_constabi_eligible(result::InferenceResult)
     return isa(result_type, Const) && is_foldable_nothrow(result.ipo_effects) && is_inlineable_constant(result_type.val)
 end
 
-transform_result_for_cache(::AbstractInterpreter, result::InferenceResult) = result.src
+function transform_result_for_cache(::AbstractInterpreter, result::InferenceResult, edges::SimpleVector)
+    src = result.src
+    if isa(src, OptimizationState)
+        src = ir_to_codeinf!(src)
+    end
+    if isa(src, CodeInfo)
+        src.edges = edges
+    end
+    return src
+end
 
 function maybe_compress_codeinfo(interp::AbstractInterpreter, mi::MethodInstance, ci::CodeInfo,
                                  can_discard_trees::Bool=may_discard_trees(interp))
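
A possible docstring for the new default method (editorial suggestion only; the wording below is not part of the commit):

"""
    transform_result_for_cache(interp::AbstractInterpreter, result::InferenceResult,
                               edges::SimpleVector)

Return the object `interp` wants to store in its code cache for `result`. The default
converts an `OptimizationState` to a `CodeInfo` via `ir_to_codeinf!` and attaches `edges`
to it; custom interpreters may instead return, e.g., an `IRCode`, wrapped custom data,
or `nothing` to skip caching inferred sources entirely.
"""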
6 changes: 3 additions & 3 deletions Compiler/test/AbstractInterpreter.jl
@@ -14,7 +14,7 @@ Compiler.may_optimize(::AbsIntOnlyInterp1) = false
 # it should work even if the interpreter discards inferred source entirely
 @newinterp AbsIntOnlyInterp2
 Compiler.may_optimize(::AbsIntOnlyInterp2) = false
-Compiler.transform_result_for_cache(::AbsIntOnlyInterp2, ::Compiler.InferenceResult) = nothing
+Compiler.transform_result_for_cache(::AbsIntOnlyInterp2, ::Compiler.InferenceResult, edges::Core.SimpleVector) = nothing
 @test Base.infer_return_type(Base.init_stdio, (Ptr{Cvoid},); interp=AbsIntOnlyInterp2()) >: IO
 
 # OverlayMethodTable
@@ -493,9 +493,9 @@ struct CustomData
     inferred
     CustomData(@nospecialize inferred) = new(inferred)
 end
-function Compiler.transform_result_for_cache(interp::CustomDataInterp, result::Compiler.InferenceResult)
+function Compiler.transform_result_for_cache(interp::CustomDataInterp, result::Compiler.InferenceResult, edges::Core.SimpleVector)
     inferred_result = @invoke Compiler.transform_result_for_cache(
-        interp::Compiler.AbstractInterpreter, result::Compiler.InferenceResult)
+        interp::Compiler.AbstractInterpreter, result::Compiler.InferenceResult, edges::Core.SimpleVector)
     return CustomData(inferred_result)
 end
 function Compiler.src_inlining_policy(interp::CustomDataInterp, @nospecialize(src),
2 changes: 1 addition & 1 deletion src/codegen.cpp
@@ -10063,7 +10063,7 @@ jl_llvm_functions_t jl_emit_codeinst(
         JL_GC_POP();
         return jl_emit_oc_wrapper(m, params, mi, codeinst->rettype);
     }
-    if (src && (jl_value_t*)src != jl_nothing && jl_is_method(def))
+    if (src && (jl_value_t*)src != jl_nothing && jl_is_method(def) && jl_is_string(src))
         src = jl_uncompress_ir(def, codeinst, (jl_value_t*)src);
     if (!src || !jl_is_code_info(src)) {
         JL_GC_POP();
2 changes: 1 addition & 1 deletion stdlib/REPL/src/REPLCompletions.jl
@@ -603,7 +603,7 @@ CC.cache_owner(::REPLInterpreter) = REPLCacheToken()
 CC.may_optimize(::REPLInterpreter) = false
 
 # REPLInterpreter doesn't need any sources to be cached, so discard them aggressively
-CC.transform_result_for_cache(::REPLInterpreter, ::CC.InferenceResult) = nothing
+CC.transform_result_for_cache(::REPLInterpreter, ::CC.InferenceResult, edges::Core.SimpleVector) = nothing
 
 # REPLInterpreter analyzes a top-level frame, so better to not bail out from it
 CC.bail_out_toplevel_call(::REPLInterpreter, ::CC.InferenceLoopState, ::CC.InferenceState) = false
4 changes: 2 additions & 2 deletions test/precompile_absint2.jl
@@ -22,9 +22,9 @@ precompile_test_harness() do load_path
     inferred
     CustomData(@nospecialize inferred) = new(inferred)
 end
-function Compiler.transform_result_for_cache(interp::PrecompileInterpreter, result::Compiler.InferenceResult)
+function Compiler.transform_result_for_cache(interp::PrecompileInterpreter, result::Compiler.InferenceResult, edges::Core.SimpleVector)
     inferred_result = @invoke Compiler.transform_result_for_cache(
-        interp::Compiler.AbstractInterpreter, result::Compiler.InferenceResult)
+        interp::Compiler.AbstractInterpreter, result::Compiler.InferenceResult, edges::Core.SimpleVector)
     return CustomData(inferred_result)
 end
 function Compiler.src_inlining_policy(interp::PrecompileInterpreter, @nospecialize(src),
