Benchmarking
using Distributed
@everywhere using MonotoneDecomposition
Candidate functions
# Candidate test functions: polynomials, sigmoid, and Gaussian-process samples
# from several kernels (SE, Matern 1/2 & 3/2, RQ, Periodic) with their parameters.
fs = ["x^2" "x^3" "exp(x)" "sigmoid" "SE_1" "SE_0.1" "Mat12_1" "Mat12_0.1" "Mat32_1" "Mat32_0.1" "RQ_0.1_0.5" "Periodic_0.1_4"]
# Default (quick local-run) settings; overridden by the CLI arguments below.
nrep = 1                        # repetitions per experiment
nλ = 2                          # number of λ values searched
nfold = 2                       # cross-validation folds
idxf = 1:2                      # subset of `fs` to run locally
competitor = "ss_single_lambda"
# A "single_lambda" competitor searches exactly one λ value.
nλ = occursin("single_lambda", competitor) ? 1 : nλ
one_se_rule = false
resfolder0 = "/tmp"
# ISO-8601 timestamp with ":" replaced so it is safe inside a directory name.
timestamp = replace(strip(read(`date -Iseconds`, String)), ":" => "_")
# Override the defaults with positional CLI arguments:
#   ARGS[1] competitor   ARGS[2] result folder   ARGS[3] nλ
#   ARGS[4] nrep         ARGS[5] nfold           ARGS[6] one_se_rule
#   ARGS[7] (optional)   timestamp passed from a driver script
if length(ARGS) > 0
    @info "Use args passed from CLI"
    competitor = ARGS[1]
    resfolder0 = ARGS[2]
    if !isdir(resfolder0)
        mkdir(resfolder0)
    end
    nλ = parse(Int, ARGS[3])
    nrep = parse(Int, ARGS[4])
    nfold = parse(Int, ARGS[5])
    one_se_rule = parse(Bool, ARGS[6])
    idxf = 1:length(fs)  # in batch mode, run every candidate function
    if length(ARGS) > 6
        # Bug fix: was `AGRS[end]` (undefined name), which threw an
        # UndefVarError whenever a 7th argument was supplied.
        timestamp = ARGS[end] # passed from scripts
    end
end
# Encode every experiment setting in the result-directory name so runs with
# different configurations never collide.
resfolder = joinpath(resfolder0, "nrep$nrep-nfold$nfold-nlam$nλ-1se$(one_se_rule)-$competitor-$timestamp")
isdir(resfolder) || mkdir(resfolder)
@info "Results are saved into $resfolder"
# Run `benchmarking` for each selected candidate function, distributed over
# the worker processes via `pmap` (Distributed).
pmap(
f->benchmarking(
f;
σs = [0.1, 0.2, 0.4, 0.5, 1.0, 1.5, 2.0], # noise levels to be surveyed
jplot = false, # do not plot μerr vs σs
nrep = nrep, # NB: for fast auto-generation procedure, only use nrep = 1; in the paper, use nrep = 100
competitor = competitor,
nfold = nfold, # number of cross-validation folds
one_se_rule = one_se_rule,
nλ = nλ, # the number of λ to be searched
rλ = 0.5, # the search region of λ, (1-rλ, 1+rλ)*λ
resfolder = resfolder, # where result files are written
verbose = false,
show_progress = f == "x^3" # keep one progressbar to avoid interleaved output
),
fs[idxf]
);
[ Info: Results are saved into /tmp/nrep1-nfold2-nlam1-1sefalse-ss_single_lambda-2025-09-07T10_04_59+00_00
[ Info: Benchmarking x^2 with 1 repetitions
[ Info: σ = 0.1, resulting SNR = 8.939393574819517
[ Info: σ = 0.2, resulting SNR = 2.2157542525563083
[ Info: σ = 0.4, resulting SNR = 0.5328047788433465
[ Info: σ = 0.5, resulting SNR = 0.35906518901115336
[ Info: σ = 1.0, resulting SNR = 0.08386341184936616
[ Info: σ = 1.5, resulting SNR = 0.037987290955304426
[ Info: σ = 2.0, resulting SNR = 0.0222852463726476
[ Info: Benchmarking x^3 with 1 repetitions
[ Info: σ = 0.1, resulting SNR = 15.584465603665109
[ Info: σ = 0.2, resulting SNR = 3.356604302747731
x^3 (nrep = 1), iter = 1: 29%|███████▏ | ETA: 0:00:11[ Info: σ = 0.4, resulting SNR = 0.8976222436053701
x^3 (nrep = 1), iter = 1: 43%|██████████▊ | ETA: 0:00:08[ Info: σ = 0.5, resulting SNR = 0.5015464555440978
x^3 (nrep = 1), iter = 1: 57%|██████████████▎ | ETA: 0:00:06[ Info: σ = 1.0, resulting SNR = 0.1405200712320286
x^3 (nrep = 1), iter = 1: 71%|█████████████████▉ | ETA: 0:00:04[ Info: σ = 1.5, resulting SNR = 0.06058340417580875
┌ Warning: the optimal is on the right boundary of λs
└ @ MonotoneDecomposition ~/work/MonotoneDecomposition.jl/MonotoneDecomposition.jl/src/mono_decomp.jl:206
x^3 (nrep = 1), iter = 1: 86%|█████████████████████▍ | ETA: 0:00:02[ Info: σ = 2.0, resulting SNR = 0.03512230944224016
┌ Warning: the optimal is on the right boundary of λs
└ @ MonotoneDecomposition ~/work/MonotoneDecomposition.jl/MonotoneDecomposition.jl/src/mono_decomp.jl:206
x^3 (nrep = 1), iter = 1: 100%|█████████████████████████| Time: 0:00:14
julia examples/benchmark.jl ss_single_lambda /tmp 2 1 2 false
You can also enable debug mode to print more internal steps as follows:
JULIA_DEBUG=MonotoneDecomposition julia examples/benchmark.jl ss_single_lambda /tmp 2 1 2 false
Summarize the results
# MonotoneDecomposition.summary(resfolder = resfolder, format = "tex")