Benchmarking

using Distributed
@everywhere using MonotoneDecomposition

Candidate functions

# Candidate ground-truth functions to benchmark: polynomials, exp, sigmoid, and
# Gaussian-process samples named by kernel and scale (SE, Matérn 1/2 & 3/2,
# Rational Quadratic, Periodic).
# Fix: use a comma-separated Vector instead of a space-separated 1×12 Matrix —
# linear indexing (`fs[idxf]`), `length(fs)`, and `pmap` iteration all behave
# identically, and a Vector is the idiomatic container for a 1-D list of names.
fs = ["x^2", "x^3", "exp(x)", "sigmoid", "SE_1", "SE_0.1", "Mat12_1", "Mat12_0.1", "Mat32_1", "Mat32_0.1", "RQ_0.1_0.5", "Periodic_0.1_4"]

# ---- Default experiment configuration (overridden by CLI arguments below) ----
nrep = 1        # repetitions per noise level (paper uses 100)
nλ = 2          # number of λ values to search
nfold = 2       # number of cross-validation folds
idxf = 1:2      # subset of `fs` to run locally (all of `fs` in CLI mode)
competitor = "ss_single_lambda"
# A "single_lambda" competitor searches only one λ regardless of the default.
nλ = ifelse(occursin("single_lambda", competitor), 1, nλ)
one_se_rule = false
resfolder0 = "/tmp"
# `date -Iseconds` yields an ISO-8601 timestamp; replace ":" with "_" so the
# string is a valid directory name on all filesystems.
timestamp = replace(strip(read(`date -Iseconds`, String)), ":" => "_")
if length(ARGS) > 0
    @info "Use args passed from CLI"
    # Expected CLI order: competitor resfolder nλ nrep nfold one_se_rule [timestamp]
    competitor = ARGS[1]
    resfolder0 = ARGS[2]
    if !isdir(resfolder0)
        # mkpath also creates any missing parent directories (mkdir would fail)
        mkpath(resfolder0)
    end
    nλ = parse(Int, ARGS[3])
    nrep = parse(Int, ARGS[4])
    nfold = parse(Int, ARGS[5])
    one_se_rule = parse(Bool, ARGS[6])
    idxf = 1:length(fs)  # in CLI mode, benchmark every candidate function
    if length(ARGS) > 6
        timestamp = ARGS[end] # passed from scripts (fix: was misspelled `AGRS`)
    end
end
# Encode the full configuration into the results folder name for traceability.
resfolder = joinpath(resfolder0, "nrep$nrep-nfold$nfold-nlam$nλ-1se$(one_se_rule)-$competitor-$timestamp")
if !isdir(resfolder)
    mkpath(resfolder)
end
@info "Results are saved into $resfolder"

# Benchmark every selected candidate function in parallel across the workers.
# Only the "x^3" job displays a progress bar so the console is not flooded
# with one bar per worker.
pmap(fs[idxf]) do f
    benchmarking(
        f;
        σs = [0.1, 0.2, 0.4, 0.5, 1.0, 1.5, 2.0], # noise levels to survey
        jplot = false,            # do not plot μerr vs σs
        nrep = nrep,              # NB: nrep = 1 for a fast run; the paper uses nrep = 100
        competitor = competitor,
        nfold = nfold,            # number of cross-validation folds
        one_se_rule = one_se_rule,
        nλ = nλ,                  # number of λ values searched
        rλ = 0.5,                 # search region of λ: (1-rλ, 1+rλ)*λ
        resfolder = resfolder,
        verbose = false,
        show_progress = f == "x^3" # keep exactly one progress bar
    )
end;
[ Info: Results are saved into /tmp/nrep1-nfold2-nlam1-1sefalse-ss_single_lambda-2025-05-03T06_50_21+00_00
[ Info: Benchmarking x^2 with 1 repetitions
[ Info: σ = 0.1, resulting SNR = 9.124734452410856
[ Info: σ = 0.2, resulting SNR = 2.368051262365274
[ Info: σ = 0.4, resulting SNR = 0.5767455137937239
[ Info: σ = 0.5, resulting SNR = 0.3511119998564668
[ Info: σ = 1.0, resulting SNR = 0.0911734637991093
[ Info: σ = 1.5, resulting SNR = 0.03971233282509224
┌ Warning: the optimal is on the right boundary of λs
└ @ MonotoneDecomposition ~/work/MonotoneDecomposition.jl/MonotoneDecomposition.jl/src/mono_decomp.jl:200
[ Info: σ = 2.0, resulting SNR = 0.02264847156348041
[ Info: Benchmarking x^3 with 1 repetitions
[ Info: σ = 0.1, resulting SNR = 14.080654958346955
[ Info: σ = 0.2, resulting SNR = 3.043060298416267

x^3 (nrep = 1), iter = 1:  29%|███████▏                 |  ETA: 0:00:10[ Info: σ = 0.4, resulting SNR = 0.9236522090290988

x^3 (nrep = 1), iter = 1:  43%|██████████▊              |  ETA: 0:00:08[ Info: σ = 0.5, resulting SNR = 0.5406874093311401

x^3 (nrep = 1), iter = 1:  57%|██████████████▎          |  ETA: 0:00:06[ Info: σ = 1.0, resulting SNR = 0.15560632761374799

x^3 (nrep = 1), iter = 1:  71%|█████████████████▉       |  ETA: 0:00:04[ Info: σ = 1.5, resulting SNR = 0.06202823063330562

x^3 (nrep = 1), iter = 1:  86%|█████████████████████▍   |  ETA: 0:00:02[ Info: σ = 2.0, resulting SNR = 0.034644599132219574
┌ Warning: the optimal is on the right boundary of λs
└ @ MonotoneDecomposition ~/work/MonotoneDecomposition.jl/MonotoneDecomposition.jl/src/mono_decomp.jl:200

x^3 (nrep = 1), iter = 1: 100%|█████████████████████████| Time: 0:00:13
Run from the command line:
julia examples/benchmark.jl ss_single_lambda /tmp 2 1 2 false

You can also enable the debug mode to print more internal steps, as follows:

JULIA_DEBUG=MonotoneDecomposition julia examples/benchmark.jl ss_single_lambda /tmp 2 1 2 false

Summarize the results:

# MonotoneDecomposition.summary(resfolder = resfolder, format = "tex")