Parameter | Value |
--- | --- |
localbinary | /beegfs/hackathon/users/eoseret/qaas_runs_test/176-060-7658/intel/llama.cpp/run/oneview_runs/defaults/orig/oneview_results_1760608126/binaries/exec |
keep_executable_location | false |
_scalability_bins | { }, |
multiruns_params | { }, |
basebinary | exec |
is_all_external_libraries_in_cc | false |
delay | 0 |
ranges_count | 20 |
__scalability_reference | true |
decan_threshold | 500 |
repetitions | 31 |
excluded_areas | { }, |
decan_multi_variant | true |
dataset_handler | link |
cqa_params | { }, |
object_coverage_threshold | 0.1 |
profile_stop | { unit = none ; }, |
lprof_params | btm=fp |
run_command | <executable> -m meta-llama-3.1-8b-instruct-Q8_0.gguf -t 192 -n 0 -p 512 -r 3 |
vprof_params | |
maximal_path_number | 4 |
base_run_index | 0 |
qplot_path | nil |
environment_variables | { }, |
__profile_start | true |
_is_loaded | /beegfs/hackathon/users/eoseret/qaas_runs_test/176-060-7658/intel/llama.cpp/run/oneview_runs/defaults/orig/oneview_results_1760608126/shared/run_0/config.json |
scalability_reference | main |
outliers_count | 0 |
thread_filter_threshold | 1% |
number_processes_per_node | 1 |
lprof_post_process_params | { }, |
job_submission_threshold | 0s |
throughput_max_core | 0 |
throughput_core | -1 |
__maximal_path_number | true |
optimizer_loop_count | 10 |
is_sudo_available | false |
number_nodes | 1 |
bucket_threshold | 1 |
number_processes | 1 |
base_run_name | orig_0 |
filter_decan | { type = all ; }, |
profile_start | { unit = none ; }, |
__profile_stop | true |
dataset | |
source_code_location | |
comments | |
experiment_name | |
external_libraries | { 1 = /beegfs/hackathon/users/eoseret/qaas_runs_test/176-060-7658/intel/llama.cpp/build/llama.cpp/../build/bin/libggml-base.so ; 2 = /beegfs/hackathon/users/eoseret/qaas_runs_test/176-060-7658/intel/llama.cpp/build/llama.cpp/../build/bin/libggml-blas.so ; 3 = /beegfs/hackathon/users/eoseret/qaas_runs_test/176-060-7658/intel/llama.cpp/build/llama.cpp/../build/bin/libggml-cpu.so ; 4 = /beegfs/hackathon/users/eoseret/qaas_runs_test/176-060-7658/intel/llama.cpp/build/llama.cpp/../build/bin/libggml.so ; 5 = /beegfs/hackathon/users/eoseret/qaas_runs_test/176-060-7658/intel/llama.cpp/build/llama.cpp/../build/bin/libllama.so ; }, |
mpi_command | mpirun -n <number_processes> |
run_directory | /beegfs/hackathon/users/eoseret/qaas_runs_test/176-060-7658/intel/llama.cpp/run/oneview_runs/defaults/orig/oneview_run_1760608126 |
included_areas | { }, |
binary | /beegfs/hackathon/users/eoseret/qaas_runs_test/176-060-7658/intel/llama.cpp/run/oneview_runs/defaults/orig/exec |
additional_hwc | { }, |
pinning_command | |
filter | { type = number ; value = 1 ; }, |
__filter | true |
decan_all_variants | true |
decan_params | |
scripts | { files = { }, variables = { }, }, |
_is_custom_categories | true |
custom_categories | { { type = library ; value = libggml-base.so ; }, { type = library ; value = libggml-blas.so ; }, { type = library ; value = libggml-cpu.so ; }, { type = library ; value = libggml.so ; }, { type = library ; value = libllama.so ; }, }, |
frequencies | { 1 = 0 ; }, |
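
For orientation, below is a minimal, hypothetical Python sketch of how the parameters above could combine into the measured invocation: the `<executable>` placeholder in `run_command` substituted with the `binary` path, launched from `run_directory`, and repeated `repetitions` times. How OneView actually performs this substitution and repetition internally is an assumption here, not something stated in the table.

```python
# Hypothetical sketch only -- not part of the OneView output above.
# It illustrates how run_command, binary, run_directory and repetitions
# from the configuration table could be combined into a benchmark run.
import shlex
import subprocess

# Values copied from the table above.
binary = ("/beegfs/hackathon/users/eoseret/qaas_runs_test/176-060-7658/"
          "intel/llama.cpp/run/oneview_runs/defaults/orig/exec")
run_directory = ("/beegfs/hackathon/users/eoseret/qaas_runs_test/176-060-7658/"
                 "intel/llama.cpp/run/oneview_runs/defaults/orig/"
                 "oneview_run_1760608126")
run_command = ("<executable> -m meta-llama-3.1-8b-instruct-Q8_0.gguf "
               "-t 192 -n 0 -p 512 -r 3")
repetitions = 31  # assumption: the tool reruns the command this many times

# Substitute the <executable> placeholder and split into argv.
cmd = shlex.split(run_command.replace("<executable>", binary))

# Run the benchmark from the run directory, once per repetition.
for _ in range(repetitions):
    subprocess.run(cmd, cwd=run_directory, check=True)
```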