#!/bin/bash
#
# ADUC-SDR container entrypoint.
#
# Sizes threading environment variables to the available hardware (vCPUs,
# GPUs), exports performance/debug tuning knobs, verifies dependencies via
# setup.py, then exec's the container command (CMD / docker run args).
#
# NOTE: the shebang must be the very first line of the file to be honored
# by the kernel; do not add anything above it.

# Strict mode: abort on command failure (-e), on use of unset variables
# (-u), and on a failure in any stage of a pipeline (pipefail).
set -euo pipefail

echo "🚀 ADUC-SDR Entrypoint: Configurando o ambiente de execução..."
# --- Hardware detection --------------------------------------------------

# Number of virtual CPUs visible to this container/host.
NUM_VCPUS=$(nproc)

# Number of GPUs. nvidia-smi prints one "count" line per installed GPU, so
# only the first line is taken. If nvidia-smi is missing (CPU-only host) or
# fails (no driver), fall back to 0 instead of aborting under `set -e`, and
# instead of leaving NUM_GPUS empty (which would break the arithmetic below).
NUM_GPUS=""
if command -v nvidia-smi >/dev/null 2>&1; then
  NUM_GPUS=$(nvidia-smi --query-gpu=count --format=csv,noheader 2>/dev/null | head -n 1 || true)
fi
# Validate: anything empty or non-numeric is treated as "no GPUs".
case "${NUM_GPUS}" in
  '' | *[!0-9]*) NUM_GPUS=0 ;;
esac

echo " > Hardware: ${NUM_VCPUS} vCPUs, ${NUM_GPUS} GPUs"

# Heuristic: each GPU worker gets half of its per-GPU share of the vCPUs
# (headroom for data loading and the OS); CPU-only runs use half of all vCPUs.
if (( NUM_GPUS > 0 )); then
  VCPUS_PER_GPU=$(( NUM_VCPUS / NUM_GPUS ))
  THREADS_PER_PROCESS=$(( VCPUS_PER_GPU / 2 ))
else
  THREADS_PER_PROCESS=$(( NUM_VCPUS / 2 ))
fi

# Clamp to a sane range so very small and very large machines both behave.
MIN_THREADS=4
MAX_THREADS=16
if (( THREADS_PER_PROCESS < MIN_THREADS )); then THREADS_PER_PROCESS=${MIN_THREADS}; fi
if (( THREADS_PER_PROCESS > MAX_THREADS )); then THREADS_PER_PROCESS=${MAX_THREADS}; fi
|
|
# --- Runtime tuning and debugging knobs ----------------------------------
# Every variable honours a value already present in the environment; the
# `:=` assignments below only supply defaults for unset/empty variables.
: "${OMP_NUM_THREADS:=${THREADS_PER_PROCESS}}"        # OpenMP thread pool size
: "${MKL_NUM_THREADS:=${THREADS_PER_PROCESS}}"        # Intel MKL thread pool size
: "${MAX_JOBS:=${NUM_VCPUS}}"                         # parallel build/compile jobs
: "${PYTORCH_CUDA_ALLOC_CONF:=max_split_size_mb:512}" # curb CUDA allocator fragmentation
: "${NVIDIA_TF32_OVERRIDE:=1}"                        # allow TF32 math on capable GPUs
: "${ADUC_LOG_LEVEL:=INFO}"                           # application log verbosity
: "${CUDA_LAUNCH_BLOCKING:=0}"                        # 1 = synchronous CUDA launches (debug)
: "${GRADIO_DEBUG:=False}"                            # Gradio debug mode
export OMP_NUM_THREADS MKL_NUM_THREADS MAX_JOBS PYTORCH_CUDA_ALLOC_CONF \
  NVIDIA_TF32_OVERRIDE ADUC_LOG_LEVEL CUDA_LAUNCH_BLOCKING GRADIO_DEBUG

# Always enable faulthandler so hard Python crashes dump tracebacks.
export PYTHONFAULTHANDLER=1

echo " > Performance: OMP_NUM_THREADS=${OMP_NUM_THREADS}, MKL_NUM_THREADS=${MKL_NUM_THREADS}"
echo " > Depuração: ADUC_LOG_LEVEL=${ADUC_LOG_LEVEL}, CUDA_LAUNCH_BLOCKING=${CUDA_LAUNCH_BLOCKING}"
echo ""
echo ""
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- Dependency verification and handoff ---------------------------------

# setup.py validates/installs runtime dependencies. Under `set -e` a
# non-zero exit here deliberately aborts the container start.
echo " > Verificando dependências com setup.py..."
python3 /app/setup.py

echo "---------------------------------------------------------"
# Use $* (not $@) inside a quoted message: $* joins all arguments into one
# word for display, which is what we want when logging the command line.
echo "🔥 Ambiente configurado. Iniciando o comando principal: $*"

# Replace this shell with the container command so it becomes PID 1 and
# receives signals (SIGTERM, etc.) directly from the runtime.
exec "$@"