-
Notifications
You must be signed in to change notification settings - Fork 0
/
run_eval.sh
14 lines (6 loc) · 4.64 KB
/
run_eval.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
# Audio Only
# Evaluate each fold's audio-only model on the multimodal manifests using
# that fold's best checkpoint. All five runs share the same flags; only the
# fold index and checkpoint step differ, so the invocations are generated
# in a loop instead of five copy-pasted command lines.
set -u -o pipefail

# Best checkpoint step for each fold (array index == fold number).
# NOTE(review): steps taken from the original hard-coded paths — confirm
# against the checkpoints actually present under save_ao/<fold>/.
readonly -a BEST_CHECKPOINTS=(2925 2475 2250 1800 1575)

# Flags shared by every fold's evaluation run (byte-identical to the
# original invocations).
common_args=(
  --train_manifest_path=dataset/mm_train_metadata.csv
  --valid_manifest_path=dataset/mm_valid_metadata.csv
  --test_manifest_path=dataset/mm_test_metadata.csv
  --num_workers=8
  --preprocessing_num_workers=8
  --audio_column_name=audio_path
  --text_column_name=text_path
  --video_column_name=lip_image_path
  --per_device_train_batch_size=16
  --per_device_eval_batch_size=16
  --dataloader_num_workers=32
  --dataloader_pin_memory
  --seed=0
  --num_train_epochs=20
  --learning_rate=5e-5
  --fp16
  --fp16_backend=amp
  --logging_strategy=steps
  --logging_steps=10
  --report_to=tensorboard
  --evaluation_strategy=epoch
  --eval_steps=1
  --eval_accumulation_steps=100
  --save_steps=1
  --save_strategy=epoch
  --save_total_limit=1
  --metric_for_best_model=mer
  --greater_is_better=False
  --load_best_model_at_end=True
)

for fold in "${!BEST_CHECKPOINTS[@]}"; do
  # Keep going if one fold fails (matches the original script, which ran
  # each line unconditionally), but report the failure on stderr instead
  # of exiting silently.
  CUDA_VISIBLE_DEVICES=0 python eval.py \
    --output_dir="./save_ao/${fold}/eval" \
    --model_name_or_path="save_ao/${fold}/checkpoint-${BEST_CHECKPOINTS[$fold]}" \
    "${common_args[@]}" \
    || printf 'warning: eval failed for fold %s\n' "$fold" >&2
done