#!/usr/bin/env bash
#$ -N name
# name of the experiment
#$ -l cuda=1
# remove this line when no GPU is needed!
#$ -q all.q
# do not fill the qlogin queue
#$ -cwd
# start processes in current directory
#$ -V
# provide environment variables
##$ -t 1-10
# array job (currently disabled): would start 10 instances, with task IDs 1 to 10
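# Hypothetical submission example (assumes a standard SGE/UGE setup; the queue
# and resource names above must exist on your cluster):
#   qsub train.sh        # submit this script as a batch job
#   qstat -u "$USER"     # check its status in the queue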
# Experiments from the Wave-U-Net paper:
#########################################
# M3
# TODO: output activation function tanh?
# TODO: difference output layer is missing
# Uses a conv with a sinc low-pass filter and the given stride instead of plain decimation.
# Uses a transposed conv with a sinc low-pass filter and the given stride instead of linear interpolation.
# --output_size 0.743 seconds => 0.743 s * 22050 Hz ~= 16384 samples (output size in the paper).
#########################################
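# Quick sanity check of the arithmetic above (a sketch, not part of the
# experiments; any Python 3 works):
#   python3 -c 'print(0.743 * 22050)'   # 16383.15, i.e. ~16384 samples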
# with musdb
# 1st train
#python train.py --hdf_dir=/home/space/datasets/musdb/hdf --dataset_dir=/home/space/datasets/musdb --cuda --instruments accompaniment vocals \
#--cycles 1 --sr 22050 --channels 1 --output_size 0.743 --patience 20 --separate 0 --features 24 \
#--lr 1e-4 --min_lr 1e-4 --batch_size 16 --levels 13 --depth 1 --downsampling_kernel_size 15 --bottleneck_kernel_size 15 \
#--upsampling_kernel_size 5 --strides 2 --loss L2 --conv_type normal --res naive --feature_growth add --num_convs 1
# 2nd train (refined: fine-tune from the 1st-train checkpoint with a lower learning rate and a larger batch size)
#python train.py --hdf_dir=/home/space/datasets/musdb/hdf --dataset_dir=/home/space/datasets/musdb \
#--load_model=/home/pml_17/checkpoints/waveunet/job_M3_musdb_acc_vocals_sr22050_mono_task0_exp0/checkpoint_98940 --cuda --instruments accompaniment vocals \
#--cycles 1 --sr 22050 --channels 1 --output_size 0.743 --patience 20 --separate 0 --features 24 \
#--lr 1e-5 --min_lr 1e-5 --batch_size 32 --levels 13 --depth 1 --downsampling_kernel_size 15 --bottleneck_kernel_size 15 \
#--upsampling_kernel_size 5 --strides 2 --loss L2 --conv_type normal --res naive --feature_growth add --num_convs 1
# with musdb_extended
# 1st train
#python train.py --hdf_dir=/home/space/datasets/musdb_extended/hdf --dataset_dir=/home/space/datasets/musdb_extended --cuda --instruments accompaniment vocals \
#--cycles 1 --sr 22050 --channels 1 --output_size 0.743 --patience 20 --separate 0 --features 24 \
#--lr 1e-4 --min_lr 1e-4 --batch_size 16 --levels 13 --depth 1 --downsampling_kernel_size 15 --bottleneck_kernel_size 15 \
#--upsampling_kernel_size 5 --strides 2 --loss L2 --conv_type normal --res naive --feature_growth add --num_convs 1
# test (evaluation only: load the trained checkpoint, skip training)
#python train.py --hdf_dir=/home/space/datasets/musdb/hdf --dataset_dir=/home/space/datasets/musdb --cuda --instruments accompaniment vocals \
#--load_model=/home/pml_17/checkpoints/waveunet/job_M3_musdb_ext_acc_vocals_sr22050_mono_task0_exp0/checkpoint_765232 --skip_training \
#--cycles 1 --sr 22050 --channels 1 --output_size 0.743 --patience 20 --separate 0 --features 24 \
#--lr 1e-4 --min_lr 1e-4 --batch_size 16 --levels 13 --depth 1 --downsampling_kernel_size 15 --bottleneck_kernel_size 15 \
#--upsampling_kernel_size 5 --strides 2 --loss L2 --conv_type normal --res naive --feature_growth add --num_convs 1
# 2nd train (refined: fine-tune from the 1st-train checkpoint with a lower learning rate and a larger batch size)
#python train.py --hdf_dir=/home/space/datasets/musdb_extended/hdf --dataset_dir=/home/space/datasets/musdb_extended \
#--load_model=/home/pml_17/checkpoints/waveunet/job_M3_musdb_acc_vocals_sr22050_mono_task0_exp0/checkpoint_98940 --cuda --instruments accompaniment vocals \
#--cycles 1 --sr 22050 --channels 1 --output_size 0.743 --patience 20 --separate 0 --features 24 \
#--lr 1e-5 --min_lr 1e-5 --batch_size 32 --levels 13 --depth 1 --downsampling_kernel_size 15 --bottleneck_kernel_size 15 \
#--upsampling_kernel_size 5 --strides 2 --loss L2 --conv_type normal --res naive --feature_growth add --num_convs 1
#########################################
# Default configuration
#########################################
# with musdb
#python train.py --hdf_dir=/home/space/datasets/musdb/hdf --dataset_dir=/home/space/datasets/musdb --cuda --instruments accompaniment vocals --sr 22050 --channels 1
# with musdb_extended
#python train.py --hdf_dir=/home/space/datasets/musdb_extended/hdf --dataset_dir=/home/space/datasets/musdb_extended --cuda --instruments accompaniment vocals --sr 22050 --channels 1
# test on musdb with the default model trained on musdb_extended
#python train.py --hdf_dir=/home/space/datasets/musdb/hdf --dataset_dir=/home/space/datasets/musdb --cuda --instruments accompaniment vocals \
#--load_model=/home/pml_17/checkpoints/waveunet/job_default_pytorch_musdb_ext_acc_vocals_sr22050_mono_task0_exp0/checkpoint_1091600 \
#--sr 22050 --channels 1
#########################################
# Model selection
#########################################
# X below stands for the best value found in the preceding step.
# 1) Learning params (3 learning rates x 3 batch sizes = 9 runs)
#python train.py --hdf_dir=/home/space/datasets/musdb/hdf --dataset_dir=/home/space/datasets/musdb --cuda --instruments accompaniment vocals --sr 22050 --channels 1 --patience 8 \
#--features 24 --levels 6 --depth 1 --loss L2 --num_convs 2 --res fixed --cycles 2 --conv_type normal \
#--lr 1e-3 1e-4 1e-5 --min_lr 1e-6 --batch_size 8 16 32
# 2) Normalization (optional) -> choose gn (group norm) for batch_size <= 16, bn (batch norm) for 32.
#python train.py --hdf_dir=/home/space/datasets/musdb/hdf --dataset_dir=/home/space/datasets/musdb --cuda --instruments accompaniment vocals --sr 22050 --channels 1 --patience 8 \
#--features 24 --levels 6 --depth 1 --loss L2 --num_convs 2 --res fixed --cycles 2 --lr X --min_lr 1e-6 --batch_size X \
#--conv_type normal gn bn
# 3) Resampling (x3)
#python train.py --hdf_dir=/home/space/datasets/musdb/hdf --dataset_dir=/home/space/datasets/musdb --cuda --instruments accompaniment vocals --sr 22050 --channels 1 --patience 8 \
#--features 24 --levels 6 --depth 1 --loss L2 --num_convs 2 --cycles 2 --lr X --min_lr 1e-6 --batch_size X --conv_type X \
#--res naive fixed learned
# 4) Model size (x8)
#python train.py --hdf_dir=/home/space/datasets/musdb/hdf --dataset_dir=/home/space/datasets/musdb --cuda --instruments accompaniment vocals --sr 22050 --channels 1 --patience 8 \
#--loss L2 --num_convs 2 --res naive --cycles 2 --lr 1e-3 --min_lr 1e-6 --batch_size 32 --conv_type gn \
#--features 24 32 --levels 6 8 --depth 1 --feature_growth add
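# If train.py did not accept multiple values per flag, the same sweep could be
# driven from the shell instead (hypothetical sketch for step 1):
#for lr in 1e-3 1e-4 1e-5; do
#  for bs in 8 16 32; do
#    python train.py --hdf_dir=/home/space/datasets/musdb/hdf --dataset_dir=/home/space/datasets/musdb \
#      --cuda --instruments accompaniment vocals --sr 22050 --channels 1 --patience 8 \
#      --features 24 --levels 6 --depth 1 --loss L2 --num_convs 2 --res fixed --cycles 2 --conv_type normal \
#      --lr "$lr" --min_lr 1e-6 --batch_size "$bs"
#  done
#done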
# Best model trained with L2 loss
python train.py --hdf_dir=/home/space/datasets/musdb/hdf --dataset_dir=/home/space/datasets/musdb --cuda --instruments accompaniment vocals --sr 22050 --channels 1 --patience 20 \
--loss L2 --num_convs 2 --res naive --cycles 2 --lr 1e-3 --min_lr 1e-6 --batch_size 32 --conv_type gn \
--features 32 --levels 6 --depth 1 --feature_growth double
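
# After training, the best model could be evaluated the same way as the test
# runs above (the checkpoint path here is a placeholder, not a real path):
#python train.py --hdf_dir=/home/space/datasets/musdb/hdf --dataset_dir=/home/space/datasets/musdb --cuda --instruments accompaniment vocals --sr 22050 --channels 1 \
#--load_model=/home/pml_17/checkpoints/waveunet/<best_job>/checkpoint_<step> --skip_training \
#--loss L2 --num_convs 2 --res naive --cycles 2 --lr 1e-3 --min_lr 1e-6 --batch_size 32 --conv_type gn \
#--features 32 --levels 6 --depth 1 --feature_growth double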