chore: code cleanup and parameter optimization
- Remove redundant comments and debug information
- Adjust default parameters in training scripts
- Clean up code in lora_trainer and trainer implementations
OleehyO committed Jan 13, 2025
1 parent 954ba28 commit f0f8316
Showing 4 changed files with 2 additions and 13 deletions.
1 change: 0 additions & 1 deletion finetune/models/cogvideox_t2v/lora_trainer.py
@@ -197,7 +197,6 @@ def prepare_rotary_positional_embeddings(
             base_num_frames = num_frames
         else:
             base_num_frames = (num_frames + transformer_config.patch_size_t - 1) // transformer_config.patch_size_t
-        breakpoint()
         freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
             embed_dim=transformer_config.attention_head_dim,
             crops_coords=None,
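The deleted `breakpoint()` was a leftover debugging statement in `prepare_rotary_positional_embeddings`; the line kept above it rounds the frame count up to a whole number of temporal patches. A minimal sketch of that ceil division, using illustrative values that are not taken from the repository's configs:

    # Hypothetical values for illustration; in lora_trainer.py they come from
    # num_frames and transformer_config.patch_size_t.
    num_frames = 49
    patch_size_t = 2

    # Ceil division: round the frame count up to a whole number of temporal patches,
    # matching the expression kept in the hunk above.
    base_num_frames = (num_frames + patch_size_t - 1) // patch_size_t
    print(base_num_frames)  # 25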
2 changes: 1 addition & 1 deletion finetune/train_zero_i2v.sh
@@ -49,7 +49,7 @@ SYSTEM_ARGS=(
 CHECKPOINT_ARGS=(
     --checkpointing_steps 10
     --checkpointing_limit 2
-    --resume_from_checkpoint "/absolute/path/to/checkpoint_dir" # if you want to resume from a checkpoint, otherwise, comment this line
+    # --resume_from_checkpoint "/absolute/path/to/checkpoint_dir" # if you want to resume from a checkpoint, otherwise, comment this line
 )
 
 # Validation Configuration
2 changes: 1 addition & 1 deletion finetune/train_zero_t2v.sh
@@ -48,7 +48,7 @@ SYSTEM_ARGS=(
 CHECKPOINT_ARGS=(
     --checkpointing_steps 10
     --checkpointing_limit 2
-    --resume_from_checkpoint "/absolute/path/to/checkpoint_dir" # if you want to resume from a checkpoint, otherwise, comment this line
+    # --resume_from_checkpoint "/absolute/path/to/checkpoint_dir" # if you want to resume from a checkpoint, otherwise, comment this line
 )
 
 # Validation Configuration
10 changes: 0 additions & 10 deletions finetune/trainer.py
@@ -758,16 +758,6 @@ def load_model_hook(models, input_dir):
         self.accelerator.register_save_state_pre_hook(save_model_hook)
         self.accelerator.register_load_state_pre_hook(load_model_hook)
 
-    # def __maybe_save_checkpoint(self, global_step: int, must_save: bool = False):
-    #     if self.accelerator.distributed_type == DistributedType.DEEPSPEED or self.accelerator.is_main_process:
-    #         if must_save or global_step % self.args.checkpointing_steps == 0:
-    #             save_path = get_intermediate_ckpt_path(
-    #                 checkpointing_limit=self.args.checkpointing_limit,
-    #                 step=global_step,
-    #                 output_dir=self.args.output_dir,
-    #             )
-    #             self.accelerator.save_state(save_path, safe_serialization=True)
-
     def __maybe_save_checkpoint(self, global_step: int, must_save: bool = False):
         if self.accelerator.distributed_type == DistributedType.DEEPSPEED or self.accelerator.is_main_process:
             if must_save or global_step % self.args.checkpointing_steps == 0:
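The ten deleted lines were a commented-out duplicate of the live `__maybe_save_checkpoint` method whose body the diff truncates just below. A sketch of that checkpointing pattern, reconstructed only from the lines visible in this hunk (the `get_intermediate_ckpt_path` call and its arguments are taken from the removed comment and are not verified against the rest of trainer.py):

    def __maybe_save_checkpoint(self, global_step: int, must_save: bool = False):
        # Under DeepSpeed every rank participates in saving; otherwise only the main process writes.
        if self.accelerator.distributed_type == DistributedType.DEEPSPEED or self.accelerator.is_main_process:
            # Save on the configured interval, or unconditionally when must_save is set.
            if must_save or global_step % self.args.checkpointing_steps == 0:
                save_path = get_intermediate_ckpt_path(
                    checkpointing_limit=self.args.checkpointing_limit,
                    step=global_step,
                    output_dir=self.args.output_dir,
                )
                self.accelerator.save_state(save_path, safe_serialization=True)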
