Commit

Disable quantization in llama config
mwaskom committed Feb 6, 2024
1 parent 5b2d0f0 commit 3aa70b0
Showing 1 changed file with 3 additions and 4 deletions.
config/llama-2.yml: 3 additions & 4 deletions
@@ -3,7 +3,7 @@ model_type: LlamaForCausalLM
 tokenizer_type: LlamaTokenizer
 is_llama_derived_model: true

-load_in_8bit: true
+load_in_8bit: false
 load_in_4bit: false
 strict: false

@@ -34,8 +34,8 @@ pad_to_sequence_len: false

 adapter: lora
 lora_model_dir:
-lora_r: 32
-lora_alpha: 16
+lora_r: 16
+lora_alpha: 32
 lora_dropout: 0.05
 lora_target_linear: true
 lora_fan_in_fan_out:
@@ -44,7 +44,6 @@ wandb_project:
 wandb_entity:
 wandb_watch:
 wandb_run_id:
-wandb_log_model:

 gradient_accumulation_steps: 1
 micro_batch_size: 32
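For quick reference, here is a sketch of how the touched keys read after this commit, with brief notes on what each one controls. The notes assume the file follows the Axolotl-style fine-tuning config schema that these key names suggest; the diff itself does not name the training framework.

```yaml
# Relevant keys in config/llama-2.yml after this commit (assumed Axolotl-style schema)
load_in_8bit: false        # base model is no longer loaded with 8-bit quantization
load_in_4bit: false        # 4-bit loading was already off, so base weights stay unquantized

adapter: lora
lora_r: 16                 # LoRA rank halved from 32, shrinking the adapter parameter count
lora_alpha: 32             # LoRA alpha doubled from 16
lora_dropout: 0.05
lora_target_linear: true   # apply LoRA adapters to the linear layers
```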

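The lora_r / lora_alpha swap also changes the effective adapter scaling. In the common LoRA formulation (for example as implemented in the PEFT library; assumed here, not stated in the commit), the low-rank update is scaled by alpha / r, so this change moves the scale from 0.5 to 2.0 while halving the adapter rank:

```latex
\Delta W = \frac{\alpha}{r}\, B A
\qquad \text{before: } \frac{16}{32} = 0.5
\qquad \text{after: } \frac{32}{16} = 2.0
```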