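# LoRA fine-tuning config for Qwen2 0.5B Instruct on a single device (CPU).
# Assumes the base model has already been downloaded to /tmp/Qwen2-0.5B-Instruct,
# e.g. via `tune download Qwen/Qwen2-0.5B-Instruct --output-dir /tmp/Qwen2-0.5B-Instruct`
# (a typical torchtune CLI invocation; adjust for your installed version).
# Intended for a single-device LoRA recipe such as lora_finetune_single_device:
#   tune run lora_finetune_single_device --config <path/to/this/config.yaml>

# Tokenizer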
tokenizer:
  _component_: torchtune.models.qwen2.qwen2_tokenizer
  path: /tmp/Qwen2-0.5B-Instruct/vocab.json
  merges_file: /tmp/Qwen2-0.5B-Instruct/merges.txt
  max_seq_len: 512
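
# Dataset and dataloader: the cleaned Alpaca instruction-tuning dataset,
# shuffled, with one sample per batch.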
dataset:
  _component_: torchtune.datasets.alpaca_cleaned_dataset
seed: null
shuffle: True
batch_size: 1
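
# Loss: standard token-level cross entropy.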
loss:
  _component_: torch.nn.CrossEntropyLoss
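
# Model: Qwen2 0.5B with LoRA adapters (rank 32, alpha 64) on the attention
# q/k/v projections; no adapters are applied to the MLP layers.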
model:
  _component_: torchtune.models.qwen2.lora_qwen2_0_5b
  lora_attn_modules: ['q_proj', 'k_proj', 'v_proj']
  apply_lora_to_mlp: False
  lora_rank: 32
  lora_alpha: 64
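
# Checkpointing: load and save weights in Hugging Face safetensors format.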
checkpointer:
  _component_: torchtune.training.FullModelHFCheckpointer
  checkpoint_dir: /tmp/Qwen2-0.5B-Instruct
  checkpoint_files: [
    model.safetensors
  ]
  recipe_checkpoint: null
  output_dir: /tmp/Qwen2-0.5B-Instruct
  model_type: QWEN2
resume_from_checkpoint: False
save_adapter_weights_only: False
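
# Environment: train on CPU in full fp32 precision.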
device: cpu
dtype: fp32
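
# Memory / speed trade-offs: activation checkpointing saves memory at the cost
# of extra compute; torch.compile is left disabled.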
enable_activation_checkpointing: True
compile: False
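
# Any field above can be overridden at launch time with key=value arguments, e.g.
# (assuming the lora_finetune_single_device recipe):
#   tune run lora_finetune_single_device --config <path/to/this/config.yaml> batch_size=2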