# Phi-3 Mini (4k instruct) LoRA finetuning config — torchtune recipe used by the
# ExecuTorch llm_pte_finetuning example. (Scraper navigation header removed.)
---
# Tokenizer: SentencePiece model shipped with the Phi-3-mini checkpoint.
tokenizer:
  _component_: torchtune.models.phi3.phi3_mini_tokenizer
  path: /tmp/Phi-3-mini-4k-instruct/tokenizer.model
  max_seq_len: 1024

# Dataset: alpaca-style python code-instruction dataset from the example lib.
dataset:
  _component_: executorch.examples.llm_pte_finetuning.training_lib.python_code_instructions_alpaca

seed: null
shuffle: true
batch_size: 1

loss:
  _component_: torch.nn.CrossEntropyLoss

# Model: Phi-3-mini with LoRA adapters on attention q/v projections only.
model:
  _component_: torchtune.models.phi3.lora_phi3_mini
  lora_attn_modules: ['q_proj', 'v_proj']
  apply_lora_to_mlp: false
  apply_lora_to_output: false
  lora_rank: 8
  lora_alpha: 16

# Checkpointing: load/save HF-format safetensors shards.
checkpointer:
  _component_: torchtune.training.FullModelHFCheckpointer
  checkpoint_dir: /tmp/Phi-3-mini-4k-instruct
  checkpoint_files:
    - model-00001-of-00002.safetensors
    - model-00002-of-00002.safetensors
  recipe_checkpoint: null
  output_dir: /tmp/Phi-3-mini-4k-instruct/
  model_type: PHI3_MINI

resume_from_checkpoint: false
save_adapter_weights_only: false

# Runtime environment.
device: cpu
dtype: fp32

enable_activation_checkpointing: true
compile: false
43