cd /root/autodl-tmp/Why_Hallucinations_in_RAG/LLaMA-Factory
cd /root/autodl-tmp/proj/LLaMA-Factory
python /root/autodl-tmp/proj/main_run_benchmark.py
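Note: LLaMA-Factory only picks up the TrustworthyLLM_* datasets used below if they are registered in data/dataset_info.json under the LLaMA-Factory root. The file name and column mapping in this sketch are placeholders (not taken from this repo); adjust them to the actual JSON files.
# register_dataset.py - hypothetical helper for adding one dataset_info.json entry
import json
from pathlib import Path

info_path = Path("data/dataset_info.json")  # relative to the LLaMA-Factory root
info = json.loads(info_path.read_text())

# file_name and column names are assumptions; alpaca-style instruction/input/output fields
info["TrustworthyLLM_Cognition_Finetuning_Dataset"] = {
    "file_name": "trustworthyllm_cognition_finetuning.json",
    "columns": {"prompt": "instruction", "query": "input", "response": "output"},
}

info_path.write_text(json.dumps(info, indent=2, ensure_ascii=False))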
python /root/autodl-tmp/proj/LLaMA-Factory/src/train.py --stage sft --do_train --model_name_or_path /root/autodl-tmp/model/gemma-2-9b-it --create_new_adapter --dataset TrustworthyLLM_Cognition_Finetuning_Dataset --template gemma --finetuning_type lora --lora_target q_proj,v_proj --output_dir /root/autodl-tmp/model/TrustworthyLLM_Cognition_Finetuning_Model_Gemma --overwrite_cache --per_device_train_batch_size 1 --gradient_accumulation_steps 4 --lr_scheduler_type cosine --logging_steps 10 --save_steps 1000 --learning_rate 4e-5 --num_train_epochs 1.0 --plot_loss --fp16
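The next two commands train on top of /root/autodl-tmp/model/TrustworthyLLM_Cognition_Finetuning_Model_Gemma_Merged, which no command in this file produces; presumably the adapter from the command above is merged first, as done with export_model.py for the Llama models below. A rough peft-based equivalent of that merge step (a sketch, not the repo's own step; paths copied from the commands above):
# merge_gemma_cognition_adapter.py - fold the LoRA weights into the Gemma base
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = "/root/autodl-tmp/model/gemma-2-9b-it"
adapter = "/root/autodl-tmp/model/TrustworthyLLM_Cognition_Finetuning_Model_Gemma"
out = "/root/autodl-tmp/model/TrustworthyLLM_Cognition_Finetuning_Model_Gemma_Merged"

model = AutoModelForCausalLM.from_pretrained(base, torch_dtype=torch.float16, device_map="cpu")
model = PeftModel.from_pretrained(model, adapter)
model = model.merge_and_unload()  # merge the adapter, then save a standalone checkpoint
model.save_pretrained(out)
AutoTokenizer.from_pretrained(base).save_pretrained(out)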
python /root/autodl-tmp/proj/LLaMA-Factory/src/train.py --stage sft --do_train --model_name_or_path /root/autodl-tmp/model/TrustworthyLLM_Cognition_Finetuning_Model_Gemma_Merged --create_new_adapter --dataset TrustworthyLLM_PromptSensitive_Finetuning_Dataset --template gemma --finetuning_type lora --lora_target q_proj,v_proj --output_dir /root/autodl-tmp/model/TrustworthyLLM_Cognition_PSQA_Finetuning_Model_Gemma --overwrite_cache --per_device_train_batch_size 1 --gradient_accumulation_steps 4 --lr_scheduler_type cosine --logging_steps 10 --save_steps 1000 --learning_rate 4e-5 --num_train_epochs 10.0 --plot_loss --fp16
python /root/autodl-tmp/proj/LLaMA-Factory/src/train.py --stage sft --do_train --model_name_or_path /root/autodl-tmp/model/TrustworthyLLM_Cognition_Finetuning_Model_Gemma_Merged --create_new_adapter --dataset TrustworthyLLM_QA_Finetuning_Dataset --template gemma --finetuning_type lora --lora_target q_proj,v_proj --output_dir /root/autodl-tmp/model/TrustworthyLLM_Cognition_QA_Finetuning_Model_Gemma --overwrite_cache --per_device_train_batch_size 1 --gradient_accumulation_steps 4 --lr_scheduler_type cosine --logging_steps 10 --save_steps 1000 --learning_rate 5e-5 --num_train_epochs 3.0 --plot_loss --fp16
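A quick sanity check of one of the Gemma adapters above before running the benchmark: load the merged base plus the new adapter and generate once (a sketch; the prompt and generation settings are arbitrary, paths from the commands above):
# smoke_test_gemma_qa.py - transformers + peft smoke test
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = "/root/autodl-tmp/model/TrustworthyLLM_Cognition_Finetuning_Model_Gemma_Merged"
adapter = "/root/autodl-tmp/model/TrustworthyLLM_Cognition_QA_Finetuning_Model_Gemma"

tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(base, torch_dtype=torch.float16, device_map="auto")
model = PeftModel.from_pretrained(model, adapter)

# gemma chat template; the question is only an example
messages = [{"role": "user", "content": "What is retrieval-augmented generation?"}]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
output = model.generate(inputs, max_new_tokens=128, do_sample=False)
print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))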
Train Cognition
python src/train_bash.py --stage sft --do_train --model_name_or_path models/llama-2-7b-chat-hf --create_new_adapter --dataset TrustworthyLLM_Cognition_Finetuning_Dataset --template llama2 --finetuning_type lora --lora_target q_proj,v_proj --output_dir models/TrustworthyLLM_Cognition_Finetuning_Model --overwrite_cache --per_device_train_batch_size 4 --gradient_accumulation_steps 4 --lr_scheduler_type cosine --logging_steps 10 --save_steps 1000 --learning_rate 4e-5 --num_train_epochs 3.0 --plot_loss --fp16
export Cognition
python src/export_model.py --model_name_or_path models/llama-2-7b-chat-hf --adapter_name_or_path models/TrustworthyLLM_Cognition_Finetuning_Model --template llama2 --finetuning_type lora --export_dir models/TrustworthyLLM_Cognition_Finetuning_Model_Merged --export_size 2 --export_legacy_format False
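After this export, models/TrustworthyLLM_Cognition_Finetuning_Model_Merged should load as a plain checkpoint with no adapter attached; a minimal load check (a sketch, the prompt is an example only):
# check_export.py - verify the exported directory loads as a standalone model
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

merged_dir = "models/TrustworthyLLM_Cognition_Finetuning_Model_Merged"
tokenizer = AutoTokenizer.from_pretrained(merged_dir)
model = AutoModelForCausalLM.from_pretrained(merged_dir, torch_dtype=torch.float16, device_map="auto")

prompt = "[INST] Briefly introduce yourself. [/INST]"  # llama-2 chat format
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=64)[0], skip_special_tokens=True))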
Train Cognition_QA
python src/train_bash.py --stage sft --do_train --model_name_or_path models/TrustworthyLLM_Cognition_Finetuning_Model_Merged --create_new_adapter --dataset TrustworthyLLM_QA_Finetuning_Dataset --template llama2 --finetuning_type lora --lora_target q_proj,v_proj --output_dir models/TrustworthyLLM_Cognition_QA_Finetuning_Model --overwrite_cache --per_device_train_batch_size 4 --gradient_accumulation_steps 4 --lr_scheduler_type cosine --logging_steps 10 --save_steps 1000 --learning_rate 5e-5 --num_train_epochs 3.0 --plot_loss --fp16
Train Cognition_PSQA
python src/train_bash.py --stage sft --do_train --model_name_or_path models/TrustworthyLLM_Cognition_Finetuning_Model_Merged --create_new_adapter --dataset TrustworthyLLM_PromptSensitive_Finetuning_Dataset --template llama2 --finetuning_type lora --lora_target q_proj,v_proj --output_dir models/TrustworthyLLM_Cognition_PSQA_Finetuning_Model --overwrite_cache --per_device_train_batch_size 4 --gradient_accumulation_steps 4 --lr_scheduler_type cosine --logging_steps 10 --save_steps 1000 --learning_rate 4e-5 --num_train_epochs 10.0 --plot_loss --fp16
Train Cognition_QA_large
python src/train_bash.py --stage sft --do_train --model_name_or_path models/llama-2-7b-chat-hf --create_new_adapter --dataset TrustworthyLLM_Cognition_QA_Finetuning_Dataset --template llama2 --finetuning_type lora --lora_target q_proj,v_proj --output_dir models/TrustworthyLLM_Cognition_QA_Finetuning_Model --overwrite_cache --per_device_train_batch_size 4 --gradient_accumulation_steps 4 --lr_scheduler_type cosine --logging_steps 10 --save_steps 1000 --learning_rate 4e-5 --num_train_epochs 3.0 --plot_loss --fp16
Train Cognition_PSQA_large
python src/train_bash.py --stage sft --do_train --model_name_or_path models/llama-2-7b-chat-hf --create_new_adapter --dataset TrustworthyLLM_Cognition_PSQA_Finetuning_Dataset --template llama2 --finetuning_type lora --lora_target q_proj,v_proj --output_dir models/TrustworthyLLM_Cognition_PSQA_Finetuning_Model --overwrite_cache --per_device_train_batch_size 4 --gradient_accumulation_steps 4 --lr_scheduler_type cosine --logging_steps 10 --save_steps 1000 --learning_rate 4e-5 --num_train_epochs 3.0 --plot_loss --fp16
Train PS-1
python src/train_bash.py --stage sft --do_train --model_name_or_path models/llama-2-7b-chat-hf --create_new_adapter --dataset TrustworthyLLM_PromptSensitive_Finetuning_Dataset --template llama2 --finetuning_type lora --lora_target q_proj,v_proj --output_dir models/TrustworthyLLM_PS_Finetuning_Model --overwrite_cache --per_device_train_batch_size 4 --gradient_accumulation_steps 4 --lr_scheduler_type cosine --logging_steps 10 --save_steps 1000 --learning_rate 5e-5 --num_train_epochs 10.0 --plot_loss --fp16
export PS-1
python src/export_model.py --model_name_or_path models/llama-2-7b-chat-hf --adapter_name_or_path models/TrustworthyLLM_PS_Finetuning_Model --template llama2 --finetuning_type lora --export_dir models/TrustworthyLLM_PS_Finetuning_Model_Merged --export_size 2 --export_legacy_format False
Train PS-2
python src/train_bash.py --stage sft --do_train --model_name_or_path models/TrustworthyLLM_PS_Finetuning_Model_Merged --create_new_adapter --dataset TrustworthyLLM_PromptSensitive_Finetuning_Dataset --template llama2 --finetuning_type lora --lora_target q_proj,v_proj --output_dir models/TrustworthyLLM_PS_Finetuning_Model-2 --overwrite_cache --per_device_train_batch_size 4 --gradient_accumulation_steps 4 --lr_scheduler_type cosine --logging_steps 10 --save_steps 1000 --learning_rate 5e-5 --num_train_epochs 10.0 --plot_loss --fp16
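Every run above passes --plot_loss; independently of that, the loss history can be read back from the trainer_state.json that the HF Trainer writes into each output_dir. A sketch (the output_dir below is just one of the runs above):
# plot_loss.py - read the Trainer log history saved with a run and plot the loss curve
import json
import matplotlib.pyplot as plt

state = json.load(open("models/TrustworthyLLM_PS_Finetuning_Model-2/trainer_state.json"))
steps = [e["step"] for e in state["log_history"] if "loss" in e]
losses = [e["loss"] for e in state["log_history"] if "loss" in e]

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.savefig("ps2_loss.png")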