KozmosAI committed
Commit 2d88d28 · verified · 1 parent: c146e6c

Create kozmosai

Files changed (1): kozmosai (+45, -0)
kozmosai ADDED
 
import torch
from datasets import load_dataset
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    BitsAndBytesConfig,
    DataCollatorForLanguageModeling,
    TrainingArguments,
    Trainer,
)
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

model_id = "mistralai/Mistral-7B-Instruct-v0.2"

tokenizer = AutoTokenizer.from_pretrained(model_id)
# Mistral's tokenizer has no pad token by default; reuse EOS so padding works.
tokenizer.pad_token = tokenizer.eos_token

# Load the base model in 4-bit NF4 so the 7B weights fit a single-GPU budget.
# (Passing load_in_4bit directly to from_pretrained is deprecated in recent
# transformers releases; BitsAndBytesConfig is the supported path.)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.float16,
    ),
    device_map="auto",
)
model = prepare_model_for_kbit_training(model)

# LoRA adapters on the attention query/value projections; only these are trained.
lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    target_modules=["q_proj", "v_proj"],
    lora_dropout=0.05,
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()

# The original script called dataset.map() without ever defining `dataset`.
# Any dataset with a "text" column works; the file name here is a placeholder.
dataset = load_dataset("text", data_files={"train": "train.txt"})["train"]

def tokenize(batch):
    return tokenizer(batch["text"], truncation=True, padding="max_length", max_length=256)

dataset = dataset.map(tokenize, batched=True, remove_columns=dataset.column_names)

args = TrainingArguments(
    output_dir="./lora-output",
    per_device_train_batch_size=1,
    gradient_accumulation_steps=4,
    num_train_epochs=1,
    fp16=True,
    logging_steps=10,
    save_strategy="epoch",
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=dataset,
    # mlm=False makes the collator derive causal-LM labels from input_ids.
    data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
)

trainer.train()
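
Training produces only the small LoRA adapter weights, not a full model checkpoint. A minimal sketch of loading the adapter back for inference, assuming it was saved to ./lora-output (e.g. via trainer.save_model("./lora-output"); Trainer's own epoch checkpoints land in subdirectories like ./lora-output/checkpoint-N) and using an illustrative prompt:

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-Instruct-v0.2",
    torch_dtype=torch.float16,
    device_map="auto",
)
# Attach the trained LoRA adapter on top of the frozen base weights.
model = PeftModel.from_pretrained(base, "./lora-output")
model.eval()

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
inputs = tokenizer("Hello!", return_tensors="pt").to(model.device)  # illustrative prompt
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(out[0], skip_special_tokens=True))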