import torch
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    BitsAndBytesConfig,
    DataCollatorForLanguageModeling,
    TrainingArguments,
    Trainer,
)
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from datasets import load_dataset
model_id = "mistralai/Mistral-7B-Instruct-v0.2"

tokenizer = AutoTokenizer.from_pretrained(model_id)
# Mistral's tokenizer ships without a pad token; the max_length padding
# used below would raise an error without one.
tokenizer.pad_token = tokenizer.eos_token

# Load the base model in 4-bit via BitsAndBytesConfig (passing the bare
# load_in_4bit kwarg directly is deprecated in recent transformers releases).
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
    device_map="auto",
)
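# Standard prep step before training on a k-bit quantized base: casts norm
# layers to fp32 and enables input gradients for the frozen backbone.
model = prepare_model_for_kbit_training(model)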
lora_config = LoraConfig(
    r=8,                                  # rank of the LoRA update matrices
    lora_alpha=16,                        # scaling factor (alpha / r = 2.0)
    target_modules=["q_proj", "v_proj"],  # attach adapters to attention query/value projections
    lora_dropout=0.05,
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
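model.print_trainable_parameters()  # sanity check: only the LoRA weights should be trainable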
# The original snippet used `dataset` without defining it; the placeholder
# load below assumes a local JSONL file with a "text" column -- swap in your
# own data source here.
dataset = load_dataset("json", data_files="train.jsonl", split="train")

def tokenize(batch):
    return tokenizer(batch["text"], truncation=True, padding="max_length", max_length=256)

dataset = dataset.map(tokenize, batched=True)
args = TrainingArguments(
    output_dir="./lora-output",
    per_device_train_batch_size=1,
    gradient_accumulation_steps=4,  # effective batch size of 4 per device
    num_train_epochs=1,
    fp16=True,
    logging_steps=10,
    save_strategy="epoch",
)
# DataCollatorForLanguageModeling with mlm=False copies input_ids into labels,
# which Trainer needs to compute a causal-LM loss; without it, training fails
# because the model returns no loss.
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=dataset,
    data_collator=data_collator,
)
trainer.train()
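
# Persist only the LoRA adapter weights (a few MB) rather than the full model;
# the adapter subdirectory name here is just a suggested convention.
model.save_pretrained("./lora-output/adapter")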