GilbertAkham committed on
Commit
af43127
·
verified ·
1 Parent(s): 10715a6

Upload handler.py

Browse files
Files changed (1) hide show
  1. handler.py +36 -0
handler.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # handler.py
2
+ import torch
3
+ from transformers import AutoTokenizer, AutoModelForCausalLM
4
+ from peft import PeftModel
5
+
6
+ # Model path in the repo
7
+ BASE_MODEL = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
8
+ ADAPTER_PATH = "."
9
+
10
class EndpointHandler:
    """Custom inference handler: DeepSeek-R1-Distill-Qwen-1.5B + LoRA adapter.

    Loads the base causal-LM, applies the PEFT adapter, merges the adapter
    weights into the base model, and serves text generation requests.
    """

    def __init__(self, path=""):
        """Load the tokenizer and the merged (base + adapter) model.

        Args:
            path: Directory containing the adapter weights, as supplied by
                the hosting runtime. Falls back to ``ADAPTER_PATH`` when
                empty (preserves the original behavior).
        """
        print("Loading tokenizer and model...")
        self.tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
        base_model = AutoModelForCausalLM.from_pretrained(
            BASE_MODEL, torch_dtype=torch.float16, device_map="auto", trust_remote_code=True
        )
        # Bug fix: honor the `path` argument the endpoint runtime passes in;
        # previously it was ignored and ADAPTER_PATH (".") was always used,
        # which only works when the process happens to start in the repo root.
        adapter_dir = path or ADAPTER_PATH
        self.model = PeftModel.from_pretrained(base_model, adapter_dir)
        # Merge LoRA weights into the base model so generation runs without
        # the PEFT wrapper indirection.
        self.model = self.model.merge_and_unload()
        self.model.eval()
        print("Model loaded successfully.")

    def __call__(self, data):
        """Handle one inference request.

        Args:
            data: Request payload dict. ``"inputs"`` holds the prompt string;
                an optional ``"parameters"`` dict may override generation
                kwargs (``max_new_tokens``, ``temperature``, ``top_p``, ...).

        Returns:
            ``{"generated_text": text}`` where *text* is the full decoded
            sequence (prompt followed by the completion), special tokens
            stripped — same shape as the original implementation.
        """
        prompt = data.get("inputs", "")
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)
        # Defaults are unchanged from the original hard-coded values; a
        # request-supplied "parameters" dict may override any of them.
        gen_kwargs = {
            "max_new_tokens": 512,
            "temperature": 0.7,
            "top_p": 0.9,
            "do_sample": True,
            # eos as pad: the tokenizer may define no pad token.
            "pad_token_id": self.tokenizer.eos_token_id,
            "eos_token_id": self.tokenizer.eos_token_id,
        }
        gen_kwargs.update(data.get("parameters") or {})
        with torch.no_grad():
            outputs = self.model.generate(**inputs, **gen_kwargs)
        text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        return {"generated_text": text}