import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the fine-tuned model and tokenizer from the local checkpoint directory.
model_path = "./finetuned_codegen"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16)

# The tokenizer has no pad token by default; reuse the EOS token for padding.
tokenizer.pad_token = tokenizer.eos_token

# float16 weights are meant for GPU inference; many CPU kernels do not support
# half precision, so cast back to float32 when no GPU is available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if device.type == "cpu":
    model = model.float()
model.to(device)
model.eval()

prompts = [
    "Write a Python program to print 'Hello, your name or any other thing!'"
]

for prompt in prompts:
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=128).to(device)
    outputs = model.generate(
        **inputs,
        max_length=200,                       # total length, including the prompt tokens
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,  # silences the missing-pad-token warning
        do_sample=True,                       # sample instead of greedy decoding
        temperature=0.7,
        top_p=0.9
    )
    generated_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
    print(f"Prompt: {prompt}\nGenerated Code:\n{generated_code}\n{'-'*50}")
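For quick experiments, the same loop can be expressed with the transformers text-generation pipeline, which handles tokenization, device placement, and decoding internally. A minimal sketch, assuming the same ./finetuned_codegen checkpoint as above:

from transformers import pipeline

# device=-1 runs on CPU; pass device=0 to use the first GPU instead.
generator = pipeline("text-generation", model="./finetuned_codegen", device=-1)

result = generator(
    "Write a Python program to print 'Hello, your name or any other thing!'",
    max_new_tokens=128,  # cap on newly generated tokens, excluding the prompt
    do_sample=True,
    temperature=0.7,
    top_p=0.9,
)
print(result[0]["generated_text"])

Note that max_new_tokens bounds only the generated continuation, whereas max_length in the explicit generate() call above counts the prompt tokens as well.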