# Streamlit blog-generator app (originally hosted as a Hugging Face Space).
import streamlit as st
import torch
from transformers import GPT2LMHeadModel, AutoTokenizer, AutoModelForCausalLM


@st.cache_resource
def _load_pipeline(model_name: str = "gpt2-large"):
    """Load and return (tokenizer, model) for *model_name*.

    Streamlit re-executes this script top-to-bottom on every widget
    interaction; @st.cache_resource keeps the multi-gigabyte checkpoint
    in memory across reruns instead of reloading it on every click.
    """
    tok = AutoTokenizer.from_pretrained(model_name)
    mdl = AutoModelForCausalLM.from_pretrained(model_name)
    return tok, mdl


# Module-level names preserved so the rest of the script can use them as before.
tokenizer, model = _load_pipeline()
def generate_blog(topic, max_length=500, num_return_sequences=1):
    """Generate blog text continuing from *topic* with GPT-2.

    Parameters
    ----------
    topic : str
        Prompt text the model continues from.
    max_length : int
        Total length cap in tokens (prompt tokens included).
    num_return_sequences : int
        Number of sequences to return. NOTE(review): decoding is greedy,
        so values > 1 yield identical texts unless sampling is enabled.

    Returns
    -------
    list[str]
        Decoded generations, special tokens stripped.
    """
    # Tokenize the prompt; also build the attention mask so generate()
    # does not have to guess which positions are real tokens.
    encoded = tokenizer(topic, return_tensors="pt")

    # Inference only — disable autograd to avoid tracking gradients.
    with torch.no_grad():
        outputs = model.generate(
            encoded["input_ids"],
            attention_mask=encoded["attention_mask"],
            max_length=max_length,
            num_return_sequences=num_return_sequences,
            no_repeat_ngram_size=2,
            # GPT-2 has no pad token; reuse EOS to silence the warning
            # and pad correctly. (The original early_stopping=True was a
            # no-op without beam search and only triggered a warning.)
            pad_token_id=tokenizer.eos_token_id,
        )

    return [tokenizer.decode(seq, skip_special_tokens=True) for seq in outputs]
# --- Streamlit UI ---
st.title("Blog Generator")
topic = st.text_input("Enter the topic's name:")

if st.button("Generate Blog"):
    # strip() rejects whitespace-only input, which the plain truthiness
    # check in the original accepted and fed to the model.
    if topic.strip():
        # Generation on gpt2-large can take a while on CPU; show feedback.
        with st.spinner("Generating blog..."):
            generated_blogs = generate_blog(topic)
        for i, blog in enumerate(generated_blogs):
            st.subheader(f"Blog {i+1}")
            st.write(blog)
    else:
        st.write("Please enter a topic to generate a blog.")