import io

import streamlit as st
from gtts import gTTS
from PIL import Image
from transformers import pipeline

# Page header
st.title("🖼️ → 📖 Image-to-Story Demo")
st.write("Upload an image and watch as it’s captioned, turned into a short story, and even read aloud!")

# Load and cache pipelines so the models are downloaded and built only once
@st.cache_resource
def load_captioner():
    return pipeline("image-to-text", model="unography/blip-large-long-cap")

@st.cache_resource
def load_story_gen():
    return pipeline(
        "text-generation",
        model="gpt2",
        tokenizer="gpt2",
    )

captioner = load_captioner()
story_gen = load_story_gen()

# 1) Image upload
uploaded = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])
if uploaded:
    # Convert to RGB so PNGs with an alpha channel don't trip up the model
    img = Image.open(uploaded).convert("RGB")
    st.image(img, use_container_width=True)

    # 2) Generate caption
    with st.spinner("Generating caption…"):
        caps = captioner(img)
        # `caps` is a list of dicts like [{"generated_text": "..."}]
        caption = caps[0]["generated_text"]
    st.write("**Caption:**", caption)

    # 3) Generate story from caption.
    # Streamlit reruns the whole script on every interaction (including the
    # audio button below), so keep the sampled story in session_state;
    # otherwise each click would sample a fresh story and the audio would
    # not match the text on screen.
    if st.session_state.get("source_caption") != caption:
        with st.spinner("Spinning up a story…"):
            story_out = story_gen(
                caption,
                max_length=200,  # total length in tokens, prompt included
                num_return_sequences=1,
                do_sample=True,
                top_p=0.9,
                pad_token_id=story_gen.tokenizer.eos_token_id,  # silence GPT-2's missing-pad-token warning
            )
        st.session_state["story"] = story_out[0]["generated_text"]
        st.session_state["source_caption"] = caption
    story = st.session_state["story"]
    st.write("**Story:**", story)

    # 4) Play story as audio
    if st.button("🔊 Play Story Audio"):
        with st.spinner("Generating audio…"):
            tts = gTTS(text=story, lang="en")
            buf = io.BytesIO()
            tts.write_to_fp(buf)
            buf.seek(0)
        st.audio(buf.read(), format="audio/mp3")

# --- Alternative minimal demo, kept for reference: sentiment analysis ------
"""
import streamlit as st
from transformers import pipeline

def main():
    sentiment_pipeline = pipeline(model="distilbert/distilbert-base-uncased-finetuned-sst-2-english")
    st.title("Sentiment Analysis with Hugging Face Spaces")
    st.write("Enter a sentence to analyze its sentiment:")
    user_input = st.text_input("Your sentence")
    if user_input:
        result = sentiment_pipeline(user_input)
        sentiment = result[0]["label"]
        confidence = result[0]["score"]
        st.write(f"Sentiment: {sentiment}")
        st.write(f"Confidence: {confidence:.2f}")

if __name__ == "__main__":
    main()
"""
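
# ---------------------------------------------------------------------------
# A minimal sketch of how to run this demo locally. The package names below
# are the usual providers of the imports above; exact versions and the
# filename `app.py` are assumptions, not part of the original:
#
#   pip install streamlit transformers torch pillow gtts
#   streamlit run app.py
#
# On Hugging Face Spaces, the same packages would go in requirements.txt.
# ---------------------------------------------------------------------------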