Keyurjotaniya007 committed on
Commit
547c16c
·
verified ·
1 Parent(s): 78dc5b0

Upload 3 files

Browse files
Files changed (3) hide show
  1. app.py +41 -64
  2. chatbot.py +108 -0
  3. requirements.txt +6 -1
app.py CHANGED
@@ -1,64 +1,41 @@
1
import gradio as gr
from huggingface_hub import InferenceClient

"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream an assistant reply for *message* given the chat *history*.

    Builds an OpenAI-style message list (system prompt, prior turns, new user
    message) and yields the accumulated response text after each streamed
    token so gr.ChatInterface can render it incrementally.
    """
    messages = [{"role": "system", "content": system_message}]

    # Replay prior turns; skip the empty side of a (user, assistant) pair.
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # Distinct loop variable: the original shadowed the `message` parameter
    # inside this loop, which is confusing and error-prone.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # The final streamed chunk can carry a None delta; appending it
        # unguarded raised "TypeError: can only concatenate str ... NoneType".
        if token:
            response += token
            yield response


"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)


if __name__ == "__main__":
    demo.launch()
 
1
import streamlit as st
import time
from chatbot import generate_html_css_from_image

# Page chrome: wide layout, centered header, and an upload hint banner.
st.set_page_config(page_title="Gemini 2.5 Flash HTML/CSS Chatbot", layout="wide")

st.markdown("""
<div style='text-align: center; padding: 10px 0;'>
    <h1>Welcome, Gemini 2.5 Flash HTML/CSS Chatbot</h1>
</div>
<div style='text-align: center;'>
    <p style='font-size: 18px; background-color: #e6f0ff; padding: 10px; border-radius: 8px; display: inline-block;'>
        📁 Please upload an image of a website UI
    </p>
</div>
""", unsafe_allow_html=True)

# Narrow upload column on the left, wide output column on the right.
left, right = st.columns([1, 7])

with left:
    st.markdown("### 📁 Upload Image")
    uploaded_image = st.file_uploader("Drag and drop or browse", type=["jpg", "jpeg", "png"])

with right:
    if uploaded_image:
        with st.spinner("Generating HTML + CSS using Gemini 2.5 Flash..."):
            try:
                output = generate_html_css_from_image(uploaded_image)
                if output:
                    st.subheader("💬 Generated HTML + CSS:")

                    # "Typewriter" effect. Render in fixed-size chunks rather
                    # than one character at a time: placeholder.code() forces a
                    # full widget redraw per call, so per-character updates on a
                    # multi-thousand-character HTML document took minutes.
                    placeholder = st.empty()
                    chunk_size = 40
                    for end in range(chunk_size, len(output) + chunk_size, chunk_size):
                        placeholder.code(output[:end], language="html")
                        time.sleep(0.01)
                else:
                    st.error("No output generated. Please try again.")
            except Exception as e:
                # Surface any backend failure (rate limit, bad image, ...) in the UI.
                st.error(f"Error: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
chatbot.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import time
2
+ import base64
3
+ import io
4
+ from PIL import Image
5
+ from bs4 import BeautifulSoup
6
+ from langchain_google_genai import ChatGoogleGenerativeAI
7
+ from langchain_core.messages import HumanMessage
8
+
9
def resize_and_encode_image(image_file, max_size=(400, 400)):
    """Downscale an uploaded image and return it as a base64 JPEG data URL.

    Args:
        image_file: File-like object (e.g. a Streamlit UploadedFile) readable
            by PIL.
        max_size: (width, height) bound for the in-place thumbnail; aspect
            ratio is preserved.

    Returns:
        str: ``data:image/jpeg;base64,...`` URL suitable for multimodal
        prompts.
    """
    img = Image.open(image_file)
    img.thumbnail(max_size)
    # JPEG cannot store alpha or palette data. The app accepts PNG uploads,
    # which are commonly mode "RGBA" or "P" and made img.save(..., "JPEG")
    # raise "cannot write mode RGBA as JPEG" — normalize to RGB first.
    if img.mode not in ("RGB", "L"):
        img = img.convert("RGB")
    buffered = io.BytesIO()
    img.save(buffered, format="JPEG")
    image_bytes = buffered.getvalue()
    base64_str = base64.b64encode(image_bytes).decode("utf-8")
    return f"data:image/jpeg;base64,{base64_str}"
17
+
18
def beautify_html(html_code):
    """Return *html_code* re-indented by BeautifulSoup's prettifier."""
    return BeautifulSoup(html_code, "html.parser").prettify()
21
+
22
def generate_html_css_from_image(image_file):
    """Generate a complete HTML document (with embedded CSS) from a UI screenshot.

    Sends the image plus a detailed front-end-engineering prompt to Gemini 2.5
    Flash via LangChain, retrying on rate limits, then restructures the model
    output into a single well-formed HTML file.

    Args:
        image_file: File-like image object (forwarded to
            ``resize_and_encode_image``).

    Returns:
        str | None: The assembled HTML document, or ``None`` if no output was
        produced after all retries.

    Raises:
        Exception: Any non-rate-limit error from the LLM call is re-raised.
    """
    image_data_url = resize_and_encode_image(image_file)

    prompt_text = """
    You are an expert front-end developer.

    The input is a screenshot of a website UI. Carefully analyze its layout and generate accurate, semantic, and maintainable HTML and CSS.

    Follow these professional guidelines:

    1) Structure & Semantics:
    - Use HTML5 semantic tags that match the visual hierarchy (e.g., <header>, <nav>, <main>, <section>, <article>, <aside>, <footer>)
    - Reflect layout grouping using appropriate containers and divs where needed

    2) Layout & Responsiveness:
    - Use Flexbox or CSS Grid for layout
    - Include responsive breakpoints (mobile-first) with at least one media query
    - Ensure layout adapts well to mobile screen sizes

    3) CSS Practices:
    - Keep CSS in a <style> block or separate file (no inline styles)
    - Use class names that follow a clean naming convention (e.g., BEM or descriptive naming)
    - Group CSS rules logically (layout, typography, components)

    4) Accessibility & UX:
    - Add accessible markup: alt text, ARIA roles, labels
    - Ensure good contrast and keyboard navigability

    5) Content & Comments:
    - Use meaningful placeholder text (not lorem ipsum)
    - Add short code comments to explain each major section

    6) Output:
    - The output should be a complete single HTML file with embedded CSS
    - Preserve the visual structure and content flow of the original screenshot as closely as possible
    - Do not skip or summarize any sections

    Assume this is for real production-ready front-end code generation from a web UI screenshot.
    """

    prompt = [
        HumanMessage(
            content=[
                {"type": "text", "text": prompt_text},
                {"type": "image_url", "image_url": {"url": image_data_url, "mime_type": "image/jpeg"}}
            ]
        )
    ]

    llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash", temperature=0)
    max_retries = 3
    generated_code = None

    # Retry only on rate limiting; back off 30s, 60s, 90s. Other errors
    # propagate to the caller (the UI shows them).
    for attempt in range(max_retries):
        try:
            response = llm.invoke(prompt)
            generated_code = response.content
            break
        except Exception as e:
            if "ResourceExhausted" in str(e) or "429" in str(e):
                time.sleep(30 * (attempt + 1))
            else:
                raise  # bare raise preserves the original traceback

    if not generated_code:
        # All retries were rate-limited (or the model returned nothing).
        return None

    # Pull any <style> block out of the model output so the CSS can be
    # re-embedded in a canonical <head>.
    soup = BeautifulSoup(generated_code, "html.parser")
    style_tag = soup.find("style")
    # style_tag.string is None when the tag has more than one child node;
    # get_text() returns the full CSS in both cases.
    css_code = style_tag.get_text() if style_tag else ""
    html_without_style = str(soup).replace(str(style_tag), "") if style_tag else str(soup)

    cleaned_html = beautify_html(html_without_style)

    # The model may emit a fragment without a <head>; splitting blindly on
    # "</head>" previously raised IndexError. Fall back to the whole document.
    if "</head>" in cleaned_html:
        body_html = cleaned_html.split("</head>", 1)[1]
    else:
        body_html = cleaned_html

    final_output = f"""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<style>
{css_code}
</style>
</head>
{body_html}
</html>"""

    return final_output
requirements.txt CHANGED
@@ -1 +1,6 @@
1
- huggingface_hub==0.25.2
 
 
 
 
 
 
1
+ streamlit
2
+ Pillow
3
+ beautifulsoup4
4
+ langchain
5
+ langchain-google-genai
6
+ google-generativeai