FlameF0X committed
Commit cb13191 · verified · 1 Parent(s): a7254c4

Update app.py

Files changed (1): app.py +16 -217
app.py CHANGED
@@ -2,11 +2,6 @@ import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
 import torch
 from threading import Thread
-import requests
-from bs4 import BeautifulSoup
-from urllib.parse import quote_plus
-import re
-
 MODEL_NAMES = {
     "LFM2-350M": "LiquidAI/LFM2-350M",
     "LFM2-700M": "LiquidAI/LFM2-700M",
@@ -14,9 +9,7 @@ MODEL_NAMES = {
     "LFM2-2.6B": "LiquidAI/LFM2-2.6B",
     "LFM2-8B-A1B": "LiquidAI/LFM2-8B-A1B",
 }
-
 model_cache = {}
-
 def load_model(model_key):
     if model_key in model_cache:
         return model_cache[model_key]
@@ -30,176 +23,17 @@ def load_model(model_key):
     ).to(device)
     model_cache[model_key] = (tokenizer, model)
     return tokenizer, model
-
-def search_web(query, num_results=3):
-    """
-    Search DuckDuckGo and scrape results
-    """
-    try:
-        headers = {
-            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
-        }
-
-        # DuckDuckGo HTML search
-        search_url = f"https://html.duckduckgo.com/html/?q={quote_plus(query)}"
-        response = requests.get(search_url, headers=headers, timeout=10)
-        soup = BeautifulSoup(response.text, 'html.parser')
-
-        results = []
-        result_divs = soup.find_all('div', class_='result')
-
-        for div in result_divs[:num_results]:
-            try:
-                title_tag = div.find('a', class_='result__a')
-                snippet_tag = div.find('a', class_='result__snippet')
-
-                if title_tag and snippet_tag:
-                    title = title_tag.get_text(strip=True)
-                    url = title_tag.get('href', '')
-                    snippet = snippet_tag.get_text(strip=True)
-
-                    results.append({
-                        'title': title,
-                        'url': url,
-                        'snippet': snippet
-                    })
-            except Exception as e:
-                continue
-
-        return results
-    except Exception as e:
-        return [{'title': 'Search Error', 'url': '', 'snippet': f'Could not perform search: {str(e)}'}]
-
-def scrape_page_content(url, max_chars=2000):
-    """
-    Scrape main content from a webpage
-    """
-    try:
-        headers = {
-            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
-        }
-        response = requests.get(url, headers=headers, timeout=10)
-        soup = BeautifulSoup(response.text, 'html.parser')
-
-        # Remove script and style elements
-        for script in soup(["script", "style", "nav", "footer", "header"]):
-            script.decompose()
-
-        # Get text
-        text = soup.get_text()
-        lines = (line.strip() for line in text.splitlines())
-        chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
-        text = ' '.join(chunk for chunk in chunks if chunk)
-
-        return text[:max_chars]
-    except Exception as e:
-        return f"Could not scrape page: {str(e)}"
-
-def detect_urls(text):
-    """
-    Detect URLs in text
-    """
-    url_pattern = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
-    return re.findall(url_pattern, text)
-
-def extract_search_commands(text):
-    """
-    Extract SEARCH[query] commands from text
-    """
-    pattern = r'SEARCH\[(.*?)\]'
-    matches = re.findall(pattern, text)
-    return matches
-
-def process_with_search(message, history, tokenizer, model, enable_search):
-    """
-    Check if we need to search or scrape URLs
-    """
-    if not enable_search:
-        return message, False, None
-
-    search_results_text = ""
-    searched = False
-
-    # Check for URLs in the message - if found, scrape them
-    urls = detect_urls(message)
-    if urls:
-        for url in urls[:3]:  # Limit to 3 URLs
-            content = scrape_page_content(url, max_chars=3000)
-            search_results_text += f"\n\n[Content from {url}]\n{content}\n\n"
-            searched = True
-
-    if searched:
-        enhanced_message = f"{message}\n\n{search_results_text}"
-        return enhanced_message, True, "url"
-
-    # If no URLs, generate initial response to see if model wants to search
-    device = model.device
-    prompt = "You are LFM2, an intelligent and conversational AI assistant. You have access to web search capabilities. When you need current information or don't know something, you can use the command SEARCH[query] to search the web.\n\n"
-
-    for msg in history:
-        role = msg["role"]
-        content = msg["content"]
-        prompt += f"{role.capitalize()}: {content}\n"
-    prompt += f"User: {message}\nAssistant:"
-
-    inputs = tokenizer(prompt, return_tensors="pt").to(device)
-
-    with torch.no_grad():
-        outputs = model.generate(
-            **inputs,
-            max_new_tokens=150,
-            temperature=0.7,
-            top_p=0.9,
-            do_sample=True,
-        )
-
-    initial_response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
-
-    # Check for search commands
-    search_queries = extract_search_commands(initial_response)
-
-    if search_queries:
-        search_results_text = ""
-        for query in search_queries[:2]:  # Limit to 2 searches
-            results = search_web(query)
-            search_results_text += f"\n\n[Search results for: {query}]\n"
-            for i, result in enumerate(results, 1):
-                search_results_text += f"{i}. {result['title']}\n{result['snippet']}\n{result['url']}\n\n"
-
-        # Add search results to the conversation
-        enhanced_message = f"{message}\n\n{search_results_text}"
-        return enhanced_message, True, "search"
-
-    return message, False, None
-
-def chat_with_model(message, history, model_choice, enable_search):
+def chat_with_model(message, history, model_choice):
     tokenizer, model = load_model(model_choice)
     device = model.device
-
-    # Process search if needed
-    enhanced_message, searched, search_type = process_with_search(
-        message, history, tokenizer, model, enable_search
-    )
-
-    # Show search indicator
-    if searched:
-        if search_type == "url":
-            search_indicator = [{"role": "assistant", "content": "🔗 Reading the provided URL..."}]
-        else:
-            search_indicator = [{"role": "assistant", "content": "🔍 Searching the web..."}]
-        yield history + [{"role": "user", "content": message}] + search_indicator
-
-    prompt = "You are LFM2, an intelligent and conversational AI assistant designed to help users with questions, problem-solving, and creative tasks. You communicate clearly, reason carefully, and explain your thoughts in an easy-to-understand way. Stay friendly, professional, and curious. If the user's request is ambiguous, ask clarifying questions before proceeding.\n\nWEB SEARCH CAPABILITY:\n- You have access to web search. When you need current information or don't know something, use: SEARCH[your query here]\n- Only search when necessary - use your existing knowledge first\n- After searching, you'll receive results to incorporate into your response\n- Always cite sources when using search results\n\n"
-
+    prompt = "You are LFM2, an intelligent and conversational AI assistant designed to help users with questions, problem-solving, and creative tasks. You communicate clearly, reason carefully, and explain your thoughts in an easy-to-understand way. Stay friendly, professional, and curious. If the user's request is ambiguous, ask clarifying questions before proceeding."
     for msg in history:
         role = msg["role"]
         content = msg["content"]
         prompt += f"{role.capitalize()}: {content}\n"
-    prompt += f"User: {enhanced_message}\nAssistant:"
-
+    prompt += f"User: {message}\nAssistant:"
     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
     inputs = tokenizer(prompt, return_tensors="pt").to(device)
-
     generation_kwargs = dict(
         **inputs,
         streamer=streamer,
@@ -208,37 +42,23 @@ def chat_with_model(message, history, model_choice, enable_search):
         top_p=0.9,
         do_sample=True,
     )
-
     thread = Thread(target=model.generate, kwargs=generation_kwargs)
     thread.start()
-
     partial_text = ""
     for new_text in streamer:
         partial_text += new_text
-        # Remove SEARCH[] commands from output
-        cleaned_text = re.sub(r'SEARCH\[.*?\]', '', partial_text)
         yield history + [
             {"role": "user", "content": message},
-            {"role": "assistant", "content": cleaned_text},
+            {"role": "assistant", "content": partial_text},
         ]
-
 def create_demo():
-    with gr.Blocks(title="LiquidAI Chat Playground", theme=gr.themes.Soft()) as demo:
+    with gr.Blocks(title="LiquidAI Chat Playground") as demo:
         gr.Markdown("## 💧 LiquidAI Chat Playground")
-
-        with gr.Row():
-            model_choice = gr.Dropdown(
-                label="Select Model",
-                choices=list(MODEL_NAMES.keys()),
-                value="LFM2-1.2B",
-                scale=3
-            )
-            enable_search = gr.Checkbox(
-                label="Enable Web Search",
-                value=True,
-                scale=1
-            )
-
+        model_choice = gr.Dropdown(
+            label="Select Model",
+            choices=list(MODEL_NAMES.keys()),
+            value="LFM2-1.2B"
+        )
         chatbot = gr.Chatbot(
             label="Chat with LiquidAI",
             type="messages",
@@ -246,35 +66,14 @@ def create_demo():
         )
         msg = gr.Textbox(label="Your message", placeholder="Type something...")
         clear = gr.Button("Clear")
-
-        def add_user_message_and_respond(user_message, chat_history, model_choice, enable_search):
-            if not user_message.strip():
-                return chat_history
-            for response in chat_with_model(user_message, chat_history, model_choice, enable_search):
-                yield response
-
-        msg.submit(
-            add_user_message_and_respond,
-            [msg, chatbot, model_choice, enable_search],
-            chatbot
-        ).then(lambda: "", None, msg)
-
+        def add_user_message(user_message, chat_history):
+            chat_history = chat_history + [{"role": "user", "content": user_message}]
+            return "", chat_history
+        msg.submit(add_user_message, [msg, chatbot], [msg, chatbot], queue=False).then(
+            chat_with_model, [msg, chatbot, model_choice], chatbot
+        )
         clear.click(lambda: [], None, chatbot, queue=False)
-
-        gr.Markdown("""
-        ### Features:
-        - **Smart Search**: Model searches only when it doesn't know something
-        - **URL Access**: Paste a URL and ask the model to read it
-        - **Toggle Control**: Enable/disable web search with the checkbox
-
-        Examples:
-        - "What's the current weather in Tokyo?" → *will search if needed*
-        - "Explain quantum physics" → *answers from knowledge*
-        - "Read this article: https://example.com" → *scrapes the URL*
-        """)
-
     return demo
-
 if __name__ == "__main__":
     demo = create_demo()
     demo.queue()
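
The core pattern the slimmed-down app.py keeps is threaded streaming generation: model.generate() blocks until completion, so it runs on a worker thread while a TextIteratorStreamer yields decoded text to the caller as it is produced. A minimal standalone sketch of that pattern; only the model id comes from MODEL_NAMES above, while the prompt and generation settings (max_new_tokens etc.) are illustrative, since the diff elides the real values:

# Sketch: stream tokens from a background generate() call.
from threading import Thread
from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer

model_id = "LiquidAI/LFM2-350M"  # smallest entry in MODEL_NAMES
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

prompt = "User: Hello!\nAssistant:"  # illustrative, in app.py's prompt format
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

# skip_prompt=True keeps the echoed prompt out of the streamed text.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# generate() runs on a worker thread; the main thread consumes decoded
# chunks from the streamer as they arrive.
thread = Thread(target=model.generate, kwargs=dict(
    **inputs, streamer=streamer, max_new_tokens=64, do_sample=True, top_p=0.9
))
thread.start()

partial_text = ""
for new_text in streamer:  # iteration ends when generation completes
    partial_text += new_text
    print(new_text, end="", flush=True)
thread.join()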
 
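On the UI side, the commit rewires events as Gradio's two-step submit chain: an unqueued handler appends the user turn and clears the textbox, then a queued generator streams the reply into the Chatbot. A stub sketch of that chaining; respond stands in for chat_with_model and reads the conversation from the history (the textbox is already cleared by the first step), and demo.launch() is an assumption, since the diff cuts off at demo.queue():

import gradio as gr

def add_user_message(user_message, chat_history):
    # Step 1 (queue=False): append the user turn and clear the textbox.
    return "", chat_history + [{"role": "user", "content": user_message}]

def respond(chat_history):
    # Step 2: stand-in for the streaming chat_with_model generator; each
    # yield re-renders the Chatbot with the growing assistant reply.
    reply = ""
    for token in ["Hello", ",", " world", "!"]:
        reply += token
        yield chat_history + [{"role": "assistant", "content": reply}]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")
    msg = gr.Textbox(label="Your message")
    msg.submit(add_user_message, [msg, chatbot], [msg, chatbot], queue=False).then(
        respond, [chatbot], chatbot
    )

demo.queue()  # required so the generator handler can stream updates
if __name__ == "__main__":
    demo.launch()  # assumed entry point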