Spestly committed
Commit 174910b · verified · 1 Parent(s): 442574e

Update app.py

Files changed (1)
  1. app.py +48 -60
app.py CHANGED
@@ -9,34 +9,41 @@ AI_PFP = "media/pfps/cohere-pfp.png"
 USER_PFP = "media/pfps/user-pfp.jpg"
 BANNER = "media/banner.png"
 
-if not os.path.exists(AI_PFP) or not os.path.exists(USER_PFP):
-    st.error("Missing profile pictures in media/pfps directory")
-    st.stop()
-
-
 model_info = {
     "c4ai-aya-expanse-8b": {"description": "Aya Expanse is a highly performant 8B multilingual model, designed to rival monolingual performance through innovations in instruction tuning with data arbitrage, preference training, and model merging. Serves 23 languages.", "context": "4K", "output": "4K"},
     "c4ai-aya-expanse-32b": {"description": "Aya Expanse is a highly performant 32B multilingual model, designed to rival monolingual performance through innovations in instruction tuning with data arbitrage, preference training, and model merging. Serves 23 languages.", "context": "128K", "output": "4K"},
-    "c4ai-aya-vision-8b": {"description": "Aya Vision is a state-of-the-art multimodal model excelling at a variety of critical benchmarks for language, text, and image capabilities. This 8 billion parameter variant is focused on low latency and best-in-class performance.", "context": "16K", "output": "4K"},
-    "c4ai-aya-vision-32b": {"description": "Aya Vision is a state-of-the-art multimodal model excelling at a variety of critical benchmarks for language, text, and image capabilities. Serves 23 languages. This 32 billion parameter variant is focused on state-of-art multilingual performance.", "context": "16k", "output": "4K"},
     "command-a-03-2025": {"description": "Command A is our most performant model to date, excelling at tool use, agents, retrieval augmented generation (RAG), and multilingual use cases. Command A has a context length of 256K, only requires two GPUs to run, and has 150% higher throughput compared to Command R+ 08-2024.", "context": "256K", "output": "8K"},
     "command-r7b-12-2024": {"description": "command-r7b-12-2024 is a small, fast update delivered in December 2024. It excels at RAG, tool use, agents, and similar tasks requiring complex reasoning and multiple steps.", "context": "128K", "output": "4K"},
     "command-r-plus-04-2024": {"description": "Command R+ is an instruction-following conversational model that performs language tasks at a higher quality, more reliably, and with a longer context than previous models. It is best suited for complex RAG workflows and multi-step tool use.", "context": "128K", "output": "4K"},
+    "command-r-plus": {"description": "command-r-plus is an alias for command-r-plus-04-2024, so if you use command-r-plus in the API, that's the model you're pointing to.", "context": "128K", "output": "4K"},
+    "command-r-08-2024": {"description": "Updated Command R model from August 2024.", "context": "128K", "output": "4K"},
+    "command-r-03-2024": {"description": "Instruction-following model for code generation, RAG, and agents.", "context": "128K", "output": "4K"},
+    "command-r": {"description": "Alias for command-r-03-2024.", "context": "128K", "output": "4K"},
+    "command": {"description": "Conversational model with long context capabilities.", "context": "4K", "output": "4K"},
+    "command-nightly": {"description": "Experimental nightly build (not for production).", "context": "128K", "output": "4K"},
+    "command-light": {"description": "Faster lightweight version of command.", "context": "4K", "output": "4K"},
+    "command-light-nightly": {"description": "Experimental nightly build of command-light.", "context": "128K", "output": "4K"},
+    "c4ai-aya-vision-8b": {"description": "Aya Vision is an 8B vision-language model enabling image-based chat and analysis.", "context": "4K", "output": "4K"},
+    "c4ai-aya-vision-32b": {"description": "Aya Vision is a 32B vision-language model with advanced image understanding and reasoning.", "context": "128K", "output": "4K"}
 }
 
-
 with st.sidebar:
     st.image(BANNER, use_container_width=True)
-    st.markdown("Hugging Face 🤗 Community UI (Vision Model support coming soon)")
     st.title("Settings")
     api_key = st.text_input("Cohere API Key", type="password")
     selected_model = st.selectbox("Model", options=list(model_info.keys()))
-    def clear_chat():
+    if selected_model.startswith("c4ai-aya-vision"):
+        uploaded = st.file_uploader("Upload image", type=["png","jpg","jpeg"])
+        if uploaded:
+            data = uploaded.read()
+            session_image = base64.b64encode(data).decode('utf-8')
+            st.session_state.image_data = session_image
+    if st.button("Clear Chat"):
         st.session_state.messages = []
         st.session_state.first_message_sent = False
-    st.button("Clear Chat", on_click=clear_chat)
+        st.session_state.image_data = None
+        st.rerun()
     st.divider()
-    st.image(AI_PFP, width=60)
     st.subheader(selected_model)
     st.markdown(model_info[selected_model]["description"])
     st.caption(f"Context: {model_info[selected_model]['context']}")
@@ -47,57 +54,38 @@ if "messages" not in st.session_state:
     st.session_state.messages = []
 if "first_message_sent" not in st.session_state:
     st.session_state.first_message_sent = False
-if "uploaded_image" not in st.session_state:
-    st.session_state.uploaded_image = None
+if "image_data" not in st.session_state:
+    st.session_state.image_data = None
 
 if not st.session_state.first_message_sent:
-    st.markdown(
-        "<h1 style='text-align:center; color:#4a4a4a; margin-top:100px;'>How can Cohere help you today?</h1>",
-        unsafe_allow_html=True
-    )
-
+    st.markdown("<h1 style='text-align: center; color: #4a4a4a; margin-top: 100px;'>How can Cohere help you today?</h1>", unsafe_allow_html=True)
 for msg in st.session_state.messages:
-    avatar = USER_PFP if msg["role"] == "user" else AI_PFP
-    with st.chat_message(msg["role"], avatar=avatar):
-        st.markdown(msg["content"])
-
-col1, col2 = st.columns([1, 8])
-with col1:
-    if selected_model.startswith("c4ai-aya-vision"):
-        img = st.file_uploader(label="📷", key="uploader", type=["png","jpg","jpeg"], accept_multiple_files=False)
-        if img is not None:
-            st.session_state.uploaded_image = img
-            st.image(img, width=80)
-    else:
-        st.write("")
-with col2:
-    prompt = st.chat_input("Message...")
-
-if prompt or st.session_state.uploaded_image:
+    with st.chat_message(msg["role"], avatar=USER_PFP if msg["role"]=="user" else AI_PFP):
+        content = msg["content"]
+        if isinstance(content, list):
+            for item in content:
+                if item.get("type")=="text":
+                    st.markdown(item.get("text"))
+                if item.get("type")=="image_url":
+                    st.image(item.get("image_url").get("url"))
+        else:
+            st.markdown(content)
+if prompt := st.chat_input("Message..."):
     if not api_key:
         st.error("API key required")
         st.stop()
-    user_items = []
-    if prompt:
-        st.session_state.first_message_sent = True
-        st.session_state.messages.append({"role": "user", "content": prompt})
-        with st.chat_message("user", avatar=USER_PFP):
-            st.markdown(prompt)
-        user_items.append({"type": "text", "text": prompt})
-    if st.session_state.uploaded_image:
-        raw = st.session_state.uploaded_image.read()
-        b64 = base64.b64encode(raw).decode("utf-8")
-        url = f"data:image/jpeg;base64,{b64}"
-        user_items.append({"type": "image_url", "image_url": {"url": url}})
-        with st.chat_message("user", avatar=USER_PFP):
-            st.image(raw, width=200)
-        st.session_state.uploaded_image = None
-    try:
-        co = cohere.ClientV2(api_key)
-        response = co.chat(model=selected_model, messages=[{"role":"user","content":user_items}])
-        reply = "".join(getattr(item,'text','') for item in response.message.content)
-        with st.chat_message("assistant", avatar=AI_PFP):
-            st.markdown(reply)
-        st.session_state.messages.append({"role":"assistant","content":reply})
-    except Exception as e:
-        st.error(f"Error: {e}")
+    st.session_state.first_message_sent = True
+    st.session_state.messages.append({"role":"user","content":prompt})
+    with st.chat_message("user", avatar=USER_PFP):
+        st.markdown(prompt)
+    co = cohere.ClientV2(api_key)
+    content = [{"type":"text","text":prompt}]
+    if st.session_state.image_data and selected_model.startswith("c4ai-aya-vision"):
+        data_url = f"data:image/jpeg;base64,{st.session_state.image_data}"
+        content.append({"type":"image_url","image_url":{"url":data_url}})
+    response = co.chat(model=selected_model, messages=[*st.session_state.messages, {"role":"user","content":content}], temperature=0.3)
+    items = response.message.content
+    reply = "".join([getattr(i,'text','') for i in items])
+    with st.chat_message("assistant", avatar=AI_PFP):
+        st.markdown(reply)
+    st.session_state.messages.append({"role":"assistant","content":items})
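For reference, the updated app.py sends a single user turn whose content list mixes a text item with a base64 data-URL image when an Aya Vision model is selected. The sketch below is a minimal standalone version of that call shape, outside Streamlit; the API key, image path, and prompt are placeholders and not part of the commit.

import base64
import cohere

co = cohere.ClientV2("YOUR_API_KEY")  # placeholder key; same ClientV2 entry point as app.py

# Encode a local image the way the sidebar uploader does (bytes -> base64 -> data URL).
with open("example.jpg", "rb") as f:  # placeholder image path
    b64 = base64.b64encode(f.read()).decode("utf-8")

response = co.chat(
    model="c4ai-aya-vision-8b",
    messages=[{
        "role": "user",
        "content": [
            {"type": "text", "text": "Describe this image."},  # placeholder prompt
            {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{b64}"}},
        ],
    }],
    temperature=0.3,
)

# response.message.content is a list of content items; join their text parts,
# mirroring the reply-building line in app.py.
print("".join(getattr(item, "text", "") for item in response.message.content))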