Spestly committed on
Commit
f68537f
·
verified ·
1 Parent(s): f7c6fab

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -85
app.py CHANGED
@@ -1,6 +1,7 @@
1
  import streamlit as st
2
  import cohere
3
  import os
 
4
 
5
  st.set_page_config(page_title="Cohere Chat", layout="wide")
6
 
@@ -13,71 +14,13 @@ if not os.path.exists(AI_PFP) or not os.path.exists(USER_PFP):
13
  st.stop()
14
 
15
  model_info = {
16
- "c4ai-aya-expanse-8b": {
17
- "description": "Aya Expanse is a highly performant 8B multilingual model, designed to rival monolingual performance through innovations in instruction tuning with data arbitrage, preference training, and model merging. Serves 23 languages.",
18
- "context": "4K",
19
- "output": "4K"
20
- },
21
- "c4ai-aya-expanse-32b": {
22
- "description": "Aya Expanse is a highly performant 32B multilingual model, designed to rival monolingual performance through innovations in instruction tuning with data arbitrage, preference training, and model merging. Serves 23 languages.",
23
- "context": "128K",
24
- "output": "4K"
25
- },
26
- "command-a-03-2025": {
27
- "description": "Command A is our most performant model to date, excelling at tool use, agents, retrieval augmented generation (RAG), and multilingual use cases. Command A has a context length of 256K, only requires two GPUs to run, and has 150% higher throughput compared to Command R+ 08-2024.",
28
- "context": "256K",
29
- "output": "8K"
30
- },
31
- "command-r7b-12-2024": {
32
- "description": "command-r7b-12-2024 is a small, fast update delivered in December 2024. It excels at RAG, tool use, agents, and similar tasks requiring complex reasoning and multiple steps.",
33
- "context": "128K",
34
- "output": "4K"
35
- },
36
- "command-r-plus-04-2024": {
37
- "description": "Command R+ is an instruction-following conversational model that performs language tasks at a higher quality, more reliably, and with a longer context than previous models. It is best suited for complex RAG workflows and multi-step tool use.",
38
- "context": "128K",
39
- "output": "4K"
40
- },
41
- "command-r-plus": {
42
- "description": "command-r-plus is an alias for command-r-plus-04-2024, so if you use command-r-plus in the API, that's the model you're pointing to.",
43
- "context": "128K",
44
- "output": "4K"
45
- },
46
- "command-r-08-2024": {
47
- "description": "Updated Command R model from August 2024.",
48
- "context": "128K",
49
- "output": "4K"
50
- },
51
- "command-r-03-2024": {
52
- "description": "Instruction-following model for code generation, RAG, and agents.",
53
- "context": "128K",
54
- "output": "4K"
55
- },
56
- "command-r": {
57
- "description": "Alias for command-r-03-2024.",
58
- "context": "128K",
59
- "output": "4K"
60
- },
61
- "command": {
62
- "description": "Conversational model with long context capabilities.",
63
- "context": "4K",
64
- "output": "4K"
65
- },
66
- "command-nightly": {
67
- "description": "Experimental nightly build (not for production).",
68
- "context": "128K",
69
- "output": "4K"
70
- },
71
- "command-light": {
72
- "description": "Faster lightweight version of command.",
73
- "context": "4K",
74
- "output": "4K"
75
- },
76
- "command-light-nightly": {
77
- "description": "Experimental nightly build of command-light.",
78
- "context": "128K",
79
- "output": "4K"
80
- },
81
  }
82
 
83
  with st.sidebar:
@@ -86,12 +29,10 @@ with st.sidebar:
86
  st.title("Settings")
87
  api_key = st.text_input("Cohere API Key", type="password")
88
  selected_model = st.selectbox("Model", options=list(model_info.keys()))
89
-
90
  if st.button("Clear Chat"):
91
  st.session_state.messages = []
92
  st.session_state.first_message_sent = False
93
  st.rerun()
94
-
95
  st.divider()
96
  st.image(AI_PFP, width=60)
97
  st.subheader(selected_model)
@@ -102,44 +43,49 @@ with st.sidebar:
102
 
103
  if "messages" not in st.session_state:
104
  st.session_state.messages = []
105
-
106
  if "first_message_sent" not in st.session_state:
107
  st.session_state.first_message_sent = False
108
 
109
  if not st.session_state.first_message_sent:
110
  st.markdown("<h1 style='text-align: center; color: #4a4a4a; margin-top: 100px;'>How can Cohere help you today?</h1>", unsafe_allow_html=True)
111
-
112
  for msg in st.session_state.messages:
113
  with st.chat_message(msg["role"], avatar=USER_PFP if msg["role"] == "user" else AI_PFP):
114
  st.markdown(msg["content"])
115
 
116
- if prompt := st.chat_input("Message..."):
 
 
 
 
 
 
 
 
 
117
  if not api_key:
118
  st.error("API key required")
119
  st.stop()
120
-
121
  st.session_state.first_message_sent = True
122
-
123
  st.session_state.messages.append({"role": "user", "content": prompt})
124
  with st.chat_message("user", avatar=USER_PFP):
125
  st.markdown(prompt)
126
 
127
  try:
128
  co = cohere.ClientV2(api_key)
 
 
 
 
 
 
 
 
 
 
 
 
129
  with st.chat_message("assistant", avatar=AI_PFP):
130
- response = co.chat(
131
- model=selected_model,
132
- messages=st.session_state.messages
133
- )
134
- if hasattr(response, "message") and hasattr(response.message, "content"):
135
- content_items = response.message.content
136
- reply = "".join(getattr(item, 'text', '') for item in content_items)
137
- else:
138
- st.write(response)
139
- reply = "❗️Couldn't extract reply from the Cohere response."
140
  st.markdown(reply)
141
-
142
  st.session_state.messages.append({"role": "assistant", "content": reply})
143
-
144
  except Exception as e:
145
- st.error(f"Error: {str(e)}")
 
1
  import streamlit as st
2
  import cohere
3
  import os
4
+ import base64
5
 
6
  st.set_page_config(page_title="Cohere Chat", layout="wide")
7
 
 
14
  st.stop()
15
 
16
  model_info = {
17
+ "c4ai-aya-expanse-8b": {"description": "Aya Expanse is a highly performant 8B multilingual model, designed to rival monolingual performance through innovations in instruction tuning with data arbitrage, preference training, and model merging. Serves 23 languages.", "context": "4K", "output": "4K"},
18
+ "c4ai-aya-expanse-32b": {"description": "Aya Expanse is a highly performant 32B multilingual model, designed to rival monolingual performance through innovations in instruction tuning with data arbitrage, preference training, and model merging. Serves 23 languages.", "context": "128K", "output": "4K"},
19
+ "c4ai-aya-vision-8b": {"description": "Aya Vision is a state-of-the-art multimodal model excelling at a variety of critical benchmarks for language, text, and image capabilities. This 8 billion parameter variant is focused on low latency and best-in-class performance.", "context": "16K", "output": "4K"},
20
+ "c4ai-aya-vision-32b": {"description": "Aya Vision is a state-of-the-art multimodal model excelling at a variety of critical benchmarks for language, text, and image capabilities. Serves 23 languages. This 32 billion parameter variant is focused on state-of-art multilingual performance.", "context": "16k", "output": "4K"},
21
+ "command-a-03-2025": {"description": "Command A is our most performant model to date, excelling at tool use, agents, retrieval augmented generation (RAG), and multilingual use cases. Command A has a context length of 256K, only requires two GPUs to run, and has 150% higher throughput compared to Command R+ 08-2024.", "context": "256K", "output": "8K"},
22
+ "command-r7b-12-2024": {"description": "command-r7b-12-2024 is a small, fast update delivered in December 2024. It excels at RAG, tool use, agents, and similar tasks requiring complex reasoning and multiple steps.", "context": "128K", "output": "4K"},
23
+ "command-r-plus-04-2024": {"description": "Command R+ is an instruction-following conversational model that performs language tasks at a higher quality, more reliably, and with a longer context than previous models. It is best suited for complex RAG workflows and multi-step tool use.", "context": "128K", "output": "4K"},
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
  }
25
 
26
  with st.sidebar:
 
29
  st.title("Settings")
30
  api_key = st.text_input("Cohere API Key", type="password")
31
  selected_model = st.selectbox("Model", options=list(model_info.keys()))
 
32
  if st.button("Clear Chat"):
33
  st.session_state.messages = []
34
  st.session_state.first_message_sent = False
35
  st.rerun()
 
36
  st.divider()
37
  st.image(AI_PFP, width=60)
38
  st.subheader(selected_model)
 
43
 
44
  if "messages" not in st.session_state:
45
  st.session_state.messages = []
 
46
  if "first_message_sent" not in st.session_state:
47
  st.session_state.first_message_sent = False
48
 
49
  if not st.session_state.first_message_sent:
50
  st.markdown("<h1 style='text-align: center; color: #4a4a4a; margin-top: 100px;'>How can Cohere help you today?</h1>", unsafe_allow_html=True)
 
51
  for msg in st.session_state.messages:
52
  with st.chat_message(msg["role"], avatar=USER_PFP if msg["role"] == "user" else AI_PFP):
53
  st.markdown(msg["content"])
54
 
55
+ col1, col2 = st.columns([1, 4])
56
+ with col1:
57
+ if selected_model.startswith("c4ai-aya-vision"):
58
+ uploaded = st.file_uploader("Upload image", type=["png", "jpg", "jpeg"], key="image_uploader")
59
+ else:
60
+ uploaded = None
61
+ with col2:
62
+ prompt = st.chat_input("Message...")
63
+
64
+ if prompt:
65
  if not api_key:
66
  st.error("API key required")
67
  st.stop()
 
68
  st.session_state.first_message_sent = True
 
69
  st.session_state.messages.append({"role": "user", "content": prompt})
70
  with st.chat_message("user", avatar=USER_PFP):
71
  st.markdown(prompt)
72
 
73
  try:
74
  co = cohere.ClientV2(api_key)
75
+ user_message = [{"type": "text", "text": prompt}]
76
+ if uploaded:
77
+ raw = uploaded.read()
78
+ b64 = base64.b64encode(raw).decode("utf-8")
79
+ data_url = f"data:image/jpeg;base64,{b64}"
80
+ user_message.append({"type": "image_url", "image_url": {"url": data_url}})
81
+ response = co.chat(
82
+ model=selected_model,
83
+ messages=[{"role": "user", "content": user_message}]
84
+ )
85
+ content_items = response.message.content
86
+ reply = "".join(getattr(item, 'text', '') for item in content_items)
87
  with st.chat_message("assistant", avatar=AI_PFP):
 
 
 
 
 
 
 
 
 
 
88
  st.markdown(reply)
 
89
  st.session_state.messages.append({"role": "assistant", "content": reply})
 
90
  except Exception as e:
91
+ st.error(f"Error: {str(e)}")