Commit f33e2be
Initial commit: Quantum-API with FastAPI and Streamlit integration
Files changed:
- .gitignore +0 -0
- Dockerfile +0 -0
- README.md +0 -0
- api/__init__.py +0 -0
- api/__pycache__/__init__.cpython-312.pyc +0 -0
- api/endpoints/__pycache__/codelama.cpython-312.pyc +0 -0
- api/endpoints/__pycache__/olama.cpython-312.pyc +0 -0
- api/endpoints/__pycache__/ollama.cpython-312.pyc +0 -0
- api/endpoints/codelama.py +16 -0
- api/endpoints/ollama.py +29 -0
- app/app.py +33 -0
- app/styles/style.css +0 -0
- requirements.txt +7 -0
.gitignore
ADDED (empty file)
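.gitignore is committed empty, yet the compiled __pycache__ binaries listed further down are checked in. If the intent is to keep those artifacts out of the repository, a minimal .gitignore sketch (an assumption, not part of this commit) would be:

    __pycache__/
    *.pyc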
Dockerfile
ADDED (empty file)
README.md
ADDED (empty file)
api/__init__.py
ADDED (empty file)
api/__pycache__/__init__.cpython-312.pyc
ADDED (binary file, 151 Bytes)
api/endpoints/__pycache__/codelama.cpython-312.pyc
ADDED (binary file, 885 Bytes)
api/endpoints/__pycache__/olama.cpython-312.pyc
ADDED (binary file, 1.95 kB)
api/endpoints/__pycache__/ollama.cpython-312.pyc
ADDED (binary file, 1.47 kB)
api/endpoints/codelama.py
ADDED
@@ -0,0 +1,16 @@
+from fastapi import FastAPI
+import subprocess  # Import subprocess module to run system commands
+import ollama
+
+app = FastAPI()
+
+@app.get("/")
+def read_root():
+    return {"message": "Welcome to the Quantum-API/codelama"}
+
+# Add other endpoints as needed for interacting with ollama
+@app.get("/run-codelama")
+def run_codelama():
+    # You can call ollama commands here or use subprocess
+    result = subprocess.run(["ollama", "run", "codellama:latest"], capture_output=True, text=True)
+    return {"result": result.stdout}
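A note on the committed endpoint: ollama run codellama:latest is invoked with no prompt, so it has nothing to generate from and, depending on how stdin is attached, may block waiting for interactive input; the import ollama line is also unused in this file. A minimal non-blocking sketch, assuming the Ollama CLI is on PATH, the codellama model has been pulled, and a prompt query parameter is acceptable (the parameter is an assumption, not part of the commit):

    from fastapi import FastAPI
    import subprocess

    app = FastAPI()

    @app.get("/run-codelama")
    def run_codelama(prompt: str = "Write a hello world function in Python"):
        # Pass the prompt as a CLI argument and bound the call so the worker cannot hang
        result = subprocess.run(
            ["ollama", "run", "codellama:latest", prompt],
            capture_output=True,
            text=True,
            timeout=120,
        )
        return {"result": result.stdout, "stderr": result.stderr}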
api/endpoints/ollama.py
ADDED
@@ -0,0 +1,29 @@
+from fastapi import FastAPI, APIRouter
+from pydantic import BaseModel
+import ollama  # Make sure Ollama is installed and available
+
+app = FastAPI()
+
+# Define request model for the input data
+class UserInput(BaseModel):
+    question: str
+
+# Function to generate responses using Ollama
+def get_ollama_response(user_input: str) -> str:
+    try:
+        # Run Ollama model for generating a response
+        response = ollama.chat(model="llama", messages=[{"role": "user", "content": user_input}])
+        return response['text']  # Ensure you're extracting the response text from Ollama's response
+    except Exception as e:
+        return f"Error processing request: {str(e)}"
+
+# Create an API router
+router = APIRouter()
+
+@router.post("/ollama-response")
+async def ollama_response(user_input: UserInput):
+    response = get_ollama_response(user_input.question)
+    return {"response": response}
+
+# Include router into the FastAPI app
+app.include_router(router)
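Two details in the committed handler are worth flagging: the ollama Python client returns the reply under the message's content field rather than a top-level 'text' key, and model="llama" has to match a model that has actually been pulled locally. A hedged sketch of the helper with those adjustments (the llama3 model name is an assumption):

    import ollama

    def get_ollama_response(user_input: str) -> str:
        try:
            response = ollama.chat(
                model="llama3",  # assumption: use whichever model has been pulled on the host
                messages=[{"role": "user", "content": user_input}],
            )
            # The reply text is nested under the returned message
            return response["message"]["content"]
        except Exception as e:
            return f"Error processing request: {e}"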
app/app.py
ADDED
@@ -0,0 +1,33 @@
+import streamlit as st
+import requests
+
+# URL of the FastAPI backend endpoint
+API_URL_OLAMA = "http://localhost:7860/ollama-response"
+API_URL_CODELAMA = "http://localhost:7860/run-codelama"
+
+def main():
+    st.title("Quantum-API Chat Interface with Olama and CodeLlama")
+
+    user_input = st.text_input("Ask a question:")
+
+    if user_input:
+        if st.button("Chat with Olama"):
+            # Make a POST request to the FastAPI server for Olama
+            response = requests.post(API_URL_OLAMA, json={"question": user_input})
+            if response.status_code == 200:
+                # Display the response from Olama
+                st.write(f"Olama says: {response.json()['response']}")
+            else:
+                st.error("Error contacting Olama API.")
+
+        if st.button("Run Code with CodeLlama"):
+            # Make a GET request to the FastAPI server for CodeLlama
+            response = requests.get(API_URL_CODELAMA)
+            if response.status_code == 200:
+                # Display the response from CodeLlama
+                st.write(f"CodeLlama result: {response.json()['result']}")
+            else:
+                st.error("Error contacting CodeLlama API.")
+
+if __name__ == "__main__":
+    main()
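app/app.py points both endpoints at port 7860, but codelama.py and ollama.py each build their own FastAPI() instance, so only one of them can be served on that port at a time. One way to reconcile this is a combined entry point; the api/main.py sketch below is hypothetical and not part of this commit:

    # api/main.py (hypothetical): a single app exposing both endpoints
    from fastapi import FastAPI
    from api.endpoints.ollama import router as ollama_router  # provides POST /ollama-response

    app = FastAPI(title="Quantum-API")
    app.include_router(ollama_router)
    # /run-codelama would need to move onto an APIRouter in codelama.py
    # and be included here the same way.

With that layout, the backend could be started with uvicorn api.main:app --host 0.0.0.0 --port 7860 and the UI with streamlit run app/app.py, assuming the two processes run side by side.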
app/styles/style.css
ADDED (empty file)
requirements.txt
ADDED
@@ -0,0 +1,7 @@
+fastapi
+uvicorn
+streamlit
+olama
+codelama
+pennyLane
+qiskit
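Two of these names look like typos rather than installable packages: the Ollama Python client is published as ollama, and codelama has no obvious PyPI counterpart (the CodeLlama model is normally pulled through Ollama itself rather than pip). requests, which app/app.py imports, is also missing. A corrected requirements.txt sketch under those assumptions, with pennyLane normalized to its usual lowercase spelling:

    fastapi
    uvicorn
    streamlit
    requests
    ollama
    pennylane
    qiskit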