Upload app.py
app.py
CHANGED
@@ -1,162 +1,105 @@
-from fastapi import FastAPI, HTTPException, Depends, Header
 from fastapi.responses import StreamingResponse
 from pydantic import BaseModel
-from typing import List
-import ...
-import g4f
-from g4f.Provider import OpenaiAccount, RetryProvider

 app = FastAPI()

-# ...
-MODELS = {
-    # OpenAI
-    "openai": [
-        ...
-    ],
-
-    # Anthropic
-    "anthropic": [
-        "claude-3-opus", "claude-3-sonnet", "claude-3-haiku",
-        "claude-3.5", "claude-3.7-sonnet", "claude-2.1"
-    ],
-
-    # Google
-    "google": [
-        "gemini-pro", "gemini-1.5-pro", "gemini-1.5-flash",
-        "gemini-2.5-pro-exp-03-25"
-    ],
-
-    # Meta
-    "meta": [
-        "llama-3-70b", "llama-3-8b", "llama-3.1-405b",
-        "llama-2-70b", "llama-2-13b", "llama-2-7b"
-    ],
-
-    # XAI (Grok)
-    "xai": [
-        "grok-1", "grok-1.5", "grok-2", "grok-3"
-    ],
-
-    # Other
-    "other": [
-        "o1", "o3-mini", "mistral-7b", "mixtral-8x7b",
-        "command-r-plus", "deepseek-chat", "code-llama-70b"
-    ],
-
-    # Image Models
-    "image": [
-        "dall-e-3", "stable-diffusion-xl",
-        "flux", "flux-pro", "playground-v2.5"
-    ]
-}
-
-# Flattened list for API endpoint
-ALL_MODELS = [
-    *MODELS["openai"],
-    *MODELS["anthropic"],
-    *MODELS["google"],
-    *MODELS["meta"],
-    *MODELS["xai"],
-    *MODELS["other"]
 ]

-# ...
 class Message(BaseModel):
-    role: str
     content: str

 class ChatRequest(BaseModel):
     model: str
     messages: List[Message]
-    temperature: float = 0.7
-    top_p: float = 1.0
-    max_tokens: int = 1024
-    ...

-class ModelListResponse(BaseModel):
-    openai: List[str]
-    anthropic: List[str]
-    google: List[str]
-    meta: List[str]
-    xai: List[str]
-    other: List[str]
-    image: List[str]

-
-@app.get("/v1/models", response_model=ModelListResponse)
 async def get_models():
-    """..."""
-    return ...

-@app.post("/v1/chat/completions")
-async def chat_completion(request: ChatRequest):
-    """Handle chat completion requests"""
-    if request.model not in ALL_MODELS:
-        raise HTTPException(
-            status_code=400,
-            detail=f"Invalid model. Available: {ALL_MODELS}"
-        )
-
-    messages = [{"role": msg.role, "content": msg.content} for msg in request.messages]
-
     try:
-        if ...:
-            response = g4f.ChatCompletion.create(
-                ...
-                max_tokens=request.max_tokens,
-                provider=RetryProvider([g4f.Provider.BackendApi])
             )

-            ...
-        else:
-            response = g4f.ChatCompletion.create(
-                ...
-                temperature=request.temperature,
-                top_p=request.top_p,
-                provider=RetryProvider([g4f.Provider.BackendApi])
-            )
-        return {"content": str(response)}
-
-    except Exception as e:
-        raise HTTPException(status_code=500, detail=str(e))

-
-@app.post("/v1/images/generations")
-async def generate_image(prompt: str, model: str = "dall-e-3"):
-    if model not in MODELS["image"]:
-        raise HTTPException(
-            status_code=400,
-            detail=f"Invalid image model. Available: {MODELS['image']}"
-        )
-
-    try:
-        if model in ["flux", "flux-pro"]:
-            image_data = g4f.ImageGeneration.create(
-                prompt=prompt,
-                model=model,
-                provider=g4f.Provider.BackendApi
-            )
-            return {"url": f"data:image/png;base64,{image_data.decode('utf-8')}"}
         else:
-            # ...
-            ...
             )
     except Exception as e:
         raise HTTPException(status_code=500, detail=str(e))

 if __name__ == "__main__":
     import uvicorn
-    uvicorn.run(app, host="0.0.0.0", port=...)
+from fastapi import FastAPI, HTTPException, Depends, Header, Request
 from fastapi.responses import StreamingResponse
 from pydantic import BaseModel
+from typing import List
+import json  # needed for json.dumps in the streaming handler below
+from g4f import ChatCompletion

 app = FastAPI()

+# List of available models
+models = [
+    "gpt-4o", "gpt-4o-mini", "gpt-4",
+    "gpt-4-turbo", "gpt-3.5-turbo",
+    "claude-3.7-sonnet", "o3-mini", "o1", "grok-3", "gemini-2.5-pro-exp-03-25", "claude-3.5",
+    "llama-3.1-405b"
 ]

+# Request model
 class Message(BaseModel):
+    role: str
     content: str

 class ChatRequest(BaseModel):
     model: str
     messages: List[Message]
+    streaming: bool = True
+
+class ChatResponse(BaseModel):
+    role: str
+    content: str

+# Dependency to check API key
+async def verify_api_key(x_api_key: str = Header(...)):
+    if x_api_key != "fb207532285886a5568298b4b4e61124":
+        raise HTTPException(status_code=403, detail="Invalid API key")

+@app.get("/v1/models", tags=["Models"])
 async def get_models():
+    """Endpoint to get the list of available models."""
+    return {"models": models}
+
+@app.post("/v1/chat/completions", tags=["Chat Completion"])
+async def chat_completion(
+    chat_request: ChatRequest,
+    api_key: str = Depends(verify_api_key)
+):
+    """
+    Handle chat completion requests with optional streaming.
+    Removed rate limiting for unrestricted access.
+    """
+    # Validate model
+    if chat_request.model not in models:
+        raise HTTPException(status_code=400, detail="Invalid model selected.")
+
+    # Check if messages are provided
+    if not chat_request.messages:
+        raise HTTPException(status_code=400, detail="Messages cannot be empty.")
+
+    # Convert messages to the format expected by ChatCompletion
+    formatted_messages = [{"role": msg.role, "content": msg.content} for msg in chat_request.messages]

     try:
+        if chat_request.streaming:
+            # Stream the response
+            def event_stream():
+                response = ChatCompletion.create(
+                    model=chat_request.model,
+                    messages=formatted_messages,
+                    stream=True
                 )

+                for chunk in response:
+                    if isinstance(chunk, dict) and 'choices' in chunk:
+                        for choice in chunk['choices']:
+                            if 'delta' in choice and 'content' in choice['delta']:
+                                yield f"data: {json.dumps({'content': choice['delta']['content']})}\n\n"
+                            elif 'message' in choice:
+                                yield f"data: {json.dumps({'content': choice['message']['content']})}\n\n"
+                    else:
+                        yield f"data: {json.dumps({'content': str(chunk)})}\n\n"

+            return StreamingResponse(event_stream(), media_type="text/event-stream")
         else:
+            # Non-streaming response
+            response = ChatCompletion.create(
+                model=chat_request.model,
+                messages=formatted_messages,
+                stream=False
             )
+
+            if isinstance(response, str):
+                return ChatResponse(role="assistant", content=response)
+            elif isinstance(response, dict) and 'choices' in response:
+                return ChatResponse(
+                    role="assistant",
+                    content=response['choices'][0]['message']['content']
+                )
+            else:
+                raise HTTPException(status_code=500, detail="Unexpected response structure.")
+
     except Exception as e:
         raise HTTPException(status_code=500, detail=str(e))

 if __name__ == "__main__":
     import uvicorn
+    uvicorn.run(app, host="0.0.0.0", port=7860)
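
For reference, a minimal client sketch for the updated API. Assumptions: the Space is reachable at http://localhost:7860 (the new port above), the requests package is installed, and the x-api-key value is the one hard-coded in verify_api_key.

import requests

BASE_URL = "http://localhost:7860"  # assumed local address; adjust for the deployed Space
HEADERS = {"x-api-key": "fb207532285886a5568298b4b4e61124"}  # key checked by verify_api_key

# List the available models
print(requests.get(f"{BASE_URL}/v1/models", headers=HEADERS).json())

# Stream a chat completion; ChatRequest.streaming defaults to True and the
# server answers with "data: {...}" server-sent-event frames.
payload = {
    "model": "gpt-4o",
    "messages": [{"role": "user", "content": "Say hello"}],
    "streaming": True,
}
with requests.post(f"{BASE_URL}/v1/chat/completions",
                   headers=HEADERS, json=payload, stream=True) as resp:
    for line in resp.iter_lines():
        if line.startswith(b"data: "):
            print(line[len(b"data: "):].decode())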